source
stringlengths
3
86
python
stringlengths
75
1.04M
_render.py
from MA import * from MSV import * from threading import Thread from bokeh.models.tickers import FixedTicker def render(self, render_all=False, ignorable=True): self.widgets.show_spinner(self) self.reset_runtimes() self.reset_cds() if self.xs < 0: self.xs = 0 if self.ys < 0: self.ys = 0 if self.xe < 0: self.xe = 0 if self.xe > self.pack.unpacked_size_single_strand: self.xe = self.pack.unpacked_size_single_strand if self.ye < 0: self.ye = 0 if self.ye > self.pack.unpacked_size_single_strand: self.ye = self.pack.unpacked_size_single_strand self.w = int(self.xe - self.xs) self.h = int(self.ye - self.ys) s = max(min(self.xs - self.w, self.ys - self.h), 0) e = min(max(self.xe + self.w, self.ye + self.h), self.pack.unpacked_size_single_strand) xs = [s, e] ys = [s, e] for p in [*self.pack.contigStarts(), self.pack.unpacked_size_single_strand]: if self.xs <= p and p <= self.xe: xs.extend([float("NaN"), self.xs, self.xe]) ys.extend([float("NaN"), p, p]) if self.ys <= p and p <= self.ye: xs.extend([float("NaN"), p, p]) ys.extend([float("NaN"), self.ys, self.ye]) # plot diagonal; we need s and e since too large lines sometimes do not render... self.main_plot.diagonal_line.data = {"xs":xs, "ys":ys} def blocking_task(): with self.cv2: my_num_in_queue = self.num_renders_queue self.num_renders_queue += 1 with self.cv: if my_num_in_queue == 0: if not self.widgets.run_id_dropdown.value is None: with self.measure("get_call_overview_area"): if self.do_overview_cache(): num_ele = self.get_max_num_ele() + 1 else: num_ele = get_call_overview_area(self.db_conn, self.pack, self.get_run_id(), self.get_min_score(), int(self.xs - self.w), int(self.ys - self.h), self.w*3, self.h*3, self.get_max_num_ele() + 1) if num_ele > self.get_max_num_ele() and not render_all: self.render_overview() else: self.render_calls(render_all) def callback(): self.widgets.hide_spinner(self) self.do_callback(callback) with self.cv2: self.num_renders_queue -= 1 thread = Thread(target=blocking_task) thread.start()
copyutil.py
# cython: profile=True # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ConfigParser import csv import datetime import json import glob import multiprocessing as mp import os import platform import random import re import struct import sys import threading import time import traceback from bisect import bisect_right from calendar import timegm from collections import defaultdict, namedtuple from decimal import Decimal from Queue import Queue from random import randint from StringIO import StringIO from select import select from uuid import UUID from util import profile_on, profile_off from cassandra.cluster import Cluster, DefaultConnection from cassandra.cqltypes import ReversedType, UserType from cassandra.metadata import protect_name, protect_names, protect_value from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, FallthroughRetryPolicy from cassandra.query import BatchStatement, BatchType, SimpleStatement, tuple_factory from cassandra.util import Date, Time from cql3handling import CqlRuleSet from displaying import NO_COLOR_MAP from formatting import format_value_default, CqlType, DateTimeFormat, EMPTY, get_formatter from sslhandling import ssl_settings PROFILE_ON = False STRACE_ON = False DEBUG = 
False # This may be set to True when initializing the task IS_LINUX = platform.system() == 'Linux' IS_WINDOWS = platform.system() == 'Windows' CopyOptions = namedtuple('CopyOptions', 'copy dialect unrecognized') def safe_normpath(fname): """ :return the normalized path but only if there is a filename, we don't want to convert an empty string (which means no file name) to a dot. Also expand any user variables such as ~ to the full path """ return os.path.normpath(os.path.expanduser(fname)) if fname else fname def printdebugmsg(msg): if DEBUG: printmsg(msg) def printmsg(msg, eol='\n', encoding='utf8'): sys.stdout.write(msg.encode(encoding)) sys.stdout.write(eol) sys.stdout.flush() class OneWayChannel(object): """ A one way pipe protected by two process level locks, one for reading and one for writing. """ def __init__(self): self.reader, self.writer = mp.Pipe(duplex=False) self.rlock = mp.Lock() self.wlock = mp.Lock() self.feeding_thread = None self.pending_messages = None def init_feeding_thread(self): """ Initialize a thread that fetches messages from a queue and sends them to the channel. We initialize the feeding thread lazily to avoid the fork(), since the channels are passed to child processes. 
""" if self.feeding_thread is not None or self.pending_messages is not None: raise RuntimeError("Feeding thread already initialized") self.pending_messages = Queue() def feed(): send = self._send pending_messages = self.pending_messages while True: try: msg = pending_messages.get() send(msg) except Exception, e: printmsg('%s: %s' % (e.__class__.__name__, e.message)) feeding_thread = threading.Thread(target=feed) feeding_thread.setDaemon(True) feeding_thread.start() self.feeding_thread = feeding_thread def send(self, obj): if self.feeding_thread is None: self.init_feeding_thread() self.pending_messages.put(obj) def _send(self, obj): with self.wlock: self.writer.send(obj) def num_pending(self): return self.pending_messages.qsize() if self.pending_messages else 0 def recv(self): with self.rlock: return self.reader.recv() def close(self): self.reader.close() self.writer.close() class OneWayChannels(object): """ A group of one way channels. """ def __init__(self, num_channels): self.channels = [OneWayChannel() for _ in xrange(num_channels)] self._readers = [ch.reader for ch in self.channels] self._rlocks = [ch.rlock for ch in self.channels] self._rlocks_by_readers = dict([(ch.reader, ch.rlock) for ch in self.channels]) self.num_channels = num_channels self.recv = self.recv_select if IS_LINUX else self.recv_polling def recv_select(self, timeout): """ Implementation of the recv method for Linux, where select is available. Receive an object from all pipes that are ready for reading without blocking. """ readable, _, _ = select(self._readers, [], [], timeout) for r in readable: with self._rlocks_by_readers[r]: try: yield r.recv() except EOFError: continue def recv_polling(self, timeout): """ Implementation of the recv method for platforms where select() is not available for pipes. We poll on all of the readers with a very small timeout. We stop when the timeout specified has been received but we may exceed it since we check all processes during each sweep. 
""" start = time.time() while True: for i, r in enumerate(self._readers): with self._rlocks[i]: if r.poll(0.000000001): try: yield r.recv() except EOFError: continue if time.time() - start > timeout: break def close(self): for ch in self.channels: try: ch.close() except: pass class CopyTask(object): """ A base class for ImportTask and ExportTask """ def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, direction): self.shell = shell self.ks = ks self.table = table self.table_meta = self.shell.get_table_meta(self.ks, self.table) self.host = shell.conn.get_control_connection_host() self.fname = safe_normpath(fname) self.protocol_version = protocol_version self.config_file = config_file # if cqlsh is invoked with --debug then set the global debug flag to True if shell.debug: global DEBUG DEBUG = True # do not display messages when exporting to STDOUT unless --debug is set self.printmsg = printmsg if self.fname is not None or direction == 'from' or DEBUG \ else lambda _, eol='\n': None self.options = self.parse_options(opts, direction) self.num_processes = self.options.copy['numprocesses'] self.encoding = self.options.copy['encoding'] self.printmsg('Using %d child processes' % (self.num_processes,)) if direction == 'from': self.num_processes += 1 # add the feeder process self.processes = [] self.inmsg = OneWayChannels(self.num_processes) self.outmsg = OneWayChannels(self.num_processes) self.columns = CopyTask.get_columns(shell, ks, table, columns) self.time_start = time.time() def maybe_read_config_file(self, opts, direction): """ Read optional sections from a configuration file that was specified in the command options or from the default cqlshrc configuration file if none was specified. 
""" config_file = opts.pop('configfile', '') if not config_file: config_file = self.config_file if not os.path.isfile(config_file): return opts configs = ConfigParser.RawConfigParser() configs.readfp(open(config_file)) ret = dict() config_sections = list(['copy', 'copy-%s' % (direction,), 'copy:%s.%s' % (self.ks, self.table), 'copy-%s:%s.%s' % (direction, self.ks, self.table)]) for section in config_sections: if configs.has_section(section): options = dict(configs.items(section)) self.printmsg("Reading options from %s:[%s]: %s" % (config_file, section, options)) ret.update(options) # Update this last so the command line options take precedence over the configuration file options if opts: self.printmsg("Reading options from the command line: %s" % (opts,)) ret.update(opts) if self.shell.debug: # this is important for testing, do not remove self.printmsg("Using options: '%s'" % (ret,)) return ret @staticmethod def clean_options(opts): """ Convert all option values to valid string literals unless they are path names """ return dict([(k, v.decode('string_escape') if k not in ['errfile', 'ratefile'] else v) for k, v, in opts.iteritems()]) def parse_options(self, opts, direction): """ Parse options for import (COPY FROM) and export (COPY TO) operations. Extract from opts csv and dialect options. :return: 3 dictionaries: the csv options, the dialect options, any unrecognized options. 
""" shell = self.shell opts = self.clean_options(self.maybe_read_config_file(opts, direction)) dialect_options = dict() dialect_options['quotechar'] = opts.pop('quote', '"') dialect_options['escapechar'] = opts.pop('escape', '\\') dialect_options['delimiter'] = opts.pop('delimiter', ',') if dialect_options['quotechar'] == dialect_options['escapechar']: dialect_options['doublequote'] = True del dialect_options['escapechar'] else: dialect_options['doublequote'] = False copy_options = dict() copy_options['nullval'] = opts.pop('null', '') copy_options['header'] = bool(opts.pop('header', '').lower() == 'true') copy_options['encoding'] = opts.pop('encoding', 'utf8') copy_options['maxrequests'] = int(opts.pop('maxrequests', 6)) copy_options['pagesize'] = int(opts.pop('pagesize', 1000)) # by default the page timeout is 10 seconds per 1000 entries # in the page size or 10 seconds if pagesize is smaller copy_options['pagetimeout'] = int(opts.pop('pagetimeout', max(10, 10 * (copy_options['pagesize'] / 1000)))) copy_options['maxattempts'] = int(opts.pop('maxattempts', 5)) copy_options['dtformats'] = DateTimeFormat(opts.pop('datetimeformat', shell.display_timestamp_format), shell.display_date_format, shell.display_nanotime_format, milliseconds_only=True) copy_options['floatprecision'] = int(opts.pop('floatprecision', '5')) copy_options['doubleprecision'] = int(opts.pop('doubleprecision', '12')) copy_options['chunksize'] = int(opts.pop('chunksize', 5000)) copy_options['ingestrate'] = int(opts.pop('ingestrate', 100000)) copy_options['maxbatchsize'] = int(opts.pop('maxbatchsize', 20)) copy_options['minbatchsize'] = int(opts.pop('minbatchsize', 10)) copy_options['reportfrequency'] = float(opts.pop('reportfrequency', 0.25)) copy_options['consistencylevel'] = shell.consistency_level copy_options['decimalsep'] = opts.pop('decimalsep', '.') copy_options['thousandssep'] = opts.pop('thousandssep', '') copy_options['boolstyle'] = [s.strip() for s in opts.pop('boolstyle', 'True, 
False').split(',')] copy_options['numprocesses'] = int(opts.pop('numprocesses', self.get_num_processes(16))) copy_options['begintoken'] = opts.pop('begintoken', '') copy_options['endtoken'] = opts.pop('endtoken', '') copy_options['maxrows'] = int(opts.pop('maxrows', '-1')) copy_options['skiprows'] = int(opts.pop('skiprows', '0')) copy_options['skipcols'] = opts.pop('skipcols', '') copy_options['maxparseerrors'] = int(opts.pop('maxparseerrors', '-1')) copy_options['maxinserterrors'] = int(opts.pop('maxinserterrors', '1000')) copy_options['errfile'] = safe_normpath(opts.pop('errfile', 'import_%s_%s.err' % (self.ks, self.table,))) copy_options['ratefile'] = safe_normpath(opts.pop('ratefile', '')) copy_options['maxoutputsize'] = int(opts.pop('maxoutputsize', '-1')) copy_options['preparedstatements'] = bool(opts.pop('preparedstatements', 'true').lower() == 'true') copy_options['ttl'] = int(opts.pop('ttl', -1)) # Hidden properties, they do not appear in the documentation but can be set in config files # or on the cmd line but w/o completion copy_options['maxinflightmessages'] = int(opts.pop('maxinflightmessages', '512')) copy_options['maxbackoffattempts'] = int(opts.pop('maxbackoffattempts', '12')) copy_options['maxpendingchunks'] = int(opts.pop('maxpendingchunks', '24')) self.check_options(copy_options) return CopyOptions(copy=copy_options, dialect=dialect_options, unrecognized=opts) @staticmethod def check_options(copy_options): """ Check any options that require a sanity check beyond a simple type conversion and if required raise a value error: - boolean styles must be exactly 2, they must be different and they cannot be empty """ bool_styles = copy_options['boolstyle'] if len(bool_styles) != 2 or bool_styles[0] == bool_styles[1] or not bool_styles[0] or not bool_styles[1]: raise ValueError("Invalid boolean styles %s" % copy_options['boolstyle']) @staticmethod def get_num_processes(cap): """ Pick a reasonable number of child processes. 
We need to leave at least one core for the parent or feeder process. """ return max(1, min(cap, CopyTask.get_num_cores() - 1)) @staticmethod def get_num_cores(): """ Return the number of cores if available. If the test environment variable is set, then return the number carried by this variable. This is to test single-core machine more easily. """ try: num_cores_for_testing = os.environ.get('CQLSH_COPY_TEST_NUM_CORES', '') ret = int(num_cores_for_testing) if num_cores_for_testing else mp.cpu_count() printdebugmsg("Detected %d core(s)" % (ret,)) return ret except NotImplementedError: printdebugmsg("Failed to detect number of cores, returning 1") return 1 @staticmethod def describe_interval(seconds): desc = [] for length, unit in ((86400, 'day'), (3600, 'hour'), (60, 'minute')): num = int(seconds) / length if num > 0: desc.append('%d %s' % (num, unit)) if num > 1: desc[-1] += 's' seconds %= length words = '%.03f seconds' % seconds if len(desc) > 1: words = ', '.join(desc) + ', and ' + words elif len(desc) == 1: words = desc[0] + ' and ' + words return words @staticmethod def get_columns(shell, ks, table, columns): """ Return all columns if none were specified or only the columns specified. Possible enhancement: introduce a regex like syntax (^) to allow users to specify all columns except a few. 
""" return shell.get_column_names(ks, table) if not columns else columns def close(self): self.stop_processes() self.inmsg.close() self.outmsg.close() def num_live_processes(self): return sum(1 for p in self.processes if p.is_alive()) @staticmethod def get_pid(): return os.getpid() if hasattr(os, 'getpid') else None @staticmethod def trace_process(pid): if pid and STRACE_ON: os.system("strace -vvvv -c -o strace.{pid}.out -e trace=all -p {pid}&".format(pid=pid)) def start_processes(self): for i, process in enumerate(self.processes): process.start() self.trace_process(process.pid) self.trace_process(self.get_pid()) def stop_processes(self): for process in self.processes: process.terminate() def make_params(self): """ Return a dictionary of parameters to be used by the worker processes. On Windows this dictionary must be pickle-able, therefore we do not pass the parent connection since it may not be pickle-able. Also, on Windows child processes are spawned and not forked, and therefore we don't need to shutdown the parent connection anyway, see CASSANDRA-11749 for more details. 
""" shell = self.shell return dict(ks=self.ks, table=self.table, local_dc=self.host.datacenter, columns=self.columns, options=self.options, connect_timeout=shell.conn.connect_timeout, hostname=self.host.address, port=shell.port, ssl=shell.ssl, auth_provider=shell.auth_provider, parent_cluster=shell.conn if not IS_WINDOWS else None, cql_version=shell.conn.cql_version, config_file=self.config_file, protocol_version=self.protocol_version, debug=shell.debug ) def validate_columns(self): shell = self.shell if not self.columns: shell.printerr("No column specified") return False for c in self.columns: if c not in self.table_meta.columns: shell.printerr('Invalid column name %s' % (c,)) return False return True def update_params(self, params, i): """ Add the communication channels to the parameters to be passed to the worker process: inmsg is the message queue flowing from parent to child process, so outmsg from the parent point of view and, vice-versa, outmsg is the message queue flowing from child to parent, so inmsg from the parent point of view, hence the two are swapped below. 
""" params['inmsg'] = self.outmsg.channels[i] params['outmsg'] = self.inmsg.channels[i] return params class ExportWriter(object): """ A class that writes to one or more csv files, or STDOUT """ def __init__(self, fname, shell, columns, options): self.fname = fname self.shell = shell self.columns = columns self.options = options self.header = options.copy['header'] self.max_output_size = long(options.copy['maxoutputsize']) self.current_dest = None self.num_files = 0 if self.max_output_size > 0: if fname is not None: self.write = self._write_with_split self.num_written = 0 else: shell.printerr("WARNING: maxoutputsize {} ignored when writing to STDOUT".format(self.max_output_size)) self.write = self._write_without_split else: self.write = self._write_without_split def open(self): self.current_dest = self._get_dest(self.fname) if self.current_dest is None: return False if self.header: writer = csv.writer(self.current_dest.output, **self.options.dialect) writer.writerow(self.columns) return True def close(self): self._close_current_dest() def _next_dest(self): self._close_current_dest() self.current_dest = self._get_dest(self.fname + '.%d' % (self.num_files,)) def _get_dest(self, source_name): """ Open the output file if any or else use stdout. Return a namedtuple containing the out and a boolean indicating if the output should be closed. """ CsvDest = namedtuple('CsvDest', 'output close') if self.fname is None: return CsvDest(output=sys.stdout, close=False) else: try: ret = CsvDest(output=open(source_name, 'wb'), close=True) self.num_files += 1 return ret except IOError, e: self.shell.printerr("Can't open %r for writing: %s" % (source_name, e)) return None def _close_current_dest(self): if self.current_dest and self.current_dest.close: self.current_dest.output.close() self.current_dest = None def _write_without_split(self, data, _): """ Write the data to the current destination output. 
""" self.current_dest.output.write(data) def _write_with_split(self, data, num): """ Write the data to the current destination output if we still haven't reached the maximum number of rows. Otherwise split the rows between the current destination and the next. """ if (self.num_written + num) > self.max_output_size: num_remaining = self.max_output_size - self.num_written last_switch = 0 for i, row in enumerate(filter(None, data.split(os.linesep))): if i == num_remaining: self._next_dest() last_switch = i num_remaining += self.max_output_size self.current_dest.output.write(row + '\n') self.num_written = num - last_switch else: self.num_written += num self.current_dest.output.write(data) class ExportTask(CopyTask): """ A class that exports data to .csv by instantiating one or more processes that work in parallel (ExportProcess). """ def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file): CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'to') options = self.options self.begin_token = long(options.copy['begintoken']) if options.copy['begintoken'] else None self.end_token = long(options.copy['endtoken']) if options.copy['endtoken'] else None self.writer = ExportWriter(fname, shell, columns, options) def run(self): """ Initiates the export by starting the worker processes. Then hand over control to export_records. """ shell = self.shell if self.options.unrecognized: shell.printerr('Unrecognized COPY TO options: %s' % ', '.join(self.options.unrecognized.keys())) return if not self.validate_columns(): return 0 ranges = self.get_ranges() if not ranges: return 0 if not self.writer.open(): return 0 columns = u"[" + u", ".join(self.columns) + u"]" self.printmsg(u"\nStarting copy of %s.%s with columns %s." 
% (self.ks, self.table, columns), encoding=self.encoding) params = self.make_params() for i in xrange(self.num_processes): self.processes.append(ExportProcess(self.update_params(params, i))) self.start_processes() try: self.export_records(ranges) finally: self.close() def close(self): CopyTask.close(self) self.writer.close() def get_ranges(self): """ return a queue of tuples, where the first tuple entry is a token range (from, to] and the second entry is a list of hosts that own that range. Each host is responsible for all the tokens in the range (from, to]. The ring information comes from the driver metadata token map, which is built by querying System.PEERS. We only consider replicas that are in the local datacenter. If there are no local replicas we use the cqlsh session host. """ shell = self.shell hostname = self.host.address local_dc = self.host.datacenter ranges = dict() min_token = self.get_min_token() begin_token = self.begin_token end_token = self.end_token def make_range(prev, curr): """ Return the intersection of (prev, curr) and (begin_token, end_token), return None if the intersection is empty """ ret = (prev, curr) if begin_token: if ret[1] < begin_token: return None elif ret[0] < begin_token: ret = (begin_token, ret[1]) if end_token: if ret[0] > end_token: return None elif ret[1] > end_token: ret = (ret[0], end_token) return ret def make_range_data(replicas=None): hosts = [] if replicas: for r in replicas: if r.is_up is not False and r.datacenter == local_dc: hosts.append(r.address) if not hosts: hosts.append(hostname) # fallback to default host if no replicas in current dc return {'hosts': tuple(hosts), 'attempts': 0, 'rows': 0, 'workerno': -1} if begin_token and begin_token < min_token: shell.printerr('Begin token %d must be bigger or equal to min token %d' % (begin_token, min_token)) return ranges if begin_token and end_token and begin_token > end_token: shell.printerr('Begin token %d must be smaller than end token %d' % (begin_token, end_token)) 
return ranges if shell.conn.metadata.token_map is None or min_token is None: ranges[(begin_token, end_token)] = make_range_data() return ranges ring = shell.get_ring(self.ks).items() ring.sort() if not ring: # If the ring is empty we get the entire ring from the host we are currently connected to ranges[(begin_token, end_token)] = make_range_data() elif len(ring) == 1: # If there is only one token we get the entire ring from the replicas for that token ranges[(begin_token, end_token)] = make_range_data(ring[0][1]) else: # else we loop on the ring first_range_data = None previous = None for token, replicas in ring: if not first_range_data: first_range_data = make_range_data(replicas) # we use it at the end when wrapping around if token.value == min_token: continue # avoids looping entire ring current_range = make_range(previous, token.value) if not current_range: continue ranges[current_range] = make_range_data(replicas) previous = token.value # For the last ring interval we query the same replicas that hold the first token in the ring if previous is not None and (not end_token or previous < end_token): ranges[(previous, end_token)] = first_range_data if not ranges: shell.printerr('Found no ranges to query, check begin and end tokens: %s - %s' % (begin_token, end_token)) return ranges def get_min_token(self): """ :return the minimum token, which depends on the partitioner. For partitioners that do not support tokens we return None, in this cases we will not work in parallel, we'll just send all requests to the cqlsh session host. 
""" partitioner = self.shell.conn.metadata.partitioner if partitioner.endswith('RandomPartitioner'): return -1 elif partitioner.endswith('Murmur3Partitioner'): return -(2 ** 63) # Long.MIN_VALUE in Java else: return None def send_work(self, ranges, tokens_to_send): prev_worker_no = ranges[tokens_to_send[0]]['workerno'] i = prev_worker_no + 1 if -1 <= prev_worker_no < (self.num_processes - 1) else 0 for token_range in tokens_to_send: ranges[token_range]['workerno'] = i self.outmsg.channels[i].send((token_range, ranges[token_range])) ranges[token_range]['attempts'] += 1 i = i + 1 if i < self.num_processes - 1 else 0 def export_records(self, ranges): """ Send records to child processes and monitor them by collecting their results or any errors. We terminate when we have processed all the ranges or when one child process has died (since in this case we will never get any ACK for the ranges processed by it and at the moment we don't keep track of which ranges a process is handling). """ shell = self.shell processes = self.processes meter = RateMeter(log_fcn=self.printmsg, update_interval=self.options.copy['reportfrequency'], log_file=self.options.copy['ratefile']) total_requests = len(ranges) max_attempts = self.options.copy['maxattempts'] self.send_work(ranges, ranges.keys()) num_processes = len(processes) succeeded = 0 failed = 0 while (failed + succeeded) < total_requests and self.num_live_processes() == num_processes: for token_range, result in self.inmsg.recv(timeout=0.1): if token_range is None and result is None: # a request has finished succeeded += 1 elif isinstance(result, Exception): # an error occurred # This token_range failed, retry up to max_attempts if no rows received yet, # If rows were already received we'd risk duplicating data. # Note that there is still a slight risk of duplicating data, even if we have # an error with no rows received yet, it's just less likely. 
To avoid retrying on # all timeouts would however mean we could risk not exporting some rows. if ranges[token_range]['attempts'] < max_attempts and ranges[token_range]['rows'] == 0: shell.printerr('Error for %s: %s (will try again later attempt %d of %d)' % (token_range, result, ranges[token_range]['attempts'], max_attempts)) self.send_work(ranges, [token_range]) else: shell.printerr('Error for %s: %s (permanently given up after %d rows and %d attempts)' % (token_range, result, ranges[token_range]['rows'], ranges[token_range]['attempts'])) failed += 1 else: # partial result received data, num = result self.writer.write(data, num) meter.increment(n=num) ranges[token_range]['rows'] += num if self.num_live_processes() < len(processes): for process in processes: if not process.is_alive(): shell.printerr('Child process %d died with exit code %d' % (process.pid, process.exitcode)) if succeeded < total_requests: shell.printerr('Exported %d ranges out of %d total ranges, some records might be missing' % (succeeded, total_requests)) self.printmsg("\n%d rows exported to %d files in %s." % (meter.get_total_records(), self.writer.num_files, self.describe_interval(time.time() - self.time_start))) class FilesReader(object): """ A wrapper around a csv reader to keep track of when we have exhausted reading input files. We are passed a comma separated list of paths, where each path is a valid glob expression. We generate a source generator and we read each source one by one. """ def __init__(self, fname, options): self.chunk_size = options.copy['chunksize'] self.header = options.copy['header'] self.max_rows = options.copy['maxrows'] self.skip_rows = options.copy['skiprows'] self.fname = fname self.sources = None # must be created later due to pickle problems on Windows self.num_sources = 0 self.current_source = None self.num_read = 0 def get_source(self, paths): """ Return a source generator. 
Each source is a named tuple wrapping the source input, file name and a boolean indicating if it requires closing. """ def make_source(fname): try: return open(fname, 'rb') except IOError, e: raise IOError("Can't open %r for reading: %s" % (fname, e)) for path in paths.split(','): path = path.strip() if os.path.isfile(path): yield make_source(path) else: result = glob.glob(path) if len(result) == 0: raise IOError("Can't open %r for reading: no matching file found" % (path,)) for f in result: yield (make_source(f)) def start(self): self.sources = self.get_source(self.fname) self.next_source() @property def exhausted(self): return not self.current_source def next_source(self): """ Close the current source, if any, and open the next one. Return true if there is another source, false otherwise. """ self.close_current_source() while self.current_source is None: try: self.current_source = self.sources.next() if self.current_source: self.num_sources += 1 except StopIteration: return False if self.header: self.current_source.next() return True def close_current_source(self): if not self.current_source: return self.current_source.close() self.current_source = None def close(self): self.close_current_source() def read_rows(self, max_rows): if not self.current_source: return [] rows = [] for i in xrange(min(max_rows, self.chunk_size)): try: row = self.current_source.next() self.num_read += 1 if 0 <= self.max_rows < self.num_read: self.next_source() break if self.num_read > self.skip_rows: rows.append(row) except StopIteration: self.next_source() break return filter(None, rows) class PipeReader(object): """ A class for reading rows received on a pipe, this is used for reading input from STDIN """ def __init__(self, inmsg, options): self.inmsg = inmsg self.chunk_size = options.copy['chunksize'] self.header = options.copy['header'] self.max_rows = options.copy['maxrows'] self.skip_rows = options.copy['skiprows'] self.num_read = 0 self.exhausted = False self.num_sources = 1 def 
start(self): pass def read_rows(self, max_rows): rows = [] for i in xrange(min(max_rows, self.chunk_size)): row = self.inmsg.recv() if row is None: self.exhausted = True break self.num_read += 1 if 0 <= self.max_rows < self.num_read: self.exhausted = True break # max rows exceeded if self.header or self.num_read < self.skip_rows: self.header = False # skip header or initial skip_rows rows continue rows.append(row) return rows class ImportProcessResult(object): """ An object sent from ImportProcess instances to the parent import task in order to indicate progress. """ def __init__(self, imported=0): self.imported = imported class FeedingProcessResult(object): """ An object sent from FeedingProcess instances to the parent import task in order to indicate progress. """ def __init__(self, sent, reader): self.sent = sent self.num_sources = reader.num_sources self.skip_rows = reader.skip_rows class ImportTaskError(object): """ An object sent from child processes (feeder or workers) to the parent import task to indicate an error. """ def __init__(self, name, msg, rows=None, attempts=1, final=True): self.name = name self.msg = msg self.rows = rows if rows else [] self.attempts = attempts self.final = final def is_parse_error(self): """ We treat read and parse errors as unrecoverable and we have different global counters for giving up when a maximum has been reached. We consider value and type errors as parse errors as well since they are typically non recoverable. 
""" name = self.name return name.startswith('ValueError') or name.startswith('TypeError') or \ name.startswith('ParseError') or name.startswith('IndexError') or name.startswith('ReadError') class ImportErrorHandler(object): """ A class for managing import errors """ def __init__(self, task): self.shell = task.shell self.options = task.options self.max_attempts = self.options.copy['maxattempts'] self.max_parse_errors = self.options.copy['maxparseerrors'] self.max_insert_errors = self.options.copy['maxinserterrors'] self.err_file = self.options.copy['errfile'] self.parse_errors = 0 self.insert_errors = 0 self.num_rows_failed = 0 if os.path.isfile(self.err_file): now = datetime.datetime.now() old_err_file = self.err_file + now.strftime('.%Y%m%d_%H%M%S') printdebugmsg("Renaming existing %s to %s\n" % (self.err_file, old_err_file)) os.rename(self.err_file, old_err_file) def max_exceeded(self): if self.insert_errors > self.max_insert_errors >= 0: self.shell.printerr("Exceeded maximum number of insert errors %d" % self.max_insert_errors) return True if self.parse_errors > self.max_parse_errors >= 0: self.shell.printerr("Exceeded maximum number of parse errors %d" % self.max_parse_errors) return True return False def add_failed_rows(self, rows): self.num_rows_failed += len(rows) with open(self.err_file, "a") as f: writer = csv.writer(f, **self.options.dialect) for row in rows: writer.writerow(row) def handle_error(self, err): """ Handle an error by printing the appropriate error message and incrementing the correct counter. 
""" shell = self.shell if err.is_parse_error(): self.parse_errors += len(err.rows) self.add_failed_rows(err.rows) shell.printerr("Failed to import %d rows: %s - %s, given up without retries" % (len(err.rows), err.name, err.msg)) else: self.insert_errors += len(err.rows) if not err.final: shell.printerr("Failed to import %d rows: %s - %s, will retry later, attempt %d of %d" % (len(err.rows), err.name, err.msg, err.attempts, self.max_attempts)) else: self.add_failed_rows(err.rows) shell.printerr("Failed to import %d rows: %s - %s, given up after %d attempts" % (len(err.rows), err.name, err.msg, err.attempts)) class ImportTask(CopyTask): """ A class to import data from .csv by instantiating one or more processes that work in parallel (ImportProcess). """ def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file): CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'from') options = self.options self.skip_columns = [c.strip() for c in self.options.copy['skipcols'].split(',')] self.valid_columns = [c for c in self.columns if c not in self.skip_columns] self.receive_meter = RateMeter(log_fcn=self.printmsg, update_interval=options.copy['reportfrequency'], log_file=options.copy['ratefile']) self.error_handler = ImportErrorHandler(self) self.feeding_result = None self.sent = 0 def make_params(self): ret = CopyTask.make_params(self) ret['skip_columns'] = self.skip_columns ret['valid_columns'] = self.valid_columns return ret def validate_columns(self): if not CopyTask.validate_columns(self): return False shell = self.shell if not self.valid_columns: shell.printerr("No valid column specified") return False for c in self.table_meta.primary_key: if c.name not in self.valid_columns: shell.printerr("Primary key column '%s' missing or skipped" % (c.name,)) return False return True def run(self): shell = self.shell if self.options.unrecognized: shell.printerr('Unrecognized COPY FROM options: %s' % ', 
'.join(self.options.unrecognized.keys())) return if not self.validate_columns(): return 0 columns = u"[" + u", ".join(self.valid_columns) + u"]" self.printmsg(u"\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding) try: params = self.make_params() for i in range(self.num_processes - 1): self.processes.append(ImportProcess(self.update_params(params, i))) feeder = FeedingProcess(self.outmsg.channels[-1], self.inmsg.channels[-1], self.outmsg.channels[:-1], self.fname, self.options, self.shell.conn if not IS_WINDOWS else None) self.processes.append(feeder) self.start_processes() pr = profile_on() if PROFILE_ON else None self.import_records() if pr: profile_off(pr, file_name='parent_profile_%d.txt' % (os.getpid(),)) except Exception, exc: shell.printerr(unicode(exc)) if shell.debug: traceback.print_exc() return 0 finally: self.close() def send_stdin_rows(self): """ We need to pass stdin rows to the feeder process as it is not safe to pickle or share stdin directly (in case of file the child process would close it). This is a very primitive support for STDIN import in that we we won't start reporting progress until STDIN is fully consumed. I think this is reasonable. """ shell = self.shell self.printmsg("[Use \. on a line by itself to end input]") for row in shell.use_stdin_reader(prompt='[copy] ', until=r'.'): self.outmsg.channels[-1].send(row) self.outmsg.channels[-1].send(None) if shell.tty: print def import_records(self): """ Keep on running until we have stuff to receive or send and until all processes are running. Send data (batches or retries) up to the max ingest rate. If we are waiting for stuff to receive check the incoming queue. 
""" if not self.fname: self.send_stdin_rows() while self.feeding_result is None or self.receive_meter.total_records < self.feeding_result.sent: self.receive_results() if self.error_handler.max_exceeded() or not self.all_processes_running(): break if self.error_handler.num_rows_failed: self.shell.printerr("Failed to process %d rows; failed rows written to %s" % (self.error_handler.num_rows_failed, self.error_handler.err_file)) if not self.all_processes_running(): self.shell.printerr("{} child process(es) died unexpectedly, aborting" .format(self.num_processes - self.num_live_processes())) else: if self.error_handler.max_exceeded(): self.processes[-1].terminate() # kill the feeder for i, _ in enumerate(self.processes): if self.processes[i].is_alive(): self.outmsg.channels[i].send(None) # allow time for worker processes to exit cleanly attempts = 50 # 100 milliseconds per attempt, so 5 seconds total while attempts > 0 and self.num_live_processes() > 0: time.sleep(0.1) attempts -= 1 self.printmsg("\n%d rows imported from %d files in %s (%d skipped)." % (self.receive_meter.get_total_records(), self.feeding_result.num_sources if self.feeding_result else 0, self.describe_interval(time.time() - self.time_start), self.feeding_result.skip_rows if self.feeding_result else 0)) def all_processes_running(self): return self.num_live_processes() == len(self.processes) def receive_results(self): """ Receive results from the worker processes, which will send the number of rows imported or from the feeder process, which will send the number of rows sent when it has finished sending rows. 
""" aggregate_result = ImportProcessResult() try: for result in self.inmsg.recv(timeout=0.1): if isinstance(result, ImportProcessResult): aggregate_result.imported += result.imported elif isinstance(result, ImportTaskError): self.error_handler.handle_error(result) elif isinstance(result, FeedingProcessResult): self.feeding_result = result else: raise ValueError("Unexpected result: %s" % (result,)) finally: self.receive_meter.increment(aggregate_result.imported) class FeedingProcess(mp.Process): """ A process that reads from import sources and sends chunks to worker processes. """ def __init__(self, inmsg, outmsg, worker_channels, fname, options, parent_cluster): mp.Process.__init__(self, target=self.run) self.inmsg = inmsg self.outmsg = outmsg self.worker_channels = worker_channels self.reader = FilesReader(fname, options) if fname else PipeReader(inmsg, options) self.send_meter = RateMeter(log_fcn=None, update_interval=1) self.ingest_rate = options.copy['ingestrate'] self.num_worker_processes = options.copy['numprocesses'] self.max_pending_chunks = options.copy['maxpendingchunks'] self.chunk_id = 0 self.parent_cluster = parent_cluster def on_fork(self): """ Release any parent connections after forking, see CASSANDRA-11749 for details. """ if self.parent_cluster: printdebugmsg("Closing parent cluster sockets") self.parent_cluster.shutdown() def run(self): pr = profile_on() if PROFILE_ON else None self.inner_run() if pr: profile_off(pr, file_name='feeder_profile_%d.txt' % (os.getpid(),)) def inner_run(self): """ Send one batch per worker process to the queue unless we have exceeded the ingest rate. In the export case we queue everything and let the worker processes throttle using max_requests, here we throttle using the ingest rate in the feeding process because of memory usage concerns. When finished we send back to the parent process the total number of rows sent. 
""" self.on_fork() reader = self.reader try: reader.start() except IOError, exc: self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message)) channels = self.worker_channels max_pending_chunks = self.max_pending_chunks sent = 0 failed_attempts = 0 while not reader.exhausted: channels_eligible = filter(lambda c: c.num_pending() < max_pending_chunks, channels) if not channels_eligible: failed_attempts += 1 delay = randint(1, pow(2, failed_attempts)) printdebugmsg("All workers busy, sleeping for %d second(s)" % (delay,)) time.sleep(delay) continue elif failed_attempts > 0: failed_attempts = 0 for ch in channels_eligible: try: max_rows = self.ingest_rate - self.send_meter.current_record if max_rows <= 0: self.send_meter.maybe_update(sleep=False) continue rows = reader.read_rows(max_rows) if rows: sent += self.send_chunk(ch, rows) except Exception, exc: self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message)) if reader.exhausted: break # send back to the parent process the number of rows sent to the worker processes self.outmsg.send(FeedingProcessResult(sent, reader)) # wait for poison pill (None) self.inmsg.recv() def send_chunk(self, ch, rows): self.chunk_id += 1 num_rows = len(rows) self.send_meter.increment(num_rows) ch.send({'id': self.chunk_id, 'rows': rows, 'imported': 0, 'num_rows_sent': num_rows}) return num_rows def close(self): self.reader.close() self.inmsg.close() self.outmsg.close() for ch in self.worker_channels: ch.close() class ChildProcess(mp.Process): """ An child worker process, this is for common functionality between ImportProcess and ExportProcess. 
""" def __init__(self, params, target): mp.Process.__init__(self, target=target) self.inmsg = params['inmsg'] self.outmsg = params['outmsg'] self.ks = params['ks'] self.table = params['table'] self.local_dc = params['local_dc'] self.columns = params['columns'] self.debug = params['debug'] self.port = params['port'] self.hostname = params['hostname'] self.connect_timeout = params['connect_timeout'] self.cql_version = params['cql_version'] self.auth_provider = params['auth_provider'] self.parent_cluster = params['parent_cluster'] self.ssl = params['ssl'] self.protocol_version = params['protocol_version'] self.config_file = params['config_file'] options = params['options'] self.date_time_format = options.copy['dtformats'] self.consistency_level = options.copy['consistencylevel'] self.decimal_sep = options.copy['decimalsep'] self.thousands_sep = options.copy['thousandssep'] self.boolean_styles = options.copy['boolstyle'] self.max_attempts = options.copy['maxattempts'] self.encoding = options.copy['encoding'] # Here we inject some failures for testing purposes, only if this environment variable is set if os.environ.get('CQLSH_COPY_TEST_FAILURES', ''): self.test_failures = json.loads(os.environ.get('CQLSH_COPY_TEST_FAILURES', '')) else: self.test_failures = None def on_fork(self): """ Release any parent connections after forking, see CASSANDRA-11749 for details. 
""" if self.parent_cluster: printdebugmsg("Closing parent cluster sockets") self.parent_cluster.shutdown() def close(self): printdebugmsg("Closing queues...") self.inmsg.close() self.outmsg.close() class ExpBackoffRetryPolicy(RetryPolicy): """ A retry policy with exponential back-off for read timeouts and write timeouts """ def __init__(self, parent_process): RetryPolicy.__init__(self) self.max_attempts = parent_process.max_attempts def on_read_timeout(self, query, consistency, required_responses, received_responses, data_retrieved, retry_num): return self._handle_timeout(consistency, retry_num) def on_write_timeout(self, query, consistency, write_type, required_responses, received_responses, retry_num): return self._handle_timeout(consistency, retry_num) def _handle_timeout(self, consistency, retry_num): delay = self.backoff(retry_num) if delay > 0: printdebugmsg("Timeout received, retrying after %d seconds" % (delay,)) time.sleep(delay) return self.RETRY, consistency elif delay == 0: printdebugmsg("Timeout received, retrying immediately") return self.RETRY, consistency else: printdebugmsg("Timeout received, giving up after %d attempts" % (retry_num + 1)) return self.RETHROW, None def backoff(self, retry_num): """ Perform exponential back-off up to a maximum number of times, where this maximum is per query. To back-off we should wait a random number of seconds between 0 and 2^c - 1, where c is the number of total failures. :return : the number of seconds to wait for, -1 if we should not retry """ if retry_num >= self.max_attempts: return -1 delay = randint(0, pow(2, retry_num + 1) - 1) return delay class ExportSession(object): """ A class for connecting to a cluster and storing the number of requests that this connection is processing. It wraps the methods for executing a query asynchronously and for shutting down the connection to the cluster. 
""" def __init__(self, cluster, export_process): session = cluster.connect(export_process.ks) session.row_factory = tuple_factory session.default_fetch_size = export_process.options.copy['pagesize'] session.default_timeout = export_process.options.copy['pagetimeout'] printdebugmsg("Created connection to %s with page size %d and timeout %d seconds per page" % (cluster.contact_points, session.default_fetch_size, session.default_timeout)) self.cluster = cluster self.session = session self.requests = 1 self.lock = threading.Lock() self.consistency_level = export_process.consistency_level def add_request(self): with self.lock: self.requests += 1 def complete_request(self): with self.lock: self.requests -= 1 def num_requests(self): with self.lock: return self.requests def execute_async(self, query): return self.session.execute_async(SimpleStatement(query, consistency_level=self.consistency_level)) def shutdown(self): self.cluster.shutdown() class ExportProcess(ChildProcess): """ An child worker process for the export task, ExportTask. """ def __init__(self, params): ChildProcess.__init__(self, params=params, target=self.run) options = params['options'] self.float_precision = options.copy['floatprecision'] self.double_precision = options.copy['doubleprecision'] self.nullval = options.copy['nullval'] self.max_requests = options.copy['maxrequests'] self.hosts_to_sessions = dict() self.formatters = dict() self.options = options def run(self): try: self.inner_run() finally: self.close() def inner_run(self): """ The parent sends us (range, info) on the inbound queue (inmsg) in order to request us to process a range, for which we can select any of the hosts in info, which also contains other information for this range such as the number of attempts already performed. We can signal errors on the outbound queue (outmsg) by sending (range, error) or we can signal a global error by sending (None, error). We terminate when the inbound queue is closed. 
""" self.on_fork() while True: if self.num_requests() > self.max_requests: time.sleep(0.001) # 1 millisecond continue token_range, info = self.inmsg.recv() self.start_request(token_range, info) @staticmethod def get_error_message(err, print_traceback=False): if isinstance(err, str): msg = err elif isinstance(err, BaseException): msg = "%s - %s" % (err.__class__.__name__, err) if print_traceback and sys.exc_info()[1] == err: traceback.print_exc() else: msg = unicode(err) return msg def report_error(self, err, token_range): msg = self.get_error_message(err, print_traceback=self.debug) printdebugmsg(msg) self.send((token_range, Exception(msg))) def send(self, response): self.outmsg.send(response) def start_request(self, token_range, info): """ Begin querying a range by executing an async query that will later on invoke the callbacks attached in attach_callbacks. """ session = self.get_session(info['hosts'], token_range) if session: metadata = session.cluster.metadata.keyspaces[self.ks].tables[self.table] query = self.prepare_query(metadata.partition_key, token_range, info['attempts']) future = session.execute_async(query) self.attach_callbacks(token_range, future, session) def num_requests(self): return sum(session.num_requests() for session in self.hosts_to_sessions.values()) def get_session(self, hosts, token_range): """ We return a session connected to one of the hosts passed in, which are valid replicas for the token range. We sort replicas by favouring those without any active requests yet or with the smallest number of requests. If we fail to connect we report an error so that the token will be retried again later. :return: An ExportSession connected to the chosen host. 
""" # sorted replicas favouring those with no connections yet hosts = sorted(hosts, key=lambda hh: 0 if hh not in self.hosts_to_sessions else self.hosts_to_sessions[hh].requests) errors = [] ret = None for host in hosts: try: ret = self.connect(host) except Exception, e: errors.append(self.get_error_message(e)) if ret: if errors: printdebugmsg("Warning: failed to connect to some replicas: %s" % (errors,)) return ret self.report_error("Failed to connect to all replicas %s for %s, errors: %s" % (hosts, token_range, errors), token_range) return None def connect(self, host): if host in self.hosts_to_sessions.keys(): session = self.hosts_to_sessions[host] session.add_request() return session new_cluster = Cluster( contact_points=(host,), port=self.port, cql_version=self.cql_version, protocol_version=self.protocol_version, auth_provider=self.auth_provider, ssl_options=ssl_settings(host, self.config_file) if self.ssl else None, load_balancing_policy=WhiteListRoundRobinPolicy([host]), default_retry_policy=ExpBackoffRetryPolicy(self), compression=None, control_connection_timeout=self.connect_timeout, connect_timeout=self.connect_timeout, idle_heartbeat_interval=0) session = ExportSession(new_cluster, self) self.hosts_to_sessions[host] = session return session def attach_callbacks(self, token_range, future, session): metadata = session.cluster.metadata ks_meta = metadata.keyspaces[self.ks] table_meta = ks_meta.tables[self.table] cql_types = [CqlType(table_meta.columns[c].cql_type, ks_meta) for c in self.columns] def result_callback(rows): if future.has_more_pages: future.start_fetching_next_page() self.write_rows_to_csv(token_range, rows, cql_types) else: self.write_rows_to_csv(token_range, rows, cql_types) self.send((None, None)) session.complete_request() def err_callback(err): self.report_error(err, token_range) session.complete_request() future.add_callbacks(callback=result_callback, errback=err_callback) def write_rows_to_csv(self, token_range, rows, cql_types): if not 
rows: return # no rows in this range try: output = StringIO() writer = csv.writer(output, **self.options.dialect) for row in rows: writer.writerow(map(self.format_value, row, cql_types)) data = (output.getvalue(), len(rows)) self.send((token_range, data)) output.close() except Exception, e: self.report_error(e, token_range) def format_value(self, val, cqltype): if val is None or val == EMPTY: return format_value_default(self.nullval, colormap=NO_COLOR_MAP) formatter = self.formatters.get(cqltype, None) if not formatter: formatter = get_formatter(val, cqltype) self.formatters[cqltype] = formatter if not hasattr(cqltype, 'precision'): cqltype.precision = self.double_precision if cqltype.type_name == 'double' else self.float_precision return formatter(val, cqltype=cqltype, encoding=self.encoding, colormap=NO_COLOR_MAP, date_time_format=self.date_time_format, float_precision=cqltype.precision, nullval=self.nullval, quote=False, decimal_sep=self.decimal_sep, thousands_sep=self.thousands_sep, boolean_styles=self.boolean_styles) def close(self): ChildProcess.close(self) for session in self.hosts_to_sessions.values(): session.shutdown() def prepare_query(self, partition_key, token_range, attempts): """ Return the export query or a fake query with some failure injected. """ if self.test_failures: return self.maybe_inject_failures(partition_key, token_range, attempts) else: return self.prepare_export_query(partition_key, token_range) def maybe_inject_failures(self, partition_key, token_range, attempts): """ Examine self.test_failures and see if token_range is either a token range supposed to cause a failure (failing_range) or to terminate the worker process (exit_range). If not then call prepare_export_query(), which implements the normal behavior. 
""" start_token, end_token = token_range if not start_token or not end_token: # exclude first and last ranges to make things simpler return self.prepare_export_query(partition_key, token_range) if 'failing_range' in self.test_failures: failing_range = self.test_failures['failing_range'] if start_token >= failing_range['start'] and end_token <= failing_range['end']: if attempts < failing_range['num_failures']: return 'SELECT * from bad_table' if 'exit_range' in self.test_failures: exit_range = self.test_failures['exit_range'] if start_token >= exit_range['start'] and end_token <= exit_range['end']: sys.exit(1) return self.prepare_export_query(partition_key, token_range) def prepare_export_query(self, partition_key, token_range): """ Return a query where we select all the data for this token range """ pk_cols = ", ".join(protect_names(col.name for col in partition_key)) columnlist = ', '.join(protect_names(self.columns)) start_token, end_token = token_range query = 'SELECT %s FROM %s.%s' % (columnlist, protect_name(self.ks), protect_name(self.table)) if start_token is not None or end_token is not None: query += ' WHERE' if start_token is not None: query += ' token(%s) > %s' % (pk_cols, start_token) if start_token is not None and end_token is not None: query += ' AND' if end_token is not None: query += ' token(%s) <= %s' % (pk_cols, end_token) return query class ParseError(Exception): """ We failed to parse an import record """ pass class ImportConversion(object): """ A class for converting strings to values when importing from csv, used by ImportProcess, the parent. 
""" def __init__(self, parent, table_meta, statement=None): self.ks = parent.ks self.table = parent.table self.columns = parent.valid_columns self.nullval = parent.nullval self.decimal_sep = parent.decimal_sep self.thousands_sep = parent.thousands_sep self.boolean_styles = parent.boolean_styles self.date_time_format = parent.date_time_format.timestamp_format self.debug = parent.debug self.encoding = parent.encoding self.table_meta = table_meta self.primary_key_indexes = [self.columns.index(col.name) for col in self.table_meta.primary_key] self.partition_key_indexes = [self.columns.index(col.name) for col in self.table_meta.partition_key] if statement is None: self.use_prepared_statements = False statement = self._get_primary_key_statement(parent, table_meta) else: self.use_prepared_statements = True self.proto_version = statement.protocol_version # the cql types and converters for the prepared statement, either the full statement or only the primary keys self.cqltypes = [c.type for c in statement.column_metadata] self.converters = [self._get_converter(c.type) for c in statement.column_metadata] # the cql types for the entire statement, these are the same as the types above but # only when using prepared statements self.coltypes = [table_meta.columns[name].cql_type for name in parent.valid_columns] # these functions are used for non-prepared statements to protect values with quotes if required self.protectors = [self._get_protector(t) for t in self.coltypes] def _get_protector(self, t): if t in ('ascii', 'text', 'timestamp', 'date', 'time', 'inet'): return lambda v: unicode(protect_value(v), self.encoding) else: return lambda v: v @staticmethod def _get_primary_key_statement(parent, table_meta): """ We prepare a query statement to find out the types of the partition key columns so we can route the update query to the correct replicas. 
As far as I understood this is the easiest way to find out the types of the partition columns, we will never use this prepared statement """ where_clause = ' AND '.join(['%s = ?' % (protect_name(c.name)) for c in table_meta.partition_key]) select_query = 'SELECT * FROM %s.%s WHERE %s' % (protect_name(parent.ks), protect_name(parent.table), where_clause) return parent.session.prepare(select_query) def _get_converter(self, cql_type): """ Return a function that converts a string into a value the can be passed into BoundStatement.bind() for the given cql type. See cassandra.cqltypes for more details. """ def unprotect(v): if v is not None: return CqlRuleSet.dequote_value(v) def convert(t, v): v = unprotect(v) if v == self.nullval: return self.get_null_val() return converters.get(t.typename, convert_unknown)(v, ct=t) def convert_mandatory(t, v): v = unprotect(v) if v == self.nullval: raise ParseError('Empty values are not allowed') return converters.get(t.typename, convert_unknown)(v, ct=t) def convert_blob(v, **_): return bytearray.fromhex(v[2:]) def convert_text(v, **_): return v def convert_uuid(v, **_): return UUID(v) def convert_bool(v, **_): return True if v.lower() == self.boolean_styles[0].lower() else False def get_convert_integer_fcn(adapter=int): """ Return a slow and a fast integer conversion function depending on self.thousands_sep """ if self.thousands_sep: return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, '')) else: return lambda v, ct=cql_type: adapter(v) def get_convert_decimal_fcn(adapter=float): """ Return a slow and a fast decimal conversion function depending on self.thousands_sep and self.decimal_sep """ if self.thousands_sep and self.decimal_sep: return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, '').replace(self.decimal_sep, '.')) elif self.thousands_sep: return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, '')) elif self.decimal_sep: return lambda v, ct=cql_type: 
adapter(v.replace(self.decimal_sep, '.')) else: return lambda v, ct=cql_type: adapter(v) def split(val, sep=','): """ Split into a list of values whenever we encounter a separator but ignore separators inside parentheses or single quotes, except for the two outermost parentheses, which will be ignored. We expect val to be at least 2 characters long (the two outer parentheses). """ ret = [] last = 1 level = 0 quote = False for i, c in enumerate(val): if c == '\'': quote = not quote elif not quote: if c == '{' or c == '[' or c == '(': level += 1 elif c == '}' or c == ']' or c == ')': level -= 1 elif c == sep and level == 1: ret.append(val[last:i]) last = i + 1 else: if last < len(val) - 1: ret.append(val[last:-1]) return ret # this should match all possible CQL and CQLSH datetime formats p = re.compile("(\d{4})\-(\d{2})\-(\d{2})\s?(?:'T')?" + # YYYY-MM-DD[( |'T')] "(?:(\d{2}):(\d{2})(?::(\d{2})(?:\.(\d{1,6}))?))?" + # [HH:MM[:SS[.NNNNNN]]] "(?:([+\-])(\d{2}):?(\d{2}))?") # [(+|-)HH[:]MM]] def convert_datetime(val, **_): try: tval = time.strptime(val, self.date_time_format) return timegm(tval) * 1e3 # scale seconds to millis for the raw value except ValueError: pass # if it's not in the default format we try CQL formats m = p.match(val) if not m: try: # in case of overflow COPY TO prints dates as milliseconds from the epoch, see # deserialize_date_fallback_int in cqlsh.py return int(val) except ValueError: raise ValueError("can't interpret %r as a date with format %s or as int" % (val, self.date_time_format)) # https://docs.python.org/2/library/time.html#time.struct_time tval = time.struct_time((int(m.group(1)), int(m.group(2)), int(m.group(3)), # year, month, day int(m.group(4)) if m.group(4) else 0, # hour int(m.group(5)) if m.group(5) else 0, # minute int(m.group(6)) if m.group(6) else 0, # second 0, 1, -1)) # day of week, day of year, dst-flag # convert sub-seconds (a number between 1 and 6 digits) to milliseconds milliseconds = 0 if not m.group(7) else 
int(m.group(7)) * pow(10, 3 - len(m.group(7))) if m.group(8): offset = (int(m.group(9)) * 3600 + int(m.group(10)) * 60) * int(m.group(8) + '1') else: offset = -time.timezone # scale seconds to millis for the raw value return ((timegm(tval) + offset) * 1e3) + milliseconds def convert_date(v, **_): return Date(v) def convert_time(v, **_): return Time(v) def convert_tuple(val, ct=cql_type): return tuple(convert_mandatory(t, v) for t, v in zip(ct.subtypes, split(val))) def convert_list(val, ct=cql_type): return list(convert_mandatory(ct.subtypes[0], v) for v in split(val)) def convert_set(val, ct=cql_type): return frozenset(convert_mandatory(ct.subtypes[0], v) for v in split(val)) def convert_map(val, ct=cql_type): """ We need to pass to BoundStatement.bind() a dict() because it calls iteritems(), except we can't create a dict with another dict as the key, hence we use a class that adds iteritems to a frozen set of tuples (which is how dict are normally made immutable in python). """ class ImmutableDict(frozenset): iteritems = frozenset.__iter__ return ImmutableDict(frozenset((convert_mandatory(ct.subtypes[0], v[0]), convert(ct.subtypes[1], v[1])) for v in [split('{%s}' % vv, sep=':') for vv in split(val)])) def convert_user_type(val, ct=cql_type): """ A user type is a dictionary except that we must convert each key into an attribute, so we are using named tuples. It must also be hashable, so we cannot use dictionaries. Maybe there is a way to instantiate ct directly but I could not work it out. 
""" vals = [v for v in [split('{%s}' % vv, sep=':') for vv in split(val)]] ret_type = namedtuple(ct.typename, [unprotect(v[0]) for v in vals]) return ret_type(*tuple(convert(t, v[1]) for t, v in zip(ct.subtypes, vals))) def convert_single_subtype(val, ct=cql_type): return converters.get(ct.subtypes[0].typename, convert_unknown)(val, ct=ct.subtypes[0]) def convert_unknown(val, ct=cql_type): if issubclass(ct, UserType): return convert_user_type(val, ct=ct) elif issubclass(ct, ReversedType): return convert_single_subtype(val, ct=ct) printdebugmsg("Unknown type %s (%s) for val %s" % (ct, ct.typename, val)) return val converters = { 'blob': convert_blob, 'decimal': get_convert_decimal_fcn(adapter=Decimal), 'uuid': convert_uuid, 'boolean': convert_bool, 'tinyint': get_convert_integer_fcn(), 'ascii': convert_text, 'float': get_convert_decimal_fcn(), 'double': get_convert_decimal_fcn(), 'bigint': get_convert_integer_fcn(adapter=long), 'int': get_convert_integer_fcn(), 'varint': get_convert_integer_fcn(), 'inet': convert_text, 'counter': get_convert_integer_fcn(adapter=long), 'timestamp': convert_datetime, 'timeuuid': convert_uuid, 'date': convert_date, 'smallint': get_convert_integer_fcn(), 'time': convert_time, 'text': convert_text, 'varchar': convert_text, 'list': convert_list, 'set': convert_set, 'map': convert_map, 'tuple': convert_tuple, 'frozen': convert_single_subtype, } return converters.get(cql_type.typename, convert_unknown) def get_null_val(self): return None if self.use_prepared_statements else "NULL" def convert_row(self, row): """ Convert the row into a list of parsed values if using prepared statements, else simply apply the protection functions to escape values with quotes when required. Also check on the row length and make sure primary partition key values aren't missing. 
""" converters = self.converters if self.use_prepared_statements else self.protectors if len(row) != len(converters): raise ParseError('Invalid row length %d should be %d' % (len(row), len(converters))) for i in self.primary_key_indexes: if row[i] == self.nullval: raise ParseError(self.get_null_primary_key_message(i)) def convert(c, v): try: return c(v) if v != self.nullval else self.get_null_val() except Exception, e: if self.debug: traceback.print_exc() raise ParseError("Failed to parse %s : %s" % (val, e.message)) return [convert(conv, val) for conv, val in zip(converters, row)] def get_null_primary_key_message(self, idx): message = "Cannot insert null value for primary key column '%s'." % (self.columns[idx],) if self.nullval == '': message += " If you want to insert empty strings, consider using" \ " the WITH NULL=<marker> option for COPY." return message def get_row_partition_key_values_fcn(self): """ Return a function to convert a row into a string composed of the partition key values serialized and binary packed (the tokens on the ring). Depending on whether we are using prepared statements, we may have to convert the primary key values first, so we have two different serialize_value implementations. We also return different functions depending on how many partition key indexes we have (single or multiple). See also BoundStatement.routing_key. 
""" def serialize_value_prepared(n, v): return self.cqltypes[n].serialize(v, self.proto_version) def serialize_value_not_prepared(n, v): return self.cqltypes[n].serialize(self.converters[n](v), self.proto_version) partition_key_indexes = self.partition_key_indexes serialize = serialize_value_prepared if self.use_prepared_statements else serialize_value_not_prepared def serialize_row_single(row): return serialize(partition_key_indexes[0], row[partition_key_indexes[0]]) def serialize_row_multiple(row): pk_values = [] for i in partition_key_indexes: val = serialize(i, row[i]) l = len(val) pk_values.append(struct.pack(">H%dsB" % l, l, val, 0)) return b"".join(pk_values) if len(partition_key_indexes) == 1: return serialize_row_single return serialize_row_multiple class TokenMap(object): """ A wrapper around the metadata token map to speed things up by caching ring token *values* and replicas. It is very important that we use the token values, which are primitive types, rather than the tokens classes when calling bisect_right() in split_batches(). If we use primitive values, the bisect is done in compiled code whilst with token classes each comparison requires a call into the interpreter to perform the cmp operation defined in Python. A simple test with 1 million bisect operations on an array of 2048 tokens was done in 0.37 seconds with primitives and 2.25 seconds with token classes. This is significant for large datasets because we need to do a bisect for each single row, and if VNODES are used, the size of the token map can get quite large too. 
""" def __init__(self, ks, hostname, local_dc, session): self.ks = ks self.hostname = hostname self.local_dc = local_dc self.metadata = session.cluster.metadata self._initialize_ring() # Note that refresh metadata is disabled by default and we currenlty do not intercept it # If hosts are added, removed or moved during a COPY operation our token map is no longer optimal # However we can cope with hosts going down and up since we filter for replicas that are up when # making each batch def _initialize_ring(self): token_map = self.metadata.token_map if token_map is None: self.ring = [0] self.replicas = [(self.metadata.get_host(self.hostname),)] self.pk_to_token_value = lambda pk: 0 return token_map.rebuild_keyspace(self.ks, build_if_absent=True) tokens_to_hosts = token_map.tokens_to_hosts_by_ks.get(self.ks, None) from_key = token_map.token_class.from_key self.ring = [token.value for token in token_map.ring] self.replicas = [tuple(tokens_to_hosts[token]) for token in token_map.ring] self.pk_to_token_value = lambda pk: from_key(pk).value @staticmethod def get_ring_pos(ring, val): idx = bisect_right(ring, val) return idx if idx < len(ring) else 0 def filter_replicas(self, hosts): shuffled = tuple(sorted(hosts, key=lambda k: random.random())) return filter(lambda r: r.is_up is not False and r.datacenter == self.local_dc, shuffled) if hosts else () class FastTokenAwarePolicy(DCAwareRoundRobinPolicy): """ Send to any replicas attached to the query, or else fall back to DCAwareRoundRobinPolicy. Perform exponential back-off if too many in flight requests to all replicas are already in progress. 
""" def __init__(self, parent): DCAwareRoundRobinPolicy.__init__(self, parent.local_dc, 0) self.max_backoff_attempts = parent.max_backoff_attempts self.max_inflight_messages = parent.max_inflight_messages def make_query_plan(self, working_keyspace=None, query=None): """ Extend TokenAwarePolicy.make_query_plan() so that we choose the same replicas in preference and most importantly we avoid repeating the (slow) bisect. We also implement a backoff policy by sleeping an exponentially larger delay in case all connections to eligible replicas have too many in flight requests. """ connections = ConnectionWrapper.connections replicas = list(query.replicas) if hasattr(query, 'replicas') else [] replicas.extend([r for r in DCAwareRoundRobinPolicy.make_query_plan(self, working_keyspace, query) if r not in replicas]) if replicas: def replica_is_not_overloaded(r): if r.address in connections: conn = connections[r.address] return conn.in_flight < min(conn.max_request_id, self.max_inflight_messages) return True for i in xrange(self.max_backoff_attempts): for r in filter(replica_is_not_overloaded, replicas): yield r # the back-off starts at 10 ms (0.01) and it can go up to to 2^max_backoff_attempts, # which is currently 12, so 2^12 = 4096 = ~40 seconds when dividing by 0.01 delay = randint(1, pow(2, i + 1)) * 0.01 printdebugmsg("All replicas busy, sleeping for %d second(s)..." % (delay,)) time.sleep(delay) printdebugmsg("Replicas too busy, given up") class ConnectionWrapper(DefaultConnection): """ A wrapper to the driver default connection that helps in keeping track of messages in flight. The newly created connection is registered into a global dictionary so that FastTokenAwarePolicy is able to determine if a connection has too many in flight requests. 
""" connections = {} def __init__(self, *args, **kwargs): DefaultConnection.__init__(self, *args, **kwargs) self.connections[self.host] = self class ImportProcess(ChildProcess): def __init__(self, params): ChildProcess.__init__(self, params=params, target=self.run) self.skip_columns = params['skip_columns'] self.valid_columns = params['valid_columns'] self.skip_column_indexes = [i for i, c in enumerate(self.columns) if c in self.skip_columns] options = params['options'] self.nullval = options.copy['nullval'] self.max_attempts = options.copy['maxattempts'] self.min_batch_size = options.copy['minbatchsize'] self.max_batch_size = options.copy['maxbatchsize'] self.use_prepared_statements = options.copy['preparedstatements'] self.ttl = options.copy['ttl'] self.max_inflight_messages = options.copy['maxinflightmessages'] self.max_backoff_attempts = options.copy['maxbackoffattempts'] self.dialect_options = options.dialect self._session = None self.query = None self.conv = None self.make_statement = None @property def session(self): if not self._session: cluster = Cluster( contact_points=(self.hostname,), port=self.port, cql_version=self.cql_version, protocol_version=self.protocol_version, auth_provider=self.auth_provider, load_balancing_policy=FastTokenAwarePolicy(self), ssl_options=ssl_settings(self.hostname, self.config_file) if self.ssl else None, default_retry_policy=FallthroughRetryPolicy(), # we throw on timeouts and retry in the error callback compression=None, control_connection_timeout=self.connect_timeout, connect_timeout=self.connect_timeout, idle_heartbeat_interval=0, connection_class=ConnectionWrapper) self._session = cluster.connect(self.ks) self._session.default_timeout = None return self._session def run(self): try: pr = profile_on() if PROFILE_ON else None self.on_fork() self.inner_run(*self.make_params()) if pr: profile_off(pr, file_name='worker_profile_%d.txt' % (os.getpid(),)) except Exception, exc: self.report_error(exc) finally: self.close() def 
close(self): if self._session: self._session.cluster.shutdown() ChildProcess.close(self) def make_params(self): metadata = self.session.cluster.metadata table_meta = metadata.keyspaces[self.ks].tables[self.table] prepared_statement = None is_counter = ("counter" in [table_meta.columns[name].cql_type for name in self.valid_columns]) if is_counter: query = 'UPDATE %s.%s SET %%s WHERE %%s' % (protect_name(self.ks), protect_name(self.table)) make_statement = self.wrap_make_statement(self.make_counter_batch_statement) elif self.use_prepared_statements: query = 'INSERT INTO %s.%s (%s) VALUES (%s)' % (protect_name(self.ks), protect_name(self.table), ', '.join(protect_names(self.valid_columns),), ', '.join(['?' for _ in self.valid_columns])) if self.ttl >= 0: query += 'USING TTL %s' % (self.ttl,) query = self.session.prepare(query) query.consistency_level = self.consistency_level prepared_statement = query make_statement = self.wrap_make_statement(self.make_prepared_batch_statement) else: query = 'INSERT INTO %s.%s (%s) VALUES (%%s)' % (protect_name(self.ks), protect_name(self.table), ', '.join(protect_names(self.valid_columns),)) if self.ttl >= 0: query += 'USING TTL %s' % (self.ttl,) make_statement = self.wrap_make_statement(self.make_non_prepared_batch_statement) conv = ImportConversion(self, table_meta, prepared_statement) tm = TokenMap(self.ks, self.hostname, self.local_dc, self.session) return query, conv, tm, make_statement def inner_run(self, query, conv, tm, make_statement): """ Main run method. Note that we bind self methods that are called inside loops for performance reasons. 
""" self.query = query self.conv = conv self.make_statement = make_statement convert_rows = self.convert_rows split_into_batches = self.split_into_batches result_callback = self.result_callback err_callback = self.err_callback session = self.session while True: chunk = self.inmsg.recv() if chunk is None: break try: chunk['rows'] = convert_rows(conv, chunk) for replicas, batch in split_into_batches(chunk, conv, tm): statement = make_statement(query, conv, chunk, batch, replicas) if statement: future = session.execute_async(statement) future.add_callbacks(callback=result_callback, callback_args=(batch, chunk), errback=err_callback, errback_args=(batch, chunk, replicas)) except Exception, exc: self.report_error(exc, chunk, chunk['rows']) def wrap_make_statement(self, inner_make_statement): def make_statement(query, conv, chunk, batch, replicas): try: return inner_make_statement(query, conv, batch, replicas) except Exception, exc: print "Failed to make batch statement: {}".format(exc) self.report_error(exc, chunk, batch['rows']) return None def make_statement_with_failures(query, conv, chunk, batch, replicas): failed_batch = self.maybe_inject_failures(batch) if failed_batch: return failed_batch return make_statement(query, conv, chunk, batch, replicas) return make_statement_with_failures if self.test_failures else make_statement def make_counter_batch_statement(self, query, conv, batch, replicas): statement = BatchStatement(batch_type=BatchType.COUNTER, consistency_level=self.consistency_level) statement.replicas = replicas statement.keyspace = self.ks for row in batch['rows']: where_clause = [] set_clause = [] for i, value in enumerate(row): if i in conv.primary_key_indexes: where_clause.append("%s=%s" % (self.valid_columns[i], value)) else: set_clause.append("%s=%s+%s" % (self.valid_columns[i], self.valid_columns[i], value)) full_query_text = query % (','.join(set_clause), ' AND '.join(where_clause)) statement.add(full_query_text) return statement def 
make_prepared_batch_statement(self, query, _, batch, replicas): """ Return a batch statement. This is an optimized version of: statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level) for row in batch['rows']: statement.add(query, row) We could optimize further by removing bound_statements altogether but we'd have to duplicate much more driver's code (BoundStatement.bind()). """ statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level) statement.replicas = replicas statement.keyspace = self.ks statement._statements_and_parameters = [(True, query.query_id, query.bind(r).values) for r in batch['rows']] return statement def make_non_prepared_batch_statement(self, query, _, batch, replicas): statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level) statement.replicas = replicas statement.keyspace = self.ks statement._statements_and_parameters = [(False, query % (','.join(r),), ()) for r in batch['rows']] return statement def convert_rows(self, conv, chunk): """ Return converted rows and report any errors during conversion. """ def filter_row_values(row): return [v for i, v in enumerate(row) if i not in self.skip_column_indexes] if self.skip_column_indexes: rows = [filter_row_values(r) for r in list(csv.reader(chunk['rows'], **self.dialect_options))] else: rows = list(csv.reader(chunk['rows'], **self.dialect_options)) errors = defaultdict(list) def convert_row(r): try: return conv.convert_row(r) except Exception, err: errors[err.message].append(r) return None converted_rows = filter(None, [convert_row(r) for r in rows]) if errors: for msg, rows in errors.iteritems(): self.report_error(ParseError(msg), chunk, rows) return converted_rows def maybe_inject_failures(self, batch): """ Examine self.test_failures and see if token_range is either a token range supposed to cause a failure (failing_range) or to terminate the worker process (exit_range). 
If not then call prepare_export_query(), which implements the normal behavior. """ if 'failing_batch' in self.test_failures: failing_batch = self.test_failures['failing_batch'] if failing_batch['id'] == batch['id']: if batch['attempts'] < failing_batch['failures']: statement = SimpleStatement("INSERT INTO badtable (a, b) VALUES (1, 2)", consistency_level=self.consistency_level) return statement if 'exit_batch' in self.test_failures: exit_batch = self.test_failures['exit_batch'] if exit_batch['id'] == batch['id']: sys.exit(1) return None # carry on as normal @staticmethod def make_batch(batch_id, rows, attempts=1): return {'id': batch_id, 'rows': rows, 'attempts': attempts} def split_into_batches(self, chunk, conv, tm): """ Batch rows by ring position or replica. If there are at least min_batch_size rows for a ring position then split these rows into groups of max_batch_size and send a batch for each group, using all replicas for this ring position. Otherwise, we are forced to batch by replica, and here unfortunately we can only choose one replica to guarantee common replicas across partition keys. We are typically able to batch by ring position for small clusters or when VNODES are not used. For large clusters with VNODES it may not be possible, in this case it helps to increase the CHUNK SIZE but up to a limit, otherwise we may choke the cluster. 
""" rows_by_ring_pos = defaultdict(list) errors = defaultdict(list) min_batch_size = self.min_batch_size max_batch_size = self.max_batch_size ring = tm.ring get_row_partition_key_values = conv.get_row_partition_key_values_fcn() pk_to_token_value = tm.pk_to_token_value get_ring_pos = tm.get_ring_pos make_batch = self.make_batch for row in chunk['rows']: try: pk = get_row_partition_key_values(row) rows_by_ring_pos[get_ring_pos(ring, pk_to_token_value(pk))].append(row) except Exception, e: errors[e.message].append(row) if errors: for msg, rows in errors.iteritems(): self.report_error(ParseError(msg), chunk, rows) replicas = tm.replicas filter_replicas = tm.filter_replicas rows_by_replica = defaultdict(list) for ring_pos, rows in rows_by_ring_pos.iteritems(): if len(rows) > min_batch_size: for i in xrange(0, len(rows), max_batch_size): yield filter_replicas(replicas[ring_pos]), make_batch(chunk['id'], rows[i:i + max_batch_size]) else: # select only the first valid replica to guarantee more overlap or none at all rows_by_replica[filter_replicas(replicas[ring_pos])[:1]].extend(rows) # Now send the batches by replica for replicas, rows in rows_by_replica.iteritems(): for i in xrange(0, len(rows), max_batch_size): yield replicas, make_batch(chunk['id'], rows[i:i + max_batch_size]) def result_callback(self, _, batch, chunk): self.update_chunk(batch['rows'], chunk) def err_callback(self, response, batch, chunk, replicas): err_is_final = batch['attempts'] >= self.max_attempts self.report_error(response, chunk, batch['rows'], batch['attempts'], err_is_final) if not err_is_final: batch['attempts'] += 1 statement = self.make_statement(self.query, self.conv, chunk, batch, replicas) future = self.session.execute_async(statement) future.add_callbacks(callback=self.result_callback, callback_args=(batch, chunk), errback=self.err_callback, errback_args=(batch, chunk, replicas)) def report_error(self, err, chunk=None, rows=None, attempts=1, final=True): if self.debug and 
sys.exc_info()[1] == err: traceback.print_exc() self.outmsg.send(ImportTaskError(err.__class__.__name__, err.message, rows, attempts, final)) if final and chunk is not None: self.update_chunk(rows, chunk) def update_chunk(self, rows, chunk): chunk['imported'] += len(rows) if chunk['imported'] == chunk['num_rows_sent']: self.outmsg.send(ImportProcessResult(chunk['num_rows_sent'])) class RateMeter(object): def __init__(self, log_fcn, update_interval=0.25, log_file=''): self.log_fcn = log_fcn # the function for logging, may be None to disable logging self.update_interval = update_interval # how often we update in seconds self.log_file = log_file # an optional file where to log statistics in addition to stdout self.start_time = time.time() # the start time self.last_checkpoint_time = self.start_time # last time we logged self.current_rate = 0.0 # rows per second self.current_record = 0 # number of records since we last updated self.total_records = 0 # total number of records if os.path.isfile(self.log_file): os.unlink(self.log_file) def increment(self, n=1): self.current_record += n self.maybe_update() def maybe_update(self, sleep=False): if self.current_record == 0: return new_checkpoint_time = time.time() time_difference = new_checkpoint_time - self.last_checkpoint_time if time_difference >= self.update_interval: self.update(new_checkpoint_time) self.log_message() elif sleep: remaining_time = time_difference - self.update_interval if remaining_time > 0.000001: time.sleep(remaining_time) def update(self, new_checkpoint_time): time_difference = new_checkpoint_time - self.last_checkpoint_time if time_difference >= 1e-09: self.current_rate = self.get_new_rate(self.current_record / time_difference) self.last_checkpoint_time = new_checkpoint_time self.total_records += self.current_record self.current_record = 0 def get_new_rate(self, new_rate): """ return the rate of the last period: this is the new rate but averaged with the last rate to smooth a bit """ if 
self.current_rate == 0.0: return new_rate else: return (self.current_rate + new_rate) / 2.0 def get_avg_rate(self): """ return the average rate since we started measuring """ time_difference = time.time() - self.start_time return self.total_records / time_difference if time_difference >= 1e-09 else 0 def log_message(self): if not self.log_fcn: return output = 'Processed: %d rows; Rate: %7.0f rows/s; Avg. rate: %7.0f rows/s\r' % \ (self.total_records, self.current_rate, self.get_avg_rate()) self.log_fcn(output, eol='\r') if self.log_file: with open(self.log_file, "a") as f: f.write(output + '\n') def get_total_records(self): self.update(time.time()) self.log_message() return self.total_records
fc_2015_02_26.py
#!/usr/bin/env python3
# imports go here
from gevent import monkey
monkey.patch_all()

import time
from threading import Thread

from github import Github
import os
from flask import Flask, render_template
from flask.ext.socketio import SocketIO

#
# Free Coding session for 2015-02-26
# Written by Matt Warren
#
app = Flask(__name__)
app.config['SECRET_KEY'] = 'asdivbaiwubdfilsudbxv'
app.debug = True
socketio = SocketIO(app)
thread = None

hub = Github(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_PASSWORD'])

USERS = [
    'mfwarren'
]


def get_public_events():
    """Return {username: raw data of the user's most recent public event} for USERS."""
    events = {}
    for u in USERS:
        # BUG FIX: look up the loop variable instead of the hard-coded 'mfwarren',
        # otherwise every USERS entry reported the same account's events.
        hub_user = hub.get_user(u)
        events[u] = hub_user.get_public_events()[0].raw_data
    return events


def background_thread():
    """Poll GitHub every 30 seconds and push the events to all socket.io clients."""
    while True:
        events = get_public_events()
        socketio.emit('response', {'data': 'events', 'events': events}, namespace='')
        time.sleep(30)


@app.route('/')
def index():
    global thread
    if thread is None:
        # Start the poller once, on the first page load.
        thread = Thread(target=background_thread)
        thread.start()
    return render_template('github_index.html')


@socketio.on('event')
def message(message):
    pass  # need side effects from having this here


if __name__ == '__main__':
    socketio.run(app)
helper.py
from __future__ import annotations

import asyncio
import math
import json
import threading

from sphero.sphero_bolt import SpheroBolt
import numpy as np
from cv2 import cv2
from typing import List

# BUG FIX: CAP was commented out, so viewMovement()'s 'CAP is None' guard raised a
# NameError instead of printing its error message. Define it explicitly; callers are
# expected to assign an opened cv2.VideoCapture to it — TODO confirm against callers.
CAP = None
CURRENT_COORDINATES = {}


def get_json_data(file: str) -> List[dict[str, str]]:
    """Reads json file and returns a list of dictionaries.

    Parameters
    ----------
    file : str
        location of the json file.

    Returns
    -------
    list[dict[str, str]]
        list with one or more dictionaries.
    """
    with open(file) as json_file:
        return json.load(json_file)


async def viewMovement():
    """Overlay each bolt's current target coordinate on the main webcam stream."""
    print("VIEW MOVEMENTS!")
    global CAP
    global CURRENT_COORDINATES

    if CAP is None or not CAP.isOpened():
        print("[Error] Could not open the main webcam stream.")
        return

    while CAP.isOpened():
        ret, main_frame = CAP.read()

        for bolt_address in list(CURRENT_COORDINATES):
            bolt = CURRENT_COORDINATES[bolt_address]
            # color is via BGR
            cv2.circle(main_frame,
                       (int(bolt.get('coordinate')[0]), int(bolt.get('coordinate')[1])), 5,
                       (int(bolt.get('color')[2]), int(bolt.get('color')[1]), int(bolt.get('color')[0])), 2)

        # White reference circle at the frame centre (640x480 assumed — TODO confirm).
        cv2.circle(main_frame, (320, 240), 10, (255, 255, 255), 3)
        cv2.imshow("Movement Viewer", main_frame)

        if cv2.waitKey(1) & 0xFF == ord("q"):
            CAP.release()
            cv2.destroyAllWindows()


def findDirection(_point_a, _point_b):
    """Return the heading in degrees (0-360) from _point_a towards _point_b
    in image coordinates (y grows downwards)."""
    direction1 = _point_b[0] - _point_a[0]
    direction2 = _point_b[1] - _point_a[1]
    if direction1 == 0:
        if direction2 == 0:  # same points?
            degree = 0
        else:
            degree = 0 if _point_a[1] > _point_b[1] else 180
    elif direction2 == 0:
        degree = 90 if _point_a[0] < _point_b[0] else 270
    else:
        degree = math.atan(direction2 / direction1) / math.pi * 180
        lowering = _point_a[1] < _point_b[1]
        if (lowering and degree < 0) or (not lowering and degree > 0):
            degree += 270
        else:
            degree += 90
    return degree


def getSquareCoordinates(_center=(0, 0), _r=10, _n=10):
    """Return _n points (clamped to [4, 10]) laid out on a square around _center."""
    if _n < 4:
        _n = 4
    if _n == 4:
        return [[_center[0] + _r, _center[1] - _r],
                [_center[0] + _r, _center[1] + _r],
                [_center[0] - _r, _center[1] + _r],
                [_center[0] - _r, _center[1] - _r]]
    elif 4 < _n <= 6:
        return [[_center[0] + _r, _center[1] - _r],
                [_center[0] + _r, _center[1]],
                [_center[0] + _r, _center[1] + _r],
                [_center[0] - _r, _center[1] + _r],
                [_center[0] - _r, _center[1]],
                [_center[0] - _r, _center[1] - _r]]
    elif 6 < _n <= 8:
        return [[_center[0] + _r, _center[1] - _r],
                [_center[0] + _r, _center[1]],
                [_center[0] + _r, _center[1] + _r],
                [_center[0], _center[1] + _r],
                [_center[0] - _r, _center[1] + _r],
                [_center[0] - _r, _center[1]],
                [_center[0] - _r, _center[1] - _r],
                [_center[0], _center[1] - _r]]
    elif 8 < _n <= 10:
        return [[_center[0] + _r, _center[1] - _r],
                [_center[0] + _r, _center[1]],
                [_center[0] + _r, _center[1] + _r],
                [_center[0] + _r * 0.5, _center[1] + _r],
                [_center[0] - _r * 0.5, _center[1] + _r],
                [_center[0] - _r, _center[1] + _r],
                [_center[0] - _r, _center[1]],
                [_center[0] - _r, _center[1] - _r],
                [_center[0] - _r * 0.5, _center[1] - _r],
                [_center[0] + _r * 0.5, _center[1] - _r]]
    # NOTE(review): _n > 10 falls through and returns None, like the original.


def getTriangleCoordinates(_center=(0, 0), _r=10, _n=10):
    """Return _n points (clamped to [3, 10]) laid out on a triangle around _center."""
    if _n < 3:
        _n = 3
    if _n == 3:
        return [[_center[0], _center[1] + _r],
                [_center[0] - _r / 2, _center[1] - _r],
                [_center[0] + _r / 2, _center[1] - _r]]
    elif 3 < _n <= 6:
        return [[_center[0], _center[1] + _r],
                [(_center[0] + (_center[0] - _r / 2)) / 2, (_center[1] + _r + _center[1] - _r) / 2],
                [_center[0] - _r / 2, _center[1] - _r],
                [((_center[0] - _r / 2) + (_center[0] + _r / 2)) / 2, (_center[1] - _r + _center[1] - _r) / 2],
                [_center[0] + _r / 2, _center[1] - _r],
                [(_center[0] + (_center[0] + _r / 2)) / 2, (_center[1] + _r + _center[1] - _r) / 2]]
    elif 6 < _n <= 10:
        # BUG FIX: the first point's bracket was never closed, which nested the other
        # nine points inside it and produced a malformed coordinate list.
        return [[_center[0], _center[1] + _r * 1.5],
                [_center[0], _center[1] + _r * 0.75],
                [(_center[0] + (_center[0] - _r / 2)) / 2, (_center[1] + _r + _center[1] - _r) / 2],
                [_center[0], _center[1]],
                [_center[0] - _r, _center[1] - _r],
                [_center[0] - _r / 2, _center[1] - _r],
                [((_center[0] - _r / 2) + (_center[0] + _r / 2)) / 2, (_center[1] - _r + _center[1] - _r) / 2],
                [_center[0] + _r / 2, _center[1] - _r],
                [_center[0] + _r, _center[1] - _r],
                [(_center[0] + (_center[0] + _r / 2)) / 2, (_center[1] + _r + _center[1] - _r) / 2]]


def getCircleCoordinates(_center=(0, 0), _r=10, _n=10):
    """Return _n (at least 4) points evenly spaced on a circle of radius _r around _center."""
    if _n < 4:
        _n = 4
    return [
        [
            _center[0] + (math.cos(2 * math.pi / _n * x) * _r),  # x
            _center[1] + (math.sin(2 * math.pi / _n * x) * _r)   # y
        ] for x in range(0, _n)]


async def sendToCoordinates(bolts, coordinates, CAPTURE):
    """Drive each bolt to its matching coordinate concurrently, one thread per bolt."""
    global CURRENT_COORDINATES
    threads = []

    # Dim all LEDs while moving so colour tracking is not disturbed.
    for bolt in bolts:
        await bolt.setMatrixLED(0, 0, 0)
        await bolt.setFrontLEDColor(0, 0, 0)
        await bolt.setBackLEDColor(0, 0, 0)

    for i in range(len(bolts)):
        if i >= len(coordinates):
            break
        thread = threading.Thread(target=asyncio.run,
                                  args=(sendToCoordinate(bolts[i], coordinates[i], CAPTURE),))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()

    # Restore LEDs once every bolt has arrived.
    for bolt in bolts:
        await bolt.setMatrixLED(bolt.color[0], bolt.color[1], bolt.color[2])
        await bolt.setFrontLEDColor(255, 255, 255)
        await bolt.setBackLEDColor(255, 0, 0)


async def sendToCoordinate(bolt, coordinate, CAPTURE):
    """Track one bolt by colour in the CAPTURE stream and roll it to the coordinate."""
    global CURRENT_COORDINATES
    print(f"[!] Sending bolt {bolt.address} to X: {coordinate[0]}, Y: {coordinate[1]}")

    if CAPTURE is None or not CAPTURE.isOpened():
        print("[Error] Could not open webcam.")
        return

    CURRENT_COORDINATES[bolt.address] = {
        'color': bolt.color,
        'coordinate': coordinate
    }

    correct_coordinate = False
    while CAPTURE.isOpened() and not correct_coordinate:
        ret, main_frame = CAPTURE.read()
        cv2.circle(main_frame, (int(coordinate[0]), int(coordinate[1])), 5, (0, 0, 255), 2)

        hsv_frame = cv2.medianBlur(cv2.cvtColor(main_frame, cv2.COLOR_BGR2HSV), 9)

        lower = np.array(bolt.low_hsv, np.uint8)
        upper = np.array(bolt.high_hsv, np.uint8)
        mask = cv2.inRange(hsv_frame, lower, upper)

        contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(contours) > 0:
            # for pic, contour in enumerate(contours):
            contour = max(contours, key=cv2.contourArea)
            area = cv2.contourArea(contour)
            if area >= 25:
                x, y, w, h = cv2.boundingRect(contour)
                cv2.rectangle(main_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                direction = findDirection([x, y], coordinate)

                # in right position
                if x < coordinate[0] < x + h and y < coordinate[1] < y + h:
                    # to be sure that the bolt gets the command
                    for i in range(10):
                        await bolt.roll(0, 0)
                    correct_coordinate = True
                    CURRENT_COORDINATES.pop(bolt.address, None)
                else:
                    await bolt.roll(35, int(direction))

        cv2.imshow(f"Detection for {bolt.name}, coordinates: {coordinate}", main_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            # BUG FIX: release the capture we are actually reading from (CAPTURE),
            # not the unrelated module-global CAP.
            CAPTURE.release()
            cv2.destroyAllWindows()
house_keeping.py
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import datetime
import signal
import sys
import threading

from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr

from octavia.common import service
from octavia.controller.housekeeping import house_keeping
from octavia import version

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Events used to signal each worker loop to stop on shutdown.
spare_amp_thread_event = threading.Event()
db_cleanup_thread_event = threading.Event()
cert_rotate_thread_event = threading.Event()


def spare_amphora_check():
    """Initiates spare amp check with respect to configured interval."""
    # Read the interval from CONF
    interval = CONF.house_keeping.spare_check_interval
    LOG.info("Spare check interval is set to %d sec", interval)

    spare_amp = house_keeping.SpareAmphora()
    while not spare_amp_thread_event.is_set():
        LOG.debug("Initiating spare amphora check...")
        try:
            spare_amp.spare_check()
        except Exception as e:
            # Keep the loop alive: a single failed check must not kill the thread.
            LOG.debug('spare_amphora caught the following exception and '
                      'is restarting: %s', str(e))
        spare_amp_thread_event.wait(interval)


def db_cleanup():
    """Perform db cleanup for old resources."""
    # Read the interval from CONF
    interval = CONF.house_keeping.cleanup_interval
    LOG.info("DB cleanup interval is set to %d sec", interval)
    LOG.info('Amphora expiry age is %s seconds',
             CONF.house_keeping.amphora_expiry_age)
    LOG.info('Load balancer expiry age is %s seconds',
             CONF.house_keeping.load_balancer_expiry_age)

    # Renamed from 'db_cleanup' to avoid shadowing this function's own name.
    db_cleaner = house_keeping.DatabaseCleanup()
    while not db_cleanup_thread_event.is_set():
        LOG.debug("Initiating the cleanup of old resources...")
        try:
            db_cleaner.delete_old_amphorae()
            db_cleaner.cleanup_load_balancers()
        except Exception as e:
            LOG.debug('db_cleanup caught the following exception and '
                      'is restarting: %s', str(e))
        db_cleanup_thread_event.wait(interval)


def cert_rotation():
    """Perform certificate rotation."""
    interval = CONF.house_keeping.cert_interval
    LOG.info(
        "Expiring certificate check interval is set to %d sec", interval)
    cert_rotate = house_keeping.CertRotation()
    while not cert_rotate_thread_event.is_set():
        LOG.debug("Initiating certification rotation ...")
        try:
            cert_rotate.rotate()
        except Exception as e:
            LOG.debug('cert_rotation caught the following exception and '
                      'is restarting: %s', str(e))
        cert_rotate_thread_event.wait(interval)


def _mutate_config(*args, **kwargs):
    # Typo fix in log message: "recieved" -> "received".
    LOG.info("Housekeeping received HUP signal, mutating config.")
    CONF.mutate_config_files()


def main():
    service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    timestamp = str(datetime.datetime.utcnow())
    LOG.info("Starting house keeping at %s", timestamp)

    threads = []

    # Thread to perform spare amphora check
    spare_amp_thread = threading.Thread(target=spare_amphora_check)
    spare_amp_thread.daemon = True
    spare_amp_thread.start()
    threads.append(spare_amp_thread)

    # Thread to perform db cleanup
    db_cleanup_thread = threading.Thread(target=db_cleanup)
    db_cleanup_thread.daemon = True
    db_cleanup_thread.start()
    threads.append(db_cleanup_thread)

    # Thread to perform certificate rotation
    cert_rotate_thread = threading.Thread(target=cert_rotation)
    cert_rotate_thread.daemon = True
    cert_rotate_thread.start()
    threads.append(cert_rotate_thread)

    def process_cleanup(*args, **kwargs):
        LOG.info("Attempting to gracefully terminate House-Keeping")
        # Signal every loop to stop, then wait for each to finish its iteration.
        spare_amp_thread_event.set()
        db_cleanup_thread_event.set()
        cert_rotate_thread_event.set()
        spare_amp_thread.join()
        db_cleanup_thread.join()
        cert_rotate_thread.join()
        LOG.info("House-Keeping process terminated")

    signal.signal(signal.SIGTERM, process_cleanup)
    signal.signal(signal.SIGHUP, _mutate_config)

    try:
        for thread in threads:
            thread.join()
    except KeyboardInterrupt:
        process_cleanup()
conftest.py
import asyncio
import json
import os
import threading
import time
import typing

import pytest
import trustme
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
    BestAvailableEncryption,
    Encoding,
    PrivateFormat,
    load_pem_private_key,
)
from uvicorn.config import Config
from uvicorn.main import Server

from httpx import URL
from httpx.concurrency.asyncio import AsyncioBackend
from httpx.concurrency.base import lookup_backend

# Variables that could leak host proxy/TLS configuration into the tests.
ENVIRONMENT_VARIABLES = {
    "SSL_CERT_FILE",
    "SSL_CERT_DIR",
    "HTTP_PROXY",
    "HTTPS_PROXY",
    "ALL_PROXY",
    "NO_PROXY",
    "SSLKEYLOGFILE",
}


@pytest.fixture(scope="function", autouse=True)
def clean_environ() -> typing.Dict[str, typing.Any]:
    """Keeps os.environ clean for every test without having to mock os.environ"""
    original_environ = os.environ.copy()
    os.environ.clear()
    # Re-populate everything except the proxy/TLS variables (checked in both
    # upper and lower case), then restore the full environment on teardown.
    os.environ.update(
        {
            k: v
            for k, v in original_environ.items()
            if k not in ENVIRONMENT_VARIABLES
            and k.lower() not in ENVIRONMENT_VARIABLES
        }
    )
    yield
    os.environ.clear()
    os.environ.update(original_environ)


@pytest.fixture(
    params=[
        # pytest uses the marks to set up the specified async environment and run
        # 'async def' test functions. The "auto" backend should then auto-detect
        # the environment it's running in.
        # Passing the backend explicitly, e.g. `backend="asyncio"`,
        # is tested separately.
        pytest.param("auto", marks=pytest.mark.asyncio),
        pytest.param("auto", marks=pytest.mark.trio),
    ]
)
def backend(request):
    """Parametrized concurrency-backend name for the httpx client under test."""
    return request.param


async def app(scope, receive, send):
    """Top-level ASGI test application: dispatch on the request path prefix."""
    assert scope["type"] == "http"
    if scope["path"].startswith("/slow_response"):
        await slow_response(scope, receive, send)
    elif scope["path"].startswith("/premature_close"):
        await premature_close(scope, receive, send)
    elif scope["path"].startswith("/status"):
        await status_code(scope, receive, send)
    elif scope["path"].startswith("/echo_body"):
        await echo_body(scope, receive, send)
    elif scope["path"].startswith("/echo_headers"):
        await echo_headers(scope, receive, send)
    elif scope["path"].startswith("/redirect_301"):
        await redirect_301(scope, receive, send)
    else:
        await hello_world(scope, receive, send)


async def hello_world(scope, receive, send):
    """Plain 200 text response used as the default endpoint."""
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [[b"content-type", b"text/plain"]],
        }
    )
    await send({"type": "http.response.body", "body": b"Hello, world!"})


async def slow_response(scope, receive, send):
    """Respond after a delay taken from the path suffix (milliseconds).

    A non-numeric suffix falls back to a 100 ms delay.
    """
    delay_ms_str: str = scope["path"].replace("/slow_response/", "")
    try:
        delay_ms = float(delay_ms_str)
    except ValueError:
        delay_ms = 100
    await asyncio.sleep(delay_ms / 1000.0)
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [[b"content-type", b"text/plain"]],
        }
    )
    await send({"type": "http.response.body", "body": b"Hello, world!"})


async def premature_close(scope, receive, send):
    """Send response headers but never a body, simulating a dropped response."""
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [[b"content-type", b"text/plain"]],
        }
    )


async def status_code(scope, receive, send):
    """Respond with the status code encoded in the path, e.g. /status/404."""
    status_code = int(scope["path"].replace("/status/", ""))
    await send(
        {
            "type": "http.response.start",
            "status": status_code,
            "headers": [[b"content-type", b"text/plain"]],
        }
    )
    await send({"type": "http.response.body", "body": b"Hello, world!"})


async def echo_body(scope, receive, send):
    """Drain the full request body and echo it back verbatim."""
    body = b""
    more_body = True
    while more_body:
        message = await receive()
        body += message.get("body", b"")
        more_body = message.get("more_body", False)

    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [[b"content-type", b"text/plain"]],
        }
    )
    await send({"type": "http.response.body", "body": body})


async def echo_headers(scope, receive, send):
    """Echo the request headers back as a JSON object (names capitalized)."""
    body = {}
    for name, value in scope.get("headers", []):
        body[name.capitalize().decode()] = value.decode()

    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [[b"content-type", b"application/json"]],
        }
    )
    await send({"type": "http.response.body", "body": json.dumps(body).encode()})


async def redirect_301(scope, receive, send):
    """Issue a permanent redirect to the root path with an empty body."""
    await send(
        {"type": "http.response.start", "status": 301, "headers": [[b"location", b"/"]]}
    )
    await send({"type": "http.response.body"})


# Server fixtures are expensive to start, so they are shared per session.
SERVER_SCOPE = "session"


@pytest.fixture(scope=SERVER_SCOPE)
def cert_authority():
    """Session-wide throwaway certificate authority."""
    return trustme.CA()


@pytest.fixture(scope=SERVER_SCOPE)
def ca_cert_pem_file(cert_authority):
    """Path to the CA certificate in PEM form (temp file)."""
    with cert_authority.cert_pem.tempfile() as tmp:
        yield tmp


@pytest.fixture(scope=SERVER_SCOPE)
def localhost_cert(cert_authority):
    """Leaf certificate for 'localhost' issued by the test CA."""
    return cert_authority.issue_cert("localhost")


@pytest.fixture(scope=SERVER_SCOPE)
def cert_pem_file(localhost_cert):
    """Path to the localhost certificate chain (temp file)."""
    with localhost_cert.cert_chain_pems[0].tempfile() as tmp:
        yield tmp


@pytest.fixture(scope=SERVER_SCOPE)
def cert_private_key_file(localhost_cert):
    """Path to the unencrypted private key (temp file)."""
    with localhost_cert.private_key_pem.tempfile() as tmp:
        yield tmp


@pytest.fixture(scope=SERVER_SCOPE)
def cert_encrypted_private_key_file(localhost_cert):
    # Deserialize the private key and then reserialize with a password
    private_key = load_pem_private_key(
        localhost_cert.private_key_pem.bytes(), password=None, backend=default_backend()
    )
    encrypted_private_key_pem = trustme.Blob(
        private_key.private_bytes(
            Encoding.PEM,
            PrivateFormat.TraditionalOpenSSL,
            BestAvailableEncryption(password=b"password"),
        )
    )
    with encrypted_private_key_pem.tempfile() as tmp:
        yield tmp


class TestServer(Server):
    """Uvicorn server tweaked for use inside the test suite.

    Runs in a background thread, skips signal handlers (which only work in
    the main thread), and supports a cross-thread restart protocol.
    """

    @property
    def url(self) -> URL:
        protocol = "https" if self.config.is_ssl else "http"
        return URL(f"{protocol}://{self.config.host}:{self.config.port}/")

    def install_signal_handlers(self) -> None:
        # Disable the default installation of handlers for signals such as SIGTERM,
        # because it can only be done in the main thread.
        pass

    async def serve(self, sockets=None):
        # Run the real server alongside the restart watcher until both finish.
        self.restart_requested = asyncio.Event()

        loop = asyncio.get_event_loop()
        tasks = {
            loop.create_task(super().serve(sockets=sockets)),
            loop.create_task(self.watch_restarts()),
        }
        await asyncio.wait(tasks)

    async def restart(self) -> None:
        # Ensure we are in an asyncio environment.
        assert asyncio.get_event_loop() is not None

        # This may be called from a different thread than the one the server is
        # running on. For this reason, we use an event to coordinate with the server
        # instead of calling shutdown()/startup() directly.
        self.restart_requested.set()
        self.started = False
        while not self.started:
            await asyncio.sleep(0.5)

    async def watch_restarts(self):
        """Poll for restart requests and cycle shutdown()/startup() on demand."""
        while True:
            if self.should_exit:
                return
            try:
                # Short timeout keeps the should_exit check responsive.
                await asyncio.wait_for(self.restart_requested.wait(), timeout=0.1)
            except asyncio.TimeoutError:
                continue
            self.restart_requested.clear()
            await self.shutdown()
            await self.startup()


@pytest.fixture
def restart(backend):
    """Restart the running server from an async test function.

    This fixture deals with possible differences between the environment of the
    test function and that of the server.
    """
    asyncio_backend = AsyncioBackend()
    backend_implementation = lookup_backend(backend)

    async def restart(server):
        # Hop to a thread so server.restart runs on the server's asyncio loop
        # regardless of which async backend the test itself uses.
        await backend_implementation.run_in_threadpool(
            asyncio_backend.run, server.restart
        )

    return restart


def serve_in_thread(server: Server):
    """Run *server* in a daemon-less background thread for the fixture's life."""
    thread = threading.Thread(target=server.run)
    thread.start()
    try:
        # Busy-wait (1 ms granularity) until the server reports readiness.
        while not server.started:
            time.sleep(1e-3)
        yield server
    finally:
        server.should_exit = True
        thread.join()


@pytest.fixture(scope=SERVER_SCOPE)
def server():
    """Plain HTTP test server."""
    config = Config(app=app, lifespan="off", loop="asyncio")
    server = TestServer(config=config)
    yield from serve_in_thread(server)


@pytest.fixture(scope=SERVER_SCOPE)
def uds_server():
    """HTTP test server bound to a Unix domain socket."""
    uds = "test_server.sock"
    config = Config(app=app, lifespan="off", loop="asyncio", uds=uds)
    server = TestServer(config=config)
    yield from serve_in_thread(server)
    os.remove(uds)


@pytest.fixture(scope=SERVER_SCOPE)
def https_server(cert_pem_file, cert_private_key_file):
    """HTTPS test server on localhost:8001 using the trustme certificates."""
    config = Config(
        app=app,
        lifespan="off",
        ssl_certfile=cert_pem_file,
        ssl_keyfile=cert_private_key_file,
        host="localhost",
        port=8001,
        loop="asyncio",
    )
    server = TestServer(config=config)
    yield from serve_in_thread(server)


@pytest.fixture(scope=SERVER_SCOPE)
def https_uds_server(cert_pem_file, cert_private_key_file):
    """HTTPS test server bound to a Unix domain socket."""
    uds = "https_test_server.sock"
    config = Config(
        app=app,
        lifespan="off",
        ssl_certfile=cert_pem_file,
        ssl_keyfile=cert_private_key_file,
        uds=uds,
        loop="asyncio",
    )
    server = TestServer(config=config)
    yield from serve_in_thread(server)
    os.remove(uds)
cron.py
import multiprocessing
import os
import sys
import traceback

from pysigner.mesosphere_signer import get_configs
from pysigner.mesosphere_signer import TellorSigner


def run_signer(private_key):
    """Start a TellorSigner for one private key.

    Reads signer configuration from the command-line flags (sys.argv[1:])
    and blocks until the signer's run loop exits.
    """
    cfg = get_configs(sys.argv[1:])
    signer = TellorSigner(cfg, private_key)
    signer.run()


if __name__ == "__main__":
    # PRIVATEKEY holds one or more comma-separated private keys.  Default to
    # "" so a missing variable yields a clear message instead of an
    # AttributeError from None.split(","); also strip whitespace and skip
    # empty entries caused by stray commas.
    keys = [k.strip() for k in os.getenv("PRIVATEKEY", "").split(",") if k.strip()]
    if not keys:
        sys.exit("PRIVATEKEY environment variable is not set or is empty")
    for private_key in keys:
        try:
            # One isolated process per key so a crash in one signer does not
            # take down the others.
            multiprocessing.Process(target=run_signer, args=(private_key,)).start()
        except multiprocessing.ProcessError as e:
            tb = str(traceback.format_exc())
            msg = str(e) + "\n" + tb
            print(msg)
workers.py
from multiprocessing import Process, Queue
import multiprocessing
import threading
import time
import uuid
import ctypes
import json
import traceback
import copy
import sys

import jimi


class _threading(threading.Thread):
    """threading.Thread extended with a best-effort kill() via ctypes."""

    def __init__(self, *args, **keywords):
        threading.Thread.__init__(self, *args, **keywords)

    def get_id(self):
        # Look the thread up in the interpreter's private registry to get the
        # native thread ident (returns None if the thread is not registered).
        for id, thread in threading._active.items():
            if thread is self:
                return id

    def kill(self):
        # Asynchronously raise SystemExit inside the target thread.
        # NOTE(review): this only takes effect when the thread next executes
        # Python bytecode; a thread blocked in a C call will not die.
        thread_id = self.get_id()
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
        if res == 0:
            if jimi.logging.debugEnabled:
                jimi.logging.debug("Exception raise failure - invalid thread ID")
        if res > 1:
            # More than one thread affected means we hit the wrong state; undo.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), 0)


class workerHandler:
    """Thread/process pool with a concurrency cap and periodic cleanup."""

    class _worker:
        """One unit of work: wraps a callable in a killable thread (or a
        subprocess supervised by a thread when multiprocessing is requested)."""

        def __init__(self, name, call, args, delete, maxDuration, multiprocessing, raiseException):
            self.name = name
            self.call = call
            self.id = str(uuid.uuid4())
            self.createdTime = int(time.time())
            self.startTime = 0
            self.endTime = 0
            self.duration = 0
            self.result = None
            self.resultException = None
            self.raiseException = raiseException
            # running: None = queued, True = executing, False = finished.
            self.running = None
            self.crash = False
            self.args = args
            self.multiprocessing = multiprocessing
            if not self.multiprocessing:
                self.thread = _threading(target=self.threadCall)
            else:
                self.thread = _threading(target=self.multiprocessingThreadCall)
            self.maxDuration = maxDuration
            self.delete = delete

        def start(self):
            self.thread.start()

        def multiprocessingThreadCall(self):
            """Run the callable in a spawned subprocess, relaying rc/exception
            back through a Queue; the supervising thread enforces maxDuration."""
            self.startTime = int(time.time())
            self.running = True
            if jimi.logging.debugEnabled:
                jimi.logging.debug("Threaded process worker started, workerID={0}".format(self.id))
            Q = Queue()
            p = Process(target=multiprocessingThreadStart, args=(Q,self.call,self.args)) # Taking an entire copy of cache is not effient review bug
            try:
                p.start()
                try:
                    # Queue.get timing out is treated the same as a kill.
                    rc, e = Q.get(timeout=self.maxDuration)
                    p.join(timeout=self.maxDuration)
                except:
                    raise SystemExit
                if rc != 0:
                    self.crash = True
                    # NOTE(review): bare `raise` with no active exception —
                    # presumably intended to re-raise `e`; verify upstream.
                    raise
                # Ensure cache is updated with any new items
                #cache.globalCache.sync(globalCacheObjects)
            except SystemExit as e:
                if self.raiseException:
                    self.crash = True
                    jimi.exceptions.workerKilled(self.id,self.name)
                else:
                    self.resultException = e
            except Exception as e:
                if self.raiseException:
                    self.crash = True
                    jimi.exceptions.workerCrash(self.id,self.name,''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)))
                else:
                    self.resultException = e
            finally:
                # Kill the subprocess if it is still alive (no exitcode yet).
                if p.exitcode == None:
                    p.terminate()
                #Q.close()
            if jimi.logging.debugEnabled:
                jimi.logging.debug("Threaded process worker completed, workerID={0}".format(self.id))
            self.running = False
            self.endTime = int(time.time())
            self.duration = (self.endTime - self.startTime)

        def threadCall(self):
            """Run the callable directly in this thread, recording result or
            exception; SystemExit here means the worker was kill()ed."""
            self.startTime = int(time.time())
            self.running = True
            if jimi.logging.debugEnabled:
                jimi.logging.debug("Threaded worker started, workerID={0}".format(self.id))
            # Handle thread raise exception kill
            try:
                if self.args:
                    self.result = self.call(*self.args)
                else:
                    self.result = self.call()
            except SystemExit as e:
                if self.raiseException:
                    self.crash = True
                    jimi.exceptions.workerKilled(self.id,self.name)
                else:
                    self.resultException = e
            except Exception as e:
                if self.raiseException:
                    self.crash = True
                    jimi.exceptions.workerCrash(self.id,self.name,''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)))
                else:
                    self.resultException = e
            if jimi.logging.debugEnabled:
                jimi.logging.debug("Threaded worker completed, workerID={0}".format(self.id))
            self.running = False
            self.endTime = int(time.time())
            self.duration = (self.endTime - self.startTime)

    def __init__(self,concurrent=15,autoStart=True,cleanUp=True):
        # NOTE(review): autoStart is accepted but never read — the handler
        # thread is always started.
        self.concurrent = concurrent
        self.workerList = []
        self.stopped = False
        self.cleanUp = cleanUp
        self.backlog = False
        self.failures = False
        # Autostarting worker handler thread
        workerThread = self._worker("workerThread",self.handler,None,True,0,False,True)
        workerThread.start()
        self.workerList.append(workerThread)
        self.workerID = workerThread.id

    def handler(self):
        """Scheduler loop: start queued workers up to the concurrency cap,
        clean up finished/overrun workers every ~5s, and throttle the loop."""
        tick = 0
        loops = 0
        underConcurrent = self.concurrent # Used to limit list looping to find active workers
        workersStillWaiting = [] # Cache waiting workers to limit list looping to find waiting workers
        while not self.stopped:
            now = int(time.time())
            self.lastHandle = now
            # Any room to start another worker?
            if underConcurrent < 1:
                activeWorkerCount = len([ x for x in self.workerList if x.running == True ])
                underConcurrent = ( self.concurrent - activeWorkerCount )
            if underConcurrent > 0:
                if len(workersStillWaiting) == 0:
                    workersStillWaiting = [ x for x in self.workerList if x.running == None ]
                if len(workersStillWaiting) > 0:
                    self.backlog = True
                    # Check if number of workersWaiting is above the number of available concurrent threads and select mx available
                    workersWaiting = workersStillWaiting
                    if len(workersWaiting) > underConcurrent:
                        workersWaiting = workersWaiting[0:underConcurrent]
                    # Start all workers possible up to the concurrent limit
                    for workerWaiting in workersWaiting:
                        if jimi.logging.debugEnabled:
                            jimi.logging.debug("Starting threaded worker, workerID={0}".format(workerWaiting.id))
                        workerWaiting.start()
                        underConcurrent-=1
                        del workersStillWaiting[workersStillWaiting.index(workerWaiting)]
                else:
                    self.backlog = False
            else:
                self.backlog = False
            # Execute worker cleanup every 5ish seconds
            if (tick + 5) < now:
                # Any workers need clearning up due to overrun or stopped?
                cleanupWorkers = [ x for x in self.workerList if (x.running == False and x.delete) or (x.startTime > 0 and x.maxDuration > 0 and (now - x.startTime ) > x.maxDuration) ]
                for worker in cleanupWorkers:
                    if worker.running != False:
                        worker.thread.kill()
                    if not self.failures and worker.resultException != None and worker.endTime != 0:
                        self.failures = True
                    if self.cleanUp:
                        # Making sure that only completed workers i.e. endTime!=0 are clearned
                        if worker.resultException == None and worker.endTime != 0 or (( worker.endTime + 60 < now ) and worker.endTime != 0):
                            self.workerList.remove(worker)
                tick = now
            # CPU saver
            loops+=1
            if ((underConcurrent == 0) or (underConcurrent > 0 and len(workersStillWaiting) == 0)):
                loops = 0
                time.sleep(workerSettings["loopT1"])
            elif (loops > workerSettings["loopL"]):
                loops = 0
                time.sleep(workerSettings["loopT"])

    def new(self, name, call, args=None, delete=True, maxDuration=60, multiprocessing=False, raiseException=True):
        """Queue a new worker; the handler loop starts it when capacity allows.
        Returns the worker's id."""
        workerThread = self._worker(name, call, args, delete, maxDuration, multiprocessing, raiseException)
        self.workerList.append(workerThread)
        if jimi.logging.debugEnabled:
            jimi.logging.debug("Created new worker, workerID={0}".format(workerThread.id))
        return workerThread.id

    def get(self, id):
        """Return the worker object for id, or None when not found."""
        worker = [x for x in self.workerList if x.id == id]
        if worker:
            worker = worker[0]
            if jimi.logging.debugEnabled:
                jimi.logging.debug("Got data for worker, workerID={0}".format(id))
            return worker

    def getAll(self):
        """Return a copy of the worker list."""
        result = []
        for worker in self.workerList:
            result.append(worker)
        return result

    def getActive(self):
        """Return all workers currently executing."""
        result = []
        workersRunning = [x for x in self.workerList if x.running == True]
        for worker in workersRunning:
            result.append(worker)
        return result

    def getError(self, id):
        """Return and clear the stored exception for a worker (or None)."""
        result = None
        worker = [x for x in self.workerList if x.id == id]
        if worker:
            worker = worker[0]
            result = worker.resultException
            worker.resultException = None
        return result

    def delete(self, id):
        # NOTE(review): `del worker` only drops the local name; the worker
        # stays in workerList — presumably relies on handler cleanup instead.
        worker = [x for x in self.workerList if x.id == id]
        if worker:
            worker = worker[0]
            if jimi.logging.debugEnabled:
                jimi.logging.debug("Deleted worker, workerID={0}".format(id))
            del worker
        else:
            if jimi.logging.debugEnabled:
                jimi.logging.debug("Unable to locate worker, workerID={0}".format(id))

    def kill(self, id):
        """Raise SystemExit inside the worker's thread (best effort)."""
        worker = [x for x in self.workerList if x.id == id]
        if worker:
            worker = worker[0]
            worker.thread.kill()
            if jimi.logging.debugEnabled:
                jimi.logging.debug("Killed worker, workerID={0}".format(id))
        else:
            if jimi.logging.debugEnabled:
                jimi.logging.debug("Unable to locate worker, workerID={0}".format(id))

    def wait(self, jid):
        """Block (polling at 100 ms) until the given worker finishes.
        NOTE(review): raises IndexError when jid is unknown; the log call
        formats the builtin `id`, not jid."""
        worker = [x for x in self.workerList if x.id == jid][0]
        if jimi.logging.debugEnabled:
            jimi.logging.debug("Waiting for worker, workerID={0}".format(id))
        if worker:
            while (worker.running != False ):
                time.sleep(0.1)

    def waitAll(self):
        """Block until the queue is empty and no worker is running."""
        while (self.queue() > 0 or len(self.active()) > 0):
            time.sleep(0.1)

    def activeCount(self):
        """Number of running workers, excluding the handler thread itself."""
        workersRunning = [x for x in self.workerList if x.id != self.workerID and x.running == True]
        return len(workersRunning)

    def failureCount(self):
        """Number of crashed workers, excluding the handler thread itself."""
        crashedWorkers = [x for x in self.workerList if x.id != self.workerID and x.crash == True]
        return len(crashedWorkers)

    def active(self):
        """Names of running workers, excluding the handler thread itself."""
        result = []
        workersRunning = [x for x in self.workerList if x.id != self.workerID and x.running == True]
        for workerRunning in workersRunning:
            result.append(workerRunning.name)
        return result

    def count(self):
        return len(self.workerList)

    def countIncomplete(self):
        """Workers queued or running (handler thread excluded)."""
        return len([x for x in self.workerList if x.id != self.workerID and (x.running == True or x.running == None) ])

    def queue(self):
        """Number of workers still waiting to be started."""
        workersWaiting = [x for x in self.workerList if x.running == None]
        return len(workersWaiting)

    def stop(self):
        """Stop the handler loop, then kill and delete all remaining workers."""
        self.stopped = True
        # Waiting 1 second for handler to finsh gracefuly otherwise force by systemExit
        time.sleep(1)
        for runningJob in self.getActive():
            self.kill(runningJob.id)
        for job in self.getAll():
            self.delete(job.id)

    # API Calls
    def api_get(self,id=None,action=None):
        """Serialize workers (all / by id / active) for the REST API."""
        result = { "results" : []}
        if not id and not action:
            workers = self.getAll()
        elif id and not action:
            workers = [self.get(id)]
        elif not id and action == "active":
            workers = self.getActive()
        for worker in workers:
            if worker:
                result["results"].append({ "id" : worker.id, "name": worker.name, "startTime" : worker.startTime, "createdTime" : worker.createdTime })
        return result

    def api_delete(self,id=None):
        """Kill one worker by id, or every worker when id is omitted."""
        if not id:
            workers = self.getAll()
        else:
            workers = [self.get(id)]
        for worker in workers:
            worker.thread.kill()
        return { "result" : True }


from system.models import trigger as systemTrigger

workerSettings = jimi.settings.config["workers"]

# Spawn (not fork) so subprocesses start from a clean interpreter state.
multiprocessing.set_start_method("spawn",force=True)


def start():
    """(Re)create the module-global workerHandler instance.

    Kills the previous handler thread if one exists; returns True.
    """
    global workers
    # Creating instance of workers
    try:
        if workers:
            workers.kill(workers.workerID)
            # NOTE(review): `logging` here is unqualified (elsewhere it is
            # jimi.logging) — would NameError if debug were enabled; verify.
            if logging.debugEnabled:
                logging.debug("Workers start requested, Existing thread kill attempted, workerID='{0}'".format(workers.workerID),6)
            workers = None
    except NameError:
        pass
    workers = workerHandler(workerSettings["concurrent"])
    if jimi.logging.debugEnabled:
        jimi.logging.debug("Workers started, workerID='{0}'".format(workers.workerID),6)
    return True


def multiprocessingThreadStart(Q,threadCall,args):
    """Subprocess entry point: run the callable and report (rc, error) on Q."""
    #cache.globalCache.sync(globalCache)
    rc = 0
    error = None
    try:
        threadCall(*args)
    except Exception as e:
        error = e
        rc = 1
    Q.put((rc,error))


######### --------- API --------- #########
if jimi.api.webServer:
    if not jimi.api.webServer.got_first_request:
        if jimi.api.webServer.name == "jimi_core":
            @jimi.api.webServer.route(jimi.api.base+"workers/", methods=["GET"])
            @jimi.auth.adminEndpoint
            def getWorkers():
                # List all workers; 404 when there are none.
                result = workers.api_get()
                if result["results"]:
                    return result, 200
                else:
                    return {}, 404

            @jimi.api.webServer.route(jimi.api.base+"workers/", methods=["DELETE"])
            @jimi.auth.adminEndpoint
            def deleteWorkers():
                # Kill every worker.
                result = workers.api_delete()
                if result["result"]:
                    return result, 200
                else:
                    return {}, 404

            @jimi.api.webServer.route(jimi.api.base+"workers/", methods=["POST"])
            @jimi.auth.adminEndpoint
            def updateWorkers():
                # action=start restarts the pool; action=settings updates
                # concurrency/loop-throttle settings in place.
                data = json.loads(jimi.api.request.data)
                if data["action"] == "start":
                    result = start()
                    return { "result" : result }, 200
                elif data["action"] == "settings":
                    if "concurrent" in data:
                        workerSettings["concurrent"] = int(data["concurrent"])
                        workers.concurrent = workerSettings["concurrent"]
                    if "loopT" in data:
                        workerSettings["loopT"] = float(data["loopT"])
                    if "loopL" in data:
                        workerSettings["loopL"] = float(data["loopL"])
                    return { }, 200
                else:
                    return { }, 404

            @jimi.api.webServer.route(jimi.api.base+"workers/<workerID>/", methods=["GET"])
            @jimi.auth.adminEndpoint
            def getWorker(workerID):
                # workerID "0" is a sentinel for the handler thread itself and
                # additionally reports its liveness (lastHandle).
                if workerID == "0":
                    result = workers.api_get(workers.workerID)
                    result["results"][0]["lastHandle"] = workers.lastHandle
                    result["results"][0]["workerID"] = workers.workerID
                else:
                    result = workers.api_get(workerID)
                if result["results"]:
                    return result, 200
                else:
                    return {}, 404

            @jimi.api.webServer.route(jimi.api.base+"workers/<workerID>/", methods=["DELETE"])
            @jimi.auth.adminEndpoint
            def deleteWorker(workerID):
                # Kill a single worker by id.
                result = workers.api_delete(workerID)
                if result["result"]:
                    return result, 200
                else:
                    return {}, 404

            @jimi.api.webServer.route(jimi.api.base+"workers/stats/", methods=["GET"])
            @jimi.auth.adminEndpoint
            def getWorkerStats():
                # Snapshot of pool activity for monitoring.
                result = {}
                result["results"] = []
                result["results"].append({ "activeCount" : workers.activeCount(), "queueLength" : workers.queue(), "workers" : workers.active() })
                return result, 200

            @jimi.api.webServer.route(jimi.api.base+"workers/settings/", methods=["GET"])
            @jimi.auth.adminEndpoint
            def getWorkerSettings():
                # Expose the current worker settings block.
                result = {}
                result["results"] = []
                result["results"].append(workerSettings)
                return result, 200
proxy.py
# Python 2 script: a TCP proxy that delays each new incoming connection by a
# per-proxy value read from a PostgreSQL table, then forwards traffic both ways.
import socket
import SocketServer
import threading
import select
import sqlalchemy
import time
import errno
from time import sleep
import sys
from sqlalchemy import create_engine, Table, Column, Index, Integer, String, ForeignKey
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, TEXT, TIMESTAMP, BIGINT
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from socket import error as SocketError
from sqlalchemy.engine import reflection

# CLI: <listen port> <destination host> <destination port> <proxy row id>
PORT_ = sys.argv[1]
DESTINATION_ = sys.argv[2]
DESTINATION_PORT_ = sys.argv[3]
PROXY_ID_ = sys.argv[4]

print "PORT_:" + PORT_
print "DESTINATION_:" + DESTINATION_
print "DESTINATION_PORT_:" + DESTINATION_PORT_
print "PROXY_ID_:" + PROXY_ID_

db_engine = create_engine(
    'postgresql://postgres:postgres@localhost:5432/postgres')
db_connection = db_engine.connect()
# Reflect the existing "jltc" schema so jltc.proxy can be queried below.
meta = sqlalchemy.MetaData(bind=db_connection, reflect=True, schema="jltc")
insp = reflection.Inspector.from_engine(db_engine)
Session = sessionmaker(bind=db_engine)
db_session = Session()

# Create delay_table on first run only.
if not db_engine.dialect.has_table(db_engine.connect(), "delay_table"):
    delay_table = Table(
        'delay_table', meta,
        Column('value', DOUBLE_PRECISION),
    )
    meta.create_all(db_connection)

proxy = meta.tables['jltc.proxy']


def get_delay(proxy_id):
    # Fetch this proxy's configured delay (seconds) from jltc.proxy.
    statement = sqlalchemy.sql.select(
        [proxy.c.delay]).where(proxy.c.id == proxy_id)
    x = execute_statement(statement, False)[0][0]
    return float(x)


def execute_statement(statement, with_header):
    # Run a SQLAlchemy statement and return rows as lists of values,
    # optionally prefixed with a header row of field names.
    #log.debug("Executing SQL-query: " + str(statement))
    q = db_engine.execute(statement)
    output = []
    fieldnames = []
    for fieldname in q.keys():
        fieldnames.append(fieldname)
    if with_header:
        output.append(fieldnames)
    for row in q.fetchall():
        values = []
        for fieldname in fieldnames:
            values.append(row[fieldname])
        output.append(values)
    return output


current_incomings = []
current_forwarders = []
BUFFER_SIZE = 4096


class Forwarder(threading.Thread):
    """Destination-side half of a proxied connection: reads from the
    destination socket and pushes the data back to the incoming handler."""

    def __init__(self, source):
        threading.Thread.__init__(self)
        self._stop = threading.Event()
        self.source = source
        self.destination = \
            socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.destination.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.destination.connect((DESTINATION_, int(DESTINATION_PORT_)))
        self.connection_string = str(self.destination.getpeername())
        print "[+] New forwarder: " + self.connection_string
        #current_forwarders.append(self.connection_string)
        #print current_forwarders

    def run(self):
        try:
            while 1:
                # Block until the destination has data for us.
                r, _, _ = select.select(
                    [self.destination],
                    [],
                    [],
                )
                if r:
                    data = self.destination.recv(BUFFER_SIZE)
                    if len(data) == BUFFER_SIZE:
                        print "[<] Trying to get data from destination"
                        # A full buffer may mean more is queued; drain it with
                        # non-blocking reads until recv raises.
                        while 1:
                            try:
                                data += self.destination.recv(
                                    BUFFER_SIZE, socket.MSG_DONTWAIT)
                            except:
                                break
                    if data == "":
                        # Empty read = destination closed; tear down our side.
                        self.close_connection()
                        break
                    print "[<] Received from destination: " + str(len(data))
                    self.source.write_to_source(data)
        except SocketError as e:
            # Connection resets are expected; anything else is re-raised.
            if e.errno != errno.ECONNRESET:
                raise
            pass
        #self.source.request.shutdown(socket.SHUT_RDWR)
        print "[-] Closed destination"

    def write_to_dest(self, data):
        # Forward client data to the destination once it is writable.
        print "[>] Sending to destination"
        _, w, _ = select.select(
            [],
            [self.destination],
            [],
        )
        if w:
            self.destination.send(data)
            print "[>] Data was sent to destination: " + str(len(data))

    def close_connection(self):
        # Shut down the client-facing socket; ignore already-closed errors.
        try:
            self.source.request.shutdown(socket.SHUT_RDWR)
        except socket.error:
            pass
        #self.source.request.close()


class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
    """Client-side half: applies the configured delay, spawns a Forwarder,
    then pumps client data to the destination."""

    def handle(self):
        # Look up and apply the artificial delay before proxying starts.
        delay = get_delay(PROXY_ID_)
        print "[**] Delay: " + str(delay)
        time.sleep(delay)
        self.connection_string = str(self.request.getpeername())
        self.request.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        print "[+] Incoming connection:" + str(self.connection_string)
        #current_incomings.append(self.connection_string)
        #print current_incomings
        f = Forwarder(self)
        f.start()
        try:
            while 1:
                r, _, _ = select.select(
                    [self.request],
                    [],
                    [],
                )
                if r:
                    print "[>] Trying to get data from incoming connection"
                    data = self.request.recv(BUFFER_SIZE)
                    if (len(data) == BUFFER_SIZE):
                        # Drain any queued remainder with non-blocking reads.
                        while 1:
                            try:
                                #error means no more data
                                data += self.request.recv(BUFFER_SIZE, socket.MSG_DONTWAIT)
                            except:
                                break
                    f.write_to_dest(data)
                    if data == "":
                        # Empty read = client closed the connection.
                        #f.close_connection()
                        break
                    print "[>] Data from incoming connection: " + str(
                        len(data))
                print "[>] Data from incoming connection is not ready"
        except SocketError as e:
            if e.errno != errno.ECONNRESET:
                raise
            pass
        print "[-] Close incoming connection"

    def write_to_source(self, data):
        # Called by the Forwarder to relay destination data back to the client.
        print "[<] Sending to incoming connect"
        _, w, _ = select.select(
            [],
            [self.request],
            [],
        )
        if w:
            self.request.send(data)
            print "[<] Data was sent to incoming connect: " + str(len(data))


class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # One handler thread per incoming connection.
    pass


if __name__ == "__main__":
    HOST, PORT = "", PORT_
    server = ThreadedTCPServer((HOST, int(PORT)), ThreadedTCPRequestHandler)
    ip, port = server.server_address
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    print "[*] Starting proxy on port: ", port
    try:
        # Idle until interrupted; the daemon thread serves connections.
        while True:
            sleep(1)
    except:
        pass
    print "[*] Stopping proxy..."
    server.shutdown()
keepkey.py
from binascii import hexlify, unhexlify
import traceback
import sys

from electrum_axe.util import bfh, bh2u, UserCancelled
from electrum_axe.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
                                  TYPE_ADDRESS, TYPE_SCRIPT)
from electrum_axe import constants
from electrum_axe.i18n import _
from electrum_axe.plugin import BasePlugin
from electrum_axe.transaction import deserialize, Transaction
from electrum_axe.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_axe.wallet import Standard_Wallet
from electrum_axe.base_wizard import ScriptTypeNotSupported

from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data

# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)


class KeepKey_KeyStore(Hardware_KeyStore):
    """Keystore backed by a KeepKey hardware device."""
    hw_type = 'keepkey'
    device = 'KeepKey'

    def get_derivation(self):
        # BIP32 derivation prefix of this keystore.
        return self.derivation

    def get_client(self, force_pair=True):
        # Delegate to the plugin, which owns device pairing.
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # KeepKey does not expose message decryption through this plugin.
        raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign `message` with the key at derivation `sequence` (change, index)."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Collect previous txs and xpub paths, then hand signing to the plugin."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            if txin.get('prev_tx') is None:
                raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()

        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)


class KeepKeyPlugin(HW_PluginBase):
    # Derived classes provide:
    #
    #  class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, ckd_public, types, HidTransport

    firmware_URL = 'https://www.keepkey.com'
    libraries_URL = 'https://github.com/keepkey/python-keepkey'
    minimum_firmware = (1, 0, 0)
    keystore_class = KeepKey_KeyStore
    SUPPORTED_XTYPES = ('standard', )

    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        HW_PluginBase.__init__(self, parent, config, name)
        # Import the optional keepkeylib dependency lazily; the plugin stays
        # loadable (but disabled) when the library is missing.
        try:
            from . import client
            import keepkeylib
            import keepkeylib.ckd_public
            import keepkeylib.transport_hid
            self.client_class = client.KeepKeyClient
            self.ckd_public = keepkeylib.ckd_public
            self.types = keepkeylib.client.types
            self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
            self.device_manager().register_devices(self.DEVICE_IDS)
            self.libraries_available = True
        except ImportError:
            self.libraries_available = False

    def hid_transport(self, pair):
        """Open a HID transport for the given (path, path) pair."""
        from keepkeylib.transport_hid import HidTransport
        return HidTransport(pair)

    def _try_hid(self, device):
        """Attempt a USB/HID connection; return the transport or None."""
        self.print_error("Trying to connect over USB...")
        if device.interface_number == 1:
            pair = [None, device.path]
        else:
            pair = [device.path, None]

        try:
            return self.hid_transport(pair)
        except BaseException as e:
            # see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
            # raise
            self.print_error("cannot connect at", device.path, str(e))
            return None

    def create_client(self, device, handler):
        """Build and sanity-check a client for `device`; None on failure."""
        transport = self._try_hid(device)
        if not transport:
            self.print_error("cannot connect to device")
            return

        self.print_error("connected to device at", device.path)

        client = self.client_class(transport, handler, self)

        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.print_error("ping failed", str(e))
            return None

        # Reject devices running firmware older than minimum_firmware.
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.print_error(msg)
            if handler:
                handler.show_error(msg)
            else:
                raise Exception(msg)
            return None

        return client

    def get_client(self, keystore, force_pair=True):
        # returns the client for a given keystore. can use xpub
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        if client:
            client.used()
        return client

    def get_coin_name(self):
        # Coin name passed to the device firmware.
        return "AxeTestnet" if constants.net.TESTNET else "Axe"

    def initialize_device(self, device_id, wizard, handler):
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
        ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]
        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            # Run the (slow, device-blocking) initialization off the GUI thread.
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)

    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Wrapper around _initialize_device that reports errors to the handler
        and always exits the wizard event loop with a status code."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            traceback.print_exc(file=sys.stderr)
            handler.show_error(str(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)

    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Dispatch the chosen TIM_* initialization method to the device."""
        item, label, pin_protection, passphrase_protection = settings

        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)

        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                       pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)

    def setup_device(self, device_info, wizard, purpose):
        """Prepare a device for use by the wizard, initializing it if needed."""
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise Exception(_('Failed to create a client for this device.') + '\n' +
                            _('Make sure it is in the correct state.'))
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        client.get_xpub('m', 'standard')
        client.used()

    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Return the xpub at `derivation`; rejects unsupported script types."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Sign `tx` on the device and write the signatures back into it."""
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True)
        outputs = self.tx_outputs(keystore.get_derivation(), tx)
        signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
        # Append SIGHASH_ALL ('01') to each DER signature.
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)

    def show_address(self, wallet, address, keystore=None):
        """Display `address` on the device screen for visual verification."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        if type(wallet) is not Standard_Wallet:
            keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
            return
        client = self.get_client(wallet.keystore)
        if not client.atleast_version(1, 3):
            wallet.keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = wallet.keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        script_type = self.types.SPENDADDRESS
        client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)

    def tx_inputs(self, tx, for_sig=False):
        """Convert electrum inputs to device TxInputType protobufs.

        With for_sig=True the input carries full key-path / multisig data so
        the device can sign; otherwise only the outpoint is filled in.
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = b"\x00"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype.address_n.extend(xpub_n + s)
                        txinputtype.script_type = self.types.SPENDADDRESS
                    else:
                        def f(x_pubkey):
                            # Map each x_pubkey to an HD node + suffix path.
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                            else:
                                xpub = xpub_from_pubkey(0, bfh(x_pubkey))
                                s = []
                            node = self.ckd_public.deserialize(xpub)
                            return self.types.HDNodePathType(node=node, address_n=s)
                        pubkeys = map(f, x_pubkeys)
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
                            m=txin.get('num_sig'),
                        )
                        script_type = self.types.SPENDMULTISIG
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype.address_n.extend(xpub_n + s)
                                    break

                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']

            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index

            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig

            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)

            inputs.append(txinputtype)

        return inputs

    def tx_outputs(self, derivation, tx):
        """Convert electrum outputs to device TxOutputType protobufs.

        At most one output (preferably on the change branch) is described by
        its derivation path so the device can suppress it in the confirmation.
        """

        def create_output_by_derivation():
            # Output belonging to this wallet: identify it by key path.
            if len(xpubs) == 1:
                script_type = self.types.PAYTOADDRESS
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                script_type = self.types.PAYTOMULTISIG
                address_n = self.client_class.expand_path("/%d/%d" % index)
                nodes = map(self.ckd_public.deserialize, xpubs)
                pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            # External output: identify it by address or OP_RETURN payload.
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
            elif _type == TYPE_ADDRESS:
                addrtype, hash_160 = b58_address_to_hash160(address)
                if addrtype == constants.net.ADDRTYPE_P2PKH:
                    txoutputtype.script_type = self.types.PAYTOADDRESS
                elif addrtype == constants.net.ADDRTYPE_P2SH:
                    txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
                else:
                    raise Exception('addrtype: ' + str(addrtype))
                txoutputtype.address = address
            return txoutputtype

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)

        for o in tx.outputs():
            _type, address, amount = o.type, o.address, o.value
            use_create_by_derivation = False

            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True

            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)

        return outputs

    def electrum_tx_to_txtype(self, tx):
        """Convert an electrum Transaction into a device TransactionType."""
        t = self.types.TransactionType()
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t.inputs.extend(inputs)
        for vout in d['outputs']:
            o = t.bin_outputs.add()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t

    # This function is called from the TREZOR libraries (via tx_api)
    def get_tx(self, tx_hash):
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
utility.py
import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

import numpy as np
import imageio

import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs


class timer():
    """Simple stopwatch with an accumulator.

    tic()/toc() measure a single interval; hold()/release() accumulate
    several intervals into one total.
    """

    def __init__(self):
        self.acc = 0
        self.tic()

    def tic(self):
        """Restart the interval clock."""
        self.t0 = time.time()

    def toc(self, restart=False):
        """Return seconds since the last tic(); optionally restart the clock."""
        diff = time.time() - self.t0
        if restart: self.t0 = time.time()
        return diff

    def hold(self):
        """Add the current interval to the accumulator."""
        self.acc += self.toc()

    def release(self):
        """Return the accumulated time and reset the accumulator."""
        ret = self.acc
        self.acc = 0
        return ret

    def reset(self):
        """Reset the accumulator without touching the interval clock."""
        self.acc = 0


class checkpoint():
    """Experiment directory manager: PSNR log, plots, and background image saving."""

    def __init__(self, args):
        self.args = args
        self.ok = True
        self.log = torch.Tensor()
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')

        # Either start a fresh experiment dir (args.save) or resume from an
        # existing one (args.load, reusing the stored PSNR log).
        if not args.load:
            if not args.save:
                args.save = now
            self.dir = os.path.join('experiment', args.save)
        else:
            self.dir = os.path.join('experiment', args.load)
            if os.path.exists(self.dir):
                self.log = torch.load(self.get_path('psnr_log.pt'))
                print('Continue from epoch {}...'.format(len(self.log)))
            else:
                args.load = ''

        if args.reset:
            os.system('rm -rf ' + self.dir)
            args.load = ''

        # os.makedirs(self.dir, exist_ok=True)
        # os.makedirs(self.get_path('model'), exist_ok=True)
        # for d in args.data_test:
        #     os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)

        # open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
        # self.log_file = open(self.get_path('log.txt'), open_type)
        #with open(self.get_path('config.txt'), open_type) as f:
        #    f.write(now + '\n\n')
        #    for arg in vars(args):
        #        f.write('{}: {}\n'.format(arg, getattr(args, arg)))
        #    f.write('\n')

        # Number of background image-writer processes.
        self.n_processes = 8

    def get_path(self, *subdir):
        """Join `subdir` components under the experiment directory."""
        return os.path.join(self.dir, *subdir)

    def save(self, trainer, epoch, is_best=False):
        """Persist model, loss, optimizer, PSNR log and plots for `epoch`."""
        trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
        trainer.loss.save(self.dir)
        trainer.loss.plot_loss(self.dir, epoch)

        self.plot_psnr(epoch)
        trainer.optimizer.save(self.dir)
        torch.save(self.log, self.get_path('psnr_log.pt'))

    def add_log(self, log):
        """Append one epoch's PSNR entries to the running log tensor."""
        self.log = torch.cat([self.log, log])

    # def write_log(self, log, refresh=False):
    #     print(log)
    #     # self.log_file.write(log + '\n')
    #     if refresh:
    #         self.log_file.close()
    #         self.log_file = open(self.get_path('log.txt'), 'a')

    def done(self):
        print()
        # self.log_file.close()

    def plot_psnr(self, epoch):
        """Save one PSNR-vs-epoch PDF per test dataset."""
        axis = np.linspace(1, epoch, epoch)
        for idx_data, d in enumerate(self.args.data_test):
            label = 'SR on {}'.format(d)
            fig = plt.figure()
            plt.title(label)
            for idx_scale, scale in enumerate(self.args.scale):
                plt.plot(
                    axis,
                    self.log[:, idx_data, idx_scale].numpy(),
                    label='Scale {}'.format(scale)
                )
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('PSNR')
            plt.grid(True)
            plt.savefig(self.get_path('test_{}.pdf'.format(d)))
            plt.close(fig)

    def begin_background(self):
        """Start worker processes that write queued images to disk."""
        self.queue = Queue()

        def bg_target(queue):
            # Block on the queue instead of busy-polling queue.empty()
            # (the old spin loop burned a full core per worker while idle).
            # A (None, None) sentinel tells the worker to exit.
            while True:
                filename, tensor = queue.get()
                if filename is None:
                    break
                imageio.imwrite(filename, tensor.numpy())

        self.process = [
            Process(target=bg_target, args=(self.queue,)) \
            for _ in range(self.n_processes)
        ]

        for p in self.process: p.start()

    def end_background(self):
        """Send one sentinel per worker, then wait for all of them to exit."""
        for _ in range(self.n_processes): self.queue.put((None, None))
        # Workers block on queue.get(), so joining is sufficient; no need to
        # poll queue.empty() in a sleep loop.
        for p in self.process: p.join()

    def save_results(self, dataset, filename, save_list):
        """Queue the tensors in `save_list` for background saving as PNG."""
        if self.args.save_results:
            '''
            filename = self.get_path(
                'results-{}'.format(dataset.dataset.name),
                '{}'.format(filename)
            )
            '''
            save_dir = self.args.save_dir
            # if not os.path.exists(save_dir):
            #     os.mkdir(save_dir)
            filename = self.get_path(save_dir, '{}'.format(filename))
            postfix = ('SR', 'LR', 'HR')
            for v, p in zip(save_list, postfix):
                normalized = v[0].mul(255 / self.args.rgb_range)
                tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                # NOTE(review): `p` (SR/LR/HR) is not used in the filename, so
                # passing more than one tensor in save_list overwrites the same
                # file — confirm callers only ever pass a single image here.
                self.queue.put(('{}.png'.format(filename), tensor_cpu))


def quantize(img, rgb_range):
    """Clamp and round `img` to the 255-level grid of `rgb_range`."""
    pixel_range = 255 / rgb_range
    return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)


def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
    """Return PSNR (dB) between `sr` and `hr` on a luminance approximation.

    NOTE(review): `scale` and `dataset` are accepted for API compatibility
    but unused here (no border shaving) — confirm this is intended.
    """
    if hr.nelement() == 1: return 0

    diff = (sr - hr) / rgb_range
    gray_coeffs = [65.738, 129.057, 25.064]
    convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
    diff = diff.mul(convert).sum(dim=1)
    mse = diff.pow(2).mean()
    if mse == 0:
        # Identical images previously raised "math domain error" in log10(0).
        return float('inf')

    return -10 * math.log10(mse)


def make_optimizer(args, target):
    '''
        make optimizer and scheduler together
    '''
    # optimizer
    trainable = filter(lambda x: x.requires_grad, target.parameters())
    kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}

    if args.optimizer == 'SGD':
        optimizer_class = optim.SGD
        kwargs_optimizer['momentum'] = args.momentum
    elif args.optimizer == 'ADAM':
        optimizer_class = optim.Adam
        kwargs_optimizer['betas'] = args.betas
        kwargs_optimizer['eps'] = args.epsilon
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
        kwargs_optimizer['eps'] = args.epsilon

    # scheduler
    milestones = list(map(lambda x: int(x), args.decay.split('-')))
    kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
    scheduler_class = lrs.MultiStepLR

    class CustomOptimizer(optimizer_class):
        """Optimizer subclass that carries its own scheduler and save/load."""

        def __init__(self, *args, **kwargs):
            super(CustomOptimizer, self).__init__(*args, **kwargs)

        def _register_scheduler(self, scheduler_class, **kwargs):
            self.scheduler = scheduler_class(self, **kwargs)

        def save(self, save_dir):
            torch.save(self.state_dict(), self.get_dir(save_dir))

        def load(self, load_dir, epoch=1):
            self.load_state_dict(torch.load(self.get_dir(load_dir)))
            # Replay scheduler steps to restore the learning-rate schedule.
            if epoch > 1:
                for _ in range(epoch): self.scheduler.step()

        def get_dir(self, dir_path):
            return os.path.join(dir_path, 'optimizer.pt')

        def schedule(self):
            self.scheduler.step()

        def get_lr(self):
            return self.scheduler.get_lr()[0]

        def get_last_epoch(self):
            return self.scheduler.last_epoch

    optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
    optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
    return optimizer
multiprocess.py
import concurrent
import math
import threading
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Pool, cpu_count, Process

import src.repository.global_params as global_params
from src.library.logger import logger
from src.library.shell import run_system_command_with_res


class MultiProcess(object):
    """Thin wrapper around multiprocessing.Pool.starmap_async.

    Usage: add_func(...); add_params(...); deal(); wait().
    """

    def __init__(self, work_num: int = 0):
        # Default to one worker per CPU core.
        if not work_num:
            work_num = cpu_count()
        self.work_num = work_num
        self.pool = Pool(self.work_num)
        self.params = []
        self.func = None
        self.res = None

    def add_params(self, params):
        """Set the argument tuples, one per task."""
        self.params = params

    def add_func(self, func):
        """Set the callable the pool will execute."""
        self.func = func

    def deal(self):
        """Dispatch func over params asynchronously."""
        logger.info("generate {} worker pool for {}".format(self.work_num, self.func))
        self.res = self.pool.starmap_async(self.func, self.params)

    def wait(self):
        """Block until all tasks finish, then shut the pool down."""
        logger.info("wait process finish")
        if self.res:
            self.res.get()
        if self.pool:
            self.pool.close()
            # join() after close() so worker processes are reaped rather
            # than left behind as zombies (previously only close() was called).
            self.pool.join()


def multiprocess_deal(func, deal_list, work_num: int = 0):
    """Run func over argument tuples (starmap) and return the results in order."""
    # Guard the empty case: min(work_num, len(deal_list), 80) would otherwise
    # produce Pool(0), which raises ValueError.
    if not deal_list:
        return []
    if not work_num:
        work_num = cpu_count()
    work_num = min(work_num, len(deal_list), 80)
    logger.info("generate {} worker pool for {}".format(work_num, func))
    pool = Pool(work_num)
    res = pool.starmap(func, deal_list)
    pool.close()
    pool.join()
    return res


def multiprocess_run(func, deal_list, work_num: int = 0):
    """Run func over single arguments (map) and return the results in order."""
    if not work_num:
        work_num = cpu_count()
    work_num = min(work_num, 80)
    logger.info("generate {} worker pool for {}".format(work_num, func))
    pool = Pool(work_num)
    res = pool.map(func, deal_list)
    pool.close()
    pool.join()
    return res


def chunk(data_list: list, chunk_num):
    """Split data_list into at most chunk_num contiguous slices.

    May return fewer than chunk_num slices (a single slice when the list is
    shorter than chunk_num).
    """
    item_num = len(data_list)
    if item_num <= chunk_num:
        return [data_list]
    step = int(math.ceil(item_num / chunk_num))
    res = []
    if step <= 0:
        return res
    for i in range(0, item_num, step):
        res.append(data_list[i:i + step])
    return res


def multiprocess_exe(func, deal_list, work_num: int = 0):
    """Split deal_list into chunks and run func(chunk) in one Process each.

    Fix: iterate over the chunks actually produced — chunk() may return fewer
    than work_num pieces, and the old `deal_list[i] for i in range(work_num)`
    indexing raised IndexError in that case.
    """
    if not work_num:
        work_num = cpu_count()
    chunks = chunk(deal_list, work_num)
    logger.info("generate {} worker pool for {}".format(len(chunks), func))
    process_list = [Process(target=func, args=(part,)) for part in chunks]
    for process in process_list:
        process.start()
    for process in process_list:
        process.join()


def get_process_num() -> int:
    """Configured worker count, capped at the machine's CPU count."""
    process_num = global_params.get("process_num", cpu_count())
    process_num = int(process_num)
    return min(process_num, cpu_count())


def get_gpu_num() -> int:
    """Usable GPU count: min(configured GPUs, GPUs reported by nvidia-smi)."""
    gpu_config_num = len(global_params.get("gpu_config", "0 1 2 3 4 5 6 7").split(" "))
    gpu_num = gpu_config_num
    try:
        _, online_num = run_system_command_with_res("nvidia-smi -L |wc -l")
        gpu_num = int(online_num)
    except Exception as ex:
        # Fall back to the configured count when nvidia-smi is unavailable.
        logger.error('get nvidia-smi num error: {}'.format(ex))
    return min(gpu_config_num, gpu_num)


def multithread_run(func, deal_list, work_num: int = 0, max_execute_time=10):
    """Run func(*params) for each params in deal_list on a thread pool.

    Results are collected in completion order (not input order); tasks that
    raise are logged and omitted from the result list.
    """
    if not work_num:
        work_num = cpu_count()
    work_num = min(work_num, 200)
    logger.info("generate {} thread worker pool for {}".format(work_num, func))
    res = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=work_num) as executor:
        thread_tasks = {executor.submit(func, *params): params for params in deal_list}
        for task in concurrent.futures.as_completed(thread_tasks):
            try:
                data = task.result(timeout=max_execute_time)
                res.append(data)
            except Exception as exc:
                logger.error('generated an exception: {}'.format(exc))
    return res


class Thread(threading.Thread):
    """threading.Thread variant that exposes the target's return value."""

    def __init__(self, target, *args):
        super().__init__()
        self._target = target
        self._args = args
        self._result = None

    def run(self):
        self._result = self._target(*self._args)

    def get_result(self):
        """Return the target's result (None until the thread has run)."""
        return self._result
server.py
import socket
import utils
import threading

TCP_IP, TCP_PORT = utils.request_socket_info()
BUFFER_SIZE = 1024
MAX_CLIENT_COUNT = 5
SHOW_CODES = False

print("Generating an RSA Scheme...")
my_rsa = utils.RSA()
public_key = my_rsa.public_key()

print("IP: ", TCP_IP)
print("Port: ", TCP_PORT)
print("Public key: ", str(public_key))

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
# Size the listen backlog to the client limit (was a hard-coded 5 that could
# silently drift from MAX_CLIENT_COUNT).
s.listen(MAX_CLIENT_COUNT)

clients = {}


def add_new_client(client, address):
    """Per-client receive loop: reassemble, decrypt and print messages."""
    clients[address] = client
    print(f"Established connection to {address}.")
    client.send(bytes("Connected to server. Public key: " + str(public_key), "utf-8"))
    carry_over = ""
    while True:
        chunk = client.recv(BUFFER_SIZE)
        if not chunk:
            print(f"Connection to {address} lost.")
            break
        chunk = str(chunk, 'utf-8')
        # Prepend any partial message left over from the previous chunk.
        chunk = carry_over + chunk
        message, carry_over = utils.separate_messages(chunk)
        if SHOW_CODES:
            print(message)
        if len(message) == 0:
            print("")
            continue
        print(f"{address}: {my_rsa.decrypt(int(message))}")
    # The main thread may already have shut this socket down on '/close';
    # a second shutdown/close raises OSError, so swallow it here.
    try:
        client.shutdown(socket.SHUT_RDWR)
        client.close()
    except OSError:
        pass
    # pop() instead of del: avoids KeyError if the entry was already removed.
    clients.pop(address, None)


def accept_incoming_clients():
    """Accept clients (one daemon thread each) up to MAX_CLIENT_COUNT."""
    print("Waiting for new clients...")
    client_count = 0
    # Was `<=`, which accepted one client more than the advertised maximum.
    while client_count < MAX_CLIENT_COUNT:
        client, address = s.accept()
        client_thread = threading.Thread(target=add_new_client, args=(client, address), daemon=True)
        client_thread.start()
        client_count += 1


accepting_thread = threading.Thread(target=accept_incoming_clients, daemon=True)
accepting_thread.start()

print("Enter '/close' to close the server.")
print("Enter '/toggleShow' to toggle showing your encrypted messages.")
while True:
    command = input()
    if command == "/close":
        break
    elif command == "/toggleShow":
        SHOW_CODES = not SHOW_CODES
    else:
        print(f"Unknown command, {command}. Please try again.")

print("Closing server...")
# Snapshot the values: handler threads mutate `clients` while we iterate.
for client in list(clients.values()):
    try:
        client.shutdown(socket.SHUT_RDWR)
        client.close()
    except OSError:
        pass
s.close()
linkcheck.py
"""
    sphinx.builders.linkcheck
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    The CheckExternalLinksBuilder class.

    :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import json
import queue
import re
import socket
import threading
from html.parser import HTMLParser
from os import path
from typing import Any, Dict, List, Set, Tuple
from urllib.parse import unquote, urlparse

from docutils import nodes
from docutils.nodes import Node
from requests.exceptions import HTTPError

from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.locale import __
from sphinx.util import encode_uri, requests, logging
from sphinx.util.console import (  # type: ignore
    purple, red, darkgreen, darkgray, turquoise
)
from sphinx.util.nodes import get_node_line
from sphinx.util.requests import is_ssl_error

logger = logging.getLogger(__name__)

# Matches any "scheme://" prefix; used to detect non-HTTP(S) URI schemes.
uri_re = re.compile('[a-z]+://')

DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml;q=0.9,*/*;q=0.8',
}


class AnchorCheckParser(HTMLParser):
    """Specialized HTML parser that looks for a specific anchor."""

    def __init__(self, search_anchor: str) -> None:
        super().__init__()

        self.search_anchor = search_anchor
        self.found = False

    def handle_starttag(self, tag: Any, attrs: Any) -> None:
        # An anchor can be declared via either id= or name=.
        for key, value in attrs:
            if key in ('id', 'name') and value == self.search_anchor:
                self.found = True
                break


def check_anchor(response: requests.requests.Response, anchor: str) -> bool:
    """Reads HTML data from a response object `response` searching for `anchor`.
    Returns True if anchor was found, False otherwise.
    """
    parser = AnchorCheckParser(anchor)
    # Read file in chunks. If we find a matching anchor, we break
    # the loop early in hopes not to have to download the whole thing.
    for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
        if isinstance(chunk, bytes):    # requests failed to decode
            chunk = chunk.decode()      # manually try to decode it
        parser.feed(chunk)
        if parser.found:
            break
    parser.close()
    return parser.found


class CheckExternalLinksBuilder(Builder):
    """
    Checks for broken external links.
    """
    name = 'linkcheck'
    epilog = __('Look for any errors in the above output '
                'or in %(outdir)s/output.txt')

    def init(self) -> None:
        """Compile config patterns, create output files and worker threads."""
        self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
        self.anchors_ignore = [re.compile(x)
                               for x in self.app.config.linkcheck_anchors_ignore]
        self.auth = [(re.compile(pattern), auth_info) for pattern, auth_info
                     in self.app.config.linkcheck_auth]
        # Caches shared by all worker threads (uri -> result).
        self.good = set()       # type: Set[str]
        self.broken = {}        # type: Dict[str, str]
        self.redirected = {}    # type: Dict[str, Tuple[str, int]]
        # set a timeout for non-responding servers
        socket.setdefaulttimeout(5.0)
        # create output file
        open(path.join(self.outdir, 'output.txt'), 'w').close()
        # create JSON output file
        open(path.join(self.outdir, 'output.json'), 'w').close()

        # create queues and worker threads
        self.wqueue = queue.Queue()  # type: queue.Queue
        self.rqueue = queue.Queue()  # type: queue.Queue
        self.workers = []  # type: List[threading.Thread]
        for i in range(self.app.config.linkcheck_workers):
            thread = threading.Thread(target=self.check_thread)
            thread.setDaemon(True)
            thread.start()
            self.workers.append(thread)

    def check_thread(self) -> None:
        """Worker loop: take URIs from wqueue, check them, push results to rqueue."""
        kwargs = {
            'allow_redirects': True,
        }  # type: Dict
        if self.app.config.linkcheck_timeout:
            kwargs['timeout'] = self.app.config.linkcheck_timeout

        def get_request_headers() -> Dict:
            # Pick per-URL headers from linkcheck_request_headers, matching
            # the most specific candidate first and falling back to "*".
            url = urlparse(uri)
            candidates = ["%s://%s" % (url.scheme, url.netloc),
                          "%s://%s/" % (url.scheme, url.netloc),
                          uri,
                          "*"]

            for u in candidates:
                if u in self.config.linkcheck_request_headers:
                    headers = dict(DEFAULT_REQUEST_HEADERS)
                    headers.update(self.config.linkcheck_request_headers[u])
                    return headers

            return {}

        def check_uri() -> Tuple[str, str, int]:
            # Perform the actual network request; returns (status, info, code).
            # split off anchor
            if '#' in uri:
                req_url, anchor = uri.split('#', 1)
                for rex in self.anchors_ignore:
                    if rex.match(anchor):
                        anchor = None
                        break
            else:
                req_url = uri
                anchor = None

            # handle non-ASCII URIs
            try:
                req_url.encode('ascii')
            except UnicodeError:
                req_url = encode_uri(req_url)

            # Get auth info, if any
            for pattern, auth_info in self.auth:
                if pattern.match(uri):
                    break
            else:
                auth_info = None

            # update request headers for the URL
            kwargs['headers'] = get_request_headers()

            try:
                if anchor and self.app.config.linkcheck_anchors:
                    # Read the whole document and see if #anchor exists
                    response = requests.get(req_url, stream=True, config=self.app.config,
                                            auth=auth_info, **kwargs)
                    found = check_anchor(response, unquote(anchor))

                    if not found:
                        raise Exception(__("Anchor '%s' not found") % anchor)
                else:
                    try:
                        # try a HEAD request first, which should be easier on
                        # the server and the network
                        response = requests.head(req_url, config=self.app.config,
                                                 auth=auth_info, **kwargs)
                        response.raise_for_status()
                    except HTTPError:
                        # retry with GET request if that fails, some servers
                        # don't like HEAD requests.
                        response = requests.get(req_url, stream=True, config=self.app.config,
                                                auth=auth_info, **kwargs)
                        response.raise_for_status()
            except HTTPError as err:
                if err.response.status_code == 401:
                    # We'll take "Unauthorized" as working.
                    return 'working', ' - unauthorized', 0
                elif err.response.status_code == 503:
                    # We'll take "Service Unavailable" as ignored.
                    return 'ignored', str(err), 0
                else:
                    return 'broken', str(err), 0
            except Exception as err:
                if is_ssl_error(err):
                    return 'ignored', str(err), 0
                else:
                    return 'broken', str(err), 0
            if response.url.rstrip('/') == req_url.rstrip('/'):
                return 'working', '', 0
            else:
                new_url = response.url
                if anchor:
                    new_url += '#' + anchor
                # history contains any redirects, get last
                if response.history:
                    code = response.history[-1].status_code
                    return 'redirected', new_url, code
                else:
                    return 'redirected', new_url, 0

        def check() -> Tuple[str, str, int]:
            # Classify the URI, consulting the shared caches before hitting
            # the network; retries up to linkcheck_retries times on 'broken'.
            # check for various conditions without bothering the network
            if len(uri) == 0 or uri.startswith(('#', 'mailto:')):
                return 'unchecked', '', 0
            elif not uri.startswith(('http:', 'https:')):
                if uri_re.match(uri):
                    # non supported URI schemes (ex. ftp)
                    return 'unchecked', '', 0
                else:
                    if path.exists(path.join(self.srcdir, uri)):
                        return 'working', '', 0
                    else:
                        for rex in self.to_ignore:
                            if rex.match(uri):
                                return 'ignored', '', 0
                        else:
                            return 'broken', '', 0
            elif uri in self.good:
                return 'working', 'old', 0
            elif uri in self.broken:
                return 'broken', self.broken[uri], 0
            elif uri in self.redirected:
                return 'redirected', self.redirected[uri][0], self.redirected[uri][1]
            for rex in self.to_ignore:
                if rex.match(uri):
                    return 'ignored', '', 0

            # need to actually check the URI
            for _ in range(self.app.config.linkcheck_retries):
                status, info, code = check_uri()
                if status != "broken":
                    break

            if status == "working":
                self.good.add(uri)
            elif status == "broken":
                self.broken[uri] = info
            elif status == "redirected":
                self.redirected[uri] = (info, code)

            return (status, info, code)

        while True:
            uri, docname, lineno = self.wqueue.get()
            if uri is None:
                # (None, None, None) is the shutdown sentinel (see finish()).
                break
            status, info, code = check()
            self.rqueue.put((uri, docname, lineno, status, info, code))

    def process_result(self, result: Tuple[str, str, int, str, str, int]) -> None:
        """Log one check result and write it to the text/JSON output files."""
        uri, docname, lineno, status, info, code = result

        filename = self.env.doc2path(docname, None)
        linkstat = dict(filename=filename, lineno=lineno, status=status,
                        code=code, uri=uri, info=info)
        if status == 'unchecked':
            self.write_linkstat(linkstat)
            return
        if status == 'working' and info == 'old':
            self.write_linkstat(linkstat)
            return
        if lineno:
            logger.info('(line %4d) ', lineno, nonl=True)
        if status == 'ignored':
            if info:
                logger.info(darkgray('-ignored- ') + uri + ': ' + info)
            else:
                logger.info(darkgray('-ignored- ') + uri)
            self.write_linkstat(linkstat)
        elif status == 'local':
            logger.info(darkgray('-local-   ') + uri)
            self.write_entry('local', docname, filename, lineno, uri)
            self.write_linkstat(linkstat)
        elif status == 'working':
            logger.info(darkgreen('ok        ') + uri + info)
            self.write_linkstat(linkstat)
        elif status == 'broken':
            if self.app.quiet or self.app.warningiserror:
                logger.warning(__('broken link: %s (%s)'), uri, info,
                               location=(filename, lineno))
            else:
                logger.info(red('broken    ') + uri + red(' - ' + info))
            self.write_entry('broken', docname, filename, lineno, uri + ': ' + info)
            self.write_linkstat(linkstat)
        elif status == 'redirected':
            try:
                text, color = {
                    301: ('permanently', purple),
                    302: ('with Found', purple),
                    303: ('with See Other', purple),
                    307: ('temporarily', turquoise),
                    308: ('permanently', purple),
                }[code]
            except KeyError:
                text, color = ('with unknown code', purple)
            linkstat['text'] = text
            logger.info(color('redirect  ') + uri + color(' - ' + text + ' to ' + info))
            self.write_entry('redirected ' + text, docname, filename,
                             lineno, uri + ' to ' + info)
            self.write_linkstat(linkstat)

    def get_target_uri(self, docname: str, typ: str = None) -> str:
        # This builder produces no documents, so targets resolve to nothing.
        return ''

    def get_outdated_docs(self) -> Set[str]:
        # Always re-check every document.
        return self.env.found_docs

    def prepare_writing(self, docnames: Set[str]) -> None:
        return

    def write_doc(self, docname: str, doctree: Node) -> None:
        """Queue every reference and image URI in `doctree`, then drain results."""
        logger.info('')
        n = 0

        # reference nodes
        for refnode in doctree.traverse(nodes.reference):
            if 'refuri' not in refnode:
                continue
            uri = refnode['refuri']
            lineno = get_node_line(refnode)
            self.wqueue.put((uri, docname, lineno), False)
            n += 1

        # image nodes
        for imgnode in doctree.traverse(nodes.image):
            uri = imgnode['candidates'].get('?')
            if uri and '://' in uri:
                lineno = get_node_line(imgnode)
                self.wqueue.put((uri, docname, lineno), False)
                n += 1

        done = 0
        while done < n:
            self.process_result(self.rqueue.get())
            done += 1

        if self.broken:
            self.app.statuscode = 1

    def write_entry(self, what: str, docname: str, filename: str, line: int,
                    uri: str) -> None:
        """Append one human-readable result line to output.txt."""
        with open(path.join(self.outdir, 'output.txt'), 'a') as output:
            output.write("%s:%s: [%s] %s\n" % (filename, line, what, uri))

    def write_linkstat(self, data: dict) -> None:
        """Append one JSON-lines record to output.json."""
        with open(path.join(self.outdir, 'output.json'), 'a') as output:
            output.write(json.dumps(data))
            output.write('\n')

    def finish(self) -> None:
        """Send one shutdown sentinel per worker thread."""
        for worker in self.workers:
            self.wqueue.put((None, None, None), False)


def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the linkcheck builder and its configuration values."""
    app.add_builder(CheckExternalLinksBuilder)

    app.add_config_value('linkcheck_ignore', [], None)
    app.add_config_value('linkcheck_auth', [], None)
    app.add_config_value('linkcheck_request_headers', {}, None)
    app.add_config_value('linkcheck_retries', 1, None)
    app.add_config_value('linkcheck_timeout', None, None, [int])
    app.add_config_value('linkcheck_workers', 5, None)
    app.add_config_value('linkcheck_anchors', True, None)
    # Anchors starting with ! are ignored since they are
    # commonly used for dynamic pages
    app.add_config_value('linkcheck_anchors_ignore', ["^!"], None)

    return {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
base_service.py
# -*- coding: utf-8 -*-
"""Custom Service implementation from MazeRunner."""
from __future__ import unicode_literals, absolute_import

import os
import sys
import time
import logging
from threading import Thread
from multiprocessing import Process

import six
import docker
from attr import attrs, attrib
from six.moves.queue import Queue, Full, Empty

from honeycomb.decoymanager.models import Alert
from honeycomb.servicemanager.defs import SERVICE_ALERT_QUEUE_SIZE
from honeycomb.servicemanager.error_messages import INVALID_ALERT_TYPE
from honeycomb.integrationmanager.tasks import send_alert_to_subscribed_integrations


@attrs
class ServerCustomService(Process):
    """Custom Service Class.

    This class provides a basic wrapper for honeycomb (and mazerunner) services.
    """

    # Populated by run_service(); holds alert dicts produced by the service thread.
    alerts_queue = None
    # Thread running the subclass's on_server_start(); set in run_service().
    thread_server = None

    logger = logging.getLogger(__name__)
    """Logger to be used by plugins and collected by main logger."""

    alert_types = attrib(type=list)
    """List of alert types, parsed from config.json"""

    # NOTE(review): a shared mutable default ({}) is evaluated once by attr;
    # consider attr.Factory(dict) — confirm no instance mutates service_args.
    service_args = attrib(type=dict, default={})
    """Validated dictionary of service arguments (see: :func:`honeycomb.utils.plugin_utils.parse_plugin_args`)"""

    # Executed at class-definition time; applies to the module-level logger above.
    logger.setLevel(logging.DEBUG)

    def signal_ready(self):
        """Signal the service manager this service is ready for incoming connections."""
        self.logger.debug("service is ready")

    def on_server_start(self):
        """Service run loop function.

        The service manager will call this function in a new thread.

        .. note:: Must call :func:`signal_ready` after finishing configuration
        """
        raise NotImplementedError

    def on_server_shutdown(self):
        """Shutdown function of the server.

        Override this and take care to gracefully shut down your service (e.g., close files)
        """
        raise NotImplementedError

    def run_service(self):
        """Run the service and start an alert processing queue.

        .. seealso:: Use :func:`on_server_start` and :func:`on_server_shutdown` for
                     starting and shutting down your service
        """
        self.alerts_queue = Queue(maxsize=SERVICE_ALERT_QUEUE_SIZE)
        self.thread_server = Thread(target=self._on_server_start)
        # self.thread_server.daemon = True
        self.thread_server.start()

        try:
            # Drain the alert queue for as long as the service thread lives; the
            # 1s timeout lets the loop re-check thread liveness periodically.
            while self.thread_server.is_alive():
                try:
                    new_alert = self.alerts_queue.get(timeout=1)
                    self.emit(**new_alert)
                except Empty:
                    continue
        except KeyboardInterrupt:
            self.logger.debug("Caught KeyboardInterrupt, shutting service down gracefully")
            raise
        except Exception as exc:
            self.logger.exception(exc)
        finally:
            # Always give the subclass a chance to clean up.
            self._on_server_shutdown()

    def run(self):
        """Daemon entry point."""
        self.run_service()

    def emit(self, **kwargs):
        """Send alerts to logfile.

        :param kwargs: Fields to pass to :py:class:`honeycomb.decoymanager.models.Alert`
        """
        try:
            # kwargs must include "event_type" matching a configured alert type name.
            alert_type = next(_ for _ in self.alert_types if _.name == kwargs["event_type"])
        except StopIteration:
            self.logger.error(INVALID_ALERT_TYPE, kwargs["event_type"])
            return

        self.logger.critical(kwargs)

        alert = Alert(alert_type)
        for key, value in six.iteritems(kwargs):
            setattr(alert, key, value)

        send_alert_to_subscribed_integrations(alert)

    def add_alert_to_queue(self, alert_dict):
        """Log alert and send to integrations."""
        try:
            # Non-blocking put: drop the alert (with a warning) rather than stall
            # the producing service when the queue is saturated.
            self.alerts_queue.put(alert_dict, block=False)
        except Full:
            self.logger.warning("Queue (size=%d) is full and can't process messages", SERVICE_ALERT_QUEUE_SIZE)
        except Exception as exc:
            self.logger.exception(exc)

    def _on_server_start(self):
        # Thread target: run the subclass hook, logging (not propagating) failures.
        try:
            self.on_server_start()
        except Exception as exc:
            self.logger.exception(exc)

    def _on_server_shutdown(self, signum=None, frame=None):
        # Also usable as a signal handler, hence the (signum, frame) signature.
        if signum:
            sys.stderr.write("Terminating on signal {}".format(signum))
            self.logger.debug("Terminating on signal %s", signum)

        self.on_server_shutdown()
        raise SystemExit()


class DockerService(ServerCustomService):
    """Provides an ability to run a Docker container that will be monitored for events."""

    def __init__(self, *args, **kwargs):
        """Initialize the docker client and container handle."""
        super(DockerService, self).__init__(*args, **kwargs)
        self._container = None
        self._docker_client = docker.from_env()

    @property
    def docker_params(self):
        """Return a dictionary of docker run parameters.

        .. seealso::
            Docker run: https://docs.docker.com/engine/reference/run/

        :return: Dictionary, e.g., :code:`dict(ports={80: 80})`
        """
        return {}

    @property
    def docker_image_name(self):
        """Return docker image name."""
        raise NotImplementedError

    def parse_line(self, line):
        """Parse line and return dictionary if its an alert, else None / {}."""
        raise NotImplementedError

    def get_lines(self):
        """Fetch log lines from the docker service.

        :return: A blocking logs generator
        """
        return self._container.logs(stream=True)

    def read_lines(self, file_path, empty_lines=False, signal_ready=True):
        """Fetch lines from file.

        In case the file handler changes (logrotate), reopen the file.

        :param file_path: Path to file
        :param empty_lines: Return empty lines
        :param signal_ready: Report signal ready on start
        """
        file_handler, file_id = self._get_file(file_path)
        # Start tailing from the end of the file.
        file_handler.seek(0, os.SEEK_END)

        if signal_ready:
            self.signal_ready()

        while self.thread_server.is_alive():
            line = six.text_type(file_handler.readline(), "utf-8")
            if line:
                yield line
                continue
            elif empty_lines:
                yield line

            time.sleep(0.1)

            # Reopen when the underlying inode changed (logrotate).
            # NOTE(review): os.stat() is called before the isfile() guard and will
            # raise if the file momentarily disappears during rotation — confirm.
            if file_id != self._get_file_id(os.stat(file_path)) and os.path.isfile(file_path):
                file_handler, file_id = self._get_file(file_path)

    @staticmethod
    def _get_file_id(file_stat):
        # Identity string for a file, stable across reopens of the same inode.
        if os.name == "posix":
            # st_dev: Device inode resides on.
            # st_ino: Inode number.
            return "%xg%x" % (file_stat.st_dev, file_stat.st_ino)
        # Non-POSIX fallback: creation time only (no inode concept on Windows).
        return "%f" % file_stat.st_ctime

    def _get_file(self, file_path):
        # Open in binary mode; read_lines() decodes each line as UTF-8.
        file_handler = open(file_path, "rb")
        file_id = self._get_file_id(os.fstat(file_handler.fileno()))
        return file_handler, file_id

    def on_server_start(self):
        """Service run loop function.

        Run the desired docker container with parameters and start parsing the
        monitored file for alerts.
        """
        self._container = self._docker_client.containers.run(self.docker_image_name,
                                                             detach=True, **self.docker_params)
        self.signal_ready()

        for log_line in self.get_lines():
            try:
                alert_dict = self.parse_line(log_line)
                if alert_dict:
                    self.add_alert_to_queue(alert_dict)
            except Exception:
                self.logger.exception(None)

    def on_server_shutdown(self):
        """Stop the container before shutting down."""
        if not self._container:
            return

        self._container.stop()
        self._container.remove(v=True, force=True)
daemon.py
# System imports
import os, sys

# Adjust __file__ for Windows executable
try:
    __file__ = os.path.abspath(__file__)
except:
    __file__ = sys.executable
sys.path.insert(0, os.path.dirname(__file__))

# NOTE(review): the star import presumably provides log(), MAC/WIN flags,
# appIsRunning(), Thread, Client, time, traceback, the AppKit/win32 names and
# the constants (LOOPDURATION, UPDATESEARCHINTERVAL, APPVERSION) — confirm.
from intercom import *
import intercom
import typeworld.client


class IntercomDelegate(intercom.IntercomDelegate):
    # Called by the intercom layer when an exit signal arrives; tears down the tray icon.
    def exitSignalCalled(self):
        quitIcon()


global intercomApp
intercomApp = TypeWorldApp(delegate = IntercomDelegate())
log('Start %s' % intercomApp)

# The entire agent runs inside one try so any uncaught error is logged at the bottom.
try:
    import locales

    def getClient():
        # Build a typeworld API client backed by platform-appropriate preferences.
        # NOTE(review): prefDir is assumed to be provided by the star import on Windows.
        global prefDir
        if WIN:
            prefFile = os.path.join(prefDir, 'preferences.json')
            prefs = typeworld.client.JSON(prefFile)
        else:
            prefs = typeworld.client.AppKitNSUserDefaults('world.type.guiapp')
        client = typeworld.client.APIClient(preferences = prefs, online = False)
        return client

    client = getClient()

    def localize(key, html = False):
        # Translate a UI string for the client's locale; optionally HTML-escape newlines.
        string = locales.localize(key, client.locale())
        if html:
            string = string.replace('\n', '<br />')
        return string

    def openApp():
        log('openApp()')
        global intercomApp
        intercomApp.open()

    def setStatus(amountOutdatedFonts, notification = False):
        # Swap the tray icon to the "notification" variant when updates exist,
        # optionally also posting a user notification.
        if amountOutdatedFonts > 0:
            icon.icon = image_notification
            if notification:
                intercomApp.notification(localize('XFontUpdatesAvailable').replace('%numberOfFonts%', str(amountOutdatedFonts)), localize('Click to open Type.World app'))
        else:
            icon.icon = image

    def getAmountOutdatedFonts(force = False):
        # Ask the main app (via intercom) how many fonts are outdated.
        if force:
            return intercomApp.speak('amountOutdatedFonts force')
        else:
            return intercomApp.speak('amountOutdatedFonts')

    def searchAppUpdate():
        return intercomApp.speak('searchAppUpdate')

    def checkForUpdates():
        # Menu action: force a recount and refresh the tray status.
        amountOutdatedFonts = getAmountOutdatedFonts(force = True)
        setStatus(amountOutdatedFonts, notification = True)

    def autoReloadSubscriptions(force = True):
        log('started autoReloadSubscriptions()')
        client = getClient()
        log(client)

        # Preference is set to check automatically
        # NOTE(review): `int(... != -1)` converts a *boolean*, not the interval —
        # probably meant `int(...) != -1`; left unchanged, verify intent.
        if (client.preferences.get('reloadSubscriptionsInterval') and int(client.preferences.get('reloadSubscriptionsInterval') != -1) or force):

            # Has never been checked, set to long time ago
            if not client.preferences.get('reloadSubscriptionsLastPerformed'):
                client.preferences.set('reloadSubscriptionsLastPerformed', int(time.time()) - int(client.preferences.get('reloadSubscriptionsInterval')) - 10)

            # See if we should check now
            log('reloadSubscriptionsLastPerformed: %s' % client.preferences.get('reloadSubscriptionsLastPerformed'))
            log('reloadSubscriptionsInterval: %s' % client.preferences.get('reloadSubscriptionsInterval'))
            if (int(client.preferences.get('reloadSubscriptionsLastPerformed')) < int(time.time()) - int(client.preferences.get('reloadSubscriptionsInterval'))) or force:
                setStatus(getAmountOutdatedFonts(), notification = True)

        # APP UPDATE
        # Only search for app updates while the main app window is closed.
        if not intercomApp.isOpen():

            # Has never been checked, set to long time ago
            if not client.preferences.get('appUpdateLastSearched'):
                client.preferences.set('appUpdateLastSearched', int(time.time()) - 30 * 24 * 60 * 60)  # set to one month ago

            if int(client.preferences.get('appUpdateLastSearched')) < int(time.time()) - UPDATESEARCHINTERVAL:  # 24 * 60 * 60
                log('Calling searchAppUpdate()')
                time.sleep(15)
                searchAppUpdate()
                client.preferences.set('appUpdateLastSearched', int(time.time()))  # set to now

        # # Sync subscriptions
        # if not client.preferences.get('lastServerSync') or client.preferences.get('lastServerSync') < time.time() - PULLSERVERUPDATEINTERVAL:
        # 	intercomApp.speak('pullServerUpdate')

    # Prevent second start
    if MAC and not appIsRunning('world.type.agent') or WIN and not appIsRunning('TypeWorld Taskbar Agent.exe'):

        # Set up tray icon
        from pystray import MenuItem as item
        import pystray

        def closeListener():
            # Connect to our own listener socket to make its accept() loop exit.
            address = ('localhost', 65501)
            myConn = Client(address)
            myConn.send('closeListener')
            myConn.close()

        def quitIcon():
            # Orderly shutdown: unblock the listener, stop the worker thread, stop the icon.
            closeListenerThread = Thread(target=closeListener)
            closeListenerThread.start()
            time.sleep(1)
            t.stop()
            t.join()
            icon.stop()

        menu = (
            item(localize('Open Type.World App'), openApp, default=True),
            item(localize('Check for font updates now'), checkForUpdates),
        )#, pystray.Menu.SEPARATOR, item('Quit', quitIcon))

        if MAC:
            image = NSImage.alloc().initWithContentsOfFile_(NSBundle.mainBundle().pathForResource_ofType_('MacSystemTrayIcon', 'pdf'))
            image.setTemplate_(True)
            image_notification = NSImage.alloc().initWithContentsOfFile_(NSBundle.mainBundle().pathForResource_ofType_('MacSystemTrayIcon_Notification', 'pdf'))
            image_notification.setTemplate_(True)
        if WIN:
            image = win32.LoadImage(
                None,
                os.path.join(os.path.dirname(__file__), 'icon', 'TaskbarIcon.ico'),
                win32.IMAGE_ICON, 0, 0,
                win32.LR_DEFAULTSIZE | win32.LR_LOADFROMFILE)
            image_notification = win32.LoadImage(
                None,
                os.path.join(os.path.dirname(__file__), 'icon', 'TaskbarIcon_Notification.ico'),
                win32.IMAGE_ICON, 0, 0,
                win32.LR_DEFAULTSIZE | win32.LR_LOADFROMFILE)

        icon = pystray.Icon("Type.World", image, "Type.World", menu)

        if MAC:
            class Delegate(NSObject):
                def applicationDidFinishLaunching_(self, aNotification):
                    icon.run()

            NSUserNotificationCenterDelegate = objc.protocolNamed('NSUserNotificationCenterDelegate')

            class NotificationDelegate(NSObject, protocols=[NSUserNotificationCenterDelegate]):
                # Clicking a notification opens the main app.
                def userNotificationCenter_didActivateNotification_(self, center, aNotification):
                    intercomApp.open()

            nsApp = NSApplication.sharedApplication()
            delegate = Delegate.alloc().init()
            nsApp.setDelegate_(delegate)
            nsApp.setActivationPolicy_(NSApplicationActivationPolicyProhibited)
            notificationCenter = NSUserNotificationCenter.defaultUserNotificationCenter()
            notificationCenterDelegate = NotificationDelegate.alloc().init()
            notificationCenter.setDelegate_(notificationCenterDelegate)

        class StoppableThread(threading.Thread):
            """Thread class with a stop() method. The thread itself has to check
            regularly for the stopped() condition."""

            def __init__(self, *args, **kwargs):
                super(StoppableThread, self).__init__(*args, **kwargs)
                self._stopped = threading.Event()

            def stop(self):
                self._stopped.set()

            def stopped(self):
                return self._stopped.isSet()

            def run(self):
                log('beginning of run()')

                # First run
                # autoReloadSubscriptions(force = False)
                setStatus(getAmountOutdatedFonts(), notification = False)
                intercomApp.speak('daemonStart')

                log('about to start inner loop')
                # One tick per second; every LOOPDURATION ticks, re-check subscriptions.
                while True:
                    # log(self.counter)
                    if self.stopped():
                        return
                    if self.counter == LOOPDURATION:
                        log('calling autoReloadSubscriptions() from inner loop')
                        if WIN:
                            # Remove the stray Start Menu shortcut the installer leaves behind.
                            path = os.path.expanduser('~/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Type.World.lnk')
                            if os.path.exists(path):
                                os.remove(path)
                        autoReloadSubscriptions(force = False)
                        self.counter = 0
                    else:
                        self.counter += 1
                    if WIN:
                        win32gui.PumpWaitingMessages()
                    time.sleep(1)

        t = StoppableThread()
        t.counter = 0
        t.start()
        log('started main loop thread')

        intercomCommands = ['amountOutdatedFonts', 'version', 'quit']

        # NOTE(review): this function shadows the imported `intercom` module
        # inside this scope — intentional? Left as-is.
        def intercom(commands):
            # Dispatch one whitespace-split command received over the local socket.
            if not commands[0] in intercomCommands:
                log('Intercom: Command %s not registered' % (commands[0]))
                return
            if commands[0] == 'amountOutdatedFonts':
                if len(commands) > 1:
                    amountOutdatedFonts = int(commands[1])
                    setStatus(amountOutdatedFonts)
            if commands[0] == 'version':
                return APPVERSION
            if commands[0] == 'quit':
                quitIcon()

        def listenerFunction():
            # Single-threaded command server on localhost:65501; one message per connection.
            from multiprocessing.connection import Listener
            address = ('localhost', 65501)
            listener = Listener(address)
            log('Server started')
            while True:
                conn = listener.accept()
                command = conn.recv()
                commands = command.split(' ')
                if command == 'closeListener':
                    conn.close()
                    break
                response = None
                if commands[0] in intercomCommands:
                    response = intercom(commands)
                conn.send(response)
                conn.close()
            listener.close()

        log('About to start listener server')
        listenerThread = Thread(target=listenerFunction)
        listenerThread.start()

        ENDSESSION_CLOSEAPP = 0x1

        log('About to start icon')
        if WIN:
            # pystray on Windows blocks here until quitIcon() stops the icon.
            icon.run(sigint=quitIcon)
            log('Icon started')
        if MAC:
            # On macOS the icon is started from applicationDidFinishLaunching_;
            # run the Cocoa event loop instead.
            from PyObjCTools import AppHelper
            AppHelper.runEventLoop()

except:
    log(traceback.format_exc())
decorators.py
#!/usr/bin/env python # encoding: utf-8 """""" from __future__ import division, print_function import base64 import hashlib import itertools as it import os import pickle import sys import traceback from functools import wraps from multiprocessing import Process, Queue from . import mkdir CACHEDIR = '.pycache' def processify(func): """Decorator to run a function as a process. Be sure that every argument and the return value is *pickable*. The created process is joined, so the code does not run in parallel. Taken from https://gist.github.com/schlamar/2311116 """ def process_func(q, *args, **kwargs): try: ret = func(*args, **kwargs) except Exception: ex_type, ex_value, tb = sys.exc_info() error = ex_type, ex_value, ''.join(traceback.format_tb(tb)) ret = None else: error = None q.put((ret, error)) # register original function with different name # in sys.modules so it is pickable process_func.__name__ = func.__name__ + 'processify_func' setattr(sys.modules[__name__], process_func.__name__, process_func) @wraps(func) def decorated(*args, **kwargs): q = Queue() p = Process(target=process_func, args=[q] + list(args), kwargs=kwargs) p.start() p.join() ret, error = q.get() if error: ex_type, ex_value, tb_str = error message = '%s (in subprocess)\n%s' % (ex_value.message, tb_str) raise ex_type(message) return ret return decorated def _hash_file(filename, blocksize=65536): """Hashes the given file. 
:param filename: Path to file :returns: Hex digest of the md5 checksum """ hasher = hashlib.md5() with open(filename, 'rb') as infile: buf = infile.read(blocksize) while len(buf) > 0: hasher.update(buf) buf = infile.read(blocksize) return hasher.hexdigest() def _args_to_dict(func, args): """Converts the unnamed arguments of func to a dictionary :param func: Function :param *args: Valid arguments of func as tuple :returns: Dictionary containing with key=argument name and value=value in *args """ argnames = func.func_code.co_varnames[:func.func_code.co_argcount] return {key: val for key, val in zip(argnames, args)} def _to_hashfilename(func, args, kwargs): """Computes the hashfile name for a given function (with filename argument) :param func: Function :param *args: Named arguments of func :param **kwargs: Keyword arguments of func :returns: Proposed filename """ filename_pos = func.func_code.co_varnames.index('filename') filename = args[filename_pos] filehash = _hash_file(filename) sourcehash = _hash_file(func.func_code.co_filename) argdict = _args_to_dict(func, args) argdict.update(kwargs) argstr = '_'.join("{}={}".format(key, val) for key, val in argdict.iteritems()) rawname = '_'.join((filehash, sourcehash, argstr)) return base64.urlsafe_b64encode(rawname) + '.pkl' def cached_filefunc(func): """Caches the return value for a function which contains a named argument `filename` for further use. This can be helpful for creating plots from pre-computed data with an intermediate step which still takes very long. The cache is considered invalid (and the value is calculated from the proper function) if one of the following conditions is met: * the file corresponding the argument `filename` has changed * the source file of the function called has changed * an argument of the function called is changed TODO Detect default values of keyword arguments FIXME This is baaaaad hackery!!! 
""" @wraps(func) def decorated(*args, **kwargs): # First we try to read the function's return value from cache try: cfilename = _to_hashfilename(func, args, kwargs) with open(os.path.join(CACHEDIR, cfilename), 'rb') as cfile: return pickle.load(cfile) except IOError as exception: if exception.errno != os.errno.ENOENT: raise # Not there? --> Compute it and cache it for further use val = func(*args, **kwargs) mkdir(CACHEDIR) cfilename = _to_hashfilename(func, args, kwargs) with open(os.path.join(CACHEDIR, cfilename), 'wb') as cfile: pickle.dump(val, cfile) return val if 'filename' in func.func_code.co_varnames[:func.func_code.co_argcount]: return decorated else: return func
observing.py
from multiprocessing import Process from panoptes.utils import error def on_enter(event_data): """Take an observation image. This state is responsible for taking the actual observation image. """ pocs = event_data.model current_obs = pocs.observatory.current_observation pocs.say(f"🔭🔭 I'm observing {current_obs.field.field_name}! 🔭🔭") pocs.next_state = 'parking' try: # Do the observing, once per exptime (usually only one unless a compound observation). for _ in current_obs.exptimes: pocs.observatory.observe(blocking=True) pocs.say(f"Finished observing! I'll start processing that in the background.") # Do processing in background. process_proc = Process(target=pocs.observatory.process_observation) process_proc.start() pocs.logger.debug(f'Processing for {current_obs} started on {process_proc.pid=}') except (error.Timeout, error.CameraNotFound): pocs.logger.warning("Timeout waiting for images. Something wrong with cameras, parking.") except Exception as e: pocs.logger.warning(f"Problem with imaging: {e!r}") pocs.say("Hmm, I'm not sure what happened with that exposure.") else: pocs.logger.debug('Finished with observing, going to analyze') pocs.next_state = 'analyzing'
new_gui_user.py
#!/usr/bin/env python3
"""MEC deadlock-experiment client: generates task batches, publishes them to MEC
hosts over MQTT, tracks per-task deadlines and plots/exports the results."""
import socket
import os
import ast
from threading import Thread
import random as r
import time
import datetime as dt
import paho.mqtt.client as mqtt
# import matplotlib.pyplot as plt
from drawnow import *
import subprocess as sp
import config
import smtplib
import new_distribution as dst
import paramiko

hosts = {}  # {hostname: ip}
record = []  # records the task list and execution and waiting time and host sent
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
          't2': {'wcet': 1, 'period': 5, 'deadline': 4},
          't3': {'wcet': 2, 'period': 10, 'deadline': 8},
          't4': {'wcet': 1, 'period': 10, 'deadline': 9},
          't5': {'wcet': 3, 'period': 15, 'deadline': 12}
          }

# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
    't1': [7, 4, 3],
    't2': [1, 2, 2],
    't3': [6, 0, 0],
    't4': [0, 1, 1],
    't5': [4, 3, 1]
}
allocation = {
    't1': [0, 1, 0],
    't2': [2, 0, 0],
    't3': [3, 0, 2],
    't4': [2, 1, 1],
    't5': [0, 0, 2]
}
'''
ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
            arrowprops=dict(facecolor='black', shrink=0.05),
            )
'''
thread_record = []
task_record = {}  # records tasks start time and finish time {seq_no:{task:[duration, start_time,finish_time]}}
# idea for task naming
# client-id_task-no_task-id   client id = 11, task no=> sequence no, task id => t1
tasks_executed_on_time = 0
tasks_not_executed_on_time = 0
filename = {2: 'rms+bankers', 3: 'edf+bankers', 7: 'rms+wound_wait', 10: 'rms+wait_die', 12: 'edf+wound_wait',
            16: 'edf+wait_die'}
# NOTE(review): `plt` is used here although `import matplotlib.pyplot as plt` is
# commented out above — presumably re-exported by `from drawnow import *`; confirm.
fig = plt.figure()
ax1 = fig.add_subplot(111)


def auto_value(no):
    # Heuristic vertical offset for plot annotations, scaled down for larger counts.
    if no < 5:
        return no
    elif no < 10:
        return no - 3
    elif no < 50:
        return no - 6
    elif no < 150:
        return no - 30
    elif no < 800:
        return no - 70
    elif no < 2000:
        return no - 200
    else:
        return no - 400


def plot_performance():
    # Bar chart of timely vs untimely task executions; redrawn by drawnow() per batch.
    name = ['Timely', 'Untimely']
    ypos = ([0, 1])
    total = tasks_executed_on_time + tasks_not_executed_on_time
    if tasks_executed_on_time > 0:
        timely = round((tasks_executed_on_time/total)*100, 2)
    else:
        timely = 0
    if tasks_not_executed_on_time > 0:
        untimely = round((tasks_not_executed_on_time/total)*100, 2)
    else:
        untimely = 0
    values = [tasks_executed_on_time, tasks_not_executed_on_time]
    ax1.set_xticks(ypos)
    ax1.set_xticklabels(name)
    ax1.bar(ypos, values, align='center', color='m', alpha=0.5)
    ax1.set_title('Task execution Time record')
    dis = 'Seq: {}\nTotal Tasks: {}\ntotal: {}'.format(seq, total, total_split_task)
    # ax1.annotate(dis, xy=(2, 1), xytext=(3, 1.5))
    ax1.text(1, auto_value(tasks_executed_on_time), dis, size=10, rotation=0, ha="center", va="center",
             bbox=dict(boxstyle="round", ec=(1., 0.7, 0.7), fc=(1., 0.8, 0.8),))
    ax1.text(-0.1, tasks_executed_on_time, '{}, {}%'.format(tasks_executed_on_time, timely), size=10, rotation=0,
             ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
    ax1.text(0.99, tasks_not_executed_on_time, '{}, {}%'.format(tasks_not_executed_on_time, untimely), size=10,
             rotation=0, ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
    plt.subplot(ax1)
    fig.suptitle('MEC Performance During Deadlock Experiment')


def get_time():
    # Current UTC timestamp used for task start/finish bookkeeping.
    _time_ = dt.datetime.utcnow()
    return _time_


def gosh_dist(_range):
    # Pseudo-random index in [0, _range) from a home-grown modular expression.
    return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range


def get_tasks():
    # Pick 2-3 distinct tasks from the _tasks template and compute their timing info.
    global tasks
    tasks = {}
    _t = r.randrange(2, 4)
    while len(tasks) < _t:
        a = list(_tasks.keys())[gosh_dist(5)]
        tasks[a] = _tasks[a]
    _t_time = waiting_time_init()
    return tasks, _t_time


def waiting_time_init():
    # t_time = {i: [round(r.uniform(0.1, 0.4), 3), round(r.uniform(2,5), 3)] for i in
    #           tasks}   # t_time = {'ti': [execution_time, latency], ..}
    t_time = {i: [round(r.uniform(0.4, 0.8), 3), round((tasks[i]['period']) / (tasks[i]['wcet']), 3)]
              for i in tasks}  # t_time = {'ti': [execution_time, latency], ..}
    return t_time


# Callback Function on Connection with MQTT Server
def on_connect(connect_client, userdata, flags, rc):
    print("Connected with Code :" +str(rc))
    # Subscribe Topic from here
    connect_client.subscribe(topic, qos=2)


# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
    # Payload format (after 2-char prefix): "<{hostname: ip} literal>_<algo_id>".
    global hosts
    global ho
    global algo_id
    global host_dict
    # print the message received from the subscribed topic
    details = str(msg.payload, 'utf-8')[2:].split('_')
    ho = ast.literal_eval(details[0])  # {hostname: ip}
    algo_id = int(details[1])
    hosts = list(ho.values())
    host_dict = dict(zip(list(ho.values()), list(ho.keys())))  # {ip: hostname}
    # print('hosts: ', hosts)
    # One-shot discovery: stop the loop once the host list arrives.
    _client.loop_stop()


def get_mec_details():
    # Connect to the broker and start a background loop waiting for the host list.
    global topic
    global _client
    global broker_ip
    username = 'mec'
    password = 'password'
    broker_ip = input("Broker's IP: ").strip()
    broker_port_no = 1883
    topic = 'mec'
    _client = mqtt.Client()
    _client.on_connect = on_connect
    _client.on_message = on_message
    _client.username_pw_set(username, password)
    _client.connect(broker_ip, broker_port_no, 60)
    _client.loop_start()


def on_connect_task(connect_client, userdata, flags, rc):
    # print("Connected with Code :" +str(rc))
    # Subscribe Topic from here
    connect_client.subscribe(task_topic, qos=2)


# Callback Function on Receiving the Subscribed Topic/Message
def on_receive_task(message_client, userdata, msg):
    # Records task finish times reported back by MEC hosts and classifies each
    # task as timely/untimely against its recorded duration budget.
    global tasks_executed_on_time
    global tasks_not_executed_on_time
    # print the message received from the subscribed topic
    data = str(msg.payload, 'utf-8')
    received_task = ast.literal_eval(data)
    for i in received_task:
        tk = i.split('_')[0]
        # print('tk: {}'.format(tk))
        k = task_record[int(tk.split('.')[-1])][tk]
        if len(k) < 3:
            # First report for this task: append finish time, then compare
            # elapsed seconds against the duration budget k[0].
            a = received_task[i]
            k.append(dt.datetime(int(a[0]), int(a[1]), int(a[2]), int(a[3]), int(a[4]), int(a[5]), int(a[6])))
            p = float(str(k[2] - k[1]).split(':')[-1])
            if p < k[0]:
                tasks_executed_on_time += 1
            else:
                tasks_not_executed_on_time += 1
        elif len(k) == 3:
            # Duplicate report: re-evaluate without growing the record.
            a = received_task[i]
            t = dt.datetime(int(a[0]), int(a[1]), int(a[2]), int(a[3]), int(a[4]), int(a[5]), int(a[6]))
            p = float(str(t - k[1]).split(':')[-1])
            if p < k[0]:
                tasks_executed_on_time += 1
            else:
                tasks_not_executed_on_time += 1


def receive_mec_start():
    # Blocking MQTT loop (run on a daemon thread) receiving task results on our client-id topic.
    global task_topic
    global task_client
    username = 'mec'
    password = 'password'
    broker_port_no = 1883
    task_topic = client_id_
    task_client = mqtt.Client()
    task_client.on_connect = on_connect_task
    task_client.on_message = on_receive_task
    task_client.username_pw_set(username, password)
    task_client.connect(broker_ip, broker_port_no, 60)
    task_client.loop_forever()


def ip_address():
    # Local outbound IP, discovered via a UDP "connection" to a public resolver.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    return s.getsockname()[0]


def get_hostname():
    cmd = ['cat /etc/hostname']
    hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
    return hostname


def send_email(msg):
    # Email the experiment results; credentials come from the local config module.
    try:
        server = smtplib.SMTP_SSL('smtp.gmail.com')
        server.ehlo()
        server.login(config.email_address, config.password)
        subject = 'Deadlock results {} {}'.format(filename[algo_id], get_hostname())
        # msg = 'Attendance done for {}'.format(_timer)
        _message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
        server.sendmail(config.email_address, config.send_email, _message)
        server.quit()
        print("Email sent!")
    except Exception as e:
        print(e)


def send_result(host_, data):
    # Append each result line to a file on the remote host via SSH.
    try:
        c = paramiko.SSHClient()
        un = 'mec'
        pw = 'password'
        port = 22
        c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        c.connect(host_, port, un, pw)
        for i in data:
            cmd = ('echo "{}" >> /home/mec/result/client_data.py'.format(i))  # task share : host ip task
            stdin, stdout, stderr = c.exec_command(cmd)
        c.close()
    except Exception as e:
        print(e)


def client_id(client_ip):
    # Zero-pad the last IPv4 octet to three characters (e.g. '7' -> '007').
    _id = client_ip.split('.')[-1]
    if len(_id) == 1:
        return '00' + _id
    elif len(_id) == 2:
        return '0' + _id
    else:
        return _id


total_task_sent = 0
total_split_task = 0
task_dist = {1:0,2:0,3:0}  # count of sent tasks keyed by wcet


def task_details(tasks):
    # Accumulate global counters describing the tasks sent so far.
    global task_dist, total_task_sent, total_split_task
    total_task_sent += len(tasks)
    for task in tasks:
        total_split_task += tasks[task]['wcet']
        task_dist[tasks[task]['wcet']] += 1


def name_task(task_list, node_id, seq_no):
    # naming nomenclature of tasks = task_id.node_id.client_id.sequence_no =>t2.110.170.10
    # returns task list and waiting_time with proper identification
    return {i + '.' + str(node_id) + '.' + client_id_ + '.' + str(seq_no): task_list[0][i] for i in task_list[0]}, \
           {k + '.' + str(node_id) + '.' + client_id_ + '.' + str(seq_no): task_list[1][k] for k in task_list[1]}


def namestr(obj):
    # Reverse lookup: names in the module globals bound to exactly this object.
    namespace = globals()
    return [name for name in namespace if namespace[name] is obj]


def split_list(data, _id_, no_of_clients):
    # Return the slice of `data` assigned to client number `_id_` (1-based)
    # when the list is split evenly across no_of_clients clients.
    increment = int(len(data)/no_of_clients)
    a_range = list(range(0,len(data),increment))[1:]+[len(data)]   # 1800 is the range no of tasks for each mec client
    c = 0
    host_sp = {}
    for i in a_range:
        host_sp[a_range.index(i)+1] = (c, i)
        c += increment
    a,b = host_sp[_id_]
    return data[a:b]


def main():
    global record
    global client_id_
    global seq
    os.system('clear')
    print("================== Welcome to Client Platform ===================")
    get_mec_details()
    client_id_ = client_id(ip_address())
    '''
    thread_record.append(Thread(target=receive_tasks))
    thread_record[-1].daemon = True
    thread_record[-1].start()
    '''
    # Result-receiving MQTT loop runs on a daemon thread for the program's lifetime.
    redeem_task = Thread(target=receive_mec_start)
    redeem_task.daemon = True
    redeem_task.start()
    # Block until on_message() has populated the host list.
    while True:
        time.sleep(1)
        if len(hosts) > 0:
            break
    print('\nClient is connected to servers: \n{}'.format(hosts))
    data = {5: dst.mec5, 10: dst.mec10, 15: dst.mec15}
    cmd = ['hostname']
    host_id = str(sp.check_output(cmd, shell=True), 'utf-8')[-2]
    while True:
        try:
            x = input('Enter "y" to start and "stop" to exit: ').strip().lower()
            _data_ = split_list(data[len(hosts)], int(host_id), no_of_clients=5)
            if x == 'y':
                for i in range(len(_data_)):
                    seq = i
                    rand_host = hosts[int(_data_[i]) - 1]  # host selection using generated gausian distribution
                    _task_ = get_tasks()  # tasks, waiting time
                    _tasks_list = name_task(_task_, client_id(rand_host), i)  # id's tasks => ({tasks}, {waiting time})
                    task_details(_tasks_list[0])
                    record.append([_tasks_list, host_dict[rand_host]])
                    for task in _tasks_list[0]:
                        if seq not in task_record:  # task_record= {seq_no:{task:[duration,start_time,finish_time]}}
                            task_record[seq] = {task: [_tasks_list[1][task][1], get_time()]}
                        else:
                            task_record[seq][task] = [_tasks_list[1][task][1], get_time()]
                    # client(_tasks_list, rand_host)
                    task_client.publish(client_id(rand_host), "t {}".format(_tasks_list), qos=2)
                    print("Sent {} to {} node_id {} \n\n".format(_tasks_list, rand_host, client_id(rand_host)))
                    drawnow(plot_performance)
                    time.sleep(3)
            elif x == 'stop':
                # Export results locally, copy them to the master host, and email them.
                print('\nProgramme terminated')
                result = f"timely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_executed_on_time} " \
                         f"\nuntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_not_executed_on_time}" \
                         f"\nrecord{len(hosts)} = {record} \nhost_names{len(hosts)} = {host_dict}"
                list_result = [
                    f"timely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_executed_on_time} ",
                    f"\nuntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_not_executed_on_time}",
                    f"\nrecord{len(hosts)} = {record} ",
                    f"\nhost_names{len(hosts)} = {host_dict}"
                ]
                path_ = 'data/raw/'
                if os.path.exists(path_):
                    cmd = f"echo '' > {get_hostname()[-1]}_{algo_id}_{len(hosts)}data.py"
                    os.system(cmd)
                else:
                    os.mkdir(path_)
                    cmd = f"echo '' > {get_hostname()[-1]}_{algo_id}_{len(hosts)}data.py"
                    os.system(cmd)
                for i in list_result:
                    cmd = f'echo "{i}" >> {get_hostname()[-1]}_{algo_id}_{len(hosts)}data.py'
                    os.system(cmd)
                task_doc = f"{namestr(total_task_sent)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {total_task_sent}" \
                           f"\n{namestr(total_split_task)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = " \
                           f"{total_split_task} " \
                           f"\n{namestr(task_dist)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {task_dist}"
                cmd = f'echo "{task_doc}" >> {get_hostname()[-1]}_{algo_id}_{len(hosts)}task.py'
                os.system(cmd)
                sp.run(
                    ["scp", f"{path_}{get_hostname()[-1]}_{algo_id}_{len(hosts)}data.py",
                     f"mec@{ho['osboxes-0']}:/home/mec/result/python"])
                sp.run(
                    ["scp", f"{path_}{get_hostname()[-1]}_{algo_id}_{len(hosts)}data.py",
                     f"mec@{ho['osboxes-0']}:/home/mec/result/linux"])
                send_result(ho['osboxes-0'], result)
                send_email(result)
                send_email(task_doc)
                task_client.loop_stop()
                print('done')
                time.sleep(1)
                break
        except KeyboardInterrupt:
            print('\nProgramme terminated')
            task_client.loop_stop()
            break


if __name__ == "__main__":
    main()
test_search.py
import multiprocessing import numbers import pytest from time import sleep from base.client_base import TestcaseBase from utils.util_log import test_log as log from common import common_func as cf from common import common_type as ct from common.common_type import CaseLabel, CheckTasks from utils.util_pymilvus import * from common.constants import * from pymilvus.orm.types import CONSISTENCY_STRONG, CONSISTENCY_BOUNDED, CONSISTENCY_SESSION, CONSISTENCY_EVENTUALLY prefix = "search_collection" search_num = 10 max_dim = ct.max_dim epsilon = ct.epsilon gracefulTime = ct.gracefulTime default_nb = ct.default_nb default_nb_medium = ct.default_nb_medium default_nq = ct.default_nq default_dim = ct.default_dim default_limit = ct.default_limit default_search_exp = "int64 >= 0" default_search_string_exp = "varchar >= \"0\"" default_search_mix_exp = "int64 >= 0 && varchar >= \"0\"" default_invaild_string_exp = "varchar >= 0" perfix_expr = 'varchar like "0%"' default_search_field = ct.default_float_vec_field_name default_search_params = ct.default_search_params default_int64_field_name = ct.default_int64_field_name default_float_field_name = ct.default_float_field_name default_bool_field_name = ct.default_bool_field_name default_string_field_name = ct.default_string_field_name vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] uid = "test_search" nq = 1 epsilon = 0.001 field_name = default_float_vec_field_name binary_field_name = default_binary_vec_field_name search_param = {"nprobe": 1} entity = gen_entities(1, is_normal=True) entities = gen_entities(default_nb, is_normal=True) raw_vectors, binary_entities = gen_binary_entities(default_nb) default_query, _ = gen_search_vectors_params(field_name, entities, default_top_k, nq) index_name1=cf.gen_unique_str("float") index_name2=cf.gen_unique_str("varhar") class TestCollectionSearchInvalid(TestcaseBase): """ Test case of search interface """ @pytest.fixture(scope="function", 
params=ct.get_invalid_vectors)
    def get_invalid_vectors(self, request):
        # Yields deliberately-invalid vector payloads for negative-path tests.
        yield request.param

    # The fixtures below draw from shared "invalid value" pools; parameters
    # that are actually VALID for the argument under test are skipped.
    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_invalid_fields_type(self, request):
        if isinstance(request.param, str):
            pytest.skip("string is valid type for field")
        yield request.param

    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_invalid_fields_value(self, request):
        if not isinstance(request.param, str):
            pytest.skip("field value only support string")
        if request.param == "":
            pytest.skip("empty field is valid")
        yield request.param

    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_invalid_metric_type(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=ct.get_invalid_ints)
    def get_invalid_limit(self, request):
        if isinstance(request.param, int) and request.param >= 0:
            pytest.skip("positive int is valid type for limit")
        yield request.param

    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_invalid_expr_type(self, request):
        if isinstance(request.param, str):
            pytest.skip("string is valid type for expr")
        if request.param is None:
            pytest.skip("None is valid for expr")
        yield request.param

    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_invalid_expr_value(self, request):
        if not isinstance(request.param, str):
            pytest.skip("expression value only support string")
        if request.param == "":
            pytest.skip("empty field is valid")
        yield request.param

    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_invalid_expr_bool_value(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_invalid_partition(self, request):
        if request.param == []:
            pytest.skip("empty is valid for partition")
        if request.param is None:
            pytest.skip("None is valid for partition")
        yield request.param

    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_invalid_output_fields(self, request):
        if request.param == []:
            pytest.skip("empty is valid for output_fields")
        if request.param is None:
            pytest.skip("None is valid for output_fields")
        yield request.param

    @pytest.fixture(scope="function", params=ct.get_invalid_ints)
    def get_invalid_travel_timestamp(self, request):
        if request.param == 9999999999:
            pytest.skip("9999999999 is valid for travel timestamp")
        yield request.param

    @pytest.fixture(scope="function", params=ct.get_invalid_ints)
    def get_invalid_guarantee_timestamp(self, request):
        if request.param == 9999999999:
            pytest.skip("9999999999 is valid for guarantee_timestamp")
        yield request.param

    """
    ******************************************************************
    #  The followings are invalid cases
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L1)
    def test_search_no_connection(self):
        """
        target: test search without connection
        method: create and delete connection, then search
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. remove connection
        log.info("test_search_no_connection: removing connection")
        self.connection_wrap.remove_connection(alias='default')
        log.info("test_search_no_connection: removed connection")
        # 3. search without connection
        log.info("test_search_no_connection: searching without connection")
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "should create connect first"})

    @pytest.mark.tags(CaseLabel.L1)
    def test_search_no_collection(self):
        """
        target: test the scenario which search the non-exist collection
        method: 1. create collection
                2. drop collection
                3. search the dropped collection
        expected: raise exception and report the error
        """
        # 1. initialize without data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. Drop collection
        collection_w.drop()
        # 3. Search without collection
        log.info("test_search_no_collection: Searching without collection ")
        collection_w.search(vectors, default_search_field,
                            default_search_params, default_limit, default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "collection %s doesn't exist!" % collection_w.name})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_missing(self):
        """
        target: test search with incomplete parameters
        method: search with incomplete parameters
        expected: raise exception and report the error
        """
        # 1. initialize without data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. search collection with missing parameters
        log.info("test_search_param_missing: Searching collection %s "
                 "with missing parameters" % collection_w.name)
        # NOTE(review): asserts on CPython's TypeError wording — brittle if the
        # client signature changes; confirm the message against the pymilvus version.
        try:
            collection_w.search()
        except TypeError as e:
            assert "missing 4 required positional arguments: 'data', " \
                   "'anns_field', 'param', and 'limit'" in str(e)

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_vectors(self, get_invalid_vectors):
        """
        target: test search with invalid parameter values
        method: search with invalid data
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. search with invalid field
        invalid_vectors = get_invalid_vectors
        log.info("test_search_param_invalid_vectors: searching with "
                 "invalid vectors: {}".format(invalid_vectors))
        collection_w.search(invalid_vectors, default_search_field,
                            default_search_params, default_limit,
                            default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "`search_data` value {} is illegal".format(invalid_vectors)})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_dim(self):
        """
        target: test search with invalid parameter values
        method: search with invalid dim
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True)[0]
        # 2. search with invalid dim
        log.info("test_search_param_invalid_dim: searching with invalid dim")
        # 129 deliberately mismatches the collection dim (shadowing the module-level `vectors`)
        wrong_dim = 129
        vectors = [[random.random() for _ in range(wrong_dim)] for _ in range(default_nq)]
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "The dimension of query entities "
                                                    "is different from schema"})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_field_type(self, get_invalid_fields_type):
        """
        target: test search with invalid parameter type
        method: search with invalid field type
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. search with invalid field
        invalid_search_field = get_invalid_fields_type
        log.info("test_search_param_invalid_field_type: searching with "
                 "invalid field: %s" % invalid_search_field)
        collection_w.search(vectors[:default_nq], invalid_search_field,
                            default_search_params, default_limit,
                            default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "`anns_field` value {} is illegal".format(invalid_search_field)})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_field_value(self, get_invalid_fields_value):
        """
        target: test search with invalid parameter values
        method: search with invalid field value
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. search with invalid field
        invalid_search_field = get_invalid_fields_value
        log.info("test_search_param_invalid_field_value: searching with "
                 "invalid field: %s" % invalid_search_field)
        collection_w.search(vectors[:default_nq], invalid_search_field,
                            default_search_params, default_limit,
                            default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "Field %s doesn't exist in schema" % invalid_search_field})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_metric_type(self, get_invalid_metric_type):
        """
        target: test search with invalid parameter values
        method: search with invalid metric type
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True, 10)[0]
        # 2. search with invalid metric_type
        log.info("test_search_param_invalid_metric_type: searching with invalid metric_type")
        invalid_metric = get_invalid_metric_type
        search_params = {"metric_type": invalid_metric, "params": {"nprobe": 10}}
        collection_w.search(vectors[:default_nq], default_search_field,
                            search_params, default_limit,
                            default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "metric type not found"})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("index, params", zip(ct.all_index_types[:9], ct.default_index_params[:9]))
    def test_search_invalid_params_type(self, index, params):
        """
        target: test search with invalid search params
        method: test search with invalid params type
        expected: raise exception and report the error
        """
        if index == "FLAT":
            pytest.skip("skip in FLAT index")
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
                                                                      is_index=True)[0:4]
        # 2. create index and load
        default_index = {"index_type": index, "params": params, "metric_type": "L2"}
        collection_w.create_index("float_vector", default_index)
        collection_w.load()
        # 3. search
        # (sic) gen_invaild_search_params_type — helper name typo lives in common_func
        invalid_search_params = cf.gen_invaild_search_params_type()
        message = "Search params check failed"
        for invalid_search_param in invalid_search_params:
            if index == invalid_search_param["index_type"]:
                search_params = {"metric_type": "L2",
                                 "params": invalid_search_param["search_params"]}
                # NOTE(review): err_code 0 here while sibling tests expect 1 —
                # presumably intentional for param-check failures; confirm.
                collection_w.search(vectors[:default_nq], default_search_field,
                                    search_params, default_limit,
                                    default_search_exp,
                                    check_task=CheckTasks.err_res,
                                    check_items={"err_code": 0,
                                                 "err_msg": message})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_limit_type(self, get_invalid_limit):
        """
        target: test search with invalid limit type
        method: search with invalid limit type
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. search with invalid field
        invalid_limit = get_invalid_limit
        log.info("test_search_param_invalid_limit_type: searching with "
                 "invalid limit: %s" % invalid_limit)
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, invalid_limit,
                            default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "`limit` value %s is illegal" % invalid_limit})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("limit", [0, 16385])
    def test_search_param_invalid_limit_value(self, limit):
        """
        target: test search with invalid limit value
        method: search with invalid limit: 0 and maximum
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. search with invalid limit (topK)
        log.info("test_search_param_invalid_limit_value: searching with "
                 "invalid limit (topK) = %s" % limit)
        # too-large message is the default; limit == 0 overrides it below
        err_msg = "limit %d is too large!" % limit
        if limit == 0:
            err_msg = "`limit` value 0 is illegal"
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, limit,
                            default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": err_msg})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_expr_type(self, get_invalid_expr_type):
        """
        target: test search with invalid parameter type
        method: search with invalid search expressions
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix)[0]
        # 2 search with invalid expr
        invalid_search_expr = get_invalid_expr_type
        log.info("test_search_param_invalid_expr_type: searching with "
                 "invalid expr: {}".format(invalid_search_expr))
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            invalid_search_expr,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "The type of expr must be string ,"
                                                    "but {} is given".format(type(invalid_search_expr))})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_expr_value(self, get_invalid_expr_value):
        """
        target: test search with invalid parameter values
        method: search with invalid search expressions
        expected: raise exception and report the error
        """
        # 1.
initialize with data
        collection_w = self.init_collection_general(prefix)[0]
        # 2 search with invalid expr
        invalid_search_expr = get_invalid_expr_value
        log.info("test_search_param_invalid_expr_value: searching with "
                 "invalid expr: %s" % invalid_search_expr)
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            invalid_search_expr,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "invalid expression %s" % invalid_search_expr})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_expr_bool(self, get_invalid_expr_bool_value):
        """
        target: test search with invalid parameter values
        method: search with invalid bool search expressions
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True, is_all_data_type=True)[0]
        # 2 search with invalid bool expr
        invalid_search_expr_bool = f"{default_bool_field_name} == {get_invalid_expr_bool_value}"
        log.info("test_search_param_invalid_expr_bool: searching with "
                 "invalid expr: %s" % invalid_search_expr_bool)
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            invalid_search_expr_bool,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "failed to create query plan"})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_partition_invalid_type(self, get_invalid_partition):
        """
        target: test search invalid partition
        method: search with invalid partition type
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. search the invalid partition
        partition_name = get_invalid_partition
        err_msg = "`partition_name_array` value {} is illegal".format(partition_name)
        # partition names are passed positionally after the expression
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, partition_name,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": err_msg})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_output_fields_invalid_type(self, get_invalid_output_fields):
        """
        target: test search with output fields
        method: search with invalid output_field
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. search
        log.info("test_search_with_output_fields_invalid_type: Searching collection %s" % collection_w.name)
        output_fields = get_invalid_output_fields
        err_msg = "`output_fields` value {} is illegal".format(output_fields)
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, output_fields=output_fields,
                            check_task=CheckTasks.err_res,
                            check_items={ct.err_code: 1,
                                         ct.err_msg: err_msg})

    @pytest.mark.tags(CaseLabel.L1)
    def test_search_release_collection(self):
        """
        target: test the scenario which search the released collection
        method: 1. create collection
                2. release collection
                3. search the released collection
        expected: raise exception and report the error
        """
        # 1. initialize without data
        collection_w = self.init_collection_general(prefix, True, 10)[0]
        # 2. release collection
        collection_w.release()
        # 3. Search the released collection
        log.info("test_search_release_collection: Searching without collection ")
        collection_w.search(vectors, default_search_field,
                            default_search_params, default_limit,
                            default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "collection %s was not loaded "
                                                    "into memory" % collection_w.name})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_release_partition(self):
        """
        target: test the scenario which search the released collection
        method: 1. create collection
                2. release partition
                3. search the released partition
        expected: raise exception and report the error
        """
        # 1. initialize with data
        partition_num = 1
        collection_w = self.init_collection_general(prefix, True, 10, partition_num,
                                                    is_index=True)[0]
        par = collection_w.partitions
        par_name = par[partition_num].name
        par[partition_num].load()
        # 2. release partition
        par[partition_num].release()
        # 3. Search the released partition
        log.info("test_search_release_partition: Searching the released partition")
        limit = 10
        collection_w.search(vectors, default_search_field,
                            default_search_params, limit,
                            default_search_exp, [par_name],
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "partition has been released"})

    @pytest.mark.skip("enable this later using session/strong consistency")
    @pytest.mark.tags(CaseLabel.L1)
    def test_search_with_empty_collection(self):
        """
        target: test search with empty connection
        method: 1. search the empty collection before load
                2. search the empty collection after load
                3. search collection with data inserted but not load again
        expected: 1. raise exception if not loaded
                  2. return topk=0  if loaded
                  3. return topk successfully
        """
        # 1. initialize without data
        collection_w = self.init_collection_general(prefix)[0]
        # 2. search collection without data before load
        log.info("test_search_with_empty_collection: Searching empty collection %s" % collection_w.name)
        # NOTE(review): the literal below has no spaces around the name
        # ("collection<name>was not loaded...") — presumably still matches by
        # substring; verify against the server error text.
        err_msg = "collection" + collection_w.name + "was not loaded into memory"
        collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
                            default_limit, default_search_exp, timeout=1,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": err_msg})
        # 3. search collection without data after load
        collection_w.load()
        collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
                            default_limit, default_search_exp,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": default_nq,
                                         "ids": [],
                                         "limit": 0})
        # 4. search with data inserted but not load again
        data = cf.gen_default_dataframe_data(nb=2000)
        insert_res = collection_w.insert(data)[0]
        # Using bounded staleness, maybe we cannot search the "inserted" requests,
        # since the search requests arrived query nodes earlier than query nodes consume the insert requests.
        collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
                            default_limit, default_search_exp,
                            guarantee_timestamp=insert_res.timestamp,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": default_nq,
                                         "ids": insert_res.primary_keys,
                                         "limit": default_limit})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_empty_collection_with_partition(self):
        """
        target: test search with empty collection
        method: 1. collection an empty collection with partitions
                2. load
                3. search
        expected: return 0 result
        """
        # 1. initialize without data
        collection_w = self.init_collection_general(prefix, partition_num=1)[0]
        par = collection_w.partitions
        # 2. search collection without data after load
        collection_w.load()
        collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
                            default_limit, default_search_exp,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": default_nq,
                                         "ids": [],
                                         "limit": 0})
        # 2. search a partition without data after load
        collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
                            default_limit, default_search_exp, [par[1].name],
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": default_nq,
                                         "ids": [],
                                         "limit": 0})

    @pytest.mark.tags(CaseLabel.L1)
    def test_search_partition_deleted(self):
        """
        target: test search deleted partition
        method: 1. create a collection with partitions
                2. delete a partition
                3. search the deleted partition
        expected: raise exception and report the error
        """
        # 1. initialize with data
        partition_num = 1
        collection_w = self.init_collection_general(prefix, True, 1000, partition_num)[0]
        # 2. delete partitions
        log.info("test_search_partition_deleted: deleting a partition")
        par = collection_w.partitions
        deleted_par_name = par[partition_num].name
        collection_w.drop_partition(deleted_par_name)
        log.info("test_search_partition_deleted: deleted a partition")
        collection_w.load()
        # 3. search after delete partitions
        log.info("test_search_partition_deleted: searching deleted partition")
        # NOTE(review): "PartitonName" (sic) presumably mirrors the server-side
        # message typo — confirm before "fixing".
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit, default_search_exp,
                            [deleted_par_name],
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "PartitonName: %s not found" % deleted_par_name})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.xfail(reason="issue 6731")
    @pytest.mark.parametrize("index, params", zip(ct.all_index_types[:9], ct.default_index_params[:9]))
    def test_search_different_index_invalid_params(self, index, params):
        """
        target: test search with different index
        method: test search with different index
        expected: searched successfully
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
                                                                      partition_num=1,
                                                                      is_index=True)[0:4]
        # 2.
create different index
        # PQ-style indexes require m to divide dim; fall back to dim // 4
        # (assumes default_dim is divisible by 4 — TODO confirm)
        if params.get("m"):
            if (default_dim % params["m"]) != 0:
                params["m"] = default_dim // 4
        log.info("test_search_different_index_invalid_params: Creating index-%s" % index)
        default_index = {"index_type": index, "params": params, "metric_type": "L2"}
        collection_w.create_index("float_vector", default_index)
        log.info("test_search_different_index_invalid_params: Created index-%s" % index)
        collection_w.load()
        # 3. search
        log.info("test_search_different_index_invalid_params: Searching after creating index-%s" % index)
        collection_w.search(vectors, default_search_field,
                            default_search_params, default_limit,
                            default_search_exp,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": default_nq,
                                         "ids": insert_ids,
                                         "limit": default_limit})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_index_partition_not_existed(self):
        """
        target: test search not existed partition
        method: search with not existed partition
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True)[0]
        # 2. create index
        default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
        collection_w.create_index("float_vector", default_index)
        # 3. search the non exist partition
        partition_name = "search_non_exist"
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, [partition_name],
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "PartitonName: %s not found" % partition_name})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.xfail(reason="issue 15407")
    def test_search_param_invalid_binary(self):
        """
        target: test search within binary data (invalid parameter)
        method: search with wrong metric type
        expected: raise exception and report the error
        """
        # 1. initialize with binary data
        collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
        # 2. create index
        default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
        collection_w.create_index("binary_vector", default_index)
        # 3. search with exception
        # L2 deliberately mismatches the JACCARD binary index above
        binary_vectors = cf.gen_binary_vectors(3000, default_dim)[1]
        wrong_search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
        collection_w.search(binary_vectors[:default_nq], "binary_vector",
                            wrong_search_params, default_limit,
                            default_search_exp,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "unsupported"})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.xfail(reason="issue 15407")
    def test_search_binary_flat_with_L2(self):
        """
        target: search binary collection using FlAT with L2
        method: search binary collection using FLAT with L2
        expected: raise exception and report error
        """
        # 1. initialize with binary data
        collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
        # 2. search and assert
        query_raw_vector, binary_vectors = cf.gen_binary_vectors(2, default_dim)
        search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
        collection_w.search(binary_vectors[:default_nq], "binary_vector",
                            search_params, default_limit, "int64 >= 0",
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "Search failed"})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_output_fields_not_exist(self):
        """
        target: test search with output fields
        method: search with non-exist output_field
        expected: raise exception
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)[0:4]
        # 2. search
        log.info("test_search_with_output_fields_not_exist: Searching collection %s" % collection_w.name)
        # "int63" is a deliberate near-miss of the real "int64" field
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, output_fields=["int63"],
                            check_task=CheckTasks.err_res,
                            check_items={ct.err_code: 1,
                                         ct.err_msg: "Field int63 not exist"})

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("output_fields", [[default_search_field], ["%"]])
    def test_search_output_field_vector(self, output_fields):
        """
        target: test search with vector as output field
        method: search with one vector output_field or
                wildcard for vector
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True)[0]
        # 2. search
        log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, output_fields=output_fields,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "Search doesn't support "
                                                    "vector field as output_fields"})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("output_fields", [["*%"], ["**"], ["*", "@"]])
    def test_search_output_field_invalid_wildcard(self, output_fields):
        """
        target: test search with invalid output wildcard
        method: search with invalid output_field wildcard
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True)[0]
        # 2. search
        log.info("test_search_output_field_invalid_wildcard: Searching collection %s" % collection_w.name)
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, output_fields=output_fields,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": f"Field {output_fields[-1]} not exist"})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_travel_timestamp(self, get_invalid_travel_timestamp):
        """
        target: test search with invalid travel timestamp
        method: search with invalid travel timestamp
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True, 10)[0]
        # 2. search with invalid travel timestamp
        log.info("test_search_param_invalid_travel_timestamp: searching with invalid travel timestamp")
        invalid_travel_time = get_invalid_travel_timestamp
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp,
                            travel_timestamp=invalid_travel_time,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "`travel_timestamp` value %s is illegal" % invalid_travel_time})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_param_invalid_guarantee_timestamp(self, get_invalid_guarantee_timestamp):
        """
        target: test search with invalid guarantee timestamp
        method: search with invalid guarantee timestamp
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True, 10)[0]
        # 2. search with invalid travel timestamp
        # NOTE(review): comment above says "travel" but this test exercises
        # guarantee_timestamp — looks like a copy-paste remnant.
        log.info("test_search_param_invalid_guarantee_timestamp: searching with invalid guarantee timestamp")
        invalid_guarantee_time = get_invalid_guarantee_timestamp
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp,
                            guarantee_timestamp=invalid_guarantee_time,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "`guarantee_timestamp` value %s is illegal" % invalid_guarantee_time})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("round_decimal", [7, -2, 999, 1.0, None, [1], "string", {}])
    def test_search_invalid_round_decimal(self, round_decimal):
        """
        target: test search with invalid round decimal
        method: search with invalid round decimal
        expected: raise exception and report the error
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True, nb=10)[0]
        # 2. search
        log.info("test_search_invalid_round_decimal: Searching collection %s" % collection_w.name)
        collection_w.search(vectors[:default_nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, round_decimal=round_decimal,
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": f"`round_decimal` value {round_decimal} is illegal"})


class TestCollectionSearch(TestcaseBase):
    """ Test case of search interface """

    # Parametrized fixtures covering small/medium data sizes, query counts,
    # dims, auto_id modes and sync/async search.
    @pytest.fixture(scope="function", params=[default_nb, default_nb_medium])
    def nb(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=[2, 500])
    def nq(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=[8, 128])
    def dim(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=[False, True])
    def auto_id(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=[False, True])
    def _async(self, request):
        yield request.param

    """
    ******************************************************************
    #  The following are valid base cases
****************************************************************** """ @pytest.mark.tags(CaseLabel.L0) def test_search_normal(self, nq, dim, auto_id): """ target: test search normal case method: create connection, collection, insert and search expected: 1. search returned with 0 before travel timestamp 2. search successfully with limit(topK) after travel timestamp """ # 1. initialize with data collection_w, _, _, insert_ids, time_stamp = \ self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:5] # 2. search before insert time_stamp log.info("test_search_normal: searching collection %s" % collection_w.name) vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] collection_w.search(vectors[:nq], default_search_field, default_search_params, default_limit, default_search_exp, travel_timestamp=time_stamp - 1, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": [], "limit": 0}) # 3. search after insert time_stamp collection_w.search(vectors[:nq], default_search_field, default_search_params, default_limit, default_search_exp, travel_timestamp=time_stamp, guarantee_timestamp=0, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": default_limit}) @pytest.mark.tags(CaseLabel.L0) def test_search_with_hit_vectors(self, nq, dim, auto_id): """ target: test search with vectors in collections method: create connections,collection insert and search vectors in collections expected: search successfully with limit(topK) and can be hit at top 1 (min distance is 0) """ collection_w, _vectors, _, insert_ids = \ self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:4] # get vectors that inserted into collection vectors = np.array(_vectors[0]).tolist() vectors = [vectors[i][-1] for i in range(nq)] log.info("test_search_with_hit_vectors: searching collection %s" % collection_w.name) search_res, _ = collection_w.search(vectors[:nq], default_search_field, default_search_params, 
default_limit, default_search_exp, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": default_limit}) log.info("test_search_with_hit_vectors: checking the distance of top 1") for hits in search_res: # verify that top 1 hit is itself,so min distance is 0 assert hits.distances[0] == 0.0 @pytest.mark.tags(CaseLabel.L1) @pytest.mark.parametrize("dup_times", [1, 2, 3]) def test_search_with_dup_primary_key(self, dim, auto_id, _async, dup_times): """ target: test search with duplicate primary key method: 1.insert same data twice 2.search expected: search results are de-duplicated """ # initialize with data nb = ct.default_nb nq = ct.default_nq collection_w, insert_data, _, insert_ids = self.init_collection_general(prefix, True, nb, auto_id=auto_id, dim=dim)[0:4] # insert dup data multi times for i in range(dup_times): insert_res, _ = collection_w.insert(insert_data[0]) insert_ids.extend(insert_res.primary_keys) # search vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)] search_res, _ = collection_w.search(vectors[:nq], default_search_field, default_search_params, default_limit, default_search_exp, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": default_limit, "_async": _async}) if _async: search_res.done() search_res = search_res.result() # assert that search results are de-duplicated for hits in search_res: ids = hits.ids assert sorted(list(set(ids))) == sorted(ids) @pytest.mark.tags(CaseLabel.L1) def test_search_with_empty_vectors(self, dim, auto_id, _async): """ target: test search with empty query vector method: search using empty query vector expected: search successfully with 0 results """ # 1. initialize without data collection_w = self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0] # 2. 
        # search collection with an empty query list: nq == 0, no results expected
        log.info("test_search_with_empty_vectors: Searching collection %s "
                 "using empty vector" % collection_w.name)
        collection_w.search([], default_search_field, default_search_params,
                            default_limit, default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": 0,
                                         "_async": _async})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_ndarray(self, dim, auto_id, _async):
        """
        target: test search with ndarray
        method: search using ndarray data
        expected: search successfully
        """
        # 1. initialize without data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
                                                                      auto_id=auto_id,
                                                                      dim=dim)[0:4]
        # 2. search collection passing a raw numpy ndarray instead of nested lists
        log.info("test_search_with_ndarray: Searching collection %s "
                 "using ndarray" % collection_w.name)
        vectors = np.random.randn(default_nq, dim)
        collection_w.search(vectors, default_search_field, default_search_params,
                            default_limit, default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": default_nq,
                                         "ids": insert_ids,
                                         "limit": default_limit,
                                         "_async": _async})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("search_params", [{}, {"params": {}}, {"params": {"nprobe": 10}}])
    def test_search_normal_default_params(self, dim, auto_id, search_params, _async):
        """
        target: test search normal case
        method: create connection, collection, insert and search
        expected: search successfully with limit(topK)
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = \
            self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:4]
        # 2.
search log.info("test_search_normal_default_params: searching collection %s" % collection_w.name) vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)] collection_w.search(vectors[:default_nq], default_search_field, search_params, default_limit, default_search_exp, _async=_async, travel_timestamp=0, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": default_limit, "_async": _async}) @pytest.mark.tags(CaseLabel.L1) @pytest.mark.xfail(reason="issue #13611") def test_search_before_after_delete(self, nq, dim, auto_id, _async): """ target: test search function before and after deletion method: 1. search the collection 2. delete a partition 3. search the collection expected: the deleted entities should not be searched """ # 1. initialize with data nb = 1000 limit = 1000 partition_num = 1 collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, partition_num, auto_id=auto_id, dim=dim)[0:4] # 2. search all the partitions before partition deletion vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] log.info("test_search_before_after_delete: searching before deleting partitions") collection_w.search(vectors[:nq], default_search_field, default_search_params, limit, default_search_exp, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": limit, "_async": _async}) # 3. delete partitions log.info("test_search_before_after_delete: deleting a partition") par = collection_w.partitions deleted_entity_num = par[partition_num].num_entities print(deleted_entity_num) entity_num = nb - deleted_entity_num collection_w.drop_partition(par[partition_num].name) log.info("test_search_before_after_delete: deleted a partition") collection_w.load() # 4. 
        # search non-deleted part after delete partitions: only the surviving
        # entity ids may appear, and the reachable limit shrinks accordingly
        log.info("test_search_before_after_delete: searching after deleting partitions")
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit,
                            default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids[:entity_num],
                                         "limit": limit - deleted_entity_num,
                                         "_async": _async})

    @pytest.mark.tags(CaseLabel.L1)
    def test_search_collection_after_release_load(self, nb, nq, dim, auto_id, _async):
        """
        target: search the pre-released collection after load
        method: 1. create collection
                2. release collection
                3. load collection
                4. search the pre-released collection
        expected: search successfully
        """
        # 1. initialize without data
        collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
                                                                                  1, auto_id=auto_id,
                                                                                  dim=dim)[0:5]
        # 2. release collection
        log.info("test_search_collection_after_release_load: releasing collection %s" % collection_w.name)
        collection_w.release()
        log.info("test_search_collection_after_release_load: released collection %s" % collection_w.name)
        # 3. Search the pre-released collection after load
        log.info("test_search_collection_after_release_load: loading collection %s" % collection_w.name)
        collection_w.load()
        log.info("test_search_collection_after_release_load: searching after load")
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        collection_w.search(vectors[:nq], default_search_field, default_search_params,
                            default_limit, default_search_exp, _async=_async,
                            travel_timestamp=time_stamp,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": default_limit,
                                         "_async": _async})

    @pytest.mark.tags(CaseLabel.L1)
    def test_search_load_flush_load(self, nb, nq, dim, auto_id, _async):
        """
        target: test search when load before flush
        method: 1. insert data and load
                2. flush, and load
                3. search the collection
        expected: search success with limit(topK)
        """
        # 1.
        # initialize with data
        collection_w = self.init_collection_general(prefix, auto_id=auto_id, dim=dim)[0]
        # 2. insert data
        insert_ids = cf.insert_data(collection_w, nb, auto_id=auto_id, dim=dim)[3]
        # 3. load data
        collection_w.load()
        # 4. flush and load
        # accessing num_entities is what triggers the flush here — presumably the
        # Milvus client flushes on this property; confirm with client docs
        collection_w.num_entities
        collection_w.load()
        # 5. search
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": default_limit,
                                         "_async": _async})

    @pytest.mark.skip("enable this later using session/strong consistency")
    @pytest.mark.tags(CaseLabel.L1)
    def test_search_new_data(self, nq, dim, auto_id, _async):
        """
        target: test search new inserted data without load
        method: 1. search the collection
                2. insert new data
                3. search the collection without load again
                4. Use guarantee_timestamp to guarantee data consistency
        expected: new data should be searched
        """
        # 1. initialize with data
        limit = 1000
        nb_old = 500
        collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb_old,
                                                                                  auto_id=auto_id,
                                                                                  dim=dim)[0:5]
        # 2. search for original data after load
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        log.info("test_search_new_data: searching for original data after load")
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit,
                            default_search_exp, _async=_async,
                            travel_timestamp=time_stamp + 1,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": nb_old,
                                         "_async": _async})
        # 3. insert new data (note: time_stamp is rebound to the second insert's ts)
        nb_new = 300
        _, _, _, insert_ids_new, time_stamp = cf.insert_data(collection_w, nb_new,
                                                             auto_id=auto_id, dim=dim,
                                                             insert_offset=nb_old)
        insert_ids.extend(insert_ids_new)
        # 4.
        # search for new data without load
        # Using bounded staleness, maybe we could not search the "inserted" entities,
        # since the search requests arrived query nodes earlier than query nodes consume the insert requests.
        # guarantee_timestamp forces the query node to catch up to the second insert
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit,
                            default_search_exp, _async=_async,
                            guarantee_timestamp=time_stamp,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": nb_old + nb_new,
                                         "_async": _async})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.skip(reason="debug")
    def test_search_max_dim(self, auto_id, _async):
        """
        target: test search with max configuration
        method: create connection, collection, insert and search with max dim
        expected: search successfully with limit(topK)
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 100,
                                                                      auto_id=auto_id,
                                                                      dim=max_dim)[0:4]
        # 2. search (limit is set to nq here, not default_limit)
        nq = 2
        log.info("test_search_max_dim: searching collection %s" % collection_w.name)
        vectors = [[random.random() for _ in range(max_dim)] for _ in range(nq)]
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, nq,
                            default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": nq,
                                         "_async": _async})

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("index, params",
                             zip(ct.all_index_types[:9],
                                 ct.default_index_params[:9]))
    def test_search_after_different_index_with_params(self, dim, index, params, auto_id, _async):
        """
        target: test search after different index
        method: test search after different index and corresponding search params
        expected: search successfully with limit(topK)
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, 5000,
                                                                                  partition_num=1,
                                                                                  auto_id=auto_id,
                                                                                  dim=dim, is_index=True)[0:5]
        # 2.
        # create index and load
        # PQ-style params (m / PQM) must divide dim evenly; patch them if not
        if params.get("m"):
            if (dim % params["m"]) != 0:
                params["m"] = dim // 4
        if params.get("PQM"):
            if (dim % params["PQM"]) != 0:
                params["PQM"] = dim // 4
        default_index = {"index_type": index, "params": params, "metric_type": "L2"}
        collection_w.create_index("float_vector", default_index)
        collection_w.load()
        # 3. search once per generated search-param variant for this index type
        search_params = cf.gen_search_param(index)
        vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
        for search_param in search_params:
            log.info("Searching with search params: {}".format(search_param))
            collection_w.search(vectors[:default_nq], default_search_field,
                                search_param, default_limit,
                                default_search_exp, _async=_async,
                                travel_timestamp=time_stamp,
                                check_task=CheckTasks.check_search_results,
                                check_items={"nq": default_nq,
                                             "ids": insert_ids,
                                             "limit": default_limit,
                                             "_async": _async})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("index, params",
                             zip(ct.all_index_types[:9],
                                 ct.default_index_params[:9]))
    def test_search_after_index_different_metric_type(self, dim, index, params, auto_id, _async):
        """
        target: test search with different metric type
        method: test search with different metric type
        expected: searched successfully
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, 5000,
                                                                                  partition_num=1,
                                                                                  auto_id=auto_id,
                                                                                  dim=dim, is_index=True)[0:5]
        # 2. create different index (IP metric instead of L2)
        if params.get("m"):
            if (dim % params["m"]) != 0:
                params["m"] = dim // 4
        if params.get("PQM"):
            if (dim % params["PQM"]) != 0:
                params["PQM"] = dim // 4
        log.info("test_search_after_index_different_metric_type: Creating index-%s" % index)
        default_index = {"index_type": index, "params": params, "metric_type": "IP"}
        collection_w.create_index("float_vector", default_index)
        log.info("test_search_after_index_different_metric_type: Created index-%s" % index)
        collection_w.load()
        # 3.
        # search with params generated for the IP metric
        search_params = cf.gen_search_param(index, "IP")
        vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
        for search_param in search_params:
            log.info("Searching with search params: {}".format(search_param))
            collection_w.search(vectors[:default_nq], default_search_field,
                                search_param, default_limit,
                                default_search_exp, _async=_async,
                                travel_timestamp=time_stamp,
                                check_task=CheckTasks.check_search_results,
                                check_items={"nq": default_nq,
                                             "ids": insert_ids,
                                             "limit": default_limit,
                                             "_async": _async})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_collection_multiple_times(self, nb, nq, dim, auto_id, _async):
        """
        target: test search for multiple times
        method: search for multiple times
        expected: searched successfully
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                      auto_id=auto_id,
                                                                      dim=dim)[0:4]
        # 2. search for multiple times (search_num rounds, same query vectors)
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        for i in range(search_num):
            log.info("test_search_collection_multiple_times: searching round %d" % (i + 1))
            collection_w.search(vectors[:nq], default_search_field,
                                default_search_params, default_limit,
                                default_search_exp, _async=_async,
                                check_task=CheckTasks.check_search_results,
                                check_items={"nq": nq,
                                             "ids": insert_ids,
                                             "limit": default_limit,
                                             "_async": _async})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_sync_async_multiple_times(self, nb, nq, dim, auto_id):
        """
        target: test async search after sync search case
        method: create connection, collection, insert,
                sync search and async search
        expected: search successfully with limit(topK)
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
                                                                                  auto_id=auto_id,
                                                                                  dim=dim)[0:5]
        # 2.
        # search, alternating sync and async within each round
        log.info("test_search_sync_async_multiple_times: searching collection %s" % collection_w.name)
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        for i in range(search_num):
            log.info("test_search_sync_async_multiple_times: searching round %d" % (i + 1))
            for _async in [False, True]:
                collection_w.search(vectors[:nq], default_search_field,
                                    default_search_params, default_limit,
                                    default_search_exp, _async=_async,
                                    travel_timestamp=time_stamp,
                                    check_task=CheckTasks.check_search_results,
                                    check_items={"nq": nq,
                                                 "ids": insert_ids,
                                                 "limit": default_limit,
                                                 "_async": _async})

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.skip(reason="issue #12680")
    # TODO: add one more for binary vectors
    # @pytest.mark.parametrize("vec_fields", [[cf.gen_float_vec_field(name="test_vector1")],
    #                                         [cf.gen_binary_vec_field(name="test_vector1")],
    #                                         [cf.gen_binary_vec_field(), cf.gen_binary_vec_field("test_vector1")]])
    def test_search_multiple_vectors_with_one_indexed(self):
        """
        target: test indexing on one vector fields when there are multi float vec fields
        method: 1. create collection with multiple float vector fields
                2. insert data and build index on one of float vector fields
                3. load collection and search
        expected: load and search successfully
        """
        vec_fields = [cf.gen_float_vec_field(name="test_vector1")]
        schema = cf.gen_schema_multi_vector_fields(vec_fields)
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
        df = cf.gen_dataframe_multi_vec_fields(vec_fields=vec_fields)
        collection_w.insert(df)
        assert collection_w.num_entities == ct.default_nb
        # index only the extra vector field, then search against it
        _index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
        res, ch = collection_w.create_index(field_name="test_vector1", index_params=_index)
        assert ch is True
        collection_w.load()
        vectors = [[random.random() for _ in range(default_dim)] for _ in range(2)]
        search_params = {"metric_type": "L2", "params": {"nprobe": 16}}
        res_1, _ = collection_w.search(data=vectors, anns_field="test_vector1",
                                       param=search_params, limit=1)

    @pytest.mark.tags(CaseLabel.L1)
    def test_search_index_one_partition(self, nb, auto_id, _async):
        """
        target: test search from partition
        method: search from one partition
        expected: searched successfully
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
                                                                                  partition_num=1,
                                                                                  auto_id=auto_id,
                                                                                  is_index=True)[0:5]
        # 2. create index
        default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
        collection_w.create_index("float_vector", default_index)
        collection_w.load()
        # 3.
        # search in one partition; cap the expected hit count by the partition size
        log.info("test_search_index_one_partition: searching (1000 entities) through one partition")
        limit = 1000
        par = collection_w.partitions
        if limit > par[1].num_entities:
            limit_check = par[1].num_entities
        else:
            limit_check = limit
        search_params = {"metric_type": "L2", "params": {"nprobe": 128}}
        # NOTE(review): `vectors` is not defined locally in this test — it comes
        # from module scope; verify its dim matches the collection's default dim
        collection_w.search(vectors[:default_nq], default_search_field,
                            search_params, limit, default_search_exp,
                            [par[1].name], _async=_async,
                            travel_timestamp=time_stamp,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": default_nq,
                                         "ids": insert_ids[par[0].num_entities:],
                                         "limit": limit_check,
                                         "_async": _async})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_index_partitions(self, nb, nq, dim, auto_id, _async):
        """
        target: test search from partitions
        method: search from partitions
        expected: searched successfully
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                      partition_num=1,
                                                                      auto_id=auto_id,
                                                                      dim=dim,
                                                                      is_index=True)[0:4]
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        # 2. create index
        default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
        collection_w.create_index("float_vector", default_index)
        collection_w.load()
        # 3.
        # search through both named partitions explicitly
        log.info("test_search_index_partitions: searching (1000 entities) through partitions")
        par = collection_w.partitions
        log.info("test_search_index_partitions: partitions: %s" % par)
        limit = 1000
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit, default_search_exp,
                            [par[0].name, par[1].name], _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": limit,
                                         "_async": _async})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("partition_names",
                             [["(.*)"], ["search(.*)"]])
    def test_search_index_partitions_fuzzy(self, nb, nq, dim, partition_names, auto_id, _async):
        """
        target: test search from partitions
        method: search from partitions with fuzzy
                partition name
        expected: searched successfully
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                      partition_num=1,
                                                                      auto_id=auto_id,
                                                                      dim=dim)[0:4]
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        # 2. create index
        default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
        collection_w.create_index("float_vector", default_index)
        # 3.
        # search through partitions matched by regex-style names
        log.info("test_search_index_partitions_fuzzy: searching through partitions")
        limit = 1000
        limit_check = limit
        par = collection_w.partitions
        # "search(.*)" matches only the second (named) partition, so narrow the
        # expected ids and reachable limit to that partition
        if partition_names == ["search(.*)"]:
            insert_ids = insert_ids[par[0].num_entities:]
            if limit > par[1].num_entities:
                limit_check = par[1].num_entities
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit, default_search_exp,
                            partition_names, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": limit_check,
                                         "_async": _async})

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_index_partition_empty(self, nq, dim, auto_id, _async):
        """
        target: test search the empty partition
        method: search from the empty partition
        expected: searched successfully with 0 results
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True,
                                                    auto_id=auto_id,
                                                    dim=dim,
                                                    is_index=True)[0]
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        # 2. create empty partition
        partition_name = "search_partition_empty"
        collection_w.create_partition(partition_name=partition_name, description="search partition empty")
        par = collection_w.partitions
        log.info("test_search_index_partition_empty: partitions: %s" % par)
        collection_w.load()
        # 3. create index
        default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
        collection_w.create_index("float_vector", default_index)
        # 4.
        # search the empty partition: must succeed but return zero hits
        log.info("test_search_index_partition_empty: searching %s "
                 "entities through empty partition" % default_limit)
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, [partition_name],
                            _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": [],
                                         "limit": 0,
                                         "_async": _async})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
    def test_search_binary_jaccard_flat_index(self, nq, dim, auto_id, _async, index):
        """
        target: search binary_collection, and check the result: distance
        method: compare the return distance value with value computed with JACCARD
        expected: the return distance equals to the computed value
        """
        # 1. initialize with binary data (only 2 entities, so top-1 distance is
        #    min over both reference vectors)
        collection_w, _, binary_raw_vector, insert_ids, time_stamp = self.init_collection_general(prefix, True, 2,
                                                                                                  is_binary=True,
                                                                                                  auto_id=auto_id,
                                                                                                  dim=dim,
                                                                                                  is_index=True)[0:5]
        # 2. create index
        default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "JACCARD"}
        collection_w.create_index("binary_vector", default_index)
        collection_w.load()
        # 3. compute the distance locally as the expected reference value
        query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
        distance_0 = cf.jaccard(query_raw_vector[0], binary_raw_vector[0])
        distance_1 = cf.jaccard(query_raw_vector[0], binary_raw_vector[1])
        # 4.
        # search and compare the distance returned by the server against the
        # locally computed JACCARD value
        search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}}
        res = collection_w.search(binary_vectors[:nq], "binary_vector",
                                  search_params, default_limit, "int64 >= 0",
                                  _async=_async,
                                  travel_timestamp=time_stamp,
                                  check_task=CheckTasks.check_search_results,
                                  check_items={"nq": nq,
                                               "ids": insert_ids,
                                               "limit": 2,
                                               "_async": _async})[0]
        if _async:
            res.done()
            res = res.result()
        assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
    def test_search_binary_hamming_flat_index(self, nq, dim, auto_id, _async, index):
        """
        target: search binary_collection, and check the result: distance
        method: compare the return distance value with value computed with HAMMING
        expected: the return distance equals to the computed value
        """
        # 1. initialize with binary data
        collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
                                                                                      is_binary=True,
                                                                                      auto_id=auto_id,
                                                                                      dim=dim,
                                                                                      is_index=True)[0:4]
        # 2. create index
        default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "HAMMING"}
        collection_w.create_index("binary_vector", default_index)
        # 3. compute the distance locally as the expected reference value
        collection_w.load()
        query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
        distance_0 = cf.hamming(query_raw_vector[0], binary_raw_vector[0])
        distance_1 = cf.hamming(query_raw_vector[0], binary_raw_vector[1])
        # 4.
        # search and compare the distance returned by the server against the
        # locally computed HAMMING value
        search_params = {"metric_type": "HAMMING", "params": {"nprobe": 10}}
        res = collection_w.search(binary_vectors[:nq], "binary_vector",
                                  search_params, default_limit, "int64 >= 0",
                                  _async=_async,
                                  check_task=CheckTasks.check_search_results,
                                  check_items={"nq": nq,
                                               "ids": insert_ids,
                                               "limit": 2,
                                               "_async": _async})[0]
        if _async:
            res.done()
            res = res.result()
        assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.xfail(reason="issue 6843")
    @pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
    def test_search_binary_tanimoto_flat_index(self, nq, dim, auto_id, _async, index):
        """
        target: search binary_collection, and check the result: distance
        method: compare the return distance value with value computed with TANIMOTO
        expected: the return distance equals to the computed value
        """
        # 1. initialize with binary data
        collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
                                                                                      is_binary=True,
                                                                                      auto_id=auto_id,
                                                                                      dim=dim,
                                                                                      is_index=True)[0:4]
        log.info("auto_id= %s, _async= %s" % (auto_id, _async))
        # 2. create index
        default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "TANIMOTO"}
        collection_w.create_index("binary_vector", default_index)
        collection_w.load()
        # 3. compute the distance locally as the expected reference value
        query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
        distance_0 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[0])
        distance_1 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[1])
        # 4.
        # search and compare the distance returned by the server against the
        # locally computed TANIMOTO value
        search_params = {"metric_type": "TANIMOTO", "params": {"nprobe": 10}}
        res = collection_w.search(binary_vectors[:nq], "binary_vector",
                                  search_params, default_limit, "int64 >= 0",
                                  _async=_async,
                                  check_task=CheckTasks.check_search_results,
                                  check_items={"nq": nq,
                                               "ids": insert_ids,
                                               "limit": 2,
                                               "_async": _async})[0]
        if _async:
            res.done()
            res = res.result()
        assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_travel_time_without_expression(self, auto_id):
        """
        target: test search using travel time without expression
        method: 1. create connections,collection
                2. first insert, and return with timestamp1
                3. second insert, and return with timestamp2
                4. search before timestamp1 and timestamp2
        expected: 1 data inserted at a timestamp could not be searched before it
                  2 data inserted at a timestamp could be searched after it
        """
        # 1. create connection, collection and insert
        nb = 10
        collection_w, _, _, insert_ids_1, time_stamp_1 = \
            self.init_collection_general(prefix, True, nb, auto_id=auto_id, dim=default_dim)[0:5]
        # 2. insert for the second time
        log.info("test_search_travel_time_without_expression: inserting for the second time")
        _, entities, _, insert_ids_2, time_stamp_2 = cf.insert_data(collection_w, nb, auto_id=auto_id,
                                                                    dim=default_dim, insert_offset=nb)[0:5]
        # 3. extract vectors inserted for the second time
        # (last column of the inserted dataframe is assumed to be the vector field)
        entities_list = np.array(entities[0]).tolist()
        vectors = [entities_list[i][-1] for i in range(default_nq)]
        # 4.
        # search with insert timestamp1: second-batch ids must NOT be visible yet
        log.info("test_search_travel_time_without_expression: searching collection %s with time_stamp_1 '%d'"
                 % (collection_w.name, time_stamp_1))
        search_res = collection_w.search(vectors, default_search_field,
                                         default_search_params, default_limit,
                                         travel_timestamp=time_stamp_1,
                                         check_task=CheckTasks.check_search_results,
                                         check_items={"nq": default_nq,
                                                      "ids": insert_ids_1,
                                                      "limit": default_limit})[0]
        log.info("test_search_travel_time_without_expression: checking that data inserted "
                 "after time_stamp_2 is not searched at time_stamp_1")
        for i in range(len(search_res)):
            assert insert_ids_2[i] not in search_res[i].ids
        # 5. search with insert timestamp2: both batches are now visible
        log.info("test_search_travel_time_without_expression: searching collection %s with time_stamp_2 '%d'"
                 % (collection_w.name, time_stamp_2))
        search_res = collection_w.search(vectors, default_search_field,
                                         default_search_params, default_limit,
                                         travel_timestamp=time_stamp_2,
                                         check_task=CheckTasks.check_search_results,
                                         check_items={"nq": default_nq,
                                                      "ids": insert_ids_1 + insert_ids_2,
                                                      "limit": default_limit})[0]
        log.info("test_search_travel_time_without_expression: checking that data inserted "
                 "after time_stamp_2 is searched at time_stamp_2")
        for i in range(len(search_res)):
            assert insert_ids_2[i] in search_res[i].ids

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("expression", cf.gen_normal_expressions())
    def test_search_with_expression(self, dim, expression, _async):
        """
        target: test search with different expressions
        method: test search with different expressions
        expected: searched successfully with correct limit(topK)
        """
        # 1.
        # initialize with data
        nb = 1000
        collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True,
                                                                             nb, dim=dim,
                                                                             is_index=True)[0:4]
        # filter result with expression in collection: replay the boolean
        # expression locally to compute the expected matching id set
        _vectors = _vectors[0]
        expression = expression.replace("&&", "and").replace("||", "or")
        filter_ids = []
        for i, _id in enumerate(insert_ids):
            # int64 / float are bound as locals so eval(expression) can reference
            # the field names used inside the generated expression
            int64 = _vectors.int64[i]
            float = _vectors.float[i]
            if not expression or eval(expression):
                filter_ids.append(_id)
        # 2. create index
        index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
        collection_w.create_index("float_vector", index_param)
        collection_w.load()
        # 3. search with expression
        log.info("test_search_with_expression: searching with expression: %s" % expression)
        vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
        search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
                                            default_search_params, nb, expression,
                                            _async=_async,
                                            check_task=CheckTasks.check_search_results,
                                            check_items={"nq": default_nq,
                                                         "ids": insert_ids,
                                                         "limit": min(nb, len(filter_ids)),
                                                         "_async": _async})
        if _async:
            search_res.done()
            search_res = search_res.result()
        # every hit must satisfy the expression, i.e. be in the local filter set
        filter_ids_set = set(filter_ids)
        for hits in search_res:
            ids = hits.ids
            assert set(ids).issubset(filter_ids_set)

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("bool_type", [True, False, "true", "false"])
    def test_search_with_expression_bool(self, dim, auto_id, _async, bool_type):
        """
        target: test search with different bool expressions
        method: search with different bool expressions
        expected: searched successfully with correct limit(topK)
        """
        # 1. initialize with data
        nb = 1000
        collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True,
                                                                             nb, is_all_data_type=True,
                                                                             auto_id=auto_id,
                                                                             dim=dim)[0:4]
        # 2. create index
        index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
        collection_w.create_index("float_vector", index_param)
        collection_w.load()
        # 3.
filter result with expression in collection filter_ids = [] bool_type_cmp = bool_type if bool_type == "true": bool_type_cmp = True if bool_type == "false": bool_type_cmp = False for i, _id in enumerate(insert_ids): if _vectors[0][f"{default_bool_field_name}"][i] == bool_type_cmp: filter_ids.append(_id) # 4. search with different expressions expression = f"{default_bool_field_name} == {bool_type}" log.info("test_search_with_expression_bool: searching with bool expression: %s" % expression) vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)] search_res, _ = collection_w.search(vectors[:default_nq], default_search_field, default_search_params, nb, expression, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": min(nb, len(filter_ids)), "_async": _async}) if _async: search_res.done() search_res = search_res.result() filter_ids_set = set(filter_ids) for hits in search_res: ids = hits.ids assert set(ids).issubset(filter_ids_set) @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("expression", cf.gen_normal_expressions_field(default_float_field_name)) def test_search_with_expression_auto_id(self, dim, expression, _async): """ target: test search with different expressions method: test search with different expressions with auto id expected: searched successfully with correct limit(topK) """ # 1. initialize with data nb = 1000 collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb, auto_id=True, dim=dim, is_index=True)[0:4] # filter result with expression in collection _vectors = _vectors[0] expression = expression.replace("&&", "and").replace("||", "or") filter_ids = [] for i, _id in enumerate(insert_ids): exec(f"{default_float_field_name} = _vectors.{default_float_field_name}[i]") if not expression or eval(expression): filter_ids.append(_id) # 2. 
create index index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}} collection_w.create_index("float_vector", index_param) collection_w.load() # 3. search with different expressions log.info("test_search_with_expression_auto_id: searching with expression: %s" % expression) vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)] search_res, _ = collection_w.search(vectors[:default_nq], default_search_field, default_search_params, nb, expression, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": min(nb, len(filter_ids)), "_async": _async}) if _async: search_res.done() search_res = search_res.result() filter_ids_set = set(filter_ids) for hits in search_res: ids = hits.ids assert set(ids).issubset(filter_ids_set) @pytest.mark.tags(CaseLabel.L2) def test_search_expression_all_data_type(self, nb, nq, dim, auto_id, _async): """ target: test search using all supported data types method: search using different supported data types expected: search success """ # 1. initialize with data collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, is_all_data_type=True, auto_id=auto_id, dim=dim)[0:4] # 2. 
search log.info("test_search_expression_all_data_type: Searching collection %s" % collection_w.name) vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] search_exp = "int64 >= 0 && int32 >= 0 && int16 >= 0 " \ "&& int8 >= 0 && float >= 0 && double >= 0" res = collection_w.search(vectors[:nq], default_search_field, default_search_params, default_limit, search_exp, _async=_async, output_fields=[default_int64_field_name, default_float_field_name], check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": default_limit, "_async": _async})[0] if _async: res.done() res = res.result() assert len(res[0][0].entity._row_data) != 0 assert (default_int64_field_name and default_float_field_name) in res[0][0].entity._row_data @pytest.mark.tags(CaseLabel.L2) def test_search_with_output_fields_empty(self, nb, nq, dim, auto_id, _async): """ target: test search with output fields method: search with empty output_field expected: search success """ # 1. initialize with data collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, auto_id=auto_id, dim=dim)[0:4] # 2. search log.info("test_search_with_output_fields_empty: Searching collection %s" % collection_w.name) vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] res = collection_w.search(vectors[:nq], default_search_field, default_search_params, default_limit, default_search_exp, _async=_async, output_fields=[], check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": default_limit, "_async": _async})[0] if _async: res.done() res = res.result() assert len(res[0][0].entity._row_data) == 0 @pytest.mark.tags(CaseLabel.L1) def test_search_with_output_field(self, auto_id, _async): """ target: test search with output fields method: search with one output_field expected: search success """ # 1. 
# initialize with data (continuation of test_search_with_output_field, step "# 1.")
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
                                                                      auto_id=auto_id)[0:4]
        # 2. search
        log.info("test_search_with_output_field: Searching collection %s" %
                 collection_w.name)
        # NOTE(review): `vectors` is a module-level fixture here, not a local
        # (unlike sibling tests that rebuild it per-dim) — confirm it matches
        # the collection's default dim.
        res = collection_w.search(vectors[:default_nq], default_search_field,
                                  default_search_params, default_limit,
                                  default_search_exp, _async=_async,
                                  output_fields=[default_int64_field_name],
                                  check_task=CheckTasks.check_search_results,
                                  check_items={"nq": default_nq,
                                               "ids": insert_ids,
                                               "limit": default_limit,
                                               "_async": _async})[0]
        if _async:
            # async search returns a future: wait, then unwrap the result
            res.done()
            res = res.result()
        # the single requested output field must come back with the hit
        assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_output_fields(self, nb, nq, dim, auto_id, _async):
        """
        target: test search with output fields
        method: search with multiple output_field
        expected: search success
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                      is_all_data_type=True,
                                                                      auto_id=auto_id,
                                                                      dim=dim)[0:4]
        # 2.
search log.info("test_search_with_output_fields: Searching collection %s" % collection_w.name) vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] res = collection_w.search(vectors[:nq], default_search_field, default_search_params, default_limit, default_search_exp, _async=_async, output_fields=[default_int64_field_name, default_float_field_name], check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": default_limit, "_async": _async})[0] if _async: res.done() res = res.result() assert len(res[0][0].entity._row_data) != 0 assert (default_int64_field_name and default_float_field_name) in res[0][0].entity._row_data @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("output_fields", [["*"], ["*", default_float_field_name]]) def test_search_with_output_field_wildcard(self, output_fields, auto_id, _async): """ target: test search with output fields using wildcard method: search with one output_field (wildcard) expected: search success """ # 1. initialize with data collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, auto_id=auto_id)[0:4] # 2. 
search log.info("test_search_with_output_field_wildcard: Searching collection %s" % collection_w.name) res = collection_w.search(vectors[:default_nq], default_search_field, default_search_params, default_limit, default_search_exp, _async=_async, output_fields=output_fields, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": default_limit, "_async": _async})[0] if _async: res.done() res = res.result() assert len(res[0][0].entity._row_data) != 0 assert (default_int64_field_name and default_float_field_name) in res[0][0].entity._row_data @pytest.mark.tags(CaseLabel.L2) def test_search_multi_collections(self, nb, nq, dim, auto_id, _async): """ target: test search multi collections of L2 method: add vectors into 10 collections, and search expected: search status ok, the length of result """ self._connect() collection_num = 10 for i in range(collection_num): # 1. initialize with data log.info("test_search_multi_collections: search round %d" % (i + 1)) collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, auto_id=auto_id, dim=dim)[0:4] # 2. search vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] log.info("test_search_multi_collections: searching %s entities (nq = %s) from collection %s" % (default_limit, nq, collection_w.name)) collection_w.search(vectors[:nq], default_search_field, default_search_params, default_limit, default_search_exp, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": default_limit, "_async": _async}) @pytest.mark.tags(CaseLabel.L2) def test_search_concurrent_multi_threads(self, nb, nq, dim, auto_id, _async): """ target: test concurrent search with multi-processes method: search with 10 processes, each process uses dependent connection expected: status ok and the returned vectors should be query_records """ # 1. 
initialize with data threads_num = 10 threads = [] collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb, auto_id=auto_id, dim=dim)[0:5] def search(collection_w): vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] collection_w.search(vectors[:nq], default_search_field, default_search_params, default_limit, default_search_exp, _async=_async, travel_timestamp=time_stamp, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": default_limit, "_async": _async}) # 2. search with multi-processes log.info("test_search_concurrent_multi_threads: searching with %s processes" % threads_num) for i in range(threads_num): t = threading.Thread(target=search, args=(collection_w,)) threads.append(t) t.start() time.sleep(0.2) for t in threads: t.join() @pytest.mark.skip(reason="Not running for now") @pytest.mark.tags(CaseLabel.L2) def test_search_insert_in_parallel(self): """ target: test search and insert in parallel method: One process do search while other process do insert expected: No exception """ c_name = cf.gen_unique_str(prefix) collection_w = self.init_collection_wrap(name=c_name) default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"} collection_w.create_index(ct.default_float_vec_field_name, default_index) collection_w.load() def do_insert(): df = cf.gen_default_dataframe_data(10000) for i in range(11): collection_w.insert(df) log.info(f'Collection num entities is : {collection_w.num_entities}') def do_search(): while True: results, _ = collection_w.search(cf.gen_vectors(nq, ct.default_dim), default_search_field, default_search_params, default_limit, default_search_exp, timeout=30) ids = [] for res in results: ids.extend(res.ids) expr = f'{ct.default_int64_field_name} in {ids}' collection_w.query(expr, output_fields=[ct.default_int64_field_name, ct.default_float_field_name], timeout=30) p_insert = multiprocessing.Process(target=do_insert, 
args=())
        # daemon=True so the endless do_search loop dies with the main process
        p_search = multiprocessing.Process(target=do_search, args=(), daemon=True)
        p_insert.start()
        p_search.start()

        p_insert.join()

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("round_decimal", [0, 1, 2, 3, 4, 5, 6])
    def test_search_round_decimal(self, round_decimal):
        """
        target: test search with valid round decimal
        method: search with valid round decimal
        expected: search successfully
        """
        import math
        tmp_nb = 500
        tmp_nq = 1
        tmp_limit = 5
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, True, nb=tmp_nb)[0]
        # 2. search
        log.info("test_search_round_decimal: Searching collection %s" % collection_w.name)
        # baseline search without rounding, then the same search with rounding
        res, _ = collection_w.search(vectors[:tmp_nq], default_search_field,
                                     default_search_params, tmp_limit)

        res_round, _ = collection_w.search(vectors[:tmp_nq], default_search_field,
                                           default_search_params, tmp_limit,
                                           round_decimal=round_decimal)
        # tolerance is one order of magnitude above the rounded digit
        abs_tol = pow(10, 1 - round_decimal)
        # log.debug(f'abs_tol: {abs_tol}')
        for i in range(tmp_limit):
            # rounding the baseline distance client-side must agree with the
            # server-side round_decimal result, within abs_tol
            dis_expect = round(res[0][i].distance, round_decimal)
            dis_actual = res_round[0][i].distance
            # log.debug(f'actual: {dis_actual}, expect: {dis_expect}')
            # abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
            assert math.isclose(dis_actual, dis_expect, rel_tol=0, abs_tol=abs_tol)

    @pytest.mark.tags(CaseLabel.L1)
    def test_search_with_expression_large(self, dim):
        """
        target: test search with large expression
        method: test search with large expression
        expected: searched successfully
        """
        # 1. initialize with data
        nb = 10000
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                      dim=dim,
                                                                      is_index=True)[0:4]
        # 2. create index
        index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
        collection_w.create_index("float_vector", index_param)
        collection_w.load()
        # 3.
search with expression expression = f"0 < {default_int64_field_name} < 5001" log.info("test_search_with_expression: searching with expression: %s" % expression) nums = 5000 vectors = [[random.random() for _ in range(dim)] for _ in range(nums)] search_res, _ = collection_w.search(vectors, default_search_field, default_search_params, default_limit, expression, check_task=CheckTasks.check_search_results, check_items={ "nq": nums, "ids": insert_ids, "limit": default_limit, }) @pytest.mark.tags(CaseLabel.L1) def test_search_with_expression_large_two(self, dim): """ target: test search with large expression method: test one of the collection ids to another collection search for it, with the large expression expected: searched successfully """ # 1. initialize with data nb = 10000 collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, dim=dim, is_index=True)[0:4] # 2. create index index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}} collection_w.create_index("float_vector", index_param) collection_w.load() nums = 5000 vectors = [[random.random() for _ in range(dim)] for _ in range(nums)] vectors_id = [random.randint(0,nums)for _ in range(nums)] expression = f"{default_int64_field_name} in {vectors_id}" search_res, _ = collection_w.search(vectors, default_search_field, default_search_params, default_limit, expression, check_task=CheckTasks.check_search_results, check_items={ "nq": nums, "ids": insert_ids, "limit": default_limit, }) @pytest.mark.tags(CaseLabel.L1) def test_search_with_consistency_bounded(self, nq, dim, auto_id, _async): """ target: test search with different consistency level method: 1. create a collection 2. insert data 3. search with consistency_level is "bounded" expected: searched successfully """ limit = 1000 nb_old = 500 collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb_old, auto_id=auto_id, dim=dim)[0:4] # 2. 
search for original data after load vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] collection_w.search(vectors[:nq], default_search_field, default_search_params, limit, default_search_exp, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": nb_old, "_async": _async, }) kwargs = {} consistency_level = kwargs.get("consistency_level", CONSISTENCY_BOUNDED) kwargs.update({"consistency_level": consistency_level}) nb_new = 400 _, _, _, insert_ids_new, _= cf.insert_data(collection_w, nb_new, auto_id=auto_id, dim=dim, insert_offset=nb_old) insert_ids.extend(insert_ids_new) collection_w.search(vectors[:nq], default_search_field, default_search_params, limit, default_search_exp, _async=_async, **kwargs, ) @pytest.mark.tags(CaseLabel.L1) def test_search_with_consistency_strong(self, nq, dim, auto_id, _async): """ target: test search with different consistency level method: 1. create a collection 2. insert data 3. search with consistency_level is "Strong" expected: searched successfully """ limit = 1000 nb_old = 500 collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb_old, auto_id=auto_id, dim=dim)[0:4] # 2. 
search for original data after load vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] collection_w.search(vectors[:nq], default_search_field, default_search_params, limit, default_search_exp, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": nb_old, "_async": _async}) nb_new = 400 _, _, _, insert_ids_new, _ = cf.insert_data(collection_w, nb_new, auto_id=auto_id, dim=dim, insert_offset=nb_old) insert_ids.extend(insert_ids_new) kwargs = {} consistency_level = kwargs.get("consistency_level", CONSISTENCY_STRONG) kwargs.update({"consistency_level": consistency_level}) collection_w.search(vectors[:nq], default_search_field, default_search_params, limit, default_search_exp, _async=_async, **kwargs, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": nb_old + nb_new, "_async": _async}) @pytest.mark.tags(CaseLabel.L1) def test_search_with_consistency_eventually(self, nq, dim, auto_id, _async): """ target: test search with different consistency level method: 1. create a collection 2. insert data 3. search with consistency_level is "eventually" expected: searched successfully """ limit = 1000 nb_old = 500 collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb_old, auto_id=auto_id, dim=dim)[0:4] # 2. 
search for original data after load vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] collection_w.search(vectors[:nq], default_search_field, default_search_params, limit, default_search_exp, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": nb_old, "_async": _async}) nb_new = 400 _, _, _, insert_ids_new, _= cf.insert_data(collection_w, nb_new, auto_id=auto_id, dim=dim, insert_offset=nb_old) insert_ids.extend(insert_ids_new) kwargs = {} consistency_level = kwargs.get("consistency_level", CONSISTENCY_EVENTUALLY) kwargs.update({"consistency_level": consistency_level}) collection_w.search(vectors[:nq], default_search_field, default_search_params, limit, default_search_exp, _async=_async, **kwargs ) @pytest.mark.tags(CaseLabel.L1) def test_search_with_consistency_session(self, nq, dim, auto_id, _async): """ target: test search with different consistency level method: 1. create a collection 2. insert data 3. search with consistency_level is "session" expected: searched successfully """ limit = 1000 nb_old = 500 collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb_old, auto_id=auto_id, dim=dim)[0:4] # 2. 
search for original data after load vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] collection_w.search(vectors[:nq], default_search_field, default_search_params, limit, default_search_exp, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": nb_old, "_async": _async}) kwargs = {} consistency_level = kwargs.get("consistency_level", CONSISTENCY_SESSION) kwargs.update({"consistency_level": consistency_level}) nb_new = 400 _, _, _, insert_ids_new, _= cf.insert_data(collection_w, nb_new, auto_id=auto_id, dim=dim, insert_offset=nb_old) insert_ids.extend(insert_ids_new) collection_w.search(vectors[:nq], default_search_field, default_search_params, limit, default_search_exp, _async=_async, **kwargs, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": nb_old + nb_new, "_async": _async}) class TestSearchBase(TestcaseBase): @pytest.fixture( scope="function", params=[1, 10] ) def get_top_k(self, request): yield request.param @pytest.fixture( scope="function", params=[1, 10, 1100] ) def get_nq(self, request): yield request.param @pytest.fixture(scope="function", params=[8, 128]) def dim(self, request): yield request.param @pytest.fixture(scope="function", params=[False, True]) def auto_id(self, request): yield request.param @pytest.fixture(scope="function", params=[False, True]) def _async(self, request): yield request.param @pytest.mark.tags(CaseLabel.L2) def test_search_flat_top_k(self, get_nq): """ target: test basic search function, all the search params is correct, change top-k value method: search with the given vectors, check the result expected: the length of the result is top_k """ top_k = 16385 # max top k is 16384 nq = get_nq collection_w, data, _, insert_ids = self.init_collection_general(prefix, insert_data=True, nb=nq)[0:4] collection_w.load() if top_k <= max_top_k: res, _ = collection_w.search(vectors[:nq], default_search_field, 
default_search_params, top_k) assert len(res[0]) <= top_k else: collection_w.search(vectors[:nq], default_search_field, default_search_params, top_k, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "no Available QueryNode result, " "filter reason limit %s is too large," % top_k}) @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("index, params", zip(ct.all_index_types[:9], ct.default_index_params[:9])) def test_search_index_empty_partition(self, index, params): """ target: test basic search function, all the search params are correct, test all index params, and build method: add vectors into collection, search with the given vectors, check the result expected: the length of the result is top_k, search collection with partition tag return empty """ top_k = ct.default_top_k nq = ct.default_nq dim = ct.default_dim # 1. initialize with data collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nq, partition_num=1, dim=dim, is_index=True)[0:5] vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] # 2. create patition partition_name = "search_partition_empty" collection_w.create_partition(partition_name=partition_name, description="search partition empty") par = collection_w.partitions collection_w.load() # 3. create different index if params.get("m"): if (dim % params["m"]) != 0: params["m"] = dim // 4 if params.get("PQM"): if (dim % params["PQM"]) != 0: params["PQM"] = dim // 4 default_index = {"index_type": index, "params": params, "metric_type": "L2"} collection_w.create_index("float_vector", default_index) collection_w.load() # 4. 
search res, _ = collection_w.search(vectors[:nq], default_search_field, default_search_params, top_k, default_search_exp) assert len(res[0]) <= top_k collection_w.search(vectors[:nq], default_search_field, default_search_params, top_k, default_search_exp, [partition_name], check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": [], "limit": 0}) @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("index, params", zip(ct.all_index_types[:9], ct.default_index_params[:9])) def test_search_index_partitions(self, index, params, get_top_k): """ target: test basic search function, all the search params are correct, test all index params, and build method: search collection with the given vectors and tags, check the result expected: the length of the result is top_k """ top_k = get_top_k nq = ct.default_nq dim = ct.default_dim # 1. initialize with data collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nq, partition_num=1, dim=dim, is_index=True)[0:5] vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] # 2. create patition partition_name = ct.default_partition_name par = collection_w.partitions collection_w.load() # 3. 
create different index if params.get("m"): if (dim % params["m"]) != 0: params["m"] = dim // 4 if params.get("PQM"): if (dim % params["PQM"]) != 0: params["PQM"] = dim // 4 default_index = {"index_type": index, "params": params, "metric_type": "L2"} collection_w.create_index("float_vector", default_index) collection_w.load() res, _ = collection_w.search(vectors[:nq], default_search_field, ct.default_search_params, top_k, default_search_exp, [partition_name]) assert len(res[0]) <= top_k @pytest.mark.tags(CaseLabel.L2) def test_search_ip_flat(self, get_top_k): """ target: test basic search function, all the search params are correct, change top-k value method: search with the given vectors, check the result expected: the length of the result is top_k """ top_k = get_top_k nq = ct.default_nq dim = ct.default_dim # 1. initialize with data collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nq, dim=dim, is_index=True)[0:5] vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] # 2. create ip index default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "IP"} collection_w.create_index("float_vector", default_index) collection_w.load() res, _ = collection_w.search(vectors[:nq], default_search_field, ct.default_search_params, top_k, default_search_exp) assert len(res[0]) <= top_k @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("index, params", zip(ct.all_index_types[:9], ct.default_index_params[:9])) def test_search_ip_after_index(self, index, params): """ target: test basic search function, all the search params are correct, test all index params, and build method: search with the given vectors, check the result expected: the length of the result is top_k """ top_k = ct.default_top_k nq = ct.default_nq dim = ct.default_dim # 1. 
initialize with data collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nq, dim=dim, is_index=True)[0:5] vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] # 2. create ip index default_index = {"index_type": index, "params": params, "metric_type": "IP"} collection_w.create_index("float_vector", default_index) collection_w.load() res, _ = collection_w.search(vectors[:nq], default_search_field, ct.default_search_params, top_k, default_search_exp) assert len(res[0]) <= top_k @pytest.mark.tags(CaseLabel.L1) @pytest.mark.parametrize("dim", [2, 8, 128, 768]) @pytest.mark.parametrize("nb", [1, 2, 10, 100]) def test_search_ip_brute_force(self, nb, dim): """ target: https://github.com/milvus-io/milvus/issues/17378. Ensure the logic of IP distances won't be changed. method: search with the given vectors, check the result expected: The inner product of vector themselves should be positive. """ top_k = 1 # 1. initialize with data collection_w, insert_entities, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, is_binary=False, dim=dim)[0:5] insert_vectors = insert_entities[0][default_search_field].tolist() # 2. load collection. collection_w.load() # 3. search and then check if the distances are expected. 
res, _ = collection_w.search(insert_vectors[:nb], default_search_field, ct.default_search_ip_params, top_k, default_search_exp) for i, v in enumerate(insert_vectors): assert len(res[i]) == 1 ref = ip(v, v) got = res[i][0].distance assert abs(got - ref) <= epsilon @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("index, params", zip(ct.all_index_types[:9], ct.default_index_params[:9])) def test_search_ip_index_empty_partition(self, index, params): """ target: test basic search function, all the search params are correct, test all index params, and build method: add vectors into collection, search with the given vectors, check the result expected: the length of the result is top_k, search collection with partition tag return empty """ top_k = ct.default_top_k nq = ct.default_nq dim = ct.default_dim # 1. initialize with data collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nq, partition_num=1, dim=dim, is_index=True)[0:5] vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] # 2. create patition partition_name = "search_partition_empty" collection_w.create_partition(partition_name=partition_name, description="search partition empty") par = collection_w.partitions collection_w.load() # 3. create different index default_index = {"index_type": index, "params": params, "metric_type": "IP"} collection_w.create_index("float_vector", default_index) collection_w.load() # 4. 
search res, _ = collection_w.search(vectors[:nq], default_search_field, default_search_params, top_k, default_search_exp) assert len(res[0]) <= top_k collection_w.search(vectors[:nq], default_search_field, default_search_params, top_k, default_search_exp, [partition_name], check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": [], "limit": 0}) @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("index, params", zip(ct.all_index_types[:9], ct.default_index_params[:9])) def test_search_ip_index_partitions(self, index, params): """ target: test basic search function, all the search params are correct, test all index params, and build method: search collection with the given vectors and tags, check the result expected: the length of the result is top_k """ top_k = ct.default_top_k nq = ct.default_nq dim = ct.default_dim # 1. initialize with data collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nq, partition_num=1, dim=dim, is_index=True)[0:5] vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] # 2. create patition par_name = collection_w.partitions[0].name collection_w.load() # 3. create different index default_index = {"index_type": index, "params": params, "metric_type": "IP"} collection_w.create_index("float_vector", default_index) collection_w.load() # 4. 
search res, _ = collection_w.search(vectors[:nq], default_search_field, default_search_params, top_k, default_search_exp, [par_name]) assert len(res[0]) <= top_k @pytest.mark.tags(CaseLabel.L2) def test_search_without_connect(self): """ target: test search vectors without connection method: use disconnected instance, call search method and check if search successfully expected: raise exception """ self._connect() collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, ct.default_nq, is_index=True)[0:5] vectors = [[random.random() for _ in range(ct.default_dim)] for _ in range(nq)] collection_w.load() self.connection_wrap.remove_connection(ct.default_alias) res_list, _ = self.connection_wrap.list_connections() assert ct.default_alias not in res_list res, _ = collection_w.search(vectors[:nq], default_search_field, ct.default_search_params, ct.default_top_k, default_search_exp, check_task=CheckTasks.err_res, check_items={"err_code": 0, "err_msg": "'should create connect first.'"}) @pytest.mark.tags(CaseLabel.L2) # @pytest.mark.timeout(300) def test_search_concurrent_multithreads_single_connection(self, _async): """ target: test concurrent search with multi processes method: search with 10 processes, each process uses dependent connection expected: status ok and the returned vectors should be query_records """ threads_num = 10 threads = [] collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, ct.default_nb)[0:5] def search(collection_w): vectors = [[random.random() for _ in range(ct.default_dim)] for _ in range(nq)] collection_w.search(vectors[:nq], default_search_field, default_search_params, default_limit, default_search_exp, _async=_async, travel_timestamp=time_stamp, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": default_limit, "_async": _async}) # 2. 
search with multi-processes log.info("test_search_concurrent_multi_threads: searching with %s processes" % threads_num) for i in range(threads_num): t = threading.Thread(target=search, args=(collection_w,)) threads.append(t) t.start() time.sleep(0.2) for t in threads: t.join() @pytest.mark.tags(CaseLabel.L2) def test_search_multi_collections(self): """ target: test search multi collections of L2 method: add vectors into 10 collections, and search expected: search status ok, the length of result """ num = 10 top_k = 10 nq = 20 for i in range(num): collection = gen_unique_str(uid + str(i)) collection_w, _, _, insert_ids, time_stamp = \ self.init_collection_general(collection, True, ct.default_nb)[0:5] assert len(insert_ids) == default_nb vectors = [[random.random() for _ in range(ct.default_dim)] for _ in range(nq)] collection_w.search(vectors[:nq], default_search_field, default_search_params, top_k, default_search_exp, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": top_k}) class TestSearchDSL(TestcaseBase): @pytest.mark.tags(CaseLabel.L0) def test_query_vector_only(self): """ target: test search normal scenario method: search vector only expected: search status ok, the length of result """ collection_w, _, _, insert_ids, time_stamp = \ self.init_collection_general(prefix, True, ct.default_nb)[0:5] vectors = [[random.random() for _ in range(ct.default_dim)] for _ in range(nq)] collection_w.search(vectors[:nq], default_search_field, default_search_params, ct.default_top_k, default_search_exp, check_task=CheckTasks.check_search_results, check_items={"nq": nq, "ids": insert_ids, "limit": ct.default_top_k}) class TestsearchString(TestcaseBase): """ ****************************************************************** The following cases are used to test search about string ****************************************************************** """ @pytest.fixture(scope="function", params=[default_nb, default_nb_medium]) def 
nb(self, request): yield request.param @pytest.fixture(scope="function", params=[2, 500]) def nq(self, request): yield request.param @pytest.fixture(scope="function", params=[8, 128]) def dim(self, request): yield request.param @pytest.fixture(scope="function", params=[False, True]) def auto_id(self, request): yield request.param @pytest.fixture(scope="function", params=[False, True]) def _async(self, request): yield request.param @pytest.mark.tags(CaseLabel.L2) def test_search_string_field_not_primary(self, auto_id, _async): """ target: test search with string expr and string field is not primary method: create collection and insert data create index and collection load collection search uses string expr in string field, string field is not primary expected: Search successfully """ # 1. initialize with data collection_w, _, _, insert_ids = \ self.init_collection_general(prefix, True, auto_id=auto_id, dim=default_dim)[0:4] # 2. search log.info("test_search_string_field_not_primary: searching collection %s" % collection_w.name) vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] output_fields = [default_string_field_name, default_float_field_name] collection_w.search(vectors[:default_nq], default_search_field, default_search_params, default_limit, default_search_string_exp, output_fields=output_fields, _async=_async, travel_timestamp=0, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": default_limit, "_async": _async}) @pytest.mark.tags(CaseLabel.L2) def test_search_string_field_is_primary_true(self, dim, _async): """ target: test search with string expr and string field is primary method: create collection and insert data create index and collection load collection search uses string expr in string field ,string field is primary expected: Search successfully """ # 1. 
initialize with data collection_w, _, _, insert_ids = \ self.init_collection_general(prefix, True, dim=dim, primary_field=ct.default_string_field_name)[0:4] # 2. search log.info("test_search_string_field_is_primary_true: searching collection %s" % collection_w.name) vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)] output_fields = [default_string_field_name, default_float_field_name] collection_w.search(vectors[:default_nq], default_search_field, default_search_params, default_limit, default_search_string_exp, output_fields=output_fields, _async=_async, travel_timestamp=0, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": default_limit, "_async": _async}) @pytest.mark.tags(CaseLabel.L2) def test_search_string_mix_expr(self, dim, auto_id, _async): """ target: test search with mix string and int expr method: create collection and insert data create index and collection load collection search uses mix expr expected: Search successfully """ # 1. initialize with data collection_w, _, _, insert_ids = \ self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:4] # 2. 
search log.info("test_search_string_mix_expr: searching collection %s" % collection_w.name) vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)] output_fields = [default_string_field_name, default_float_field_name] collection_w.search(vectors[:default_nq], default_search_field, default_search_params, default_limit, default_search_mix_exp, output_fields=output_fields, _async=_async, travel_timestamp=0, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": default_limit, "_async": _async}) @pytest.mark.tags(CaseLabel.L2) def test_search_string_with_invalid_expr(self, auto_id): """ target: test search data method: create collection and insert data create index and collection load collection search uses invalid string expr expected: Raise exception """ # 1. initialize with data collection_w, _, _, insert_ids = \ self.init_collection_general(prefix, True, auto_id=auto_id, dim=default_dim)[0:4] # 2. search log.info("test_search_string_with_invalid_expr: searching collection %s" % collection_w.name) vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] collection_w.search(vectors[:default_nq], default_search_field, default_search_params, default_limit, default_invaild_string_exp, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "failed to create query plan: type mismatch"} ) @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("expression", cf.gen_normal_string_expressions(ct.default_string_field_name)) def test_search_with_different_string_expr(self, dim, expression, _async): """ target: test search with different string expressions method: test search with different string expressions expected: searched successfully with correct limit(topK) """ # 1. 
initialize with data nb = 1000 collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb, dim=dim, is_index=True)[0:4] # filter result with expression in collection _vectors = _vectors[0] filter_ids = [] expression = expression.replace("&&", "and").replace("||", "or") for i, _id in enumerate(insert_ids): int64 = _vectors.int64[i] varchar = _vectors.varchar[i] if not expression or eval(expression): filter_ids.append(_id) # 2. create index index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}} collection_w.create_index("float_vector", index_param) collection_w.load() # 3. search with expression log.info("test_search_with_expression: searching with expression: %s" % expression) vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)] search_res, _ = collection_w.search(vectors[:default_nq], default_search_field, default_search_params, nb, expression, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": min(nb, len(filter_ids)), "_async": _async}) if _async: search_res.done() search_res = search_res.result() filter_ids_set = set(filter_ids) for hits in search_res: ids = hits.ids assert set(ids).issubset(filter_ids_set) @pytest.mark.tags(CaseLabel.L2) def test_search_string_field_is_primary_binary(self, dim, _async): """ target: test search with string expr and string field is primary method: create collection and insert data create index and collection load collection search uses string expr in string field ,string field is primary expected: Search successfully """ # 1. initialize with binary data collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2, is_binary=True, dim=dim, is_index=True, primary_field=ct.default_string_field_name)[0:4] # 2. 
create index default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"} collection_w.create_index("binary_vector", default_index) collection_w.load() # 3. search with exception binary_vectors = cf.gen_binary_vectors(3000, dim)[1] search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}} output_fields = [default_string_field_name] collection_w.search(binary_vectors[:default_nq], "binary_vector", search_params, default_limit, default_search_string_exp, output_fields=output_fields, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": 2, "_async": _async}) @pytest.mark.tags(CaseLabel.L2) def test_search_string_field_binary(self, auto_id, dim, _async): """ target: test search with string expr and string field is not primary method: create an binary collection and insert data create index and collection load collection search uses string expr in string field, string field is not primary expected: Search successfully """ # 1. initialize with binary data collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2, is_binary=True, auto_id=auto_id, dim=dim, is_index=True)[0:4] # 2. create index default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"} collection_w.create_index("binary_vector", default_index) collection_w.load() # 2. 
search with exception binary_vectors = cf.gen_binary_vectors(3000, dim)[1] search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}} collection_w.search(binary_vectors[:default_nq], "binary_vector", search_params, default_limit, default_search_string_exp, _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": 2, "_async": _async}) @pytest.mark.tags(CaseLabel.L2) def test_search_mix_expr_with_binary(self, dim, auto_id, _async): """ target: test search with mix string and int expr method: create an binary collection and insert data create index and collection load collection search uses mix expr expected: Search successfully """ # 1. initialize with data collection_w, _, _, insert_ids = \ self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim, is_binary=True, is_index=True)[0:4] # 2. create index default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"} collection_w.create_index("binary_vector", default_index) collection_w.load() # 2. 
search log.info("test_search_mix_expr_with_binary: searching collection %s" % collection_w.name) binary_vectors = cf.gen_binary_vectors(3000, dim)[1] search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}} output_fields = [default_string_field_name, default_float_field_name] collection_w.search(binary_vectors[:default_nq], "binary_vector", search_params, default_limit, default_search_mix_exp, output_fields=output_fields, _async=_async, travel_timestamp=0, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": default_limit, "_async": _async}) @pytest.mark.tags(CaseLabel.L2) def test_search_string_field_not_primary_perfix(self, auto_id, _async): """ target: test search with string expr and string field is not primary method: create collection and insert data create index and collection load collection search uses string expr in string field, string field is not primary expected: Search successfully """ # 1. initialize with data collection_w, _, _, insert_ids = \ self.init_collection_general(prefix, True, auto_id=auto_id, dim=default_dim)[0:4] index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}} collection_w.create_index("float_vector", index_param, index_name="a") index_param_two ={} collection_w.create_index("varchar", index_param_two, index_name="b") collection_w.load() # 2. 
search log.info("test_search_string_field_not_primary: searching collection %s" % collection_w.name) vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] output_fields = [default_float_field_name, default_string_field_name] collection_w.search(vectors[:default_nq], default_search_field, default_search_params, default_limit, perfix_expr, output_fields=output_fields, _async=_async, travel_timestamp=0, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, "ids": insert_ids, "limit": 1, "_async": _async} ) @pytest.mark.tags(CaseLabel.L1) def test_search_all_index_with_compare_expr(self, _async): """ target: test delete after creating index method: 1.create collection , insert data, primary_field is string field 2.create string and float index ,delete entities, query 3.search expected: assert index and deleted id not in search result """ # create collection, insert tmp_nb, flush and load collection_w, vectors, _, insert_ids = self.init_collection_general(prefix, insert_data=True, primary_field=ct.default_string_field_name)[0:4] # create index index_params_one = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}} collection_w.create_index(ct.default_float_vec_field_name, index_params_one, index_name=index_name1) index_params_two ={} collection_w.create_index(ct.default_string_field_name, index_params=index_params_two, index_name=index_name2) assert collection_w.has_index(index_name=index_name2) collection_w.release() collection_w.load() # delete entity expr = 'float >= int64' # search with id 0 vectors vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] output_fields = [default_int64_field_name, default_float_field_name, default_string_field_name] collection_w.search(vectors[:default_nq], default_search_field, default_search_params, default_limit, expr, output_fields=output_fields, _async=_async, travel_timestamp=0, check_task=CheckTasks.check_search_results, 
check_items={"nq": default_nq, "ids": insert_ids, "limit": default_limit, "_async": _async} )
tester_cppk.py
#!/usr/bin/python3.6
# This script implements a regression test for the REST API.
# It sends several requests in parallel threads to the TSP backend,
# checks responses and measures the execution timeouts.
# Designed by Nikita Novikov
# Clover Group, Feb 21 2019
import json
import time
import glob
import requests
import threading
import sys
import argparse
from pprint import pprint


def send(idx: int, file: str, req: dict, resps: list):
    """POST one request to the TSP backend and record the outcome.

    Runs in a worker thread.  Appends ``(idx, parsed_response, is_error)``
    to the shared ``resps`` list (``list.append`` is atomic under the GIL,
    so no extra locking is needed).

    Reads the module-level ``url`` and ``timeout`` set in ``__main__``.
    """
    print(f"Request #{idx} ({file}) sent")
    try:
        # Pass the request timeout: without it a stuck backend would hang
        # this thread forever (Thread.join in the main loop cannot cancel
        # an in-flight HTTP request).
        resp = requests.post(url, json=req,
                             headers={"Content-Type": "application/json"},
                             timeout=timeout)
    except requests.RequestException as exc:
        # Record transport-level failures instead of dying silently,
        # so the batch summary below reports them.
        resps.append((idx, repr(exc), True))
        return
    print(f"Request #{idx} received response")
    try:
        p_resp = resp.json()
        is_error = p_resp.get("errorCode") is not None
    except ValueError:
        # requests raises ValueError on malformed JSON (its JSONDecodeError
        # is a ValueError subclass on newer versions), so ValueError covers
        # all requests versions.
        p_resp = str(resp)
        is_error = True
    resps.append((idx, p_resp, is_error))


if __name__ == "__main__":
    if sys.version_info[0] != 3 or sys.version_info[1] < 6:
        print("This script requires Python version 3.6+")
        sys.exit(1)

    parser = argparse.ArgumentParser(description="Run requests to TSP")
    parser.add_argument("--host", default="127.0.0.1",
                        help="IP address of TSP server, defaults to localhost")
    parser.add_argument("--port", default=8080, type=int,
                        help="Port number, defaults to 8080")
    parser.add_argument("--batch-size", default=1, type=int,
                        help="Request batch size, defaults to 1")
    parser.add_argument("--timeout", default=1000.0, type=float,
                        help="Request timeout in seconds, defaults to 1000")
    parser.add_argument("files", metavar="FILE", type=str, nargs="+",
                        help="File name or mask")
    args = parser.parse_args()

    host = args.host
    port = args.port

    # TSP server.  Localhost or production
    url = f'http://{host}:{port}/streamJob/from-influxdb/to-jdbc/?run_async=1'
    print(f"Sending to {url}")

    # Expand globs and pre-load every request body.
    files = sum([glob.glob(p) for p in args.files], [])
    reqs = []
    for f in files:
        # Context manager: do not leak one file handle per request file.
        with open(f, "r") as fh:
            reqs.append((f, json.load(fh)))

    batch_size = args.batch_size
    timeout = args.timeout
    req_count = len(reqs)

    for start_idx in range(0, req_count, batch_size):
        print(f"Sending batch of requests "
              f"#{start_idx}--{min(req_count, start_idx + batch_size) - 1}")
        batch = reqs[start_idx:start_idx + batch_size]
        threads = []
        responses = []
        for idx, (f, r) in enumerate(batch):
            threads.append(threading.Thread(
                target=send,
                args=(start_idx + idx, f, r, responses),
                daemon=True))
        t1 = time.time()
        for t in threads:
            t.start()
        # thread sync
        for t in threads:
            t.join(timeout)
            if t.is_alive():
                print("Thread timed out")
        t2 = time.time()
        if t2 - t1 > timeout:
            print("WARNING: Time limit exceeded")
        print(f'Time: {t2 - t1:.3f}')
        error_indices = [(r[0], r[1]) for r in responses if r[2]]
        if error_indices:
            print("Errors occurred while processing the following requests:")
            for (i, e) in error_indices:
                print(f"Request #{i}")
                pprint(e)
            sys.exit(1)
        # Sleep for 5 seconds after a batch being processed
        time.sleep(5)
client.py
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import json
import logging
import time
import traceback
from threading import Event, Thread

from pyee import ExecutorEventEmitter
from websocket import (WebSocketApp,
                       WebSocketConnectionClosedException,
                       WebSocketException)

from mycroft_bus_client.message import Message
from mycroft_bus_client.util import create_echo_function

LOG = logging.getLogger(__name__)

MessageBusClientConf = namedtuple('MessageBusClientConf',
                                  ['host', 'port', 'route', 'ssl'])


class MessageBusClient:
    """Websocket client for the Mycroft message bus.

    Wraps a ``WebSocketApp`` and re-emits every received bus message on a
    local ``ExecutorEventEmitter`` so callers can subscribe to messages by
    type.  Reconnects automatically with exponential backoff on errors.
    """

    def __init__(self, host='0.0.0.0', port=8181, route='/core', ssl=False):
        self.config = MessageBusClientConf(host, port, route, ssl)
        self.emitter = ExecutorEventEmitter()
        self.client = self.create_client()
        self.retry = 5  # seconds until next reconnect attempt; doubles on failure
        self.connected_event = Event()
        self.started_running = False

    @staticmethod
    def build_url(host, port, route, ssl):
        """Build a websocket url from the connection parameters."""
        return '{scheme}://{host}:{port}{route}'.format(
            scheme='wss' if ssl else 'ws',
            host=host, port=str(port), route=route)

    def create_client(self):
        """Create a ``WebSocketApp`` wired to this client's callbacks."""
        url = MessageBusClient.build_url(ssl=self.config.ssl,
                                         host=self.config.host,
                                         port=self.config.port,
                                         route=self.config.route)
        return WebSocketApp(url,
                            on_open=self.on_open, on_close=self.on_close,
                            on_error=self.on_error, on_message=self.on_message)

    def on_open(self):
        """Handle the websocket "open" event."""
        LOG.info("Connected")
        self.connected_event.set()
        self.emitter.emit("open")
        # Restore reconnect timer to 5 seconds on successful connect
        self.retry = 5

    def on_close(self):
        """Handle the websocket "close" event."""
        self.emitter.emit("close")

    def on_error(self, error):
        """ On error start trying to reconnect to the websocket. """
        if isinstance(error, WebSocketConnectionClosedException):
            LOG.warning('Could not send message because connection has closed')
        else:
            LOG.exception('=== ' + repr(error) + ' ===')

        try:
            self.emitter.emit('error', error)
            if self.client.keep_running:
                self.client.close()
        except Exception as e:
            LOG.error('Exception closing websocket: ' + repr(e))
        LOG.warning(
            "Message Bus Client will reconnect in %d seconds." % self.retry
        )
        time.sleep(self.retry)
        # Exponential backoff, capped at one minute between attempts.
        self.retry = min(self.retry * 2, 60)
        try:
            self.emitter.emit('reconnecting')
            self.client = self.create_client()
            self.run_forever()
        except WebSocketException:
            pass

    def on_message(self, message):
        """Deserialize an incoming message and re-emit it locally.

        The raw string is emitted on 'message', the parsed Message on its
        own msg_type.
        """
        parsed_message = Message.deserialize(message)
        self.emitter.emit('message', message)
        self.emitter.emit(parsed_message.msg_type, parsed_message)

    def emit(self, message):
        """Send a message over the bus.

        Args:
            message: a Message (serialized with ``serialize()``) or any
                object whose ``__dict__`` can be JSON-encoded.

        Raises:
            ValueError: if called before ``run_forever()`` has been started.
        """
        if not self.connected_event.wait(10):
            if not self.started_running:
                raise ValueError('You must execute run_forever() '
                                 'before emitting messages')
            self.connected_event.wait()

        try:
            if hasattr(message, 'serialize'):
                self.client.send(message.serialize())
            else:
                self.client.send(json.dumps(message.__dict__))
        except WebSocketConnectionClosedException:
            LOG.warning('Could not send {} message because connection '
                        'has been closed'.format(message.msg_type))

    def wait_for_response(self, message, reply_type=None, timeout=None):
        """Send a message and wait for a response.

        Args:
            message (Message): message to send
            reply_type (str): the message type of the expected reply.
                              Defaults to "<message.msg_type>.response".
            timeout: seconds to wait before timeout, defaults to 3

        Returns:
            The received message or None if the response timed out
        """
        response = []

        def handler(message):
            """Receive response data."""
            response.append(message)

        # Resolve the reply type ONCE so registration and the timeout
        # cleanup below target the same event.  (Previously a None
        # reply_type made the cleanup try to remove the handler from the
        # literal event None, leaking the real handler.)
        reply_type = reply_type or message.msg_type + '.response'
        # Setup response handler
        self.once(reply_type, handler)
        # Send request
        self.emit(message)

        # Wait for response
        start_time = time.monotonic()
        while len(response) == 0:
            time.sleep(0.2)
            if time.monotonic() - start_time > (timeout or 3.0):
                try:
                    self.remove(reply_type, handler)
                except (ValueError, KeyError):
                    # ValueError occurs on pyee 1.0.1 removing handlers
                    # registered with once.
                    # KeyError may theoretically occur if the event occurs as
                    # the handler is removed
                    pass
                return None
        return response[0]

    def on(self, event_name, func):
        """Register ``func`` to run on every ``event_name`` event."""
        self.emitter.on(event_name, func)

    def once(self, event_name, func):
        """Register ``func`` to run on the next ``event_name`` event only."""
        self.emitter.once(event_name, func)

    def remove(self, event_name, func):
        """Remove a previously registered handler for ``event_name``."""
        try:
            if event_name not in self.emitter._events:
                LOG.debug("Not able to find '" + str(event_name) + "'")
            self.emitter.remove_listener(event_name, func)
        except ValueError:
            # Removal failed; dump the registered handlers to aid debugging.
            LOG.warning('Failed to remove event {}: {}'.format(event_name,
                                                               str(func)))
            for line in traceback.format_stack():
                LOG.warning(line.strip())
            LOG.warning("Existing events: " + str(self.emitter._events))
            for evt in self.emitter._events:
                LOG.warning("   " + str(evt))
                LOG.warning("   " + str(self.emitter._events[evt]))
            if event_name in self.emitter._events:
                LOG.debug("Removing found '" + str(event_name) + "'")
            else:
                LOG.debug("Not able to find '" + str(event_name) + "'")
            LOG.warning('----- End dump -----')

    def remove_all_listeners(self, event_name):
        """Remove all listeners connected to event_name.

        Arguments:
            event_name: event from which to remove listeners
        """
        if event_name is None:
            raise ValueError
        self.emitter.remove_all_listeners(event_name)

    def run_forever(self):
        """Run the websocket client until the connection is closed."""
        self.started_running = True
        self.client.run_forever()

    def close(self):
        """Close the connection and mark the client as disconnected."""
        self.client.close()
        self.connected_event.clear()

    def run_in_thread(self):
        """Launches the run_forever in a separate daemon thread."""
        t = Thread(target=self.run_forever)
        t.daemon = True
        t.start()
        return t


def echo():
    """Diagnostic helper: log all bus traffic and speak every utterance."""
    message_bus_client = MessageBusClient()

    def repeat_utterance(message):
        message.msg_type = 'speak'
        message_bus_client.emit(message)

    message_bus_client.on('message', create_echo_function(None))
    message_bus_client.on('recognizer_loop:utterance', repeat_utterance)
    message_bus_client.run_forever()


if __name__ == "__main__":
    echo()
player.py
# -*- coding: utf-8 -*- import subprocess import threading import os import random import logging from os.path import expanduser from sys import platform, version_info, platform from sys import exit from time import sleep import collections import json import socket try: import psutil except: pass if platform.startswith('win'): import win32pipe, win32file, pywintypes try: from urllib import unquote except: from urllib.parse import unquote ''' In case of import from win.py ''' try: from .cjkwrap import wrap except: pass ''' In case of import from win.py ''' try: from .encodings import get_encodings except: pass logger = logging.getLogger(__name__) try: # Forced testing from shutil import which def pywhich (cmd): pr = which(cmd) if pr: return pr else: return None except: # Versions prior to Python 3.3 don't have shutil.which def pywhich (cmd, mode=os.F_OK | os.X_OK, path=None): ''' Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. Note: This function was backported from the Python 3 source code. ''' # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn) # If we're given a path with a directory part, look it up directly # rather than referring to PATH directories. This includes checking # relative to the current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get('PATH', os.defpath) if not path: return None path = path.split(os.pathsep) if platform.startswith('win'): # The current directory takes precedence on Windows. 
if os.curdir not in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. pathext = os.environ.get('PATHEXT', '').split(os.pathsep) # See if the given file matches any of the expected path # extensions. This will allow us to short circuit when given # "python.exe". If it does match, only test that one, otherwise we # have to try others. if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if normdir not in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None def find_vlc_on_windows(config_dir=None): PLAYER_CMD = '' for path in ('C:\\Program Files\\VideoLAN\\VLC\\vlc.exe', 'C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe' ): if os.path.exists(path): PLAYER_CMD = path break return PLAYER_CMD #result = [] #for root, dirs, files in os.walk(path): # for name in files: # if fnmatch.fnmatch(name, pattern): # result.append(os.path.join(root, name)) #return result def find_mpv_on_windows(): for a_path in ( os.path.join(os.getenv('APPDATA'), 'pyradio', 'mpv', 'mpv.exe'), os.path.join(os.getenv('APPDATA'), 'mpv', 'mpv.exe'), os.path.join(expanduser("~"), 'mpv', 'mpv.exe') ): if os.path.exists(a_path): return a_path return 'mpv' def find_mplayer_on_windows(): for a_path in ( os.path.join(os.getenv('APPDATA'), 'pyradio', 'mplayer', 'mplayer.exe'), os.path.join(os.getenv('APPDATA'), 'mplayer', 'mplayer.exe'), os.path.join(expanduser("~"), 'mplayer', 'mplayer.exe') ): if os.path.exists(a_path): return a_path return 'mplayer' def info_dict_to_list(info, fix_highlight, max_width): max_len = 0 for a_title in info.keys(): if len(a_title) > max_len: max_len = len(a_title) if version_info < (3, 0) and 
type(info[a_title]).__name__ != 'str': try: info[a_title] = info[a_title].encode('utf-8', 'replace') except: info[a_title] = '' info[a_title] = info[a_title].replace('_','¸') # logger.error('DE info\n{}\n\n'.format(info)) a_list = [] for n in info.keys(): a_list.extend(wrap(n.rjust(max_len, ' ') + ': |' + info[n], width=max_width, subsequent_indent=(2+max_len)*'_')) # logger.error('DE a_list\n\n{}\n\n'.format(a_list)) ''' make sure title is not alone in line ''' for a_title in ('URL:', 'site:'): for n, an_item in enumerate(a_list): if an_item.endswith(a_title): url = a_list[n+1].split('_|')[1] # merge items bar = '' if a_title.endswith('L:') else '|' a_list[n] = a_list[n] + ' ' + bar + url a_list.pop(n+1) break # logger.error('DE a_list\n\n{}\n\n'.format(a_list)) a_list[0] = a_list[0].replace('|', '') if fix_highlight: for x in fix_highlight: for n, an_item in enumerate(a_list): if x[0] in an_item: rep_name = n if x[1] in an_item: web_name = n break for n in range(rep_name + 1, web_name): a_list[n] = '|' + a_list[n] return a_list class Player(object): ''' Media player class. 
Playing is handled by player sub classes ''' process = None update_thread = None icy_title_prefix = 'Title: ' title_prefix = '' # Input: old user input - used to early suppress output # in case of consecutive equal messages # Volume: old volume input - used to suppress output (and firing of delay thread) # in case of consecutive equal volume messages # Title: old title input - printed by delay thread oldUserInput = {'Input': '', 'Volume': '', 'Title': ''} ''' volume percentage ''' volume = -1 delay_thread = None connection_timeout_thread = None ''' make it possible to change volume but not show it ''' show_volume = True muted = False ctrl_c_pressed = False ''' When found in station transmission, playback is on ''' _playback_token_tuple = ( 'AO: [', ) icy_tokens = () icy_audio_tokens = {} playback_is_on = False _station_encoding = 'utf-8' # used to stop mpv update thread on python3 stop_mpv_status_update_thread = False # used to stop vlc update thread on windows stop_win_vlc_status_update_thread = False # bitrate, url, audio_format etc. 
_icy_data = {} GET_TITLE = b'{ "command": ["get_property", "metadata"], "request_id": 100 }\n' GET_AUDIO_FORMAT = b'{ "command": ["get_property", "audio-out-params"], "request_id": 200 }\n' GET_AUDIO_CODEC = b'{ "command": ["get_property", "audio-codec"], "request_id": 300 }\n' GET_AUDIO_CODEC_NAME = b'{ "command": ["get_property", "audio-codec-name"], "request_id": 400 }\n' all_config_files = {} def __init__(self, config, outputStream, playback_timeout_counter, playback_timeout_handler, info_display_handler): self.outputStream = outputStream self._cnf = config self.config_encoding = self._cnf.default_encoding self.config_dir = self._cnf.stations_dir try: self.playback_timeout = int(self._cnf.connection_timeout_int) except ValueError: self.playback_timeout = 10 self.force_http = self._cnf.force_http self.playback_timeout_counter = playback_timeout_counter self.playback_timeout_handler = playback_timeout_handler self.info_display_handler = info_display_handler self.status_update_lock = outputStream.lock self.config_files = [] self._get_all_config_files() #if self.WIN and self.PLAYER_NAME == 'vlc': if platform.startswith('win'): ''' delete old vlc files (vlc_log.*) ''' from .del_vlc_log import RemoveWinVlcLogFiles threading.Thread(target=RemoveWinVlcLogFiles(self.config_dir)).start() def _get_all_config_files(self): ''' MPV config files ''' config_files = [] config_files = [expanduser("~") + "/.config/mpv/mpv.conf"] if platform.startswith('darwin'): config_files.append("/usr/local/etc/mpv/mpv.conf") elif platform.startswith('win'): config_files[0] = os.path.join(os.getenv('APPDATA'), "mpv", "mpv.conf") else: # linux, freebsd, etc. 
config_files.append("/etc/mpv/mpv.conf") self.all_config_files['mpv'] = config_files[:] ''' MPlayer config files ''' config_files = [] config_files = [expanduser("~") + "/.mplayer/config"] if platform.startswith('darwin'): config_files.append("/usr/local/etc/mplayer/mplayer.conf") elif platform.startswith('win'): if os.path.exists('C:\\mplayer\\mplayer.exe'): config_files[0] = 'C:\\mplayer\mplayer\\config' elif os.path.exists(os.path.join(os.getenv('USERPROFILE'), "mplayer", "mplayer.exe")): config_files[0] = os.path.join(os.getenv('USERPROFILE'), "mplayer", "mplayer", "config") elif os.path.exists(os.path.join(os.getenv('APPDATA'), "pyradio", "mplayer", "mplayer.exe")): config_files[0] = os.path.join(os.getenv('APPDATA'), "pyradio", "mplayer", "mplayer", "config") else: config_files = [] self.all_config_files['mplayer'] = config_files[:] @property def profile_name(self): return self._cnf.profile_name @profile_name.setter def progile_name(self, value): raise ValueError('property is read only') @property def profile_token(self): return '[' + self.profile_name + ']' @profile_token.setter def profile_token(self, value): raise ValueError('property is read only') def __del__(self): self.close() def _url_to_use(self, streamUrl): if self.force_http: return streamUrl.replace('https://', 'http://') else: return streamUrl def save_volume(self): pass def icy_data(self, a_member): ret = '' with self.status_update_lock: if self._icy_data: if a_member in self._icy_data: ret = self._icy_data[a_member] return ret def icy_data_available(self): with self.status_update_lock: l = len(self._icy_data) if l == 0: return False return True def get_info_string(self, a_station, max_width=60): guide = ( ('Reported Name', 'icy-name'), ('Website', 'icy-url'), ('Genre', 'icy-genre'), ('Bitrate', 'icy-br'), ('Audio', 'audio_format'), ('Codec Name', 'codec-name'), ('Codec', 'codec') ) enc = get_encodings() if self._station_encoding == '': this_enc = self._config_encoding else: this_enc = 
self._station_encoding try: this_enc_string = [x for x in enc if x[0] == this_enc][0][2] except: this_enc_string = 'Unknown' enc_to_show = '{0} ({1})'.format(this_enc, this_enc_string) info = collections.OrderedDict() info['Playlist Name'] = a_station[0] for x in guide: if x[1] in self._icy_data.keys(): info[x[0]] = self._icy_data[x[1]].strip() else: info[x[0]] = '' if x[0] == 'Bitrate': if info[x[0]]: info[x[0]] += ' kb/s' if x[0] == 'Genre': info['Encoding'] = enc_to_show if x[0].startswith('Reported'): info['Station URL'] = a_station[1].strip() info['Website'] = unquote(info['Website']).strip() a_list = [] fix_highlight = ( ('Reported ', 'Station URL:'), ('Website:', 'Genre:'), ('Genre:', 'Encoding:') ) a_list = info_dict_to_list(info, fix_highlight, max_width) if 'Codec:' not in a_list[-1]: a_list[n] = '|' + a_list[n] ret = '|' + '\n'.join(a_list).replace('Encoding: |', 'Encoding: ').replace('URL: |', 'URL: ').replace('\n', '\n|') tail = '' if 'icy-name' in self._icy_data.keys(): if a_station[0] != self._icy_data['icy-name'] and \ self._icy_data['icy-name'] and \ self._icy_data['icy-name'] != '(null)': tail = '\n\nPress |r| to rename station to |Reported Name|, or' # logger.error('DE ret\n{}\n'.format(ret)) return ret + '\n\n|Highlighted values| are user specified.\nOther values are station provided (live) data.', tail def _do_save_volume(self, config_string): if not self.config_files: if logger.isEnabledFor(logging.DEBUG): logger.debug('Volume not saved!!! (config file not found!!!)') return 'Volume not saved!!!' ret_strings = ('Volume: already saved...', 'Volume: {}% saved', 'Volume: {}% NOT saved (Error writing file)', 'Volume: NOT saved!') log_strings = ('Volume is -1. Aborting...', 'Volume is {}%. 
Saving...', 'Error saving profile "{}"', 'Error saving volume...') if self.volume == -1: ''' inform no change ''' if (logger.isEnabledFor(logging.DEBUG)): logger.debug(log_strings[0]) return ret_strings[0] elif self.volume == -2: if (logger.isEnabledFor(logging.DEBUG)): logger.debug(log_strings[3]) return ret_strings[3] else: ''' change volume ''' if (logger.isEnabledFor(logging.DEBUG)): logger.debug(log_strings[1].format(self.volume)) profile_found = False config_file = self.config_files[0] ret_string = ret_strings[1].format(str(self.volume)) if os.path.exists(config_file): # if platform.startswith('win'): # """ This is actually only for mplayer # which does not support profiles on Windows # """ # with open(config_file, 'r') as c_file: # config_string = c_file.read() # if "volume=" in config_string: # config_string = config_string.replace('#Volume set from pyradio\n', '') # vol = config_string.splitlines() # for i, v_string in enumerate(vol): # if v_string.startswith('volume'): # vol[i] = '#Volume set from pyradio\nvolume={}'.format(self.volume) # break # config_string = '\n'.join(vol) # else: # out = config_string + 'volume={}'.format(self.volume) # config_string = out # try: # with open(config_file, "w") as c_file: # c_file.write(config_string) # volume = self.volume # self.volume = -1 # self.PROFILE_FROM_USER = True # return ret_strings[1].format(str(volume)) # except: # if (logger.isEnabledFor(logging.DEBUG)): # logger.debug(log_strings[2].format(config_file)) # return ret_strings[2].format(str(self.volume)) # else: if self.PROFILE_FROM_USER: with open(config_file, 'r') as c_file: config_string = c_file.read() if self.profile_token in config_string: profile_found = True ''' split on self.profile_token last item has our options ''' sections = config_string.split(self.profile_token) ''' split at [ - i.e. 
isolate consecutive profiles first item has pyradio options ''' py_section = sections[-1].split('[') ''' split to lines in order to get '^volume=' ''' py_options = py_section[0].split('\n') ''' replace volume line ''' vol_set = False for i, opt in enumerate(py_options): if opt.startswith('volume='): py_options[i]='volume=' + str(self.volume) vol_set = True break ''' or add it if it does not exist ''' if not vol_set: py_options.append('volume=' + str(self.volume)) ''' join lines together in py_section's first item ''' py_section[0] = '\n'.join(py_options) ''' join consecutive profiles (if exist) in last item of sections ''' if len(py_section) > 1: sections[-1] = '['.join(py_section) else: sections[-1] = py_section[0] ''' finally get the string back together ''' config_string = self.profile_token.join(sections) try: with open(config_file, 'w') as c_file: c_file.write(config_string) self.volume = -1 except EnvironmentError: if (logger.isEnabledFor(logging.DEBUG)): logger.debug(log_strings[2].format(config_file)) return ret_strings[2].format(str(self.volume)) ''' no user profile or user config file does not exist ''' if not profile_found: if os.path.isdir(os.path.dirname(config_file)): if os.path.exists(config_file): new_profile_string = '\n' + config_string else: new_profile_string = self.NEW_PROFILE_STRING + config_string else: try: os.mkdir(os.path.dirname(config_file)) except OSError: if (logger.isEnabledFor(logging.DEBUG)): logger.debug(log_strings[2].format(config_file)) return ret_strings[2].format(str(self.volume)) new_profile_string = self.NEW_PROFILE_STRING + config_string try: with open(config_file, 'a') as c_file: c_file.write(new_profile_string.format(str(self.volume))) except EnvironmentError: if (logger.isEnabledFor(logging.DEBUG)): logger.debug(log_strings[2].format(config_file)) return ret_strings[2].format(str(self.volume)) self.volume = -1 self.PROFILE_FROM_USER = True return ret_string def _stop_delay_thread(self): if self.delay_thread is not None: 
try: self.delay_thread.cancel() except: pass self.delay_thread = None def _is_in_playback_token(self, a_string): for a_token in self._playback_token_tuple: if a_token in a_string: return True return False def updateStatus(self, *args): stop = args[0] process = args[1] stop_player = args[2] detect_if_player_exited = args[3] enable_crash_detection_function = args[4] has_error = False if (logger.isEnabledFor(logging.DEBUG)): logger.debug('updateStatus thread started.') #with lock: # self.oldUserInput['Title'] = 'Connecting to: "{}"'.format(self.name) # self.outputStream.write(msg=self.oldUserInput['Title']) ''' Force volume display even when icy title is not received ''' self.oldUserInput['Title'] = 'Playing: ' + self.name try: out = self.process.stdout while(True): subsystemOutRaw = out.readline() try: subsystemOut = subsystemOutRaw.decode(self._station_encoding, 'replace') except: subsystemOut = subsystemOutRaw.decode('utf-8', 'replace') if subsystemOut == '': break # logger.error('DE subsystemOut = "{0}"'.format(subsystemOut)) if not self._is_accepted_input(subsystemOut): continue subsystemOut = subsystemOut.strip() subsystemOut = subsystemOut.replace('\r', '').replace('\n', '') if self.oldUserInput['Input'] != subsystemOut: if (logger.isEnabledFor(logging.DEBUG)): if version_info < (3, 0): disp = subsystemOut.encode('utf-8', 'replace').strip() logger.debug('User input: {}'.format(disp)) else: logger.debug('User input: {}'.format(subsystemOut)) self.oldUserInput['Input'] = subsystemOut if self.volume_string in subsystemOut: # disable volume for mpv if self.PLAYER_NAME != 'mpv': # logger.error('***** volume') if self.oldUserInput['Volume'] != subsystemOut: self.oldUserInput['Volume'] = subsystemOut self.volume = ''.join(c for c in subsystemOut if c.isdigit()) # IMPORTANT: do this here, so that vlc actual_volume # gets updated in _format_volume_string string_to_show = self._format_volume_string(subsystemOut) + self._format_title_string(self.oldUserInput['Title']) if 
self.show_volume and self.oldUserInput['Title']: self.outputStream.write(msg=string_to_show, counter='') self.threadUpdateTitle() elif self._is_in_playback_token(subsystemOut): self.stop_timeout_counter_thread = True try: self.connection_timeout_thread.join() except: pass if enable_crash_detection_function: enable_crash_detection_function() if (not self.playback_is_on) and (logger.isEnabledFor(logging.INFO)): logger.info('*** updateStatus(): Start of playback detected ***') #if self.outputStream.last_written_string.startswith('Connecting to'): if self.oldUserInput['Title'] == '': new_input = 'Playing: ' + self.name else: new_input = self.oldUserInput['Title'] self.outputStream.write(msg=new_input, counter='') self.playback_is_on = True self._stop_delay_thread() if 'AO: [' in subsystemOut: with self.status_update_lock: if version_info > (3, 0): self._icy_data['audio_format'] = subsystemOut.split('] ')[1].split(' (')[0] else: self._icy_data['audio_format'] = subsystemOut.split('] ')[1].split(' (')[0].encode('utf-8') self.info_display_handler() if self.PLAYER_NAME == 'mpv' and version_info < (3, 0): for a_cmd in ( b'{ "command": ["get_property", "metadata"], "request_id": 100 }\n', self.GET_AUDIO_CODEC, self.GET_AUDIO_CODEC_NAME): response = self._send_mpv_command( a_cmd, return_response=True) if response: self._get_mpv_metadata(response, lambda: False, enable_crash_detection_function) self.info_display_handler() else: if logger.isEnabledFor(logging.INFO): logger.info('no response!!!') # logger.error('DE 3 {}'.format(self._icy_data)) elif self._is_icy_entry(subsystemOut): if not subsystemOut.endswith('Icy-Title=(null)'): if enable_crash_detection_function: enable_crash_detection_function() # logger.error('***** icy_entry: "{}"'.format(subsystemOut)) title = self._format_title_string(subsystemOut) # logger.error('DE title = "{}"'.format(title)) ok_to_display = False if not self.playback_is_on: if logger.isEnabledFor(logging.INFO): logger.info('*** updateStatus(): Start 
of playback detected (Icy-Title received) ***') self.playback_is_on = True ''' detect empty Icy-Title ''' title_without_prefix = title[len(self.icy_title_prefix):].strip() # logger.error('DE title_without_prefix = "{}"'.format(title_without_prefix)) if title_without_prefix: #self._stop_delay_thread() # logger.error("***** updating title") if title_without_prefix.strip() == '-': ''' Icy-Title is empty ''' if logger.isEnabledFor(logging.DEBUG): logger.debug('Icy-Title = " - ", not displaying...') else: self.oldUserInput['Title'] = title # make sure title will not pop-up while Volume value is on if self.delay_thread is None: ok_to_display = True if ok_to_display and self.playback_is_on: string_to_show = self.title_prefix + title self.outputStream.write(msg=string_to_show, counter='') else: if logger.isEnabledFor(logging.debug): logger.debug('***** Title change inhibited: ok_to_display = {0}, playbabk_is_on = {1}'.format(ok_to_display, self.playback_is_on)) else: ok_to_display = True if (logger.isEnabledFor(logging.INFO)): logger.info('Icy-Title is NOT valid') if ok_to_display and self.playback_is_on: title = 'Playing: ' + self.name self.oldUserInput['Title'] = title string_to_show = self.title_prefix + title self.outputStream.write(msg=string_to_show, counter='') #else: # if self.oldUserInput['Title'] == '': # self.oldUserInput['Title'] = 'Connecting to: "{}"'.format(self.name) # self.outputStream.write(msg=self.oldUserInput['Title'], counter='') else: for a_token in self.icy_audio_tokens.keys(): if a_token in subsystemOut: if not self.playback_is_on: if logger.isEnabledFor(logging.INFO): logger.info('*** updateStatus(): Start of playback detected (Icy audio token received) ***') self.playback_is_on = True if enable_crash_detection_function: enable_crash_detection_function() # logger.error('DE token = "{}"'.format(a_token)) # logger.error('DE icy_audio_tokens[a_token] = "{}"'.format(self.icy_audio_tokens[a_token])) a_str = subsystemOut.split(a_token) # 
logger.error('DE str = "{}"'.format(a_str)) with self.status_update_lock: if self.icy_audio_tokens[a_token] == 'icy-br': self._icy_data[self.icy_audio_tokens[a_token]] = a_str[1].replace('kbit/s', '') else: self._icy_data[self.icy_audio_tokens[a_token]] = a_str[1] if self.icy_audio_tokens[a_token] == 'codec': if '[' in self._icy_data['codec']: self._icy_data['codec-name'] = self._icy_data['codec'].split('] ')[0].replace('[', '') self._icy_data['codec'] = self._icy_data['codec'].split('] ')[1] if version_info < (3, 0): for an_item in self._icy_data.keys(): try: self._icy_data[an_item] = self._icy_data[an_item].encode(self._station_encoding, 'replace') except UnicodeDecodeError as e: self._icy_data[an_item] = '' if 'codec-name' in self._icy_data.keys(): self._icy_data['codec-name'] = self._icy_data['codec-name'].replace('"', '') # logger.error('DE audio data\n\n{}\n\n'.format(self._icy_data)) self.info_display_handler() except: if logger.isEnabledFor(logging.ERROR): logger.error('Error in updateStatus thread.', exc_info=True) # return ''' crash detection ''' # logger.error('detect_if_player_exited = {0}, stop = {1}'.format(detect_if_player_exited(), stop())) if not stop(): if not platform.startswith('win'): poll = process.poll() if poll is not None: if not stop(): if detect_if_player_exited(): if logger.isEnabledFor(logging.INFO): logger.info('----==== player disappeared! ====----') stop_player(from_update_thread=True) else: if logger.isEnabledFor(logging.INFO): logger.info('Crash detection is off; waiting to timeout') else: if not stop(): if detect_if_player_exited(): if logger.isEnabledFor(logging.INFO): logger.info('----==== player disappeared! 
====----') stop_player(from_update_thread=True) else: if logger.isEnabledFor(logging.INFO): logger.info('Crash detection is off; waiting to timeout') if (logger.isEnabledFor(logging.INFO)): logger.info('updateStatus thread stopped.') def updateMPVStatus(self, *args): stop = args[0] process = args[1] stop_player = args[2] detect_if_player_exited = args[3] enable_crash_detection_function = args[4] if (logger.isEnabledFor(logging.DEBUG)): logger.debug('MPV updateStatus thread started.') while True: try: sock = self._connect_to_socket(self.mpvsocket) finally: if sock: break if stop(): if (logger.isEnabledFor(logging.INFO)): logger.info('MPV updateStatus thread stopped (no connection to socket).') return # Send data message = b'{ "command": ["observe_property", 1, "metadata"] }\n' try: if platform.startswith('win'): win32file.WriteFile(sock, message) else: sock.sendall(message) go_on = True except: # logger.error('DE \n\nBroken pipe\n\n') go_on = False if go_on: while True: if stop(): break try: if platform.startswith('win'): try: data = win32file.ReadFile(sock, 64*1024) except pywintypes.error as e: data = b'' else: data = sock.recvmsg(4096) a_data = self._fix_returned_data(data) # logger.error('DE Received: "{!r}"'.format(a_data)) if a_data == b'' or stop(): break if a_data: all_data = a_data.split(b'\n') for n in all_data: if self._get_mpv_metadata(n, stop, enable_crash_detection_function): self._request_mpv_info_data(sock) else: try: if stop(): break d = json.loads(n) if 'event' in d.keys(): if d['event'] == 'metadata-update': try: if platform.startswith('win'): win32file.WriteFile(sock, self.GET_TITLE) else: sock.sendall(self.GET_TITLE) except: break ret = self._set_mpv_playback_is_on(stop, enable_crash_detection_function) if not ret: break self._request_mpv_info_data(sock) self.info_display_handler() elif d['event'] == 'playback-restart': if not self.playback_is_on: ret = self._set_mpv_playback_is_on(stop, enable_crash_detection_function) if not ret: break 
self._request_mpv_info_data(sock) self.info_display_handler() except: pass finally: pass self._close_pipe(sock) if not stop(): ''' haven't been asked to stop ''' if detect_if_player_exited(): if logger.isEnabledFor(logging.INFO): logger.info('----==== MPV disappeared! ====----') stop_player(from_update_thread=True) else: if logger.isEnabledFor(logging.INFO): logger.info('Crash detection is off; waiting to timeout') if (logger.isEnabledFor(logging.INFO)): logger.info('MPV updateStatus thread stopped.') def _close_pipe(self, sock): if platform.startswith('win'): win32file.CloseHandle(sock) else: sock.close() def updateWinVLCStatus(self, *args): def do_crash_detection(detect_if_player_exited, stop): if self.playback_is_on: poll = process.poll() if poll is not None: if not stop(): if detect_if_player_exited(): if logger.isEnabledFor(logging.INFO): logger.info('----==== VLC disappeared! ====----') stop_player(from_update_thread=True) return True else: if logger.isEnabledFor(logging.INFO): logger.info('Crash detection is off; waiting to timeout') return False has_error = False if (logger.isEnabledFor(logging.DEBUG)): logger.debug('Win VLC updateStatus thread started.') fn = args[0] enc = args[1] stop = args[2] process = args[3] stop_player = args[4] detect_if_player_exited = args[5] enable_crash_detection_function = args[6] ''' Force volume display even when icy title is not received ''' self.oldUserInput['Title'] = 'Playing: ' + self.name # logger.error('DE ==== {0}\n{1}\n{2}'.format(fn, enc, stop)) #with lock: # self.oldUserInput['Title'] = 'Connecting to: "{}"'.format(self.name) # self.outputStream.write(msg=self.oldUserInput['Title']) go_on = False while not go_on: if stop(): break try: fp = open(fn, mode='rt', encoding=enc, errors='ignore') go_on = True except: pass try: while(True): if stop(): break subsystemOut = fp.readline() subsystemOut = subsystemOut.strip().replace(u'\ufeff', '') subsystemOut = subsystemOut.replace('\r', '').replace('\n', '') if subsystemOut 
== '': continue # logger.error('DE subsystemOut = "{0}"'.format(subsystemOut)) if not self._is_accepted_input(subsystemOut): continue # logger.error('DE accepted inp = "{0}"'.format(subsystemOut)) if self.oldUserInput['Input'] != subsystemOut: if stop(): break if (logger.isEnabledFor(logging.DEBUG)): if version_info < (3, 0): disp = subsystemOut.encode('utf-8', 'replace').strip() # logger.debug("User input: {}".format(disp)) else: # logger.debug("User input: {}".format(subsystemOut)) pass self.oldUserInput['Input'] = subsystemOut # logger.error('DE subsystemOut = "' + subsystemOut + '"') if self.volume_string in subsystemOut: if stop(): break # logger.error("***** volume") if self.oldUserInput['Volume'] != subsystemOut: self.oldUserInput['Volume'] = subsystemOut self.volume = ''.join(c for c in subsystemOut if c.isdigit()) # IMPORTANT: do this here, so that vlc actual_volume # gets updated in _format_volume_string string_to_show = self._format_volume_string(subsystemOut) + self._format_title_string(self.oldUserInput['Title']) if self.show_volume and self.oldUserInput['Title']: self.outputStream.write(msg=string_to_show, counter='') self.threadUpdateTitle() elif self._is_in_playback_token(subsystemOut): # logger.error('DE \n\ntoken = "' + subsystemOut + '"\n\n') if stop(): break self.stop_timeout_counter_thread = True try: self.connection_timeout_thread.join() except: pass if enable_crash_detection_function: enable_crash_detection_function() if (not self.playback_is_on) and (logger.isEnabledFor(logging.INFO)): logger.info('*** updateWinVLCStatus(): Start of playback detected ***') #if self.outputStream.last_written_string.startswith('Connecting to'): if self.oldUserInput['Title'] == '': new_input = 'Playing: ' + self.name else: new_input = self.oldUserInput['Title'] self.outputStream.write(msg=new_input, counter='') self.playback_is_on = True self._stop_delay_thread() if 'AO: [' in subsystemOut: with self.status_update_lock: if version_info > (3, 0): 
self._icy_data['audio_format'] = subsystemOut.split('] ')[1].split(' (')[0] else: self._icy_data['audio_format'] = subsystemOut.split('] ')[1].split(' (')[0].encode('utf-8') self.info_display_handler() # logger.error('DE 3 {}'.format(self._icy_data)) elif self._is_icy_entry(subsystemOut): if stop(): break if not self.playback_is_on: if logger.isEnabledFor(logging.INFO): logger.info('*** updateWinVLCStatus(): Start of playback detected (Icy-Title received) ***') self.stop_timeout_counter_thread = True try: self.connection_timeout_thread.join() except: pass self.playback_is_on = True self._stop_delay_thread() if enable_crash_detection_function: enable_crash_detection_function() if not subsystemOut.endswith('Icy-Title=(null)'): # logger.error("***** icy_entry") title = self._format_title_string(subsystemOut) ok_to_display = False if title[len(self.icy_title_prefix):].strip(): self.oldUserInput['Title'] = title # make sure title will not pop-up while Volume value is on if self.delay_thread is None: ok_to_display = True if ok_to_display and self.playback_is_on: string_to_show = self.title_prefix + title self.outputStream.write(msg=string_to_show, counter='') else: ok_to_display = True if (logger.isEnabledFor(logging.INFO)): logger.info('Icy-Title is NOT valid') if ok_to_display and self.playback_is_on: title = 'Playing: ' + self.name self.oldUserInput['Title'] = title string_to_show = self.title_prefix + title self.outputStream.write(msg=string_to_show, counter='') #else: # if self.oldUserInput['Title'] == '': # self.oldUserInput['Title'] = 'Connecting to: "{}"'.format(self.name) # self.outputStream.write(msg=self.oldUserInput['Title'], counter='') else: if stop(): break for a_token in self.icy_audio_tokens.keys(): if a_token in subsystemOut: if not self.playback_is_on: if logger.isEnabledFor(logging.INFO): logger.info('*** updateWinVLCStatus(): Start of playback detected (Icy audio token received) ***') self.stop_timeout_counter_thread = True try: 
self.connection_timeout_thread.join() except: pass self.playback_is_on = True self._stop_delay_thread() if enable_crash_detection_function: enable_crash_detection_function() # logger.error('DE token = "{}"'.format(a_token)) # logger.error('DE icy_audio_tokens[a_token] = "{}"'.format(self.icy_audio_tokens[a_token])) a_str = subsystemOut.split(a_token) # logger.error('DE str = "{}"'.format(a_str)) with self.status_update_lock: if self.icy_audio_tokens[a_token] == 'icy-br': self._icy_data[self.icy_audio_tokens[a_token]] = a_str[1].replace('kbit/s', '') else: self._icy_data[self.icy_audio_tokens[a_token]] = a_str[1] if self.icy_audio_tokens[a_token] == 'codec': if '[' in self._icy_data['codec']: self._icy_data['codec-name'] = self._icy_data['codec'].split('] ')[0].replace('[', '') self._icy_data['codec'] = self._icy_data['codec'].split('] ')[1] if version_info < (3, 0): for an_item in self._icy_data.keys(): try: self._icy_data[an_item] = self._icy_data[an_item].encode(self._station_encoding, 'replace') except UnicodeDecodeError as e: self._icy_data[an_item] = '' if 'codec-name' in self._icy_data.keys(): self._icy_data['codec-name'] = self._icy_data['codec-name'].replace('"', '') # logger.error('DE audio data\n\n{}\n\n'.format(self._icy_data)) self.info_display_handler() except: has_error = True if logger.isEnabledFor(logging.ERROR): logger.error('Error in Win VLC updateStatus thread.', exc_info=True) if has_error or not stop(): do_crash_detection(detect_if_player_exited, stop) try: fp.close() except: pass def _request_mpv_info_data(self, sock): with self.status_update_lock: ret = len(self._icy_data) if ret == 0: if platform.startswith('win'): win32file.WriteFile(sock, self.GET_TITLE) win32file.WriteFile(sock, self.GET_AUDIO_FORMAT) win32file.WriteFile(sock, self.GET_AUDIO_CODEC) win32file.WriteFile(sock, self.GET_AUDIO_CODEC_NAME) else: sock.sendall(self.GET_TITLE) sock.sendall(self.GET_AUDIO_FORMAT) sock.sendall(self.GET_AUDIO_CODEC) 
sock.sendall(self.GET_AUDIO_CODEC_NAME) def _get_mpv_metadata(self, *args): ''' Get MPV metadata Parameters ========== a_data (args[0] Data read from socket lock (args[1]) Thread lock stop (args[2]) function to indicate thread stopping Returns ======= True Manipulated no data (other functions must manipulate them) False Data read and manipulated, or stop condition triggered. Other functions do not have to deal with this data, of thread will terminate. Populates ========= self._icy_data Fields: icy-title : Title of song (python 3 only) icy-name : Station name icy-url : Station URL icy-genre : Station genres icy-br : Station bitrate audio_format : XXXXHx stereo/mono 1/2ch format ''' a_data = args[0] stop = args[1] enable_crash_detection_function = args[2] if b'"icy-title":"' in a_data: if version_info > (3, 0): title = a_data.split(b'"icy-title":"')[1].split(b'"}')[0] if title: if title == b'-' or title == b' - ': if logger.isEnabledFor(logging.DEBUG): logger.debug('Icy-Title = " - ", not displaying...') else: try: self.oldUserInput['Title'] = 'Title: ' + title.decode(self._station_encoding, 'replace') except: self.oldUserInput['Title'] = 'Title: ' + title.decode('utf-8', 'replace') string_to_show = self.title_prefix + self.oldUserInput['Title'] if stop(): return False self.outputStream.write(msg=string_to_show, counter='') if not self.playback_is_on: if stop(): return False return self._set_mpv_playback_is_on(stop, enable_crash_detection_function) else: if (logger.isEnabledFor(logging.INFO)): logger.info('Icy-Title is NOT valid') title = 'Playing: ' + self.name string_to_show = self.title_prefix + title if stop(): return False self.outputStream.write(msg=string_to_show, counter='') self.oldUserInput['Title'] = title # logger.info('DE a_data {}'.format(a_data)) if b'icy-br' in a_data: # logger.info('DE check {}'.format(self._icy_data)) if not 'icy-br' in self._icy_data.keys(): for icy in ('icy-name', 'icy-url', 'icy-genre', 'icy-br'): if stop(): return False if 
version_info < (3, 0): bytes_icy = icy else: bytes_icy = bytes(icy, encoding='utf-8') if icy in ('icy-name', 'icy-genre'): enc = self._station_encoding else: enc = 'utf-8' if bytes_icy in a_data : with self.status_update_lock: if version_info < (3, 0): try: self._icy_data[icy] = a_data.split(bytes_icy + b'":"')[1].split(b'",')[0].split(b'"}')[0].encode(enc, 'replace') except UnicodeDecodeError as e: pass else: try: self._icy_data[icy] = a_data.split(bytes_icy + b'":"')[1].split(b'",')[0].split(b'"}')[0].decode(enc) except UnicodeDecodeError as e: pass # logger.error('DE 0 {}'.format(self._icy_data)) return True elif b'request_id' in a_data and b'"error":"success"' in a_data: if b'"request_id":200' in a_data: try: d = json.loads(a_data) except: d = None if d: self.status_update_lock.acquire() try: self._icy_data['audio_format'] = '{0}Hz {1} {2}ch {3}'.format( d['data']['samplerate'], d['data']['channels'], d['data']['channel-count'], d['data']['format']) finally: self.status_update_lock.release() elif b'"request_id":300' in a_data: self.status_update_lock.acquire() try: if version_info < (3, 0): self._icy_data['codec'] = a_data.split(b'"data":"')[1].split(b'",')[0].encode('utf-8') else: self._icy_data['codec'] = a_data.split(b'"data":"')[1].split(b'",')[0].decode('utf-8') finally: self.status_update_lock.release() self.info_display_handler() elif b'"request_id":400' in a_data: self.status_update_lock.acquire() try: if version_info < (3, 0): self._icy_data['codec-name'] = a_data.split(b'"data":"')[1].split(b'",')[0].encode('utf-8') else: self._icy_data['codec-name'] = a_data.split(b'"data":"')[1].split(b'",')[0].decode('utf-8') finally: self.status_update_lock.release() # logger.error('DE 1 {}'.format(self._icy_data)) self.info_display_handler() return True else: return False def _set_mpv_playback_is_on(self, stop, enable_crash_detection_function): self.stop_timeout_counter_thread = True try: self.connection_timeout_thread.join() except: pass 
        self.detect_if_player_exited = True
        if (not self.playback_is_on) and (logger.isEnabledFor(logging.INFO)):
            logger.info('*** _set_mpv_playback_is_on(): Start of playback detected ***')
        new_input = 'Playing: ' + self.name
        self.outputStream.write(msg=new_input, counter='')
        if self.oldUserInput['Title'] == '':
            self.oldUserInput['Input'] = new_input
        self.oldUserInput['Title'] = new_input
        self.playback_is_on = True
        if stop():
            return False
        enable_crash_detection_function()
        return True

    def threadUpdateTitle(self, delay=1):
        ''' Schedule a delayed re-display of the current title (after the
            volume indicator has been shown), via a threading.Timer stored
            in self.delay_thread. '''
        if self.oldUserInput['Title'] != '':
            self._stop_delay_thread()
            try:
                self.delay_thread = threading.Timer(
                    delay,
                    self.updateTitle,
                    [ self.outputStream, None ]
                )
                self.delay_thread.start()
            except:
                if (logger.isEnabledFor(logging.DEBUG)):
                    logger.debug('delay thread start failed')

    def updateTitle(self, *arg, **karg):
        ''' Timer callback: write arg[1] to the stream arg[0], or, when
            arg[1] is falsy, re-display the last known title. '''
        self._stop_delay_thread()
        if arg[1]:
            arg[0].write(msg=arg[1])
        else:
            arg[0].write(msg=self.title_prefix + self._format_title_string(self.oldUserInput['Title']))

    def _is_icy_entry(self, a_string):
        ''' True when a_string contains any icy-title token. '''
        for a_token in self.icy_tokens:
            if a_token in a_string:
                return True
        return False

    def _format_title_string(self, title_string):
        ''' Normalize a title line for display. '''
        return self._title_string_format_text_tag(title_string)

    def _title_string_format_text_tag(self, a_string):
        ''' Extract the value of a trailing ' - text="..."' tag and append
            it to the string's head, unless it merely repeats the title. '''
        i = a_string.find(' - text="')
        if i == -1:
            return a_string
        else:
            ret_string = a_string[:i]
            # 9 == len(' - text="'); the value runs to the closing quote
            text_string = a_string[i+9:]
            final_text_string = text_string[:text_string.find('"')]
            if ret_string == self.icy_title_prefix + final_text_string:
                return ret_string
            else:
                return ret_string + ': ' + final_text_string

    def _format_volume_string(self, volume_string):
        ''' Normalize a volume line for display (same tag handling as titles). '''
        return self._title_string_format_text_tag(volume_string)

    def isPlaying(self):
        ''' True while a player subprocess is alive. '''
        return bool(self.process)

    def play(self, name, streamUrl, stop_player,
             detect_if_player_exited,
             enable_crash_detection_function=None,
             encoding=''):
        ''' use a multimedia player to play a stream '''
        self.close()
        self.name = name
        self.oldUserInput = {'Input': '', 'Volume': '', 'Title': ''}
        self.muted = 
False self.show_volume = True self.title_prefix = '' self.playback_is_on = False self.delay_thread = None # self.outputStream.write(msg='Station: "{}" - Opening connection...'.format(name), counter='') self.outputStream.write(msg='Station: ' + name + ' - Opening connection...', counter='') if logger.isEnabledFor(logging.INFO): logger.info('Selected Station: ' + name) if encoding: self._station_encoding = encoding else: self._station_encoding = self.config_encoding opts = [] isPlayList = streamUrl.split("?")[0][-3:] in ['m3u', 'pls'] opts = self._buildStartOpts(streamUrl, isPlayList) self.stop_mpv_status_update_thread = False if logger.isEnabledFor(logging.INFO): logger.info('Executing command: {}'.format(' '.join(opts))) if platform.startswith('win') and self.PLAYER_NAME == 'vlc': self.stop_win_vlc_status_update_thread = False ''' Launches vlc windowless ''' startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW self.process = subprocess.Popen(opts, shell=False, startupinfo=startupinfo) self.update_thread = threading.Thread( target=self.updateWinVLCStatus, args=( self._vlc_stdout_log_file, self.config_encoding, lambda: self.stop_win_vlc_status_update_thread, self.process, stop_player, detect_if_player_exited, enable_crash_detection_function ) ) else: if self.PLAYER_NAME == 'mpv' and version_info > (3, 0): self.process = subprocess.Popen(opts, shell=False, stdout=subprocess.DEVNULL, stdin=subprocess.DEVNULL, stderr=subprocess.DEVNULL) self.update_thread = threading.Thread( target=self.updateMPVStatus, args=(lambda: self.stop_mpv_status_update_thread, self.process, stop_player, detect_if_player_exited, enable_crash_detection_function ) ) else: self.process = subprocess.Popen( opts, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT ) self.update_thread = threading.Thread( target=self.updateStatus, args=( lambda: self.stop_mpv_status_update_thread, self.process, stop_player, 
detect_if_player_exited, enable_crash_detection_function ) ) self.update_thread.start() if self.PLAYER_NAME == 'vlc': self._get_volume() # start playback check timer thread self.stop_timeout_counter_thread = False if self.playback_timeout > 0: try: self.connection_timeout_thread = threading.Thread( target=self.playback_timeout_counter, args=(self.playback_timeout, self.name, lambda: self.stop_timeout_counter_thread) ) self.connection_timeout_thread.start() if (logger.isEnabledFor(logging.DEBUG)): logger.debug('playback detection thread started') except: self.connection_timeout_thread = None if (logger.isEnabledFor(logging.ERROR)): logger.error('playback detection thread failed to start') else: if logger.isEnabledFor(logging.DEBUG): logger.debug('playback detection thread not starting (timeout is 0)') if logger.isEnabledFor(logging.INFO): logger.info('----==== {} player started ====----'.format(self.PLAYER_NAME)) def _sendCommand(self, command): ''' send keystroke command to player ''' if(self.process is not None): if logger.isEnabledFor(logging.DEBUG): logger.debug('Sending Command: {}'.format(command).strip()) try: self.process.stdin.write(command.encode('utf-8', 'replace')) self.process.stdin.flush() except: if logger.isEnabledFor(logging.ERROR): logger.error('Error while sending Command: {}'.format(command).strip(), exc_info=True) def close_from_windows(self): ''' kill player instance when window console is closed ''' if self.process: self.close() self._stop() def close(self): ''' kill player instance ''' self._no_mute_on_stop_playback() ''' First close the subprocess ''' self._stop() ''' Here is fallback solution and cleanup ''' self.stop_timeout_counter_thread = True try: self.connection_timeout_thread.join() except: pass self._stop_delay_thread() if self.process is not None: self._kill_process_tree(self.process.pid) self.process.wait() self.process = None try: self.update_thread.join() finally: self.update_thread = None def _kill_process_tree(self, pid): if 
psutil.pid_exists(pid): parent = psutil.Process(pid) else: if logger.isEnabledFor(logging.DEBUG): logger.debug('PID {} does not exist...'.format(pid)) return try: children = parent.children(recursive=True) try: os.kill(parent.pid, 9) except: pass for child in children: try: os.kill(child.pid, 9) except: pass if logger.isEnabledFor(logging.DEBUG): logger.debug('PID {} (and its children) killed...'.format(pid)) except psutil.NoSuchProcess: pass def _killall(self, name): if name: try: # iterating through each instance of the process for line in os.popen("ps ax | grep " + name + " | grep -v grep"): fields = line.split() if name in fields[4]: # extracting Process ID from the output pid = fields[0] # terminating process # os.kill(int(pid), signal.SIGKILL) os.kill(int(pid), 9) # os.kill(int(pid), 15) except: pass def _buildStartOpts(self, streamUrl, playList): pass def toggleMute(self): ''' mute / unmute player ''' if self.PLAYER_NAME == 'mpv': self.muted = self._mute() elif self.PLAYER_NAME == 'vlc': self._mute() else: self.muted = not self.muted self._mute() if self.muted: self._stop_delay_thread() self.title_prefix = '[Muted] ' self.show_volume = False else: self.title_prefix = '' self.show_volume = True if self.oldUserInput['Title'] == '': self.outputStream.write(msg=self.title_prefix + self._format_title_string(self.oldUserInput['Input']), counter='') else: self.outputStream.write(msg=self.title_prefix + self._format_title_string(self.oldUserInput['Title']), counter='') def _mute(self): ''' to be implemented on subclasses ''' pass def _stop(self): pass def _get_volume(self): ''' get volume, if player can report it ''' pass def volumeUp(self): ''' increase volume ''' if self.muted is not True: self._volume_up() def _volume_up(self): ''' to be implemented on subclasses ''' pass def volumeDown(self): ''' decrease volume ''' if self.muted is not True: self._volume_down() def _volume_down(self): ''' to be implemented on subclasses ''' pass def 
_no_mute_on_stop_playback(self): ''' make sure player does not stop muted, i.e. volume=0 Currently implemented for vlc only.''' pass def _is_accepted_input(self, input_string): ''' subclasses are able to reject input messages thus limiting message procesing. By default, all messages are accepted. Currently implemented for vlc only.''' return True class MpvPlayer(Player): '''Implementation of Player object for MPV''' PLAYER_NAME = 'mpv' PLAYER_CMD = 'mpv' WIN = False if platform.startswith('win'): WIN = True if WIN: PLAYER_CMD = find_mpv_on_windows() NEW_PROFILE_STRING = 'volume=50\n\n' if pywhich(PLAYER_CMD): executable_found = True else: executable_found = False if executable_found: ''' items of this tuple are considered icy-title and get displayed after first icy-title is received ''' icy_tokens = ('icy-title: ', ) icy_audio_tokens = {} ''' USE_PROFILE -1 : not checked yet 0 : do not use 1 : use profile ''' USE_PROFILE = -1 ''' True if profile comes from ~/.config/mpv/mpv.conf ''' PROFILE_FROM_USER = False ''' String to denote volume change ''' volume_string = 'Volume: ' if platform.startswith('win'): mpvsocket = r'\\.\pipe\mpvsocket.{}'.format(os.getpid()) else: mpvsocket = '/tmp/mpvsocket.{}'.format(os.getpid()) if logger.isEnabledFor(logging.DEBUG): logger.debug('mpv socket is "{}"'.format(self.mpvsocket)) if os.path.exists(mpvsocket): os.system('rm ' + mpvsocket + ' 2>/dev/null'); commands = { 'volume_up': b'{ "command": ["cycle", "volume", "up"], "request_id": 1000 }\n', 'volume_down': b'{ "command": ["cycle", "volume", "down"], "request_id": 1001 }\n', 'mute': b'{ "command": ["cycle", "mute"], "request_id": 1002 }\n', 'pause': b'{ "command": ["pause"], "request_id": 1003 }\n', 'quit': b'{ "command": ["quit"], "request_id": 1004}\n', } def __init__(self, config, outputStream, playback_timeout_counter, playback_timeout_handler, info_display_handler): config.PLAYER_NAME = 'mpv' super(MpvPlayer, self).__init__( config, outputStream, playback_timeout_counter, 
playback_timeout_handler, info_display_handler ) self.config_files = self.all_config_files['mpv'] def save_volume(self): ''' Saving Volume in Windows does not work; Profiles not supported... ''' if int(self.volume) > 999: self.volume = -2 return self._do_save_volume(self.profile_token + '\nvolume={}\n') def _configHasProfile(self): ''' Checks if mpv config has [pyradio] entry / profile. Profile example: [pyradio] volume-max=300 volume=50''' self.PROFILE_FROM_USER = False for i, config_file in enumerate(self.config_files): if os.path.exists(config_file): with open(config_file) as f: config_string = f.read() if self.profile_token in config_string: if i == 0: self.PROFILE_FROM_USER = True return 1 ''' profile not found in config create a default profile ''' try: with open(self.config_files[0], 'a') as f: f.write('\n[{}]\n'.format(self.profile_name)) f.write(self.NEW_PROFILE_STRING) self.PROFILE_FROM_USER = True return 1 except: return 0 def _buildStartOpts(self, streamUrl, playList=False): ''' Builds the options to pass to mpv subprocess.''' ''' Test for newer MPV versions as it supports different IPC flags. 
        '''
        # probe mpv: newer versions accept --input-ipc-server,
        # older ones use --input-unix-socket
        p = subprocess.Popen([self.PLAYER_CMD, '--no-video', '--input-ipc-server'],
                             stdout=subprocess.PIPE,
                             stdin=subprocess.PIPE,
                             shell=False)
        out = p.communicate()
        if 'not found' not in str(out[0]):
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('--input-ipc-server is supported.')
            newerMpv = 1
        else:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('--input-ipc-server is not supported.')
            newerMpv = 0

        if playList:
            if newerMpv:
                opts = [self.PLAYER_CMD, '--no-video', '--quiet', '--playlist=' + self._url_to_use(streamUrl), '--input-ipc-server=' + self.mpvsocket]
            else:
                opts = [self.PLAYER_CMD, '--no-video', '--quiet', '--playlist=' + self._url_to_use(streamUrl), '--input-unix-socket=' + self.mpvsocket]
        else:
            if newerMpv:
                opts = [self.PLAYER_CMD, '--no-video', '--quiet', self._url_to_use(streamUrl), '--input-ipc-server=' + self.mpvsocket]
            else:
                opts = [self.PLAYER_CMD, '--no-video', '--quiet', self._url_to_use(streamUrl), '--input-unix-socket=' + self.mpvsocket]

        ''' this will set the profile too '''
        params = []
        if self._cnf.command_line_params:
            params = self._cnf.command_line_params.split(' ')

        ''' Do I have user profile in config?
            If so, can I use it?
        '''
        if self.USE_PROFILE == -1:
            self.USE_PROFILE = self._configHasProfile()

        if self.USE_PROFILE == 1:
            opts.append('--profile=' + self.profile_name)
            if (logger.isEnabledFor(logging.INFO)):
                logger.info('Using profile: "[{}]"'.format(self.profile_name))
        else:
            if (logger.isEnabledFor(logging.INFO)):
                if self.USE_PROFILE == 0:
                    logger.info('Profile "[{}]" not found in config file!!!'.format(self.profile_name))
                else:
                    logger.info('No usable profile found')

        ''' add command line parameters '''
        if params:
            for a_param in params:
                opts.append(a_param)
        return opts

    def _fix_returned_data(self, data):
        # win32file.ReadFile returns (errcode, bytes); sockets return bytes
        # (or a recvmsg tuple) — normalize to the payload only
        if isinstance(data, tuple):
            if 'int' in str(type(data[0])):
                a_data = data[1]
            else:
                a_data = data[0]
        else:
            a_data = data
        return a_data

    def _mute(self):
        ''' mute mpv '''
        # retry until the IPC command goes through, then read back the state
        ret = self._send_mpv_command('mute')
        while not ret:
            ret = self._send_mpv_command('mute')
        return self._get_mute_status()

    def _get_mute_status(self):
        # query the "mute" property over the mpv IPC socket;
        # returns the boolean state, or None on send failure
        while True:
            sock = self._connect_to_socket(self.mpvsocket)
            try:
                if platform.startswith('win'):
                    win32file.WriteFile(sock, b'{ "command": ["get_property", "mute"], "request_id": 600 }\n')
                else:
                    sock.sendall(b'{ "command": ["get_property", "mute"], "request_id": 600 }\n')
            except:
                self._close_pipe(sock)
                return
            # wait for response
            try:
                if platform.startswith('win'):
                    try:
                        data = win32file.ReadFile(sock, 64*1024)
                    except pywintypes.error as e:
                        data = b''
                else:
                    if version_info < (3, 0):
                        data = sock.recv(4096)
                    else:
                        data = sock.recvmsg(4096)
                a_data = self._fix_returned_data(data)
                # logger.error('DE Received: "{!r}"'.format(a_data))
                if a_data:
                    # the socket may deliver several JSON lines at once
                    all_data = a_data.split(b'\n')
                    for n in all_data:
                        try:
                            d = json.loads(n)
                            if d['error'] == 'success':
                                if isinstance(d['data'], bool):
                                    self._close_pipe(sock)
                                    return d['data']
                        except:
                            pass
            finally:
                pass
            self._close_pipe(sock)

    def pause(self):
        ''' pause streaming (if possible) '''
        self._send_mpv_command('pause')

    def _stop(self):
        ''' kill mpv instance '''
        self.stop_mpv_status_update_thread = True
        self._send_mpv_command('quit')
        if not platform.startswith('win'):
            # remove the per-instance unix socket
            os.system('rm ' + self.mpvsocket + ' 2>/dev/null');
        self._icy_data = {}

    def _volume_up(self):
        ''' increase mpv's volume '''
        self._send_mpv_command('volume_up')
        self._display_mpv_volume_value()

    def _volume_down(self):
        ''' decrease mpv's volume '''
        self._send_mpv_command('volume_down')
        self._display_mpv_volume_value()

    def _format_title_string(self, title_string):
        ''' format mpv's title '''
        return self._title_string_format_text_tag(title_string.replace(self.icy_tokens[0], self.icy_title_prefix))

    def _format_volume_string(self, volume_string):
        ''' format mpv's volume '''
        return '[' + volume_string[volume_string.find(self.volume_string):].replace('ume', '')+'] '

    def _connect_to_socket(self, server_address):
        # open the IPC channel: a named pipe on Windows, a unix socket
        # elsewhere; returns a handle/socket or None on failure
        if platform.startswith('win'):
            count = 0
            # logger.error('\n\n_connect_to_socket: {}\n\n'.format(server_address))
            try:
                handle = win32file.CreateFile(
                    server_address,
                    win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                    0,
                    None,
                    win32file.OPEN_EXISTING,
                    0,
                    None
                )
                res = win32pipe.SetNamedPipeHandleState(handle, win32pipe.PIPE_READMODE_MESSAGE, None, None)
                return handle
            except pywintypes.error as e:
                return None
        else:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                sock.connect(server_address)
                return sock
            except:
                self._close_pipe(sock)
                return None

    def _send_mpv_command(self, a_command, return_response=False):
        ''' Send a command to MPV

            Parameters
            =========
            a_command
                The command to send.
            return_response
                if True, return a string, otherwise
                return a boolean

            Returns
            =======
            If return_response is False (default), returns
            True, if the operation was a success or False
            if it failed.
            If return_response if True, return the response
            we get after issuing the command ('' if failed).
''' #while True: # sock = self._connect_to_socket(self.mpvsocket) # if sock: # break # sleep(.25) sock = self._connect_to_socket(self.mpvsocket) if sock is None: if return_response: return '' else: return False # Send data try: if platform.startswith('win'): if a_command in self.commands.keys(): win32file.WriteFile(sock, self.commands[a_command]) else: win32file.WriteFile(sock, a_command) else: if a_command in self.commands.keys(): sock.sendall(self.commands[a_command]) else: sock.sendall(a_command) except: self._close_pipe(sock) if return_response: return '' else: return False # read the response if platform.startswith('win'): try: data = win32file.ReadFile(sock, 64*1024) except pywintypes.error as e: data = b'' else: try: if version_info < (3, 0): data = sock.recv(4096) else: data = sock.recvmsg(4096) except sock.error as e: data = '' # logger.error('DE data = {}'.format(data)) #sock.colse() #return False # logger.error('DE data = "{}"'.format(data)) self._close_pipe(sock) if return_response: return data else: return True def _display_mpv_volume_value(self): ''' Display volume for MPV Currently working with python 2 and 3 Eventually will be used for python 2 only Python 2 cannot correctly read icy-title from the socket (unicode issue), so it has to read it from stdout. 
        '''
        #if version_info > (3, 0):
        #    return
        vol = 0
        # keep trying until the IPC socket accepts the connection
        while True:
            sock = self._connect_to_socket(self.mpvsocket)
            if sock:
                break
            sleep(.25)
        # Send data
        message = b'{ "command": ["get_property", "volume"] }\n'
        try:
            if platform.startswith('win'):
                win32file.WriteFile(sock, message)
            else:
                sock.sendall(message)
        except:
            self._close_pipe(sock)
            return
        # wait for response
        got_it = True
        while got_it:
            try:
                if platform.startswith('win'):
                    try:
                        data = win32file.ReadFile(sock, 64*1024)
                    except pywintypes.error as e:
                        data = b''
                else:
                    if version_info < (3, 0):
                        data = sock.recv(4096)
                    else:
                        data = sock.recvmsg(4096)
                # logger.error('DE Received: "{!r}"'.format(a_data))
                a_data = self._fix_returned_data(data)
                if a_data == b'':
                    break
                if data:
                    # scan every JSON line until a numeric volume shows up
                    all_data = a_data.split(b'\n')
                    for n in all_data:
                        try:
                            d = json.loads(n)
                            if d['error'] == 'success':
                                try:
                                    vol = int(d['data'])
                                    got_it = False
                                    break
                                except:
                                    pass
                        except:
                            pass
            finally:
                pass
        self._close_pipe(sock)
        # rebuild the status line: "[Vol: x%] <title or url>"
        if self.oldUserInput['Title']:
            info_string = self._format_title_string(self.oldUserInput['Title'])
        else:
            info_string = self._format_title_string(self.oldUserInput['Input'])
        string_to_show = self._format_volume_string('Volume: ' + str(vol) + '%') + info_string
        self.outputStream.write(msg=string_to_show, counter='')
        self.threadUpdateTitle()
        self.volume = vol


class MpPlayer(Player):
    '''Implementation of Player object for MPlayer'''

    PLAYER_NAME = 'mplayer'
    PLAYER_CMD = 'mplayer'
    WIN = False
    if platform.startswith('win'):
        WIN = True
    if WIN:
        PLAYER_CMD = find_mplayer_on_windows()
    NEW_PROFILE_STRING = 'softvol=1\nsoftvol-max=300\nvolstep=1\nvolume=50\n\n'
    if pywhich(PLAYER_CMD):
        executable_found = True
    else:
        executable_found = False

    if executable_found:
        ''' items of this tuple are considered icy-title
            and get displayed after first icy-title is received '''
        icy_tokens = ('ICY Info:', )

        # 'audio-data' comes from playback start
        # map of stdout prefixes to icy metadata keys
        icy_audio_tokens = {
            'Name : ': 'icy-name',
            'Genre : ': 'icy-genre',
            'Website: ': 'icy-url',
            'Bitrate: ': 'icy-br',
            'Opening audio decoder: ': 'codec',
        }

        ''' USE_PROFILE
            -1 : not checked yet
             0 : do not use
             1 : use profile
        '''
        USE_PROFILE = -1

        ''' True if profile comes from ~/.mplayer/config '''
        PROFILE_FROM_USER = False

        ''' String to denote volume change '''
        volume_string = 'Volume: '

    def __init__(self, config, outputStream, playback_timeout_counter,
                 playback_timeout_handler, info_display_handler):
        config.PLAYER_NAME = 'mplayer'
        super(MpPlayer, self).__init__(
            config,
            outputStream,
            playback_timeout_counter,
            playback_timeout_handler,
            info_display_handler
        )
        self.config_files = self.all_config_files['mplayer']

    def save_volume(self):
        if platform.startswith('win'):
            return self._do_save_volume('volume={}\r\n')
            # NOTE(review): unreachable — dead code after return
            return 0
        return self._do_save_volume(self.profile_token + '\nvolstep=1\nvolume={}\n')

    def _configHasProfile(self):
        ''' Checks if mplayer config has [pyradio] entry / profile.

            Profile example:

            [pyradio]
            volstep=2
            volume=28
        '''
        self.PROFILE_FROM_USER = False
        #if platform.startswith('win'):
        #    ''' Existing mplayer Windows implementations
        #        do not support profiles
        #    '''
        #    return 0
        for i, config_file in enumerate(self.config_files):
            if os.path.exists(config_file):
                with open(config_file) as f:
                    config_string = f.read()
                if self.profile_token in config_string:
                    # index 0 is the user's own config file
                    if i == 0:
                        self.PROFILE_FROM_USER = True
                    return 1
        ''' profile not found in config
            create a default profile '''
        try:
            with open(self.config_files[0], 'a') as f:
                f.write('\n[{}]\n'.format(self.profile_name))
                f.write(self.NEW_PROFILE_STRING)
            self.PROFILE_FROM_USER = True
            return 1
        except:
            return 0

    def _buildStartOpts(self, streamUrl, playList=False):
        ''' Builds the options to pass to mplayer subprocess.'''
        opts = [self.PLAYER_CMD, '-vo', 'null', '-quiet']

        ''' this will set the profile too '''
        params = []
        if self._cnf.command_line_params:
            params = self._cnf.command_line_params.split(' ')

        ''' Do I have user profile in config?
            If so, can I use it?
        '''
        if self.USE_PROFILE == -1:
            self.USE_PROFILE = self._configHasProfile()

        if self.USE_PROFILE == 1:
            opts.append('-profile')
            opts.append(self.profile_name)
            if (logger.isEnabledFor(logging.INFO)):
                logger.info('Using profile: "[{}]"'.format(self.profile_name))
        else:
            if (logger.isEnabledFor(logging.INFO)):
                if self.USE_PROFILE == 0:
                    logger.info('Profile "[{}]" not found in config file!!!'.format(self.profile_name))
                else:
                    logger.info('No usable profile found')

        if playList:
            opts.append('-playlist')
        opts.append(self._url_to_use(streamUrl))

        ''' add command line parameters '''
        if params:
            for a_param in params:
                opts.append(a_param)
        return opts

    def _mute(self):
        ''' mute mplayer '''
        self._sendCommand('m')

    def pause(self):
        ''' pause streaming (if possible) '''
        self._sendCommand('p')

    def _stop(self):
        ''' kill mplayer instance '''
        self.stop_mpv_status_update_thread = True
        self._sendCommand('q')
        self._icy_data = {}

    def _volume_up(self):
        ''' increase mplayer's volume '''
        self._sendCommand('*')

    def _volume_down(self):
        ''' decrease mplayer's volume '''
        self._sendCommand('/')

    def _format_title_string(self, title_string):
        ''' format mplayer's title '''
        if "StreamTitle='" in title_string:
            tmp = title_string[title_string.find("StreamTitle='"):].replace("StreamTitle='", self.icy_title_prefix)
            ret_string = tmp[:tmp.find("';")]
        else:
            ret_string = title_string
        if '"artist":"' in ret_string:
            ''' work on format:
                ICY Info: START_SONG='{"artist":"Clelia Cafiero","title":"M.
Mussorgsky-Quadri di un'esposizione"}';
                Fund on "ClassicaViva Web Radio: Classical"
            '''
            ret_string = self.icy_title_prefix + ret_string[ret_string.find('"artist":')+10:].replace('","title":"', ' - ').replace('"}\';', '')
        return self._title_string_format_text_tag(ret_string)

    def _format_volume_string(self, volume_string):
        ''' format mplayer's volume '''
        return '[' + volume_string[volume_string.find(self.volume_string):].replace(' %','%').replace('ume', '')+'] '


class VlcPlayer(Player):
    '''Implementation of Player for VLC'''

    PLAYER_NAME = "vlc"
    WIN = False
    if platform.startswith('win'):
        WIN = True
    if WIN:
        # TODO: search and finde vlc.exe
        # PLAYER_CMD = "C:\\Program Files\\VideoLAN\\VLC\\vlc.exe"
        PLAYER_CMD = find_vlc_on_windows()
        if PLAYER_CMD:
            executable_found = True
        else:
            executable_found = False
    else:
        PLAYER_CMD = "cvlc"
        if pywhich(PLAYER_CMD):
            executable_found = True
        else:
            executable_found = False

    if executable_found:
        ''' items of this tuple are considered icy-title
            and get displayed after first icy-title is received '''
        icy_tokens = ('New Icy-Title=', )

        # map of vlc log prefixes to icy metadata keys
        icy_audio_tokens = {
            'Icy-Name:': 'icy-name',
            'Icy-Genre:': 'icy-genre',
            'icy-name:': 'icy-name',
            'icy-genre:': 'icy-genre',
            'icy-url:': 'icy-url',
            'icy-br:': 'icy-br',
            'format:': 'audio_format',
            'using audio decoder module ': 'codec-name',
        }

        muted = False

        ''' String to denote volume change '''
        volume_string = '( audio volume: '

        ''' vlc reports volume in values 0..512 '''
        actual_volume = -1
        max_volume = 512

        ''' When found in station transmission, playback is on '''
        if platform.startswith('win'):
            _playback_token_tuple = (
                # ' successfully opened',
                # 'main audio ',
                # 'Content-Type: audio',
                ' Segment #',
                'using audio decoder module'
            )
        else:
            _playback_token_tuple = (
                # 'Content-Type: audio',
                ' Segment #',
                'using audio filter module',
                'using audio decoder module'
            )

        ''' Windows only variables '''
        _vlc_stdout_log_file = ''
        _port = None
        win_show_vlc_volume_function = None

    def __init__(self, config, outputStream, playback_timeout_counter,
                 playback_timeout_handler, info_display_handler):
        config.PLAYER_NAME = 'vlc'
        super(VlcPlayer, self).__init__(
            config,
            outputStream,
            playback_timeout_counter,
            playback_timeout_handler,
            info_display_handler
        )
        # self.config_files = self.all_config_files['vlc']

    def save_volume(self):
        # vlc volume is not persisted
        pass

    def _buildStartOpts(self, streamUrl, playList=False):
        ''' Builds the options to pass to vlc subprocess.'''
        #opts = [self.PLAYER_CMD, "-Irc", "--quiet", streamUrl]
        if self.WIN:
            ''' Get a random port (44000-44999)
                Create a log file for vlc and make sure it is unique
                and it is created beforehand
            '''
            random.seed()
            ok_to_go_on = False
            while True:
                logger.error('DE getting port for {}'.format(self.config_dir))
                self._port = random.randint(44000, 44999)
                self._vlc_stdout_log_file = os.path.join(self.config_dir, 'vlc_log.' + str(self._port))
                if os.path.exists(self._vlc_stdout_log_file):
                    ''' another instance running? '''
                    logger.error('DE file exists: "{}"'.format(self._vlc_stdout_log_file))
                    continue
                try:
                    # create the log file up front so vlc can append to it
                    with open(self._vlc_stdout_log_file, 'w') as f:
                        ok_to_go_on = True
                except:
                    logger.error('DE file not opened: "{}"'.format(self._vlc_stdout_log_file))
                    continue
                if ok_to_go_on:
                    break
            opts = [self.PLAYER_CMD, '-Irc', '--rc-host', '127.0.0.1:' + str(self._port),
                    '--file-logging', '--logmode', 'text', '--log-verbose', '4',
                    '--logfile', self._vlc_stdout_log_file, '-vv',
                    self._url_to_use(streamUrl)]
            if logger.isEnabledFor(logging.INFO):
                logger.info('vlc listening on 127.0.0.1:{}'.format(self._port))
                logger.info('vlc log file: "{}"'.format(self._vlc_stdout_log_file))
        else:
            opts = [self.PLAYER_CMD, '-Irc', '-vv', self._url_to_use(streamUrl)]

        ''' take care of command line parameters '''
        params = []
        if self._cnf.command_line_params:
            params = self._cnf.command_line_params.split(' ')

        ''' add command line parameters '''
        if params:
            for a_param in params:
                opts.append(a_param)
        return opts

    def _mute(self):
        ''' mute vlc '''
        logger.error('DE vlc_mute(): muted = {}'.format(self.muted))
        if
self.muted:
            # unmute: restore the volume captured when muting
            if self.WIN:
                self._win_set_volume(self._unmuted_volume)
            else:
                self._sendCommand('volume {}\n'.format(self.actual_volume))
                if logger.isEnabledFor(logging.DEBUG):
                    logger.debug('VLC unmuted: {0} ({1}%)'.format(self.actual_volume, int(100 * self.actual_volume / self.max_volume)))
            self.muted = False
        else:
            # mute: remember the current volume first
            if self.actual_volume == -1:
                self._get_volume()
            if self.WIN:
                self._win_mute()
            else:
                self._sendCommand('volume 0\n')
            self.muted = True
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('VLC muted: 0 (0%)')

    def pause(self):
        ''' pause streaming (if possible) '''
        if self.WIN:
            self._win_pause()
        else:
            self._sendCommand('stop\n')

    def _stop(self):
        ''' kill vlc instance '''
        self.stop_win_vlc_status_update_thread = True
        if logger.isEnabledFor(logging.INFO):
            logger.info('setting self.stop_win_vlc_status_update_thread = True')
        if self.ctrl_c_pressed:
            return
        if self.WIN:
            if self.process:
                logger.error('>>>> Terminating process')
                self._req('quit')
                # log file removal may block until vlc releases it
                threading.Thread(target=self._remove_vlc_stdout_log_file, args=()).start()
        else:
            self._sendCommand('shutdown\n')
        self._icy_data = {}

    def _remove_vlc_stdout_log_file(self):
        # retry until the (windows) log file can actually be deleted
        file_to_remove = self._vlc_stdout_log_file
        if file_to_remove:
            while os.path.exists(file_to_remove):
                try:
                    os.remove(file_to_remove)
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug('vlc log file removed: "' + file_to_remove + "'")
                except:
                    pass
                    # logger.error('DE Failed {}'.format(count))

    def _volume_up(self):
        ''' increase vlc's volume '''
        if self.WIN:
            self._win_volup()
            self._win_show_vlc_volume()
        else:
            self._sendCommand('volup\n')

    def _volume_down(self):
        ''' decrease vlc's volume '''
        if self.WIN:
            self._win_voldown()
            self._win_show_vlc_volume()
        else:
            self._sendCommand('voldown\n')

    def _format_volume_string(self, volume_string=None):
        ''' format vlc's volume '''
        if not self.WIN:
            # vlc prints the volume with a locale-dependent decimal separator
            dec_sep = '.' if '.' in volume_string else ','
            self.actual_volume = int(volume_string.split(self.volume_string)[1].split(dec_sep)[0].split()[0])
        return '[Vol: {}%] '.format(int(100 * self.actual_volume / self.max_volume))

    def _format_title_string(self, title_string):
        ''' format vlc's title '''
        sp = title_string.split(self.icy_tokens[0])
        if sp[0] == title_string:
            # token not present — leave the string as is
            ret_string = title_string
        else:
            ret_string = self.icy_title_prefix + sp[1]
        return self._title_string_format_text_tag(ret_string)

    def _is_accepted_input(self, input_string):
        ''' vlc input filtering '''
        ret = False
        if self.WIN:
            ''' adding _playback_token_tuple contents here
                otherwise they may not be handled at all... '''
            accept_filter = (self.volume_string,
                             'error',
                             'debug: ',
                             'format: ',
                             'using: ',
                             'Content-Type',
                             'main audio',
                             'Segment #',
                             'icy-',
                             'Icy-'
                             )
        else:
            accept_filter = (self.volume_string,
                             'error',
                             'http stream debug: ',
                             'format: ',
                             ': using',
                             'icy-',
                             'Icy-',
                             )
        reject_filter = ()
        for n in accept_filter:
            if n in input_string:
                ret = True
                break
        if ret:
            for n in reject_filter:
                if n in input_string:
                    ret = False
                    break
        return ret

    def _get_volume(self):
        ''' get vlc's actual_volume'''
        # suppress volume display while the query round-trips
        self.show_volume = False
        if self.WIN:
            self._win_get_volume()
        else:
            self._sendCommand('volume\n')
        self.show_volume = True

    def _no_mute_on_stop_playback(self):
        ''' make sure vlc does not stop muted '''
        if self.ctrl_c_pressed:
            return
        if self.isPlaying():
            if self.actual_volume == -1:
                self._get_volume()
                if self.actual_volume == -1:
                    self.actual_volume = 0
            if self.actual_volume == 0:
                # raise to 25% so vlc does not persist a zero volume
                self.actual_volume = int(self.max_volume*0.25)
                if self.WIN:
                    self._win_set_volume(self.actual_volume)
                else:
                    self._sendCommand('volume {}\n'.format(self.actual_volume))
                if logger.isEnabledFor(logging.DEBUG):
                    logger.debug('Unmuting VLC on exit: {} (25%)'.format(self.actual_volume))
            elif self.muted:
                if self.actual_volume > 0:
                    if self.WIN:
                        self._win_set_volume(self.actual_volume)
                    else:
                        self._sendCommand('volume {}\n'.format(self.actual_volume))
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug('VLC volume restored on exit: {0} ({1}%)'.format(self.actual_volume, int(100 * self.actual_volume / self.max_volume)))
            self.show_volume = True

    ''' WINDOWS PART '''

    def _req(self, msg, ret_function=None, full=True):
        # talk to vlc's rc interface over TCP (Windows);
        # full=True waits for a multi-line response
        response = ''
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                # Connect to server and send data
                sock.settimeout(0.7)
                sock.connect(('127.0.0.1', self._port))
                response = ''
                received = ''
                sock.sendall(bytes(msg + '\n', 'utf-8'))
                if msg != 'quit':
                    try:
                        while (True):
                            received = (sock.recv(4096)).decode()
                            response = response + received
                            if full:
                                if response.count('\r\n') > 1:
                                    sock.close()
                                    break
                            else:
                                if response.count('\r\n') > 0:
                                    sock.close()
                                    break
                    except:
                        response = response + received
                        sock.close()
        except:
            pass
        if msg == 'quit':
            self.process.terminate()
            self.process = None
        if ret_function:
            ret_function(response)
        return response

    def _thrededreq(self, msg, ret_function=None):
        # fire-and-forget wrapper around _req
        threading.Thread(target=self._req, args=(msg,ret_function)).start()

    def _win_show_vlc_volume(self):
        #if self.win_show_vlc_volume_function:
        self._win_get_volume()
        pvol = int((self.actual_volume + 1) / self.max_volume * 100 * 2)
        if pvol > 0:
            avol = '[Vol: {}%] '.format(pvol)
            if self.show_volume and self.oldUserInput['Title']:
                self.outputStream.write(msg=avol + self.oldUserInput['Title'], counter='')
                self.threadUpdateTitle()

    def _win_get_volume(self):
        self._thrededreq('volume', self._get_volume_response)

    def _get_volume_response(self, msg):
        # parse "volume: N" out of the rc interface response
        parts = msg.split('\r\n')
        for n in parts:
            if 'volume' in n:
                vol = n.split(': ')[-1].replace(' )', '')
                # strip any decimal part, whatever the separator
                for n in ('.', ','):
                    ind = vol.find(n)
                    if ind > -1:
                        vol = vol[:ind]
                        break
                try:
                    self.actual_volume = int(vol)
                except ValueError:
                    logger.error('DE _get_volume_response: ValueError: vol = {}'.format(vol))
                    return
                logger.error('DE _get_volume_response: vol = {}'.format(vol))
                break
        if self.actual_volume == 0:
            self.muted = True
        else:
            self.muted = False
        #self.print_response(vol)

    def
_win_volup(self): self._thrededreq('volup 1') def _win_voldown(self): self._thrededreq('voldown 1') def _win_set_volume(self, vol): ivol = int(vol) self._thrededreq('volume ' + str(ivol)) self.actual_volume = ivol def _win_mute(self): self._win_get_volume() self._unmuted_volume = self.actual_volume self._thrededreq('volume 0') self.actual_volume = 0 self.muted = True def _win_pause(self): self._thrededreq('pause') def _win_is_playing(self): self._thrededreq('is_playing', self._win_get_playing_state) def _win_get_playing_state(self, msg): parts = msg.split('\r\n') rep = False for n in parts: if n == '1' or 'play state:' in n: rep = True break #self.print_response(rep) def probePlayer(requested_player=''): ''' Probes the multimedia players which are available on the host system. ''' ret_player = None if logger.isEnabledFor(logging.INFO): logger.info('Probing available multimedia players...') implementedPlayers = Player.__subclasses__() if logger.isEnabledFor(logging.INFO): logger.info('Implemented players: ' + ', '.join([player.PLAYER_NAME for player in implementedPlayers])) if requested_player: req = requested_player.split(',') for r_player in req: if r_player == 'cvlc': r_player = 'vlc' for player in implementedPlayers: if player.PLAYER_NAME == r_player: ret_player = check_player(player) if ret_player is not None: return ret_player if ret_player is None: if logger.isEnabledFor(logging.INFO): logger.info('Requested player "{}" not supported'.format(r_player)) else: for player in implementedPlayers: ret_player = check_player(player) if ret_player is not None: break return ret_player def check_player(a_player): try: p = subprocess.Popen([a_player.PLAYER_CMD, '--help'], stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=False) p.terminate() if logger.isEnabledFor(logging.INFO): logger.info('{} supported.'.format(str(a_player))) return a_player except OSError: if logger.isEnabledFor(logging.DEBUG): logger.debug('{} not supported.'.format(str(a_player))) return None
bonjournal.py
# bonjournal.py — a small terminal journaling app.
#
# Journals are directories under ./logs/, registered in logs/index.bji as
# "name|bg|fg" lines (bg/fg are indices into the ANSI color tables below).
# Each journal holds numbered entry files (0.bj, 1.bj, ...) plus a keys.bjk
# file whose lines are "entry_index|keyword|keyword|..." used for search.
#
# NOTE(review): this script is Python 2 only — it uses raw_input() and
# relies on py2 filter() returning a string (see readJournal).  Porting to
# Python 3 would need input() and a different digit-extraction approach.
import sys
import os
from datetime import date
from datetime import datetime
import glob
import shutil
from threading import Thread

# ANSI escape sequences for the 8 standard foreground/background colors.
fg_colors = ["\033[30m","\033[31m","\033[32m","\033[33m","\033[34m","\033[35m","\033[36m","\033[37m"]
fg_color = fg_colors[7];
bg_colors = ["\033[40m","\033[41m","\033[42m","\033[43m","\033[44m","\033[45m","\033[46m","\033[47m"]
bg_color = bg_colors[0];

#global syntax
# Prompt prefixes used throughout the UI.
prompt = "$ "
response = "> "
key_input = "< "

#get text editor
# argv[1] optionally overrides the external editor used for new entries.
if len(sys.argv) < 2:
    text_editor="notepad"
else:
    text_editor=sys.argv[1]

#get file explorer
# argv[2] optionally overrides the file browser used by the "show" command.
if len(sys.argv) < 3:
    file_browser="explorer"
else:
    file_browser=sys.argv[2]

#get clear command
# "explorer" is used as a proxy for "running on Windows".
if file_browser=="explorer":
    clear_command="cls"
else:
    clear_command="clear"

#set up log dir
log_path = os.path.dirname(os.path.abspath(__file__))+"/logs"
if not os.path.exists(log_path):
    os.mkdir(log_path)

#set up journal index
index_path = log_path+"/index.bji"
if not os.path.exists(index_path):
    open(index_path,'a').close()

#prime screen
os.system(clear_command)

#print header
print(fg_color+"welcome to BonJournal")
print(response+"press enter for a list of commands.")


def showHelp():
    # Print the list of available commands.
    print(response+"exit - exits BonJournal")
    print(response+"clear - clears the screen")
    print(response+"list - print a list of your journals")
    print(response+"create - create a new journal")
    print(response+"destroy - destroy an existing journal")
    print(response+"show - show journals in explorer")
    print(response+"write - write a new journal entry")
    print(response+"read - read latest entries from journal")
    print(response+"key - search for keyword within a journal")


def listJournals():
    # Print every journal from the index in its own colors, with its entry
    # count (directory file count minus one for keys.bjk).
    with open(index_path, 'r') as f:
        journals = f.read().split('\n');
    for journ in journals:
        parts = journ.split('|')
        name = parts[0];
        if name != "":
            bg = int(parts[1])
            fg = int(parts[2])
            num_files = len(os.listdir(log_path+'/'+name))
            # Subtract one for the keys.bjk bookkeeping file.
            if num_files>0:
                num_files -= 1
            print(bg_color+fg_color+response+bg_colors[bg]+fg_colors[fg]+name+bg_color+fg_color+" ("+str(num_files)+")");
    print(bg_color+fg_color+response+"-end of list-")


def createJournal():
    # Interactively create a journal: pick a unique name and its colors,
    # append it to the index, and create its directory.
    name_valid=False
    while not name_valid:
        print(response+"Enter \033[1mname\033[22m of new journal")
        name = raw_input(prompt)
        if not os.path.exists(log_path+'/'+name):
            name_valid=True
        else:
            print(response+bg_colors[1]+fg_colors[7]+"err:"+bg_color+fg_color+" Journal "+name+" already exists.")
    print(response+"Select the \033[1mbackground color\033[22m for your new journal");
    for col in range(len(bg_colors)):
        print(bg_colors[col]+response+str(col)+bg_color)
    # NOTE(review): int(raw_input(...)) raises ValueError on non-numeric
    # input and the index is not range-checked — TODO confirm intended.
    colorB = int(raw_input(bg_color+key_input))
    print(response+"Select the \033[1mforeground color\033[22m")
    for col in range(len(fg_colors)):
        print(fg_colors[col]+response+str(col))
    colorF = int(raw_input(fg_color+key_input))
    with open(index_path, 'a') as f:
        f.write(name+"|"+str(colorB)+"|"+str(colorF)+'\n')
    os.mkdir(log_path+'/'+name)
    print(response+"Journal "+bg_colors[colorB]+fg_colors[colorF]+name+fg_color+bg_color+" has been created.")


def destroyJournal(name):
    # Delete a journal directory and drop its line from the index.
    journPath = log_path+"/"+name
    if os.path.exists(journPath):
        shutil.rmtree(journPath);
    # Rebuild the index without the destroyed journal's entry.
    index=""
    found=False
    with open(index_path, 'r') as f:
        journs = f.readlines();
    for line in journs:
        if line.split('|')[0] != name:
            index+=line
        else:
            found=True
    if found:
        os.remove(index_path)
        with open(index_path, 'w') as f:
            f.write(index)
        print(response+bg_colors[2]+fg_colors[7]+"success:"+bg_color+fg_color+" Journal "+name+" has been destroyed")
    else:
        print(response+bg_colors[1]+fg_colors[7]+"err:"+bg_color+fg_color+" Journal "+name+" could not be found")


def show():
    # Open the logs directory in the configured file browser
    # (backslash paths for Windows explorer).
    if file_browser == "explorer":
        os.system(file_browser + " "+log_path.replace('/','\\'))
    else:
        os.system(file_browser + " "+log_path)


def getColors(name):
    # Return "bg|fg" color indices for a journal from the index,
    # defaulting to "7|0" when the journal is not found.
    with open(index_path, 'r') as f:
        journs = f.readlines()
    for journ in journs:
        parts = journ.split('|')
        if parts[0]==name:
            return (parts[1]+'|'+parts[2])
    return "7|0"


def writeJournal(name):
    # Create the next numbered entry file, open it in the external editor
    # (on a background thread so we can keep prompting), and collect search
    # keywords for keys.bjk.
    journPath = log_path+"/"+name
    if not os.path.exists(journPath):
        print(response+bg_colors[1]+fg_colors[7]+"err:"+bg_color+fg_color+" Journal "+name+" could not be found")
        return
    #determine the next filename
    key_path = journPath+"/keys.bjk"
    if not os.path.exists(key_path):
        open(key_path, 'w').close()
    file_index = "0"
    with open(key_path, 'r') as f:
        lines = f.readlines()
        # Next index = last recorded entry index + 1.
        if len(lines) > 0:
            file_index=str(int(lines[len(lines)-1].split('|')[0])+1)
    #open the file
    file_path = journPath+"/"+file_index+".bj"
    open(file_path, 'w').close()
    # Launch the editor on a thread; os.system blocks until the editor exits.
    t=Thread(target = lambda: os.system(text_editor + " " + file_path))
    t.start()
    #collect keywords
    colors = getColors(name).split('|')
    print(response+"turning a new leaf in "+bg_colors[int(colors[0])]+fg_colors[int(colors[1])]+name+bg_color+fg_color)
    for i in range(3):
        print(response+".")
    print(response+"enter \033[1msearch keys\033[22m for entry followed by \033[1mdone\033[22m")
    key="foo"
    line=file_index
    # Accumulate "index|key|key|..." until the user types "done" or enter.
    while key != "done" and key != "":
        key=raw_input(key_input)
        if key != "done" and key != "":
            line += ("|"+key)
    with open(key_path, 'a') as f:
        f.write(line+'\n')
    print(response+"Ok. Search keys have been saved...")


def readJournal(name):
    # Display the latest entry of a journal and let the user page through
    # older/newer entries with j/k, leaving with l or enter.
    journPath = log_path+"/"+name
    if not os.path.exists(journPath):
        print(response+bg_colors[1]+fg_colors[7]+"err:"+bg_color+fg_color+" Journal "+name+" could not be found")
        return
    #get list of all .bj's in the journPath
    # Sort numerically by the digits in each filename.
    # NOTE(review): int(filter(str.isdigit, name)) only works on Python 2,
    # where filter() returns a str; on Python 3 this raises TypeError.
    files=sorted(glob.glob(journPath+"/*.bj"),key=lambda name: int(filter(str.isdigit,name)))
    for file in files:
        file = file.replace('\\','/')
        #print(file)
    colors = getColors(name).split('|')
    print(response+"Opening "+bg_colors[int(colors[0])]+fg_colors[int(colors[1])]+name+bg_color+fg_color+" to latest entry");
    index=len(files)-1
    if(index<0):
        print(response+"Nothing here... Write some journals first, and then come back to read them")
        return
    dispJournal(files[index],colors,None)
    cmd = "foo"
    while cmd != "l" and cmd != "":
        # Build the navigation hint based on position in the list.
        if index==0:
            prev=""
        else:
            prev="J for previous"
        if index==len(files)-1:
            nxt=""
        else:
            nxt="K for next"
        print(response+bg_colors[7]+fg_colors[4]+prev+", "+nxt+", L to leave"+fg_color+bg_color);
        # NOTE(review): only lowercase j/k/l are accepted despite the
        # uppercase hints — confirm whether case-insensitivity was intended.
        cmd = raw_input(key_input)
        if cmd == "j":
            index-=1;
            if(index<0):
                index=0;
                print(response+"No older entries");
                continue
            dispJournal(files[index],colors,None)
        elif cmd == "k":
            index += 1;
            if index >= len(files):
                index = len(files)-1;
                print(response+"No newer entries");
                continue
            dispJournal(files[index],colors,None)


def dispJournal(path,colors,key):
    # Clear the screen and print one entry in the journal's colors,
    # prefixed with the file's modification date.  When `key` is given,
    # occurrences are highlighted by swapping to the inverse color pair.
    os.system(clear_command)
    with open(path, 'r') as f:
        datestr = datetime.fromtimestamp(os.path.getmtime(path)).strftime('%m/%d/%Y %H:%M');
        date = "Date: "+datestr+"\n";
        copy = f.read();
        if key != None:
            copy = copy.replace(key,bg_colors[7-int(colors[0])]+fg_colors[7-int(colors[1])]+key+bg_colors[int(colors[0])]+fg_colors[int(colors[1])])
            copy = copy.replace(key.lower(),bg_colors[7-int(colors[0])]+fg_colors[7-int(colors[1])]+key+bg_colors[int(colors[0])]+fg_colors[int(colors[1])])
        print(bg_colors[int(colors[0])]+fg_colors[int(colors[1])]+date+copy+fg_color+bg_color)


def keySearch(keyword, name):
    # Find all entries of a journal tagged with `keyword` (case-insensitive
    # match against keys.bjk), then let the user page through the matches
    # with j/k just like readJournal.
    journPath = log_path+"/"+name
    if not os.path.exists(journPath):
        print(response+bg_colors[1]+fg_colors[7]+"err:"+bg_color+fg_color+" Journal "+name+" could not be found")
        return
    #determine the next filename
    key_path = journPath+"/keys.bjk"
    if not os.path.exists(key_path):
        open(key_path, 'w').close()
    file_results = []
    with open(key_path, 'r') as f:
        lines = f.readlines()
    for line in lines:
        parts = line.split('|')
        for part in parts:
            if part.lower().rstrip() == keyword.lower():
                # parts[0] is the entry index that owns these keywords.
                file_results.append(journPath+'/'+parts[0]+'.bj')
    print(response+str(len(file_results))+" result(s) found");
    for res in file_results:
        print(res)
    if len(file_results) == 0:
        return
    #let user cycle through results
    colors = getColors(name).split('|')
    index = len(file_results)-1
    dispJournal(file_results[index],colors,keyword)
    cmd = "foo"
    while cmd != "l" and cmd != "":
        if index==0:
            prev=""
        else:
            prev="J for previous"
        if index==len(file_results)-1:
            nxt=""
        else:
            nxt="K for next"
        print(response+bg_colors[7]+fg_colors[4]+prev+", "+nxt+", L to leave"+fg_color+bg_color);
        cmd = raw_input(key_input)
        if cmd == "j":
            index-=1;
            if(index<0):
                index=0;
                print(response+"No older entries");
                continue
            dispJournal(file_results[index],colors,keyword)
        elif cmd == "k":
            index += 1;
            if index >= len(file_results):
                index = len(file_results)-1;
                print(response+"No newer entries");
                continue
            dispJournal(file_results[index],colors,keyword)


#main
# Read-eval loop: dispatch commands until "close"/"exit".
function=""
while function != "close" and function != "exit":
    #input prompt
    function = raw_input(bg_color+fg_color+prompt)
    parts = function.split(" ")
    #determine command
    if function == "" or parts[0] == 'help' or parts[0] == '?':
        showHelp()
    elif function == "list":
        listJournals()
    elif function == "create":
        createJournal()
    elif parts[0] == "destroy":
        if len(parts) == 1 or parts[1]=="":
            print(response+bg_colors[1]+fg_colors[7]+"err:"+bg_color+fg_color+" usage $ destroy <journal_name>")
        else:
            destroyJournal(parts[1])
    elif function == "show":
        show();
    elif parts[0] == "write":
        if len(parts) == 1:
            print(response+bg_colors[1]+fg_colors[7]+"err:"+bg_color+fg_color+" usage $ write <journal_name>")
        else:
            writeJournal(parts[1])
    elif parts[0] == "read":
        if len(parts) == 1:
            print(response+bg_colors[1]+fg_colors[7]+"err:"+bg_color+fg_color+" usage $ read <journal_name>")
        else:
            readJournal(parts[1])
    elif function == "clear":
        os.system(clear_command)
    elif parts[0] == "key":
        if len(parts) < 3 or parts[1]=="" or parts[2]=="":
            print(response+bg_colors[1]+fg_colors[7]+"err:"+bg_color+fg_color+" usage $ key <keyword> <journal_name>")
        else:
            keySearch(parts[1],parts[2])

#revert to my default theme
#os.system('color 0a')
# Reset all ANSI attributes and clear before exiting.
print("\033[0m")
os.system(clear_command)
waitingbar.py
import sys
import threading
import time
from itertools import cycle


class WaitingBar(object):
    '''
    This class prints a fancy waiting bar with Greek chars and spins.
    It uses a thread to keep printing the bar while the main program runs

    Usage:
    THE_BAR = WaitingBar('Your Message Here')
    # Do something slow here
    (...)
    THE_BAR.stop()

    copyright phoemur - 2016
    '''
    def __init__(self, message='[*] Wait until loading is complete...'):
        '''
        Start animating `message` on stdout immediately.

        The animation runs on a background thread until stop() is called.
        Alphabetic characters of the message are cycled through a Greek
        look-alike table (on UTF-8 terminals) or case-swapped (elsewhere),
        with a spinner appended at the end of the line.
        '''
        # Leading space keeps the spinner clear of the terminal edge.
        self.MESSAGE = ' ' + str(message)
        # Spinner frames; each frame is doubled to slow the rotation.
        self.CYCLES = ['-', '-', '\\', '\\', '|', '|', '/', '/',
                       '-', '-', '\\', '\\', '|', '|', '/', '/']
        # Latin -> Greek look-alike substitution table (incl. accented chars).
        self.intab = u'abcdefghijklmnopqrstuvwxyzáàãâéèẽêíìîĩóòôõúùũûçABCDEFGHIJKLMNOPQRSTUVWXYZÁÀÃÂÉÈẼÊÍÌÎĨÓÒÔÕÚÙŨÛÇ'
        self.outab = u'αβ¢ΔεϝγηιφκλμνΩπσϼΣτυϞωχψζααααεεεειιιιΩΩΩΩυυυυ¢αβ¢ΔεϝγηιφκλμνΩπσϼΣτυϞωχψζααααεεεειιιιΩΩΩΩυυυυ¢'
        self.TABLE = {x: y for x, y in zip(self.intab, self.outab)}

        # Event used to signal the animation thread to exit.
        self.event = threading.Event()
        self.waiting_bar = threading.Thread(target=self.start,
                                            args=(self.event,))
        # Fix: run as a daemon so a bar that is never stop()ped cannot keep
        # the interpreter alive forever (start() loops until the event fires).
        self.waiting_bar.daemon = True
        self.waiting_bar.start()

    def start(self, e):
        '''
        Animation loop (thread target). Runs until event `e` is set.
        '''
        for index in cycle(range(len(self.MESSAGE))):
            if e.is_set():
                break

            # Only alphabetic characters get animated.
            if not self.MESSAGE[index].isalpha():
                continue

            for c in self.CYCLES:
                buff = list(self.MESSAGE)
                buff.append(c)

                try:
                    # Fix: sys.stdout.encoding can be None (e.g. piped
                    # output); guard before upper() to avoid AttributeError
                    # killing the animation thread.
                    if (sys.stdout.encoding or '').upper() == 'UTF-8':
                        buff[index] = self.TABLE[buff[index]]
                    else:
                        buff[index] = buff[index].swapcase()
                except KeyError:
                    # Character not in the substitution table; print as-is.
                    pass

                sys.stdout.write(''.join(buff))
                time.sleep(0.05)
                sys.stdout.write('\r')
                sys.stdout.flush()

    def stop(self):
        '''
        Stop the animation, wait for the thread to finish, and print the
        message one last time (trailing spaces erase the spinner frame).
        '''
        self.event.set()
        self.waiting_bar.join()
        sys.stdout.write(self.MESSAGE + '  \n')


if __name__ == '__main__':
    '''
    A simple example to demonstrate the class in action
    '''
    # Start the bar
    THE_BAR = WaitingBar('[*] Calculating useless stuff...')

    # Do something slow
    import math
    from pprint import pprint
    a_list = {a: b for a, b in zip(range(1, 41),
                                   map(math.factorial, range(1, 41)))}
    time.sleep(20)

    # Stop the bar and print result
    THE_BAR.stop()
    pprint(a_list)
Main.py
'''
Main entry function for the overall python based server.
This will load in individual pipe sub-servers and run their threads.

Initial version just runs a test server.
'''
'''
Note on security:
Loading arbitrary python code can be unsafe. As a light protection, the
pipe server will only load modules that are part of extensions that have
been given explicit permission to run python.

Permissions will be held in a json file, holding the extension id (from
content.xml) and its permission state (generally true). A special
exception will be made for the modding api's id, so it can load without
permission set up.

The permission file will be generated if it doesn't already exist, but
otherwise is left untouched to avoid overwriting user settings.

The general idea is that, if some random extension added a python plugin
to be loaded which may be unsafe, by default it will be rejected until
the user of that extension gives it explicit permission.

TODO:
change permissions to be folder name based instead of id based.

TODO:
maybe use multiprocessing instead of threading.

TODO:
think of a safe, scalable way to handle restarting threads, particularly
subthreads that a user server thread may have started, which might get
orphaned when that thread function exceptions out on pipe closure.
(Currently pipe servers are responsible for restarting their own
subthreads.)

TODO:
rethink server restart behavior; perhaps they should not auto-restart,
but instead be fully killed when the x4 pipe closes, and then only
restarted when x4 MD api requests the restart.
In this way, mods can change their python side code on the fly, reload
their save, and the new code would get loaded.
(The md api would need to re-announce servers whenever the game or ui
reloads, as well as whenever the server resets.)
(Perhaps this is less robust in some way?)
(Manual effort needed to clean out the imported packages, similar to
what is done in some gui code for rerunning scripts.)
Overall, it is probably reasonably easy for developers to just shut down
this host server and restart it, if they want to update their server
code; x4 side should automatically reconnect.

temp copy of test args:
-t -x "C:\Steam\steamapps\common\X4 Foundations" -m "extensions\sn_measure_perf\python\Measure_Perf.py"
'''

# Manually list the version for now, since packed exe won't have
# access to the change_log.
version = '1.2'

# Setup include path to this package.
import sys
import json
from pathlib import Path
from collections import defaultdict
import argparse
import time
# NOTE(review): defaultdict, time, win32file and win32pipe are imported but
# not referenced in this module — presumably kept for side effects or
# historical reasons; confirm before removing.

# To support packages cross-referencing each other, set up this
# top level as a package, findable on the sys path.
# Extra 'frozen' stuff is to support pyinstaller generated exes.
# Note:
# Running from python, home_path is X4_Projects (or whatever the parent
# folder to this package is.
# Running from exe, home_path is the folder with the exe itself.
# In either case, main_path will be to Main.py or the exe.
if getattr(sys, 'frozen', False):
    # Note: _MEIPASS gets the directory the packed exe unpacked into,
    # eg. in appdata/temp. Need 'executable' for the original exe path.
    home_path = Path(sys.executable).parent
    main_path = home_path
else:
    home_path = Path(__file__).resolve().parents[1]
    main_path = Path(__file__).resolve().parent
if str(home_path) not in sys.path:
    sys.path.append(str(home_path))

#from X4_Python_Pipe_Server.Servers import Test1
#from X4_Python_Pipe_Server.Servers import Send_Keys
from X4_Python_Pipe_Server.Classes import Server_Thread
from X4_Python_Pipe_Server.Classes import Pipe_Server, Pipe_Client
from X4_Python_Pipe_Server.Classes import Client_Garbage_Collected
import win32api
import winerror
import win32file
import win32pipe
import threading
import traceback

# Note: in other projects importlib.machinery could be used directly,
# but appears to be failing when pyinstalling this package, so do
# a more directly import of machinery.
from importlib import machinery

# Flag to use during development, for extra exception throws.
developer = False

# Use a python test client, instead of needing x4 open.
# Note: putting breakpoints on tested modules requires opening them from
# their extension folder path, not their git repo path that was symlinked over.
test_python_client = 0

# Name of the host pipe.
pipe_name = 'x4_python_host'

# Loaded permissions from pipe_permissions.json.
permissions = None

# Permissions can be placed alongside the exe or Main.py.
# Or maybe in current working directory?
# Go with the exe/main directory.
permissions_path = main_path / 'permissions.json'


def Main():
    '''
    Launch the server. This generally does not return.

    Parses command line args, loads permissions, then loops: open the
    host pipe, wait for x4 (or the python test client) to connect, and
    service "ping"/"restart"/"package.path:"/"modules:" messages,
    spinning up a Server_Thread per permitted extension module.
    The loop restarts the pipe on client disconnect and exits on pipe
    creation failure or test completion.
    '''
    # Set up command line arguments.
    argparser = argparse.ArgumentParser(
        description = ('Host pipe server for X4 interprocess communication.'
                       ' This will launch extension python modules that are'
                       ' registered by the game through the pipe api.'),
        )

    argparser.add_argument(
        '-p', '--permissions-path',
        default = None,
        help = 'Optional path to a permissions.json file specifying which'
               ' extensions are allowed to load modules. If not given, the'
               ' main server directory is used.' )

    argparser.add_argument(
        '-t', '--test',
        action='store_true',
        help = 'Puts this server into test mode. Requires following args:'
               ' --x4-path, --test_module' )

    argparser.add_argument(
        '-x', '--x4-path',
        default = None,
        metavar = 'Path',
        help = 'Path to the X4 installation folder. Only needed in test mode.')

    argparser.add_argument(
        '-m', '--module',
        default = None,
        help = 'Path to a specific python module to run in test mode,'
               ' relative to the x4-path.' )

    #argparser.add_argument(
    #    '-v', '--verbose',
    #    action='store_true',
    #    help = 'Print extra messages.' )

    args = argparser.parse_args(sys.argv[1:])

    if args.permissions_path:
        global permissions_path
        permissions_path = Path.cwd() / (Path(args.permissions_path).resolve())
        # The directory should exist.
        if not permissions_path.parent.exists():
            print('Error: permissions_path directory not found')
            return

    # Check if running in test mode.
    if args.test:
        global test_python_client
        test_python_client = True

        if not args.x4_path:
            print('Error: x4_path required in test mode')
            return
        if not args.module:
            print('Error: module required in test mode')
            return

        # Make x4 path absolute.
        args.x4_path = Path.cwd() / (Path(args.x4_path).resolve())
        if not args.x4_path.exists():
            print('Error: x4_path invalid: {}'.format(args.x4_path))
            return
        # Keep module path relative.
        args.module = Path(args.module)
        module_path = args.x4_path / args.module
        if not module_path.exists():
            print('Error: module invalid: {}'.format(module_path))
            return

    # List of directly launched threads.
    threads = []
    # List of relative path strings received from x4, to python server
    # modules that have been loaded before.
    module_relpaths = []

    print('X4 Python Pipe Server v{}\n'.format(version))

    # Load permissions, if the permissions file found.
    Load_Permissions()

    # Put this into a loop, to keep rebooting the server when the
    # pipe gets disconnected (eg. x4 loaded a save).
    shutdown = False
    while not shutdown:

        # Start up the baseline control pipe, listening for particular errors.
        # TODO: maybe reuse Server_Thread somehow, though don't actually
        # want a separate thread for this.
        try:
            pipe = Pipe_Server(pipe_name)

            # For python testing, kick off a client thread.
            if test_python_client:
                # Set up the reader in another thread.
                reader_thread = threading.Thread(target = Pipe_Client_Test, args = (args,))
                reader_thread.start()

            # Wait for client.
            pipe.Connect()

            # Clear out any old x4 path; the game may have shut down and
            # relaunched from a different location.
            x4_path = None

            # Listen to runtime messages, announcing relative paths to
            # python modules to load from extensions.
            while 1:
                message = pipe.Read()
                print('Received: ' + message)

                # A ping will be sent first, testing the pipe from x4 side.
                if message == 'ping':
                    pass

                # Handle restart requests similar to pipe disconnect exceptions.
                elif message == 'restart':
                    # NOTE(review): Reset_Requested is not defined or imported
                    # in this module — as written this raises NameError, not
                    # the intended exception. Presumably it should come from
                    # X4_Python_Pipe_Server.Classes; verify.
                    raise Reset_Requested()

                elif message.startswith('package.path:'):
                    message = message.replace('package.path:','')
                    # Parse into the base x4 path.
                    # Example return:
                    # ".\?.lua;C:\Steam\steamapps\common\X4 Foundations\lua\?.lua;C:\Steam\steamapps\common\X4 Foundations\lua\?\init.lua;"
                    # Split and convert to proper Paths.
                    paths = [Path(x) for x in message.split(';')]
                    # Search for a wanted path.
                    x4_path = None
                    for path in paths:
                        # Different ways to possibly do this.
                        # This approach will iterate through parents to find the
                        # "lua" folder, then get its parent.
                        # (The folder should not be assumed to match the default
                        # x4 installation folder name, since a user may have
                        # changed it if running multiple installs.)
                        test_path = path
                        # Loop while more parents are present.
                        while test_path.parents:
                            # Check the parent.
                            test_path = test_path.parent
                            if test_path.stem == "lua":
                                x4_path = test_path.parent
                                break
                        # Stop looping once an x4_path found.
                        if x4_path:
                            break

                elif message.startswith('modules:'):
                    message = message.replace('modules:','')
                    # If no x4_path yet seen, ignore.
                    if not x4_path:
                        continue
                    # Break apart the modules. Semicolon separated, with an
                    # ending separator.
                    # This list will end with an empty entry, even if the message
                    # has no paths, so can throw away the last list item.
                    module_paths = [Path(x) for x in message.split(';')[:-1]]

                    # Handle each path.
                    for module_path in module_paths:

                        # If this module has already been processed, ignore it.
                        # This will happen when x4 reloads saves and such, and all
                        # md scripts re-announce their server files.
                        if module_path in module_relpaths:
                            print('Module was already loaded: {}'.format(module_path))
                            continue

                        # Put together the full path.
                        full_path = x4_path / module_path

                        # Check if this module is part of an extension
                        # that has permission to run, and skip if not.
                        if not Check_Permission(x4_path, module_path):
                            continue

                        # Record this path as seen.
                        module_relpaths.append(module_path)

                        # Import the module.
                        module = Import(full_path)

                        # Pull out the main() function.
                        main = getattr(module, 'main', None)

                        # Start the thread.
                        if main != None:
                            thread = Server_Thread(module.main, test = test_python_client)
                            threads.append(thread)
                        else:
                            print('Module lacks "main()": {}'.format(module_path))

        except (win32api.error, Client_Garbage_Collected) as ex:
            # win32api.error exceptions have the fields:
            # winerror : integer error code (eg. 109)
            # funcname : Name of function that errored, eg. 'ReadFile'
            # strerror : String description of error

            # If just in testing mode, assume the tests completed and
            # shut down.
            if test_python_client:
                print('Stopping test.')
                shutdown = True

            elif isinstance(ex, Client_Garbage_Collected):
                print('Pipe client garbage collected, restarting.')

            # If another host was already running, there will have been
            # an error when trying to set up the pipe.
            elif ex.funcname == 'CreateNamedPipe':
                print('Pipe creation error. Is another instance already running?')
                shutdown = True

            # If X4 was reloaded, this results in a ERROR_BROKEN_PIPE error
            # (assuming x4 lua was wrestled into closing its pipe properly
            # on garbage collection).
            # Update: as of x4 3.0 or so, garbage collection started crashing
            # the game, so this error is only expected when x4 shuts down
            # entirely.
            elif ex.winerror == winerror.ERROR_BROKEN_PIPE:
                # Keep running the server.
                print('Pipe client disconnected.')

            # This should now loop back and restart the pipe, if
            # shutdown wasn't set.
            if not shutdown:
                print('Restarting host.')

        except Exception as ex:
            # Any other exception, reraise for now.
            raise ex

        finally:
            # Close the pipe if open.
            # This will error if the exit condition was a CreateNamedPipe
            # error, so just wrap it for safety.
            try:
                pipe.Close()
            except Exception as ex:
                pass

    # Let subthreads keep running; they internally loop.
    #if threads:
    #    print('Shutting down subthreads.')
    ## Close all subthreads.
    #for thread in threads:
    #    thread.Close()
    ## Wait for closures to complete.
    #for thread in threads:
    #    thread.Join()

    #base_thread = Server_Thread(Control)
    # TODO: dynamically load in server modules from extensions.
    # Need to check which extensions are enabled/disabled, and determine
    # what the protocol will be for file naming.

    #-Removed; old test code for hardcoded server paths.
    ## Start all server threads.
    ## Just a test for now.
    #threads = [
    #    Server_Thread(Test1.main),
    #    Server_Thread(Send_Keys.main),
    #]
    ## Wait for them all to finish.
    #for thread in threads:
    #    thread.Join()
    return


def Import(full_path):
    '''
    Code for importing a module, broken out for convenience.

    Loads and executes the python file at `full_path` under a unique
    module name. Returns the module object, or None when loading failed
    (a summary is printed; full traceback and re-raise in developer mode).
    '''
    try:
        # Attempt to load/run the module.
        module = machinery.SourceFileLoader(
            # Provide the name sys will use for this module.
            # Use the basename to get rid of any path, and prefix
            # to ensure the name is unique (don't want to collide
            # with other loaded modules).
            'user_module_' + full_path.name.replace(' ','_'),
            # Just grab the name; it should be found on included paths.
            str(full_path)
            ).load_module()
        print('Imported {}'.format(full_path))
    except Exception as ex:
        module = None
        # Make a nice message, to prevent a full stack trace being
        # dropped on the user.
        print('Failed to import {}'.format(full_path))
        print('Exception of type "{}" encountered.\n'.format(
            type(ex).__name__))
        ex_text = str(ex)
        if ex_text:
            print(ex_text)
        # In dev mode, print the exception traceback.
        if developer:
            print(traceback.format_exc())
            # Raise it again, just in case vs can helpfully break
            # at the problem point. (This won't help with the gui up.)
            raise ex
        #else:
        #    Print('Enable developer mode for exception stack trace.')
    return module


def Load_Permissions():
    '''
    Loads the permissions json file, or creates one if needed.

    Populates the module-level `permissions` dict. When the file is
    missing or unreadable, a default file is written that permits only
    the mod support apis workshop id.
    '''
    global permissions
    if permissions_path.exists():
        try:
            with open(permissions_path, 'r') as file:
                permissions = json.load(file)
            print('Loaded permissions file at {}\n'.format(permissions_path))
        except Exception as ex:
            # NOTE(review): a corrupt permissions file is silently replaced
            # by the defaults below (user edits lost) — consider surfacing
            # the parse error detail to the user.
            print('Error when loading permissions file')

    # If nothing was loaded, write (or overwrite) the default permissions file.
    if permissions == None:
        permissions = {
            'instructions': 'Set which extensions are allowed to load modules,'
                            ' based on extension id (in content.xml).',
            # Workshop id of the mod support apis.
            'ws_2042901274' : True,
            }
        print('Generating default permissions file at {}\n'.format(permissions_path))
        with open(permissions_path, 'w') as file:
            json.dump(permissions, file, indent = 2)
    return


def Check_Permission(x4_path, module_path):
    '''
    Check if the module on the given path has permission to run.
    Return True if permitted, else False with a printed message.

    The module must live under "extensions/<ext_dir>/"; the extension id
    is scraped from the first id="..." in that extension's content.xml
    and looked up in the loaded permissions dict.
    '''
    try:
        # Find the extension's root folder.
        if not module_path.as_posix().startswith('extensions/'):
            raise Exception('Module is not in extensions')
        # The module_path should start with 'extensions', so find the
        # second folder.
        # (Note: pathlib is dump and doesn't allow negative indices on parents.)
        ext_dir = x4_path / [x for x in module_path.parents][-3]
        # Load the content.xml. Can do xml or raw text; text should
        # be good enough for now (avoid adding lxml to the exe).
        content_text = (ext_dir / 'content.xml').read_text()
        # The first id="..." should be the extension id.
        content_id = content_text.split('id="')[1].split('"')[0]
        # Check its permission.
        if permissions.get(content_id) == True:
            return True

        print('\n'.join([
            '',
            'Rejecting module due to missing permission:',
            '  content_id: {}'.format(content_id),
            '  path: {}'.format(x4_path / module_path),
            'To allow loading, enable this content_id in {}'.format(permissions_path),
            '',
            ]))
        return False
    except Exception as ex:
        print('\n'.join([
            '',
            'Rejecting module due to error during extension id permission check:',
            '  path: {}'.format(x4_path / module_path),
            '{}: {}'.format(type(ex).__name__, ex if str(ex) else 'Unspecified'),
            '',
            ]))
        return False


def Pipe_Client_Test(args):
    '''
    Function to mimic the x4 client.

    Connects to the host pipe, announces a lua package.path built from
    args.x4_path and the single test module from args.module, then blocks
    on a read to keep the pipe open.
    '''
    pipe = Pipe_Client(pipe_name)

    if not args.x4_path or not args.x4_path.exists():
        raise Exception('Test error: invalid x4 path')

    # Example lua package path.
    #package_path = r".\?.lua;C:\Steam\steamapps\common\X4 Foundations\lua\?.lua;C:\Steam\steamapps\common\X4 Foundations\lua\?\init.lua;"
    package_path = r".\?.lua;{0}\lua\?.lua;{0}\lua\?\init.lua;".format(args.x4_path)

    # Announce the package path.
    pipe.Write("package.path:" + package_path)

    # Announce module relative paths.
    # Just one test module for now.
    # Give as_posix style.
    modules = [
        args.module.as_posix(),
        ]
    # Separated with ';', end with a ';'.
    message = ';'.join(modules) + ';'
    pipe.Write("modules:" + message)

    # Keep-alive blocking read.
    pipe.Read()
    return


if __name__ == '__main__':
    Main()
meter_device_gateway.py
''' ================================================================================================================================================================ meter_device_gateway.py ===================== Implements state management of and integration with a Meter Gateway, which is connected to the machine running this code (server) using UART serial. The reference design for the Meter Gateway is a 'PiGate' board that mounts onto a a Raspberry Pi, connecting through its GPIO pins, and communicates with Meter Nodes (the 'Horus' board and its sensor(s)) using a RFM69 packet radio. The Meter Gateway is essentially a conduit between the implementation here and the Meter Node(s) connected to it. It maintains minimal state on these nodes and passes all metering data through for ingest by the server. While this implementation includes everything needed to represent and control the gateway, it doesn't include higher-order control and supervision of the metering system (or sub-system if part of a broader solution). Nor does it encapsulate the nodes connected to it - that is a largely stylistic decision in favour of a flatter rather than nested structuring. It does, however implement the messaging needed to control a particular node (which is called through more abstracted in the implementation of the node class). The implementation assumes (for now) that all meter nodes are metering the same thing, with the same unit of measure. As the implementation is only being used for metering electricity consumption, this is likely to weaken the attempts made at abstraction through the implementation. 
================================================================================================================================================================ ''' # ============================================================================================================================================================== # IMPORTS # ============================================================================================================================================================== import threading from enum import Enum from time import sleep import arrow from meterman import gateway_messages as gmsg import serial from meterman import app_base as base # ============================================================================================================================================================== # GLOBAL CONSTANTS # ============================================================================================================================================================== DEF_SERIAL_PORT = '/dev/ttyAMA0' DEF_SERIAL_BAUD = 115200 A_UNKNOWN = 'UNKNOWN' PURGE_RX_MSG_AGE_SECS = 600 # Device Statuses class DeviceStatus(Enum): INIT = 0 UP = 1 DARK = 2 # ============================================================================================================================================================== # GLOBAL VARS # ============================================================================================================================================================== # ============================================================================================================================================================== # IMPLEMENTATION # ============================================================================================================================================================== class MeterDeviceGateway: def __init__(self, meter_device_manager, network_id, gateway_id, label='Gateway', serial_port=DEF_SERIAL_PORT, 
serial_baud=DEF_SERIAL_BAUD, log_file=base.log_file): self.meter_device_manager = meter_device_manager self.label = label self.state = DeviceStatus.INIT self.last_seen = A_UNKNOWN self.when_booted = A_UNKNOWN self.free_ram = A_UNKNOWN self.last_time_drift = A_UNKNOWN self.log_level = A_UNKNOWN self.encrypt_key = A_UNKNOWN self.network_id = network_id self.gateway_id = gateway_id self.uuid = network_id + '.' + gateway_id self.tx_power = A_UNKNOWN self.logger = base.get_logger(logger_name=('gway_' + self.uuid), log_file=log_file) self.message_proc_functions = {} self.register_msg_proc_func(gmsg.SMSG_GETTIME_DEFN) self.register_msg_proc_func(gmsg.SMSG_SETTIME_ACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_SETTIME_NACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_GWSNAP_DEFN) self.register_msg_proc_func(gmsg.SMSG_SETGITR_ACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_SETGITR_NACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_NODESNAP_DEFN) self.register_msg_proc_func(gmsg.SMSG_GETNODESNAP_NACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_MTRUPDATE_NO_IRMS_DEFN) self.register_msg_proc_func(gmsg.SMSG_MTRUPDATE_WITH_IRMS_DEFN) self.register_msg_proc_func(gmsg.SMSG_MTRREBASE_DEFN) self.register_msg_proc_func(gmsg.SMSG_SETMTRVAL_ACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_SETMTRVAL_NACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_SETMTRINT_ACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_SETMTRINT_NACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_SETPLED_ACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_SETPLED_NACK_DEFN) self.register_msg_proc_func(gmsg.SMSG_NODEDARK_DEFN) self.register_msg_proc_func(gmsg.SMSG_GPMSG_DEFN) self.serial_port = serial_port self.serial_baud = serial_baud self.serial_conn = serial.Serial(serial_port, serial_baud, timeout=1, write_timeout=1) self.logger.info('Started connection to gateway ' + self.uuid + ' on ' + serial_port + ' at ' + serial_baud + ' baud') self.serial_tx_msg_buffer = [] self.serial_rx_msg_objects = {} self.rx_msg_objects_seq = 0 
self.serial_thread = threading.Thread(target=self.proc_serial_msg) self.serial_thread.daemon = True # Daemonize thread self.serial_thread.start() # Start the execution def register_msg_proc_func(self, message_definition): self.message_proc_functions[message_definition['smsg_type']] = 'proc_msg_' + str.lower(message_definition['smsg_type']) # ---------------------------------------------------------------------------------------------------------------------------------------------------------- # MESSAGE PROCESSING - TO GATEWAY/NODES (TX) # ---------------------------------------------------------------------------------------------------------------------------------------------------------- # All requests are responded to and processed asynchronously. def get_gateway_snapshot(self): # Requests a dump of the gateway's state. self.tx_serial_msg(gmsg.get_gateway_snapshot_msg()) def set_gateway_inst_tmp_rate(self, node_id, tmp_poll_rate, tmp_poll_period): # temporarily changes polling rate of meternode for gateway instructions (e.g. to send new meter value with minimal delay). self.tx_serial_msg(gmsg.set_gateway_inst_tmp_rate_msg(node_id, tmp_poll_rate, tmp_poll_period)) def get_node_snapshot(self, node_id=254): # Requests a dump of a node's state from the Gateway. self.tx_serial_msg(gmsg.get_node_snapshot_msg(node_id)) def set_gateway_time(self): # Sends request to gateway to set time to server's local time as Unix UTC Epoch. self.tx_serial_msg(gmsg.set_gateway_time_msg(arrow.utcnow().timestamp)) def set_node_gw_inst_tmp_rate(self, node_id, tmp_ginr_poll_rate, tmp_ginr_poll_time): # Requests a temporary increase of a node's meter GINR rate self.tx_serial_msg(gmsg.set_gw_inst_tmp_rate_msg(node_id, tmp_ginr_poll_rate, tmp_ginr_poll_time)) def set_node_meter_value(self, node_id, new_meter_value): # Requests a reset of a node's meter value to the value specified. 
self.tx_serial_msg(gmsg.set_node_meter_value_msg(node_id, new_meter_value)) def set_node_meter_interval(self, node_id, new_meter_interval): # Requests a change of a node's metering interval to the value specified. The interval is the period in seconds at which read entries are created # i.e. (resolution). self.tx_serial_msg(gmsg.set_node_meter_interval_msg(node_id, new_meter_interval)) def set_node_puck_led(self, node_id, new_puck_led_rate, new_puck_led_time): # Requests a change of a node's puck LED rate and time. self.tx_serial_msg(gmsg.set_node_puck_led_msg(node_id, new_puck_led_rate, new_puck_led_time)) def send_gp_msg(self, node_id, message): self.tx_serial_msg(gmsg.general_purpose_msg(node_id, message)) # ---------------------------------------------------------------------------------------------------------------------------------------------------------- # MESSAGE PROCESSING - FROM GATEWAY/NODES (RX) # ---------------------------------------------------------------------------------------------------------------------------------------------------------- def serial_rx_msg_buffer_add(self, msg_obj): self.rx_msg_objects_seq += 1 msg_obj['network_id'] = self.network_id msg_obj['gateway_id'] = self.gateway_id self.serial_rx_msg_objects[str(arrow.utcnow().timestamp) + '/' + str(self.rx_msg_objects_seq)] = msg_obj # '/' is < 9 def serial_rx_msg_buffer_purge(self, secs_old): purge_before = str(arrow.utcnow().shift(seconds=-secs_old).timestamp) self.serial_rx_msg_objects = {key: val for key, val in self.serial_rx_msg_objects.items() if key < purge_before} def proc_msg_gtime(self, msg_obj): # Process 'get time' request from Gateway, returning a SETTIME self.logger.debug("Got time request from gateway {0}.{1}".format(self.network_id, self.gateway_id)) self.set_gateway_time() def proc_msg_stime_ack(self, msg_obj): self.logger.debug("Set time for gateway {0}.{1}".format(self.network_id, self.gateway_id)) def proc_msg_stime_nack(self, msg_obj): self.logger.warn("Failed 
to set time for gateway {0}.{1}".format(self.network_id, self.gateway_id)) def proc_msg_gwsnap(self, msg_obj): # Process gateway dump/snapshot self.logger.debug("Got gateway snapshot: {0}".format(msg_obj)) rec = msg_obj['HEADER_1'] self.network_id = rec.network_id self.gateway_id = rec.gateway_id self.uuid = rec.network_id + '.' + rec.gateway_id self.when_booted = rec.when_booted self.free_ram = rec.free_ram self.last_time_drift = arrow.utcnow().timestamp - int(rec.gateway_time) self.log_level = rec.log_level self.encrypt_key = rec.encrypt_key self.tx_power = rec.tx_power self.serial_rx_msg_buffer_add(msg_obj) def proc_msg_nosnap(self, msg_obj): # Process node dump/snapshot. self.logger.debug("Got node snapshot(s): {0}".format(msg_obj)) self.serial_rx_msg_buffer_add(msg_obj) def proc_msg_nosnap_nack(self, msg_obj): rec = msg_obj['HEADER_1'] self.logger.warn("Failed to get node snapshot for node {0}.{1}".format(self.network_id, rec.node_id)) def proc_msg_mup_(self, msg_obj): # Process node meter update event from Gateway, creating a meter update object to pass to meter device manager self.logger.debug("Got meter update (network={0}): {1}".format(self.network_id, msg_obj)) self.serial_rx_msg_buffer_add(msg_obj) def proc_msg_mupc(self, msg_obj): # Process node meter update event from Gateway, creating a meter update object to pass to meter device manager self.logger.debug("Got meter update with IRMS (network={0}): {1}".format(self.network_id, msg_obj)) self.serial_rx_msg_buffer_add(msg_obj) def proc_msg_mreb(self, msg_obj): # Process node meter rebase event from Gateway self.logger.debug("Got meter rebase (network={0}): {1}".format(self.network_id, msg_obj)) self.serial_rx_msg_buffer_add(msg_obj) def proc_msg_sgitr_ack(self, msg_obj): rec = msg_obj['HEADER_1'] self.logger.debug("Set meter GINR rate for node {0}.{1}".format(self.network_id, rec.node_id)) def proc_msg_sgitr_nack(self, msg_obj): rec = msg_obj['HEADER_1'] self.logger.warn("Failed to set meter GINR rate 
for node {0}.{1}".format(self.network_id, rec.node_id)) def proc_msg_smval_ack(self, msg_obj): rec = msg_obj['HEADER_1'] self.logger.debug("Set meter value for node {0}.{1}".format(self.network_id, rec.node_id)) def proc_msg_smval_nack(self, msg_obj): rec = msg_obj['HEADER_1'] self.logger.warn("Failed to set meter value for node {0}.{1}".format(self.network_id, rec.node_id)) def proc_msg_spled_ack(self, msg_obj): rec = msg_obj['HEADER_1'] self.logger.debug("Set puck LED for node {0}.{1}".format(self.network_id, rec.node_id)) def proc_msg_spled_nack(self, msg_obj): rec = msg_obj['HEADER_1'] self.logger.warn("Failed to set meter value for node {0}.{1}".format(self.network_id, rec.node_id)) def proc_msg_smint_ack(self, msg_obj): rec = msg_obj['HEADER_1'] self.logger.debug("Set meter interval for node {0}.{1}".format(self.network_id, rec.node_id)) def proc_msg_smint_nack(self, msg_obj): rec = msg_obj['HEADER_1'] self.logger.warn("Failed to set meter value for node {0}.{1}".format(self.network_id, rec.node_id)) def proc_msg_ndark(self, msg_obj): # Process node dark message - indicating that gateway hasn't received proof of life from node rec = msg_obj['HEADER_1'] self.logger.debug("Got node dark notification for node {0}.{1}".format(self.network_id, rec.node_id)) self.serial_rx_msg_buffer_add(msg_obj) def proc_msg_gmsg(self, msg_obj): # Process node broadcast event from Gateway rec = msg_obj['HEADER_1'] self.logger.debug("Got general-purpose message from gateway: {0}".format(msg_obj)) self.serial_rx_msg_buffer_add(msg_obj) def tx_serial_msg(self, message): message = gmsg.SMSG_TX_PREFIX + message + '\r\n' self.serial_tx_msg_buffer.append(message) def rx_serial_msg(self): try: if self.serial_conn.inWaiting() > 0: serial_in = self.serial_conn.readline().strip().decode("latin1") if serial_in.startswith(gmsg.SMSG_RX_PREFIX): self.logger.debug('Got serial data: %s', serial_in) self.last_seen = arrow.utcnow().timestamp # inbound serial line is a message, so drop prefix and 
convert it from CSV to message object msg_obj = gmsg.get_message_obj(serial_in, self.uuid, self.gateway_id, self.network_id) # pass object to appropriate processor function using dictionary mapping getattr(self, self.message_proc_functions[msg_obj['message_type']])(msg_obj) except serial.serialutil.SerialTimeoutException as err: self.logger.debug('Serial timeout: {0}'.format(err)) except serial.serialutil.SerialException as err: self.logger.warn('Serial exception: {0}'.format(err)) except Exception as err: self.logger.error('Error receiving serial message: {0}'.format(err)) def proc_serial_msg(self): loop_count = 0 while self.serial_conn.isOpen(): try: # read and dispatch inbound lines from serial buffer to appropriate handler loop_count = loop_count + 1 if loop_count < 60 else 1 self.rx_serial_msg() if len(self.serial_tx_msg_buffer) > 0: tx_msg = self.serial_tx_msg_buffer.pop(0) self.serial_conn.write(tx_msg.encode('utf-8')) self.logger.debug("Wrote serial data: " + tx_msg.strip('\r\n')) if loop_count % 30 == 0: self.serial_rx_msg_buffer_purge(PURGE_RX_MSG_AGE_SECS) sleep(0.5) except (KeyboardInterrupt, SystemExit): # should only be called on shutdown self.serial_conn.close() break except Exception as err: self.logger.error('Error processing serial message: {0}'.format(err))
gui.py
# built-ins
import csv
import io
import json
import threading
import time
from collections import namedtuple, OrderedDict

# third-party
import tkinter as tk
import selenium
from tkinter import filedialog, messagebox, ttk

# current package
from formsgdownloader import formsg_driver
from formsgdownloader.pyinstaller_utils import get_path
from formsgdownloader.tk_utils import MenuAction, YjMenu, YjTreeview

HELP_MSG = '''
Version 1.0.0

Prerequisites:
- Google Chrome: Please install Google Chrome on the system
    - https://www.google.com/chrome/
- Chrome Driver: Please download the appropriate Chrome Driver for the Google Chrome
    - https://chromedriver.chromium.org/downloads

Usage:
0. Provide the following configuration options:
    - Your government email for accessing FormSG
    - Path to the Chrome Driver
    - Path to the folder to save the downloaded data
1. Add details for each form:
    - Enter the "Form Name", this is used for you to identify the entries, and can be anything.
    - Enter the "Form ID", this is the 24-characters long string at the end of the form's URL: https://form.gov.sg/#!/<Form ID is here>.
    - Enter the "Form Secret Key", this is the content of the secret key file when you created the form.
2. Download the data:
    - Click on "Start Download"
    - Wait for the prompt that the one-time password has been sent to your email, click OK.
    - Enter the one-time password, and click "Continue".
    - (Optional) Click on the menu [View] > [Logs] to view the download progress and any errors.
'''.strip()

FAVICON_PATH = get_path(r'favicon.ico')

# Declarative descriptors used by populate_widgets/bind_actions.
Widget = namedtuple('Widget', 'name type options geometry_manager geometry_options', defaults=[{}, 'pack', {}])
Action = namedtuple('Action', 'widget_name event callback')
Form = namedtuple('Form', 'name id secret_key')


class App:
    """Main FormSG downloader window: configuration, form list, and download flow."""

    def __init__(self, master, menu=None):
        self.master = master
        self.menu = menu
        self.widgets = {}  # name -> tk widget, populated by populate_widgets

        # Data (tk variables bound to the entry widgets)
        self.chrome_driver_path = tk.StringVar()
        self.download_path = tk.StringVar()
        self.forms = OrderedDict()  # Form namedtuple -> None (ordered set of forms)
        self.form_name = tk.StringVar()
        self.form_id = tk.StringVar()
        self.form_secret_key = tk.StringVar()
        self.email = tk.StringVar()
        self.one_time_password = tk.StringVar()

        # Initialize top-level components
        self.master.protocol('WM_DELETE_WINDOW', self.master.destroy)
        self.master.title('FormSG Data Downloader')
        self.master.iconbitmap(FAVICON_PATH)
        self.initialize_menu()

        # Initializing various internal components
        self.populate_widgets(master)
        self.initialize_widgets()
        self.bind_actions()
        self.initialize_log_window_and_logging()
        self.initialize_help_window()

    #region Initialization Methods
    def initialize_log_window_and_logging(self):
        """Create the (hidden) log window and redirect stdout into it."""
        # Create logging window
        toplevel_log = tk.Toplevel(self.master)
        toplevel_log.withdraw()
        toplevel_log.title('Logs')
        toplevel_log.protocol('WM_DELETE_WINDOW', toplevel_log.withdraw)
        toplevel_log.iconbitmap(FAVICON_PATH)
        text_log = tk.Text(toplevel_log, state='normal')
        text_log.insert('end', 'Log Messages:\n')
        text_log['state'] = 'disabled'
        text_log.pack(fill=tk.BOTH)
        self.widgets['toplevel_log'] = toplevel_log
        self.widgets['text_log'] = text_log

        # Redirect STDOUT to logs
        self.logStream = io.StringIO()
        import sys
        sys.stdout = self.logStream

        # Poll for updates to logs
        def poll_log():
            while True:
                self.logStream.seek(0)
                msg = self.logStream.read()
                if msg:
                    text_log['state'] = 'normal'
                    text_log.insert('end', msg)
                    text_log['state'] = 'disabled'
                    self.logStream.seek(0)
                    self.logStream.truncate()
                time.sleep(1)  # Polling interval

        threading.Thread(target=poll_log, daemon=True).start()

    def initialize_help_window(self):
        """Create the (hidden) help window showing HELP_MSG."""
        toplevel_help = tk.Toplevel(self.master)
        toplevel_help.withdraw()
        toplevel_help.title('Help')
        toplevel_help.protocol('WM_DELETE_WINDOW', toplevel_help.withdraw)
        toplevel_help.iconbitmap(FAVICON_PATH)
        text_help = tk.Text(toplevel_help, state='normal')
        text_help.insert('end', HELP_MSG)
        text_help['state'] = 'disabled'
        text_help.pack(fill=tk.BOTH)
        self.widgets['toplevel_help'] = toplevel_help
        self.widgets['text_help'] = text_help

    def initialize_menu(self):
        """Populate the File menu and add the View/Help top-level menus."""
        file_menu_items = [
            MenuAction('Load session...', self.load_session),
            MenuAction('Save session', self.save_session),
            MenuAction('Export forms', self.export_forms),
            MenuAction('Import forms', self.import_forms),
        ]
        # reversed() because each add prepends to the existing File menu entries.
        for name, handler_or_submenu in reversed(file_menu_items):
            self.menu.add_to_file_menu(name, handler_or_submenu)
        additional_top_level_menu_items = [
            ('View', [
                MenuAction('Logs', self.show_logs),
            ]),
            MenuAction('Help', self.show_help),
        ]
        for name, handler_or_submenu in additional_top_level_menu_items:
            self.menu.add_command_or_submenu(name, handler_or_submenu)

    def populate_widgets(self, master):
        """Create all widgets from the declarative WIDGETS table."""
        ROW_PADDING = 3
        COL_PADDING = 2
        WIDGETS = [
            Widget('frame_config', ttk.LabelFrame, {'text': 'Step 0: Configuration'},
                   'grid', {'column': 0, 'row': 0, 'padx': 10, 'pady': 10}),
            Widget('label_email', ttk.Label,
                   {'parent': 'frame_config', 'text': 'User Email Address:'},
                   'grid', {'column': 0, 'row': 0, 'pady': ROW_PADDING, 'padx': COL_PADDING}),
            Widget('entry_email', ttk.Entry,
                   {'parent': 'frame_config', 'textvariable': self.email},
                   'grid', {'column': 1, 'row': 0, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'EW'}),
            Widget('button_set-chrome-driver-path', ttk.Button,
                   {'parent': 'frame_config', 'text': 'Click to set Chrome Driver path:'},
                   'grid', {'column': 0, 'row': 1, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'EW'}),
            Widget('label_chrome-driver-path', ttk.Entry,
                   {'parent': 'frame_config', 'textvariable': self.chrome_driver_path, 'width': 64},
                   'grid', {'column': 1, 'row': 1, 'pady': ROW_PADDING, 'padx': COL_PADDING}),
            Widget('button_set-download-path', ttk.Button,
                   {'parent': 'frame_config', 'text': 'Click to set download path:'},
                   'grid', {'column': 0, 'row': 2, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'EW'}),
            Widget('label_download-path', ttk.Entry,
                   {'parent': 'frame_config', 'textvariable': self.download_path, 'width': 64},
                   'grid', {'column': 1, 'row': 2, 'pady': ROW_PADDING, 'padx': COL_PADDING}),
            Widget('frame_form', ttk.LabelFrame, {'text': 'Step 1: Load Forms'},
                   'grid', {'column': 0, 'row': 1, 'padx': 10, 'pady': 10}),
            Widget('label_form-name', ttk.Label, {'parent': 'frame_form', 'text': 'Form Name:'},
                   'grid', {'column': 0, 'row': 0, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'E'}),
            Widget('entry_form-name', ttk.Entry,
                   {'parent': 'frame_form', 'width': 32, 'textvariable': self.form_name},
                   'grid', {'column': 1, 'row': 0, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'W'}),
            Widget('label_form-id', ttk.Label, {'parent': 'frame_form', 'text': 'Form ID:'},
                   'grid', {'column': 0, 'row': 1, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'E'}),
            Widget('entry_form-id', ttk.Entry,
                   {'parent': 'frame_form', 'width': 32, 'textvariable': self.form_id},
                   'grid', {'column': 1, 'row': 1, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'W'}),
            Widget('label_form-secret-key', ttk.Label, {'parent': 'frame_form', 'text': 'Form Secret Key:'},
                   'grid', {'column': 0, 'row': 2, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'E'}),
            Widget('entry_form-secret-key', ttk.Entry,
                   {'parent': 'frame_form', 'width': 32, 'textvariable': self.form_secret_key},
                   'grid', {'column': 1, 'row': 2, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'W'}),
            Widget('button_add-form', ttk.Button, {'parent': 'frame_form', 'text': 'Add Form'},
                   'grid', {'column': 0, 'row': 3, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'E'}),
            Widget('button_load-forms', ttk.Button, {'parent': 'frame_form', 'text': 'Load Forms'},
                   'grid', {'column': 1, 'row': 3, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'sticky': 'W'}),
            Widget('tree_add-form', YjTreeview,
                   {'parent': 'frame_form', 'columns': ('Name', 'ID', 'Secret Key'), 'show': 'tree headings'},
                   'grid', {'column': 0, 'row': 4, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'columnspan': 2}),
            Widget('frame_download', ttk.LabelFrame, {'text': 'Step 2: Download Data'},
                   'grid', {'column': 0, 'row': 2, 'padx': 10, 'pady': 10}),
            Widget('button_download-submissions', ttk.Button,
                   {'parent': 'frame_download', 'text': 'Start Download'},
                   'grid', {'column': 0, 'row': 0, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'columnspan': 2, 'sticky': 'EW'}),
            Widget('label_one-time-password', ttk.Label,
                   {'parent': 'frame_download', 'text': 'One-Time Password:'},
                   'grid', {'column': 0, 'row': 1, 'pady': ROW_PADDING, 'padx': COL_PADDING}),
            Widget('entry_one-time-password', ttk.Entry,
                   {'parent': 'frame_download', 'textvariable': self.one_time_password, 'state': 'disabled'},
                   'grid', {'column': 1, 'row': 1, 'pady': ROW_PADDING, 'padx': COL_PADDING}),
            Widget('button_continue', ttk.Button,
                   {'parent': 'frame_download', 'text': 'Continue', 'state': 'disable'},
                   'grid', {'column': 0, 'row': 2, 'pady': ROW_PADDING, 'padx': COL_PADDING, 'columnspan': 2}),
        ]
        for name, widget_type, options, geometry_manager, geometry_options in WIDGETS:
            # 'parent' is our own key, not a tk option; default parent is the master window.
            parent = options.pop('parent', None)
            parent_widget = self.widgets.get(parent, master)
            w = widget_type(parent_widget, **options)
            getattr(w, geometry_manager)(**geometry_options)
            self.widgets[name] = w
        master.rowconfigure(0, weight=1)
        master.rowconfigure(1, weight=1)
        master.rowconfigure(2, weight=1)
        master.columnconfigure(0, weight=1)

    def initialize_widgets(self):
        pass

    def bind_actions(self):
        """Bind button clicks to their handlers."""
        # Fixed: 'button_add-form' was listed twice, which (with add='+') fired
        # add_form twice per click.
        ACTIONS = [
            Action('button_set-chrome-driver-path', '<Button-1>', lambda _: self.set_chrome_driver_path()),
            Action('button_set-download-path', '<Button-1>', lambda _: self.set_download_path()),
            Action('button_add-form', '<Button-1>', lambda _: self.add_form()),
            Action('button_load-forms', '<Button-1>', lambda _: self.import_forms()),
            Action('button_download-submissions', '<Button-1>', lambda _: self.download_all_forms()),
        ]
        for widget_name, event, callback in ACTIONS:
            self.widgets[widget_name].bind(event, callback, '+')
    #endregion

    #region GUI Event Handlers
    def show_logs(self):
        self.widgets['toplevel_log'].deiconify()

    def show_help(self):
        self.widgets['toplevel_help'].deiconify()

    def save_session(self):
        """Save forms, email and paths to a .formsg JSON file."""
        file_path = filedialog.asksaveasfilename(
            initialfile='untitled.formsg',
            filetypes=[('FormSG Project Files', '.formsg')],
            defaultextension='.formsg',
            confirmoverwrite=True)
        if file_path:
            print('[-->] Saving session to file:', file_path)
            data = {
                'forms': tuple(self.forms.keys()),
                'email': self.email.get(),
                'chrome_driver_path': self.chrome_driver_path.get(),
                'download_path': self.download_path.get(),
            }
            with open(file_path, 'wb') as out_file:
                out_file.write(json.dumps(data).encode('utf-8'))

    def load_session(self):
        """Load a previously saved .formsg session file."""
        file_path = filedialog.askopenfilename(
            initialfile='untitled.formsg',
            filetypes=[('FormSG Project Files', '.formsg')],
            defaultextension='.formsg',
        )
        if file_path:
            print('[<--] Loading session from file:', file_path)
            with open(file_path, 'rb') as in_file:
                raw_data = in_file.read()
            try:
                json_data = json.loads(raw_data.decode('utf-8'))
            except ValueError:
                # Corrupt/undecodable file: deliberately ignored (best-effort load).
                # ValueError covers both UnicodeDecodeError and json.JSONDecodeError.
                pass
            else:
                for form_details in json_data['forms']:
                    self._add_form(Form(*form_details))
                self.email.set(json_data['email'])
                self.chrome_driver_path.set(json_data['chrome_driver_path'])
                self.download_path.set(json_data['download_path'])

    def export_forms(self):
        """Write the form credentials to a CSV file."""
        file_path = filedialog.asksaveasfilename(
            initialfile='formsg.csv',
            filetypes=[('FormSG Credentials File', '.csv')],
            defaultextension='.csv',
            confirmoverwrite=True)
        if file_path:
            print('[-->] Saving Form SG forms and credentials to:', file_path)
            with open(file_path, 'wt', encoding='utf-8') as out_file:
                for form in self.forms:
                    out_file.write('{name},{id},{secret_key}\n'.format(**form._asdict()))

    def import_forms(self):
        """Read form credentials from a CSV file and add each form."""
        # Fixed option name: the tk dialog option is 'filetypes', not 'filetype'.
        cred_file_path = filedialog.askopenfilename(multiple=False,
                                                    filetypes=[('FormSG Credentials File', '*.csv')])
        if cred_file_path:
            with open(cred_file_path, 'rt', encoding='utf-8') as cred_file:
                content = cred_file.readlines()
            for details in content:
                form = Form(*details.strip().split(','))
                self._add_form(form)

    def set_chrome_driver_path(self):
        # Fixed option name: 'filetypes', not 'filetype'.
        self.chrome_driver_path.set(filedialog.askopenfilename(multiple=False,
                                                               filetypes=[('ChromeDriver', 'chromedriver.exe')]))

    def set_download_path(self):
        self.download_path.set(filedialog.askdirectory())

    def add_form(self):
        """Validate the form entry fields and add the form, or show the errors."""
        form_name = self.form_name.get().strip()
        form_id = self.form_id.get().strip()
        form_secret_key = self.form_secret_key.get().strip()
        error_details = self.validate_input(form_name, form_id, form_secret_key)
        if error_details:
            messagebox.askokcancel('Error', message='Invalid input', detail=error_details, icon='error')
        else:
            form = Form(form_name, form_id, form_secret_key)
            self._add_form(form)

    def download_all_forms(self):
        threading.Thread(target=self._download_all_forms, daemon=True).start()

    def _download_all_forms(self):
        """Worker: log in and download CSV data for every registered form."""
        self.disable_all_widgets()
        # NOTE(review): 'selenium_gui' is not defined in this module; the import at the
        # top brings in 'formsg_driver'. Presumably these should be the same object —
        # confirm and reconcile before relying on this path.
        # Initialize selenium_gui
        selenium_gui._set_forms_details(self.forms)
        selenium_gui._init(
            self.download_path.get(),
            self.chrome_driver_path.get(),
            force=True)
        # Log into form.gov.sg
        self.login_to_formsg()
        # Download data for each form
        for form in self.forms:
            try:
                selenium_gui.download_csv(form.name)
            except selenium.common.exceptions.WebDriverException as e:
                print(f'[!] Error downloading data from form: {form}.')
                print(e)
        print('[*] Download finished!')
        self.enable_all_widgets()
    #endregion

    #region GUI Methods
    def disable_all_widgets(self, excluding=None):
        """Set every widget's state to 'disabled', skipping names in `excluding`."""
        if excluding is None:
            excluding = []
        for name, w in self.widgets.items():
            if name in excluding:
                continue
            try:
                w['state'] = 'disabled'
            except (TypeError, tk.TclError):
                pass  # widget has no 'state' option (e.g. toplevels)

    def enable_all_widgets(self, excluding=None):
        """Set every widget's state to 'normal', skipping names in `excluding`."""
        if excluding is None:
            excluding = []
        for name, w in self.widgets.items():
            if name in excluding:
                continue
            try:
                w['state'] = 'normal'
            except (TypeError, tk.TclError):
                # Made consistent with disable_all_widgets (TypeError was uncaught here).
                pass

    def clear_widgets(self, *widget_names):
        """Clear the contents of the named Entry widgets."""
        for widget in (self.widgets[name] for name in widget_names):
            if isinstance(widget, tk.Entry):
                widget.delete(0, 'end')
            else:
                raise NotImplementedError(
                    f'{__name__} does not support {type(widget)}')
    #endregion

    #region Helper Methods
    def login_to_formsg(self):
        """Drive the email/OTP login flow, blocking until Continue is pressed."""
        selenium_gui.enter_email(self.email.get())
        continue_button_press = threading.Event()
        self.widgets['button_continue'].bind('<Button-1>', lambda _: continue_button_press.set())
        self.widgets['entry_one-time-password']['state'] = 'default'
        self.widgets['button_continue']['state'] = 'default'
        messagebox.askokcancel('One-Time Password', message='The one-time '
                               'password (OTP) has been sent to your email. Enter the OTP in the '
                               'main window and click "Continue" to download the data.', icon='info')
        continue_button_press.wait()
        self.widgets['entry_one-time-password']['state'] = 'disabled'
        self.widgets['button_continue']['state'] = 'disabled'
        otp = self.one_time_password.get()
        selenium_gui.enter_one_time_password(otp)

    def _add_form(self, form):
        """Register a form (idempotent) and mirror it into the treeview."""
        if form not in self.forms:
            self.forms[form] = None
            self.widgets['tree_add-form'].add_item(*form)

    @staticmethod
    def validate_input(form_name, form_id, form_secret_key):
        """Return a newline-joined error string, empty if all fields are valid."""
        errors = []
        if form_name == '':
            errors.append('Form Name cannot be empty.')
        if form_id == '':
            errors.append('Form ID cannot be empty.')
        elif len(form_id) != 24:
            errors.append('Form ID length incorrect.')
        if form_secret_key == '':
            errors.append('Form Secret Key cannot be empty.')
        elif len(form_secret_key) != 44:
            errors.append('Form Secret Key length incorrect.')
        return '\n'.join(errors)
    #endregion


if __name__ == '__main__':
    root = tk.Tk()
    menu = YjMenu(root)
    root.config(menu=menu)
    app = App(root, menu)
    root.mainloop()
pyminer.py
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# NOTE: this is Python 2 code (print statements, long literals, httplib, xrange).

import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process

ERR_SLEEP = 15          # seconds to back off after an RPC failure
MAX_NONCE = 1000000L    # initial nonce scan range per getwork round

settings = {}           # populated from the key=value config file in __main__
pp = pprint.PrettyPrinter(indent=4)

class BitcoinRPC:
	# Minimal JSON-RPC 1.1 client over HTTP basic auth.
	OBJID = 1
	def __init__(self, host, port, username, password):
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		self.conn = httplib.HTTPConnection(host, port, False, 30)
	def rpc(self, method, params=None):
		# Issue one JSON-RPC call; returns the 'result' field, the 'error'
		# object on RPC-level errors, or None on transport/decode failure.
		self.OBJID += 1
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : self.OBJID }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			  'Content-type' : 'application/json' })
		resp = self.conn.getresponse()
		if resp is None:
			print "JSON-RPC: no response"
			return None
		body = resp.read()
		resp_obj = json.loads(body)
		if resp_obj is None:
			print "JSON-RPC: cannot JSON-decode body"
			return None
		if 'error' in resp_obj and resp_obj['error'] != None:
			return resp_obj['error']
		if 'result' not in resp_obj:
			print "JSON-RPC: no result in object"
			return None
		return resp_obj['result']
	def getblockcount(self):
		return self.rpc('getblockcount')
	def getwork(self, data=None):
		# With data: submit a solved block; without: fetch new work.
		return self.rpc('getwork', data)

def uint32(x):
	# Truncate to an unsigned 32-bit value.
	return x & 0xffffffffL

def bytereverse(x):
	# Reverse the byte order of a 32-bit word (endianness swap).
	return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
			(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))

def bufreverse(in_buf):
	# Byte-swap every 32-bit word in the buffer (word-wise endianness flip).
	out_words = []
	for i in range(0, len(in_buf), 4):
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	return ''.join(out_words)

def wordreverse(in_buf):
	# Reverse the order of the 32-bit words in the buffer (not the bytes within).
	out_words = []
	for i in range(0, len(in_buf), 4):
		out_words.append(in_buf[i:i+4])
	out_words.reverse()
	return ''.join(out_words)

class Miner:
	# One worker: repeatedly fetches getwork, scans nonces, submits solutions.
	def __init__(self, id):
		self.id = id
		self.max_nonce = MAX_NONCE

	def work(self, datastr, targetstr):
		# Scan nonces for a double-SHA256 below target.
		# Returns (hashes_done, nonce_bin-or-None).
		# decode work data hex string to binary
		static_data = datastr.decode('hex')
		static_data = bufreverse(static_data)

		# the first 76b of 80b do not change
		blk_hdr = static_data[:76]

		# decode 256-bit target value
		targetbin = targetstr.decode('hex')
		targetbin = targetbin[::-1]	# byte-swap and dword-swap
		targetbin_str = targetbin.encode('hex')
		target = long(targetbin_str, 16)

		# pre-hash first 76b of block header
		static_hash = hashlib.sha256()
		static_hash.update(blk_hdr)

		for nonce in xrange(self.max_nonce):

			# encode 32-bit nonce value
			nonce_bin = struct.pack("<I", nonce)

			# hash final 4b, the nonce value
			hash1_o = static_hash.copy()
			hash1_o.update(nonce_bin)
			hash1 = hash1_o.digest()

			# sha256 hash of sha256 hash
			hash_o = hashlib.sha256()
			hash_o.update(hash1)
			hash = hash_o.digest()

			# quick test for winning solution: high 32 bits zero?
			if hash[-4:] != '\0\0\0\0':
				continue

			# convert binary hash to 256-bit Python long
			hash = bufreverse(hash)
			hash = wordreverse(hash)

			hash_str = hash.encode('hex')
			l = long(hash_str, 16)

			# proof-of-work test:  hash < target
			if l < target:
				print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
				return (nonce + 1, nonce_bin)
			else:
				print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

		return (nonce + 1, None)

	def submit_work(self, rpc, original_data, nonce_bin):
		# Splice the winning nonce back into the original hex work data and submit.
		nonce_bin = bufreverse(nonce_bin)
		nonce = nonce_bin.encode('hex')
		solution = original_data[:152] + nonce + original_data[160:256]
		param_arr = [ solution ]
		result = rpc.getwork(param_arr)
		print time.asctime(), "--> Upstream RPC result:", result

	def iterate(self, rpc):
		# One getwork round: fetch, scan, retune max_nonce to hit 'scantime',
		# and submit any solution found.
		work = rpc.getwork()
		if work is None:
			time.sleep(ERR_SLEEP)
			return
		if 'data' not in work or 'target' not in work:
			time.sleep(ERR_SLEEP)
			return

		time_start = time.time()

		(hashes_done, nonce_bin) = self.work(work['data'],
						     work['target'])

		time_end = time.time()
		time_diff = time_end - time_start

		# Scale the next scan so one round takes roughly settings['scantime'].
		self.max_nonce = long(
			(hashes_done * settings['scantime']) / time_diff)
		if self.max_nonce > 0xfffffffaL:
			self.max_nonce = 0xfffffffaL

		if settings['hashmeter']:
			print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
			      self.id, hashes_done,
			      (hashes_done / 1000.0) / time_diff)

		if nonce_bin is not None:
			self.submit_work(rpc, work['data'], nonce_bin)

	def loop(self):
		rpc = BitcoinRPC(settings['host'], settings['port'],
				 settings['rpcuser'], settings['rpcpass'])
		if rpc is None:
			return

		while True:
			self.iterate(rpc)

def miner_thread(id):
	# Process entry point: run one Miner forever.
	miner = Miner(id)
	miner.loop()

if __name__ == '__main__':
	if len(sys.argv) != 2:
		print "Usage: pyminer.py CONFIG-FILE"
		sys.exit(1)

	# Parse the key=value config file, ignoring comment lines.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue

		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()

	# Defaults for optional settings.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 78952
	if 'threads' not in settings:
		settings['threads'] = 1
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)

	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])

	# Spawn one worker process per configured thread.
	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads

	print settings['threads'], "mining threads started"

	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
rotor_driver.py
#...........Drive a Servo motor with sin-wave PWM signal............
#Author: James Bramante
#Date: May 3, 2017

from Adafruit_PWM_Servo_Driver import PWM
import threading
import time
import math
import numpy as np


class RotorDriver(object):
    """Drive two counter-rotating rotors with a sinusoidal PWM speed profile."""

    def __init__(self, channels=[0], magnitude=0.5, period=4., runtime=60., comint=1e-2, periodMultiple=1):
        """
        Constructor
        :type channels: list
        :type magnitude: float
        :type period: float
        :type runtime: int
        :type comint: float
        :param channels: which channels to control rotors on
        :param magnitude: % of maximum rotor power
        :param period: period of the sin wave, in seconds
        :param runtime: how long to run the rotors for, in seconds
        :param comint: desired refresh rate of commands to rotors
        :param periodMultiple: selects a coefficient set that forces oscillation
            at multiples of the system's harmonic period
        """
        # Parse the input parameters
        self.channels = channels
        self.mag = np.array([magnitude, magnitude])  # per-rotor power fraction
        self.per = period
        self.runtime = runtime
        self.comint = comint
        self.multiple = int(periodMultiple)

        # FINAL variables
        self.PWMFREQ = 250    # Frequency of PWM signal in Hz
        self.NUMTICK = 4096   # Resolution of signal (i.e. 4096 = 12 bit)
        # PWM-width, in microseconds, of 0 servo movement
        # [Rotor 0 (arm without pressure sensor): dead range [1551,1603];
        #  Rotor 1: dead range [1546,1599]]
        self.ZEROSPD = [1577, 1573]
        # PWM-width, in microseconds, of maximum forward speed
        self.MAXSPD = [int(round(1990 * self.ZEROSPD[x] / 1500)) for x in range(2)]
        self.MINSPD = 1100            # PWM-width, in microseconds, of maximum reverse speed
        self.SERVO_ADDRESS = 0x40     # Address of I2C servo in hex
        self.BIAS = 0                 # Bias with which to adjust value inside cos
        self.MULT2A = -0.2056
        self.MULT2B = -1.096
        self.MULT2C = 0.3652
        self.MULT3A = -0.2127
        self.MULT3B = -1.134
        self.MULT3C = 0.3781
        # These coefficients are used to alter the pattern of rotor output
        # to force the apparatus to oscillate at periods greater than
        # the system's harmonic period
        self.COEFFICIENTS = [[1, 0, 0],
                             [self.MULT2A, self.MULT2B, self.MULT2C],
                             [self.MULT3A, self.MULT3B, self.MULT3C]]

        # Additional variables
        self.usPerTick = 1. / self.PWMFREQ * 1e6 / self.NUMTICK  # microseconds per tick
        self.spdRange = [400, 400]
        self.spdRange[0] = max(self.MAXSPD[0] - self.ZEROSPD[0], self.ZEROSPD[0] - self.MINSPD)
        self.spdRange[1] = max(self.MAXSPD[1] - self.ZEROSPD[1], self.ZEROSPD[1] - self.MINSPD)
        if self.per == 0:
            # Zero period means "no oscillation": direction alternates via flipper.
            self.freq = 0
            self.BIAS = 0
        else:
            self.freq = 2. * math.pi / self.per
        self.running = False
        self.flipper = 1.
        self.startTime = 0

        # Initialize the rotors at zero-speed pulse width.
        self.pwm = PWM(self.SERVO_ADDRESS)
        self.pwm.setPWMFreq(self.PWMFREQ)
        self.pwm.setAllPWM(0, math.floor(self.ZEROSPD[0] / self.usPerTick))

    def daemonize(self):
        """Run the control loop in a daemon thread."""
        self.thread = threading.Thread(target=self.run, args=(), daemon=True)
        self.thread.start()

    # The threaded method that runs the rotors in the background
    def run(self):
        """Control loop: command a (co)sinusoidal speed profile until runtime elapses."""
        # Record the start time from which to determine when to stop
        self.startTime = time.perf_counter()
        self.running = True
        while self.running:
            # Determine new speed as a sinusoid; the cos term is hoisted so it is
            # computed once per pass instead of three times (identical value).
            tim = time.perf_counter()
            c = math.cos(self.freq * (tim - self.startTime) + self.BIAS)
            coeffs = self.COEFFICIENTS[self.multiple - 1]
            newSpeed = np.array(self.spdRange) * (coeffs[0] * c + coeffs[1] * c ** 3 + coeffs[2] * c ** 5)
            for channel in self.channels:
                # Direction of the rotor controlled by whether channel is even (-) or odd (+)
                self.pwm.setPWM(int(channel), 0, math.floor(
                    (self.ZEROSPD[int(channel % 2)]
                     + math.copysign(1, self.flipper)
                     * math.copysign(1, channel % 2 - 1)
                     * newSpeed[int(channel % 2)]
                     * float(self.mag[int(channel % 2)])) / self.usPerTick))
            # If we come to the intended end of the run, stop the rotors and exit loop
            if ((time.perf_counter() - self.startTime) > self.runtime):
                self.pwm.setAllPWM(0, math.floor(self.ZEROSPD[0] / self.usPerTick))
                self.running = False
            time.sleep(self.comint)
            if (self.freq == 0):
                # In constant-speed mode, alternate rotor direction each pass.
                self.flipper = -self.flipper

    def stop(self):
        """Stop the loop and return all rotors to zero speed."""
        self.running = False
        self.pwm.setAllPWM(0, math.floor(self.ZEROSPD[0] / self.usPerTick))

    def setPeriod(self, per):
        """Set oscillation period in seconds (0 disables oscillation)."""
        self.per = per
        if self.per == 0:
            self.freq = 0
            self.BIAS = 0
        else:
            self.freq = 2. * math.pi / self.per
            self.BIAS = 0

    def setMultiple(self, mult):
        """Select the harmonic-period coefficient set (1-based)."""
        self.multiple = int(mult)

    def setMagnitude(self, mag):
        """Set rotor power as a pair, or scale both preserving their ratio."""
        # Tuple isinstance form replaces the original bitwise `|` chain.
        if isinstance(mag, (list, tuple, np.ndarray)):
            self.mag[0] = mag[0]
            self.mag[1] = mag[1]
        else:
            if np.max(self.mag) > 0:
                # Preserve the existing ratio between the two rotors.
                self.mag = np.array([self.mag[0] / np.max(self.mag) * mag,
                                     self.mag[1] / np.max(self.mag) * mag])
            else:
                self.mag = np.array([mag, mag])

    def setRuntime(self, tim):
        self.runtime = tim

    def setComint(self, com):
        self.comint = com

    def setChannels(self, chan):
        """Replace the driven channel list, zeroing all outputs first."""
        self.channels = chan
        self.pwm.setAllPWM(0, math.floor(0 / self.usPerTick))

    def addChannels(self, chan):
        """Append channel(s); accepts a single channel or a list."""
        if isinstance(chan, list):
            self.channels += chan
        else:
            self.channels += [chan]
        self.pwm.setAllPWM(0, math.floor(0 / self.usPerTick))

    def remChannels(self, chan):
        """Remove channel(s); accepts a single channel or a list."""
        if not isinstance(chan, list):
            chan = [chan]
        self.channels = [x for x in self.channels if x not in chan]
        self.pwm.setAllPWM(0, math.floor(0 / self.usPerTick))
e2e.py
# End-to-end tests: drive the web frontend through Selenium and check state
# in RethinkDB.  Requires a running frontend at http://frontend:8080, a
# Selenium hub (or NO_HEADLESS=TRUE for a local Firefox), a reachable
# RethinkDB at host "frontend", and the project-local `linharn` module.
# NOTE(review): layout reconstructed from a whitespace-mangled dump.
from multiprocessing import Process
from os import environ
from time import sleep, time

from pytest import fixture, raises
import rethinkdb as r
from selenium.webdriver import Chrome, Firefox, Remote
from selenium.webdriver import ChromeOptions
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.common.keys import Keys

from .linharn import control_loop

# Handles to the three "Brain" database tables the tests clear/inspect.
TARGET_TABLE = r.db("Brain").table("Targets")
OUTPUT_TABLE = r.db("Brain").table("Outputs")
JOBS_TABLE = r.db("Brain").table("Jobs")
TABLES = [TARGET_TABLE, OUTPUT_TABLE, JOBS_TABLE]


# Fixture will delete all jobs, targets, and outputs
# before a test session from database.
@fixture(scope="session", autouse=True)
def clear_dbs():
    conn = r.connect("frontend")
    for table in TABLES:
        table.delete().run(conn)
    sleep(1)


@fixture(scope="function")
def linharn_client():
    """Generates and runs a Harness plugin thread connecting
    to 127.0.0.1:5000
    """
    # .repl() installs a default connection so control_loop can run
    # queries without being handed one explicitly.
    r.connect("frontend").repl()
    client_thread = Process(target=control_loop, args=("C_127.0.0.1_1",))
    client_thread.start()
    yield client_thread
    client_thread.terminate()


# Chrome variant of the browser fixture, currently disabled (kept as a
# module-level string so it stays out of collection).
"""
@fixture(scope="module")
def chrome_browser():
    # Connect to the Selenium server remote webdriver (Chrome)
    no_headless = environ.get("NO_HEADLESS", "")
    if no_headless == "TRUE":
        browser = Firefox()
    else:
        browser = Remote("http://localhost:4445/wd/hub",
                         DesiredCapabilities.CHROME.copy())
    browser.implicitly_wait(20)
    browser.get("http://frontend:8080")
    yield browser
    browser.close()
"""


@fixture(scope="module")
def firefox_browser():
    # Connect to the Selenium server remote webdriver (Firefox)
    no_headless = environ.get("NO_HEADLESS", "")
    if no_headless == "TRUE":
        browser = Firefox()
    else:
        browser = Remote("http://localhost:4444/wd/hub",
                         DesiredCapabilities.FIREFOX.copy())
    browser.implicitly_wait(20)
    browser.get("http://frontend:8080")
    yield browser
    browser.close()


def test_instantiate_firefox(linharn_client, firefox_browser):
    """Test something...
    """
    # Add a target via the frontend form: open the dialog, pick the service
    # from the dropdown ('h' + ENTER), fill in the address, submit.
    add_tgt = firefox_browser.find_element_by_id('add_target_id')
    add_tgt.click()
    plgn = firefox_browser.find_element_by_id('service_name')
    plgn.click()
    plgn.send_keys('h')
    plgn.send_keys(Keys.ENTER)
    firefox_browser.find_element_by_id('location_num').send_keys('127.0.0.1')
    submit = firefox_browser.find_element_by_id('add_target_submit')
    submit.click()


def test_instantiate_addjob0(linharn_client, firefox_browser):
    # Open the first target and start adding a job to it.
    # NOTE(review): get_attribute() *reads* an attribute by name and every
    # result here is discarded — these calls assert nothing.  Presumably the
    # intent was to compare attribute values; verify with the author.
    tgt_name = firefox_browser.find_element_by_id('name_tag_id0')
    tgt_name.click()
    tgt_name.get_attribute('Harness')
    tgt_ip = firefox_browser.find_element_by_id('address_tag_id0').get_attribute('127.0.0.1')
    add_job = firefox_browser.find_element_by_id('add_job_sc_id0')
    add_job.click()
    plugin = firefox_browser.find_element_by_id('pluginid1').get_attribute('Harness:5000')
    addr = firefox_browser.find_element_by_id('addressid1').get_attribute('127.0.0.1')


# Add commands to existing job
# Using Firefox browser
def test_instantiate_addcmd0(linharn_client, firefox_browser):
    """ Adds command to job """
    tgt_name = firefox_browser.find_element_by_id('name_tag_id0')
    tgt_name.click()
    cmd_name = firefox_browser.find_element_by_id('acommandid4')
    cmd_name.click()
    cmd_txt = firefox_browser.find_element_by_id('argumentid_0').send_keys('test1234')
    cmd_btn = firefox_browser.find_element_by_id('add_command_to_job_id2')
    cmd_btn.click()
    # NOTE(review): as above, this get_attribute() result is unused.
    cmd_box = firefox_browser.find_element_by_id('commandid1').get_attribute('test1234')


def test_instantiate_runjob0(linharn_client, firefox_browser):
    """ Starts job. """
    exec_btn = firefox_browser.find_element_by_id('execute_button')
    exec_btn.click()


# Job-completion check, currently disabled (note the escaped inner
# docstring quotes, required because this whole block is itself a string).
"""
def test_instantiate_chkjob0(linharn_client, firefox_browser):
    "\""Check to see if job was successful "\""
    done = False
    res = None
    start = time()
    while time() - start < 30:
        c = JOBS_TABLE.run()
        for d in c:
            res = d
        if res and res["Status"] == "Done":
            done = True
            break
        sleep(1)
    print(res)
    assert done
"""
managers.py
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
# NOTE(review): this is a verbatim copy of CPython's
# multiprocessing/managers.py (layout reconstructed from a
# whitespace-mangled dump); code tokens are unchanged.

__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]

#
# Imports
#

import sys
import threading
import array
import queue
import time

from traceback import format_exc

from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context

#
# Register some things for pickling
#

def reduce_array(a):
    # array.array pickles as (typecode, raw bytes)
    return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)

view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list:       # only needed in Py3.0
    # dict views are not picklable; send them across as plain lists
    def rebuild_as_list(obj):
        return list, (list(obj),)
    for view_type in view_types:
        reduction.register(view_type, rebuild_as_list)

#
# Type for identifying shared objects
#

class Token(object):
    '''
    Type to uniquely identify a shared object
    '''
    __slots__ = ('typeid', 'address', 'id')

    def __init__(self, typeid, address, id):
        (self.typeid, self.address, self.id) = (typeid, address, id)

    def __getstate__(self):
        return (self.typeid, self.address, self.id)

    def __setstate__(self, state):
        (self.typeid, self.address, self.id) = state

    def __repr__(self):
        return '%s(typeid=%r, address=%r, id=%r)' % \
               (self.__class__.__name__, self.typeid, self.address, self.id)

#
# Function for communication with a manager's server process
#

def dispatch(c, id, methodname, args=(), kwds={}):
    '''
    Send a message to manager using connection `c` and return response
    '''
    c.send((id, methodname, args, kwds))
    kind, result = c.recv()
    if kind == '#RETURN':
        return result
    raise convert_to_error(kind, result)

def convert_to_error(kind, result):
    # Map a server reply kind to the exception the caller should raise.
    if kind == '#ERROR':
        return result
    elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
        if not isinstance(result, str):
            raise TypeError(
                "Result {0!r} (kind '{1}') type is {2}, not str".format(
                    result, kind, type(result)))
        if kind == '#UNSERIALIZABLE':
            return RemoteError('Unserializable message: %s\n' % result)
        else:
            return RemoteError(result)
    else:
        return ValueError('Unrecognized message type {!r}'.format(kind))

class RemoteError(Exception):
    # Wraps a remote traceback string for display on the client side.
    def __str__(self):
        return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)

#
# Functions for finding the method names of an object
#

def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    temp = []
    for name in dir(obj):
        func = getattr(obj, name)
        if callable(func):
            temp.append(name)
    return temp

def public_methods(obj):
    '''
    Return a list of names of methods of `obj` which do not start with '_'
    '''
    return [name for name in all_methods(obj) if name[0] != '_']

#
# Server which is run in a process controlled by a manager
#

class Server(object):
    '''
    Server class which runs in a process controlled by a manager object
    '''
    # The only RPC entry points a client may invoke on the server itself.
    public = ['shutdown', 'create', 'accept_connection', 'get_methods',
              'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']

    def __init__(self, registry, address, authkey, serializer):
        if not isinstance(authkey, bytes):
            raise TypeError(
                "Authkey {0!r} is type {1!s}, not bytes".format(
                    authkey, type(authkey)))
        self.registry = registry
        self.authkey = process.AuthenticationString(authkey)
        Listener, Client = listener_client[serializer]

        # do authentication later
        self.listener = Listener(address=address, backlog=16)
        self.address = self.listener.address

        # ident '0' is reserved; maps ident -> (obj, exposed names, ...)
        self.id_to_obj = {'0': (None, ())}
        self.id_to_refcount = {}
        self.id_to_local_proxy_obj = {}
        self.mutex = threading.Lock()

    def serve_forever(self):
        '''
        Run the server forever
        '''
        self.stop_event = threading.Event()
        process.current_process()._manager_server = self
        try:
            # Accept connections on a daemon thread; the main thread just
            # waits for shutdown() to set the stop event.
            accepter = threading.Thread(target=self.accepter)
            accepter.daemon = True
            accepter.start()
            try:
                while not self.stop_event.is_set():
                    self.stop_event.wait(1)
            except (KeyboardInterrupt, SystemExit):
                pass
        finally:
            if sys.stdout != sys.__stdout__: # what about stderr?
                util.debug('resetting stdout, stderr')
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__
            sys.exit(0)

    def accepter(self):
        # Accept loop: each new connection gets its own handler thread.
        while True:
            try:
                c = self.listener.accept()
            except OSError:
                continue
            t = threading.Thread(target=self.handle_request, args=(c,))
            t.daemon = True
            t.start()

    def handle_request(self, c):
        '''
        Handle a new connection
        '''
        funcname = result = request = None
        try:
            # Mutual challenge/response authentication before any dispatch.
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            assert funcname in self.public, '%r unrecognized' % funcname
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c, *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)
        try:
            c.send(msg)
        except Exception as e:
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass
            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)

        c.close()

    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)

        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj

        while not self.stop_event.is_set():

            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                try:
                    obj, exposed, gettypeid = id_to_obj[ident]
                except KeyError as ke:
                    # fall back to objects only the manager itself still
                    # references (nested proxies); re-raise original KeyError
                    # if not found there either
                    try:
                        obj, exposed, gettypeid = \
                            self.id_to_local_proxy_obj[ident]
                    except KeyError as second_ke:
                        raise ke

                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )

                function = getattr(obj, methodname)

                try:
                    res = function(*args, **kwds)
                except Exception as e:
                    msg = ('#ERROR', e)
                else:
                    # Methods listed in method_to_typeid return proxies
                    # rather than copies of their result.
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)

            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    # Not exposed directly: try the special fallbacks
                    # (__str__, __repr__, #GETVALUE).
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(
                            self, conn, ident, obj, *args, **kwds
                            )
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())

            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)

            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception as e:
                    send(('#UNSERIALIZABLE', format_exc()))
            except Exception as e:
                util.info('exception in thread serving %r',
                        threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)

    def fallback_getvalue(self, conn, ident, obj):
        return obj

    def fallback_str(self, conn, ident, obj):
        return str(obj)

    def fallback_repr(self, conn, ident, obj):
        return repr(obj)

    fallback_mapping = {
        '__str__':fallback_str,
        '__repr__':fallback_repr,
        '#GETVALUE':fallback_getvalue
        }

    def dummy(self, c):
        # No-op used by BaseManager.connect() to verify the connection.
        pass

    def debug_info(self, c):
        '''
        Return some info --- useful to spot problems with refcounting
        '''
        # Perhaps include debug info about 'c'?
        with self.mutex:
            result = []
            keys = list(self.id_to_refcount.keys())
            keys.sort()
            for ident in keys:
                if ident != '0':
                    result.append('  %s:       refcount=%s\n    %s' %
                                  (ident, self.id_to_refcount[ident],
                                   str(self.id_to_obj[ident][0])[:75]))
            return '\n'.join(result)

    def number_of_objects(self, c):
        '''
        Number of shared objects
        '''
        # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
        return len(self.id_to_refcount)

    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            util.debug('manager received shutdown message')
            c.send(('#RETURN', None))
        except:
            import traceback
            traceback.print_exc()
        finally:
            self.stop_event.set()

    def create(*args, **kwds):
        '''
        Create a new shared object and return its id
        '''
        # Star-args unpacking emulates a (self, c, typeid, *args) signature
        # while still accepting 'c'/'typeid' passed by keyword.
        if len(args) >= 3:
            self, c, typeid, *args = args
        elif not args:
            raise TypeError("descriptor 'create' of 'Server' object "
                            "needs an argument")
        else:
            if 'typeid' not in kwds:
                raise TypeError('create expected at least 2 positional '
                                'arguments, got %d' % (len(args)-1))
            typeid = kwds.pop('typeid')
            if len(args) >= 2:
                self, c, *args = args
            else:
                if 'c' not in kwds:
                    raise TypeError('create expected at least 2 positional '
                                    'arguments, got %d' % (len(args)-1))
                c = kwds.pop('c')
                self, *args = args
        args = tuple(args)

        with self.mutex:
            callable, exposed, method_to_typeid, proxytype = \
                      self.registry[typeid]

            if callable is None:
                # Registered without a callable: caller supplies the object.
                if kwds or (len(args) != 1):
                    raise ValueError(
                        "Without callable, must have one non-keyword argument")
                obj = args[0]
            else:
                obj = callable(*args, **kwds)

            if exposed is None:
                exposed = public_methods(obj)
            if method_to_typeid is not None:
                if not isinstance(method_to_typeid, dict):
                    raise TypeError(
                        "Method_to_typeid {0!r}: type {1!s}, not dict".format(
                            method_to_typeid, type(method_to_typeid)))
                exposed = list(exposed) + list(method_to_typeid)

            ident = '%x' % id(obj)  # convert to string because xmlrpclib
                                    # only has 32 bit signed integers
            util.debug('%r callable returned object with id %r', typeid, ident)

            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0

        self.incref(c, ident)
        return ident, tuple(exposed)

    def get_methods(self, c, token):
        '''
        Return the methods of the shared object indicated by token
        '''
        return tuple(self.id_to_obj[token.id][1])

    def accept_connection(self, c, name):
        '''
        Spawn a new thread to serve this connection
        '''
        threading.current_thread().name = name
        c.send(('#RETURN', None))
        self.serve_client(c)

    def incref(self, c, ident):
        with self.mutex:
            try:
                self.id_to_refcount[ident] += 1
            except KeyError as ke:
                # If no external references exist but an internal (to the
                # manager) still does and a new external reference is created
                # from it, restore the manager's tracking of it from the
                # previously stashed internal ref.
                if ident in self.id_to_local_proxy_obj:
                    self.id_to_refcount[ident] = 1
                    self.id_to_obj[ident] = \
                        self.id_to_local_proxy_obj[ident]
                    obj, exposed, gettypeid = self.id_to_obj[ident]
                    util.debug('Server re-enabled tracking & INCREF %r', ident)
                else:
                    raise ke

    def decref(self, c, ident):
        if ident not in self.id_to_refcount and \
            ident in self.id_to_local_proxy_obj:
            util.debug('Server DECREF skipping %r', ident)
            return

        with self.mutex:
            if self.id_to_refcount[ident] <= 0:
                raise AssertionError(
                    "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
                        ident, self.id_to_obj[ident],
                        self.id_to_refcount[ident]))
            self.id_to_refcount[ident] -= 1
            if self.id_to_refcount[ident] == 0:
                del self.id_to_refcount[ident]

        if ident not in self.id_to_refcount:
            # Two-step process in case the object turns out to contain other
            # proxy objects (e.g. a managed list of managed lists).
            # Otherwise, deleting self.id_to_obj[ident] would trigger the
            # deleting of the stored value (another managed object) which would
            # in turn attempt to acquire the mutex that is already held here.
            self.id_to_obj[ident] = (None, (), None)  # thread-safe
            util.debug('disposing of obj with id %r', ident)
            with self.mutex:
                del self.id_to_obj[ident]

#
# Class to represent state of a manager
#

class State(object):
    __slots__ = ['value']
    INITIAL = 0
    STARTED = 1
    SHUTDOWN = 2

#
# Mapping from serializer name to Listener and Client types
#

listener_client = {
    'pickle' : (connection.Listener, connection.Client),
    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
    }

#
# Definition of BaseManager
#

class BaseManager(object):
    '''
    Base class for managers
    '''
    _registry = {}
    _Server = Server

    def __init__(self, address=None, authkey=None, serializer='pickle',
                 ctx=None):
        if authkey is None:
            authkey = process.current_process().authkey
        self._address = address     # XXX not final address if eg ('', 0)
        self._authkey = process.AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]
        self._ctx = ctx or get_context()

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return Server(self._registry, self._address,
                      self._authkey, self._serializer)

    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        # 'dummy' round-trip verifies address and authkey are good.
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED

    def start(self, initializer=None, initargs=()):
        '''
        Spawn a server process for this manager object
        '''
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = self._ctx.Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer, initializer, initargs),
            )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__  + '-' + ident
        self._process.start()

        # get address of server
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
            )

    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer,
                    initializer=None, initargs=()):
        '''
        Create a server, report its address and run it
        '''
        # This runs in the child process.
        if initializer is not None:
            initializer(*initargs)

        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()

    def _create(*args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        # Star-args so a shared-object typeid named 'self' or 'typeid'
        # could still be created via keywords.
        self, typeid, *args = args
        args = tuple(args)

        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed

    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        if self._process is not None:
            self._process.join(timeout)
            if not self._process.is_alive():
                self._process = None

    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()

    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()

    def __enter__(self):
        if self._state.value == State.INITIAL:
            self.start()
        if self._state.value != State.STARTED:
            if self._state.value == State.INITIAL:
                raise ProcessError("Unable to start server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    @staticmethod
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            # Graceful join first, then escalate to terminate().
            process.join(timeout=1.0)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info('manager still alive after terminate')

        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

    @property
    def address(self):
        return self._address

    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=True):
        '''
        Register a typeid with the manager type
        '''
        # Give each subclass its own registry rather than mutating the
        # one inherited from BaseManager.
        if '_registry' not in cls.__dict__:
            cls._registry = cls._registry.copy()

        if proxytype is None:
            proxytype = AutoProxy

        exposed = exposed or getattr(proxytype, '_exposed_', None)

        method_to_typeid = method_to_typeid or \
                           getattr(proxytype, '_method_to_typeid_', None)

        if method_to_typeid:
            for key, value in list(method_to_typeid.items()): # isinstance?
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value

        cls._registry[typeid] = (
            callable, exposed, method_to_typeid, proxytype
            )

        if create_method:
            # Attach a factory method named after the typeid which creates
            # the shared object on the server and returns a proxy to it.
            def temp(self, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(
                    token, self._serializer, manager=self,
                    authkey=self._authkey, exposed=exp
                    )
                # The proxy's __init__ already incref'd; drop the extra ref
                # taken by the server-side create().
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id,))
                return proxy
            temp.__name__ = typeid
            setattr(cls, typeid, temp)

#
# Subclass of set which get cleared after a fork
#

class ProcessLocalSet(set):
    def __init__(self):
        util.register_after_fork(self, lambda obj: obj.clear())
    def __reduce__(self):
        return type(self), ()

#
# Definition of BaseProxy
#

class BaseProxy(object):
    '''
    A base for proxies of shared objects
    '''
    # Maps server address -> (thread-local connection holder, set of idents
    # this process holds references to).
    _address_to_local = {}
    _mutex = util.ForkAwareThreadLock()

    def __init__(self, token, serializer, manager=None,
                 authkey=None, exposed=None, incref=True, manager_owned=False):
        with BaseProxy._mutex:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                BaseProxy._address_to_local[token.address] = tls_idset

        # self._tls is used to record the connection used by this
        # thread to communicate with the manager at token.address
        self._tls = tls_idset[0]

        # self._idset is used to record the identities of all shared
        # objects for which the current process owns references and
        # which are in the manager at token.address
        self._idset = tls_idset[1]

        self._token = token
        self._id = self._token.id
        self._manager = manager
        self._serializer = serializer
        self._Client = listener_client[serializer][1]

        # Should be set to True only when a proxy object is being created
        # on the manager server; primary use case: nested proxy objects.
        # RebuildProxy detects when a proxy is being created on the manager
        # and sets this value appropriately.
        self._owned_by_manager = manager_owned

        if authkey is not None:
            self._authkey = process.AuthenticationString(authkey)
        elif self._manager is not None:
            self._authkey = self._manager._authkey
        else:
            self._authkey = process.current_process().authkey

        if incref:
            self._incref()

        util.register_after_fork(self, BaseProxy._after_fork)

    def _connect(self):
        util.debug('making connection to manager')
        name = process.current_process().name
        if threading.current_thread().name != 'MainThread':
            name += '|' + threading.current_thread().name
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'accept_connection', (name,))
        self._tls.connection = conn

    def _callmethod(self, methodname, args=(), kwds={}):
        '''
        Try to call a method of the referent and return a copy of the result
        '''
        try:
            conn = self._tls.connection
        except AttributeError:
            # First call from this thread: open a dedicated connection.
            util.debug('thread %r does not own a connection',
                       threading.current_thread().name)
            self._connect()
            conn = self._tls.connection

        conn.send((self._id, methodname, args, kwds))
        kind, result = conn.recv()

        if kind == '#RETURN':
            return result
        elif kind == '#PROXY':
            # Server created a nested shared object; build a proxy for it.
            exposed, token = result
            proxytype = self._manager._registry[token.typeid][-1]
            token.address = self._token.address
            proxy = proxytype(
                token, self._serializer, manager=self._manager,
                authkey=self._authkey, exposed=exposed
                )
            conn = self._Client(token.address, authkey=self._authkey)
            dispatch(conn, None, 'decref', (token.id,))
            return proxy
        raise convert_to_error(kind, result)

    def _getvalue(self):
        '''
        Get a copy of the value of the referent
        '''
        return self._callmethod('#GETVALUE')

    def _incref(self):
        if self._owned_by_manager:
            util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
            return

        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'incref', (self._id,))
        util.debug('INCREF %r', self._token.id)

        self._idset.add(self._id)

        state = self._manager and self._manager._state

        # Finalizer sends the matching decref when this proxy is collected.
        self._close = util.Finalize(
            self, BaseProxy._decref,
            args=(self._token, self._authkey, state,
                  self._tls, self._idset, self._Client),
            exitpriority=10
            )

    @staticmethod
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception as e:
                util.debug('... decref failed %s', e)

        else:
            util.debug('DECREF %r -- manager already shutdown', token.id)

        # check whether we can close this thread's connection because
        # the process owns no more references to objects for this manager
        if not idset and hasattr(tls, 'connection'):
            util.debug('thread %r has no more proxies so closing conn',
                       threading.current_thread().name)
            tls.connection.close()
            del tls.connection

    def _after_fork(self):
        self._manager = None
        try:
            self._incref()
        except Exception as e:
            # the proxy may just be for a manager which has shutdown
            util.info('incref failed: %s' % e)

    def __reduce__(self):
        kwds = {}
        if get_spawning_popen() is not None:
            kwds['authkey'] = self._authkey

        if getattr(self, '_isauto', False):
            kwds['exposed'] = self._exposed_
            return (RebuildProxy,
                    (AutoProxy, self._token, self._serializer, kwds))
        else:
            return (RebuildProxy,
                    (type(self), self._token, self._serializer, kwds))

    def __deepcopy__(self, memo):
        return self._getvalue()

    def __repr__(self):
        return '<%s object, typeid %r at %#x>' % \
               (type(self).__name__, self._token.typeid, id(self))

    def __str__(self):
        '''
        Return representation of the referent (or a fall-back if that fails)
        '''
        try:
            return self._callmethod('__repr__')
        except Exception:
            return repr(self)[:-1] + "; '__str__()' failed>"

#
# Function used for unpickling
#

def RebuildProxy(func, token, serializer, kwds):
    '''
    Function used for unpickling proxy objects.
    '''
    server = getattr(process.current_process(), '_manager_server', None)
    if server and server.address == token.address:
        # Unpickling inside the manager server itself: keep a local stash
        # of the referent so the server never decrefs its own object away.
        util.debug('Rebuild a proxy owned by manager, token=%r', token)
        kwds['manager_owned'] = True
        if token.id not in server.id_to_local_proxy_obj:
            server.id_to_local_proxy_obj[token.id] = \
                server.id_to_obj[token.id]
    incref = (
        kwds.pop('incref', True) and
        not getattr(process.current_process(), '_inheriting', False)
        )
    return func(token, serializer, incref=incref, **kwds)

#
# Functions to create proxies and proxy types
#

def MakeProxyType(name, exposed, _cache={}):
    '''
    Return a proxy type whose methods are given by `exposed`
    '''
    exposed = tuple(exposed)
    try:
        return _cache[(name, exposed)]
    except KeyError:
        pass

    # Generate one forwarding method per exposed name.
    dic = {}

    for meth in exposed:
        exec('''def %s(self, *args, **kwds):
        return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)

    ProxyType = type(name, (BaseProxy,), dic)
    ProxyType._exposed_ = exposed
    _cache[(name, exposed)] = ProxyType
    return ProxyType


def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True):
    '''
    Return an auto-proxy for `token`
    '''
    _Client = listener_client[serializer][1]

    if exposed is None:
        # Ask the server which methods the referent exposes.
        conn = _Client(token.address, authkey=authkey)
        try:
            exposed = dispatch(conn, None, 'get_methods', (token,))
        finally:
            conn.close()

    if authkey is None and manager is not None:
        authkey = manager._authkey
    if authkey is None:
        authkey = process.current_process().authkey

    ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
    proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
                      incref=incref)
    proxy._isauto = True
    return proxy

#
# Types/callables which we will register with SyncManager
#

class Namespace(object):
    def __init__(self, **kwds):
        self.__dict__.update(kwds)
    def __repr__(self):
        items = list(self.__dict__.items())
        temp = []
        for name, value in items:
            if not name.startswith('_'):
                temp.append('%s=%r' % (name, value))
        temp.sort()
        return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))

class Value(object):
    # `lock` is accepted for API compatibility but unused here; locking is
    # provided by the manager serializing access through the server.
    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value
    def get(self):
        return self._value
    def set(self, value):
        self._value = value
    def __repr__(self):
        return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
    value = property(get, set)

def Array(typecode, sequence, lock=True):
    # `lock` accepted for API compatibility; see Value above.
    return array.array(typecode, sequence)

#
# Proxy types used by SyncManager
#

class IteratorProxy(BaseProxy):
    _exposed_ = ('__next__', 'send', 'throw', 'close')
    def __iter__(self):
        return self
    def __next__(self, *args):
        return self._callmethod('__next__', args)
    def send(self, *args):
        return self._callmethod('send', args)
    def throw(self, *args):
        return self._callmethod('throw', args)
    def close(self, *args):
        return self._callmethod('close', args)


class AcquirerProxy(BaseProxy):
    _exposed_ = ('acquire', 'release')
    def acquire(self, blocking=True, timeout=None):
        args = (blocking,) if timeout is None else (blocking, timeout)
        return self._callmethod('acquire', args)
    def release(self):
        return self._callmethod('release')
    def __enter__(self):
        return self._callmethod('acquire')
    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._callmethod('release')


class ConditionProxy(AcquirerProxy):
    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def notify(self, n=1):
        return self._callmethod('notify', (n,))
    def notify_all(self):
        return self._callmethod('notify_all')
    def wait_for(self, predicate, timeout=None):
        # Local re-implementation of threading.Condition.wait_for, since
        # the predicate must run in this process, not on the server.
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = time.monotonic() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - time.monotonic()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result


class EventProxy(BaseProxy):
    _exposed_ = ('is_set', 'set', 'clear', 'wait')
    def is_set(self):
        return self._callmethod('is_set')
    def set(self):
        return self._callmethod('set')
    def clear(self):
        return self._callmethod('clear')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))


class BarrierProxy(BaseProxy):
    _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def abort(self):
        return self._callmethod('abort')
    def reset(self):
        return self._callmethod('reset')
    @property
    def parties(self):
        return self._callmethod('__getattribute__', ('parties',))
    @property
    def n_waiting(self):
        return self._callmethod('__getattribute__', ('n_waiting',))
    @property
    def broken(self):
        return self._callmethod('__getattribute__', ('broken',))


class NamespaceProxy(BaseProxy):
    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
    def __getattr__(self, key):
        # Underscore-prefixed attributes are local to the proxy itself.
        if key[0] == '_':
            return object.__getattribute__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__getattribute__', (key,))
    def __setattr__(self, key, value):
        if key[0] == '_':
            return object.__setattr__(self, key, value)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__setattr__', (key, value))
    def __delattr__(self, key):
        if key[0] == '_':
            return object.__delattr__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__delattr__', (key,))


class ValueProxy(BaseProxy):
    _exposed_ = ('get', 'set')
    def get(self):
        return self._callmethod('get')
    def set(self, value):
        return self._callmethod('set', (value,))
    value = property(get, set)


BaseListProxy = MakeProxyType('BaseListProxy', (
    '__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
    '__mul__', '__reversed__', '__rmul__', '__setitem__',
    'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
    'reverse', 'sort', '__imul__'
    ))
class ListProxy(BaseListProxy):
    # In-place operators must return self locally; the server call only
    # mutates the referent.
    def __iadd__(self, value):
        self._callmethod('extend', (value,))
        return self
    def __imul__(self, value):
        self._callmethod('__imul__', (value,))
        return self


DictProxy = MakeProxyType('DictProxy', (
    '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
    '__setitem__', 'clear', 'copy', 'get', 'items',
    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
    ))
DictProxy._method_to_typeid_ = {
    '__iter__': 'Iterator',
    }


ArrayProxy = MakeProxyType('ArrayProxy', (
    '__len__', '__getitem__', '__setitem__'
    ))


BasePoolProxy = MakeProxyType('PoolProxy', (
    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
    'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
    ))
BasePoolProxy._method_to_typeid_ = {
    'apply_async': 'AsyncResult',
    'map_async': 'AsyncResult',
    'starmap_async': 'AsyncResult',
    'imap': 'Iterator',
    'imap_unordered': 'Iterator'
    }
class PoolProxy(BasePoolProxy):
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()

#
# Definition of SyncManager
#

class SyncManager(BaseManager):
    '''
    Subclass of `BaseManager` which supports a number of shared object types.

    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.

    The `multiprocessing.Manager()` function creates started instances of
    this class.
    '''

SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
                     AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)

# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
proxy.py
# coding=utf-8
"""Tiny TCP forwarding proxy.

Listens on ``desc_host:desc_port`` and relays every accepted connection
to ``source_host:source_port`` using one pump thread per direction.
"""
import socket
import threading

source_host = '127.0.0.1'
source_port = 11210
desc_host = '0.0.0.0'
desc_port = 11211


def send(sender, recver):
    """Pump bytes from `sender` to `recver` until EOF or a socket error.

    Closes both endpoints on exit so the pump thread running in the
    opposite direction also terminates.
    """
    while True:
        try:
            data = sender.recv(2048)
        except OSError:
            # Was a bare `except:` with an unreachable print after `break`;
            # catch socket-level errors only and actually report them.
            print("recv error")
            break
        if not data:
            # recv() returning b'' means the peer closed the connection.
            # The original loop kept spinning on empty reads forever.
            break
        try:
            recver.sendall(data)
        except OSError:
            print("send error")
            break
    sender.close()
    recver.close()


def proxy(client):
    """Connect upstream and start one relay thread per direction."""
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.connect((source_host, source_port))
    threading.Thread(target=send, args=(client, server)).start()
    threading.Thread(target=send, args=(server, client)).start()


def main():
    """Accept clients forever, spawning a proxy pipeline for each one."""
    proxy_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    proxy_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    proxy_server.bind((desc_host, desc_port))
    proxy_server.listen(50)
    print("Proxying from %s:%s to %s:%s ..." % (source_host, source_port, desc_host, desc_port))
    # The original accepted a single connection and then fell off the end
    # of main(); loop so every client gets relayed.
    while True:
        conn, addr = proxy_server.accept()
        print("received connect from %s:%s" % (addr[0], addr[1]))
        threading.Thread(target=proxy, args=(conn,)).start()


if __name__ == '__main__':
    main()
test.py
import json import os.path as p import random import socket import threading import time import logging import io import string import ast import avro.schema import avro.io import avro.datafile from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient from confluent_kafka.avro.serializer.message_serializer import MessageSerializer import kafka.errors import pytest from google.protobuf.internal.encoder import _VarintBytes from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager from helpers.test_tools import TSV from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection from kafka.protocol.admin import DescribeGroupsRequest_v1 from kafka.protocol.group import MemberAssignment from kafka.admin import NewTopic # protoc --version # libprotoc 3.0.0 # # to create kafka_pb2.py # protoc --python_out=. kafka.proto from . import kafka_pb2 from . import social_pb2 # TODO: add test for run-time offset update in CH, if we manually update it on Kafka side. # TODO: add test for SELECT LIMIT is working. 
cluster = ClickHouseCluster(__file__) instance = cluster.add_instance('instance', main_configs=['configs/kafka.xml'], with_kafka=True, with_zookeeper=True, # For Replicated Table macros={"kafka_broker":"kafka1", "kafka_topic_old":"old", "kafka_group_name_old":"old", "kafka_topic_new":"new", "kafka_group_name_new":"new", "kafka_client_id":"instance", "kafka_format_json_each_row":"JSONEachRow"}, clickhouse_path_dir='clickhouse_path') def get_kafka_producer(port, serializer, retries): errors = [] for _ in range(retries): try: producer = KafkaProducer(bootstrap_servers="localhost:{}".format(port), value_serializer=serializer) logging.debug("Kafka Connection establised: localhost:{}".format(port)) return producer except Exception as e: errors += [str(e)] time.sleep(1) raise Exception("Connection not establised, {}".format(errors)) def producer_serializer(x): return x.encode() if isinstance(x, str) else x def kafka_create_topic(admin_client, topic_name, num_partitions=1, replication_factor=1, max_retries=50, config=None): logging.debug(f"Kafka create topic={topic_name}, num_partitions={num_partitions}, replication_factor={replication_factor}") topics_list = [NewTopic(name=topic_name, num_partitions=num_partitions, replication_factor=replication_factor, topic_configs=config)] retries = 0 while True: try: admin_client.create_topics(new_topics=topics_list, validate_only=False) logging.debug("Admin client succeed") return except Exception as e: retries += 1 time.sleep(0.5) if retries < max_retries: logging.warning(f"Failed to create topic {e}") else: raise def kafka_delete_topic(admin_client, topic, max_retries=50): result = admin_client.delete_topics([topic]) for (topic, e) in result.topic_error_codes: if e == 0: logging.debug(f"Topic {topic} deleted") else: logging.error(f"Failed to delete topic {topic}: {e}") retries = 0 while True: topics_listed = admin_client.list_topics() logging.debug(f"TOPICS LISTED: {topics_listed}") if topic not in topics_listed: return else: 
retries += 1 time.sleep(0.5) if retries > max_retries: raise Exception(f"Failed to delete topics {topic}, {result}") def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15): logging.debug("kafka_produce server:{}:{} topic:{}".format("localhost", kafka_cluster.kafka_port, topic)) producer = get_kafka_producer(kafka_cluster.kafka_port, producer_serializer, retries) for message in messages: producer.send(topic=topic, value=message, timestamp_ms=timestamp) producer.flush() ## just to ensure the python client / producer is working properly def kafka_producer_send_heartbeat_msg(max_retries=50): kafka_produce(kafka_cluster, 'test_heartbeat_topic', ['test'], retries=max_retries) def kafka_consume(kafka_cluster, topic): consumer = KafkaConsumer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), auto_offset_reset="earliest") consumer.subscribe(topics=(topic)) for toppar, messages in list(consumer.poll(5000).items()): if toppar.topic == topic: for message in messages: yield message.value.decode() consumer.unsubscribe() consumer.close() def kafka_produce_protobuf_messages(kafka_cluster, topic, start_index, num_messages): data = b'' for i in range(start_index, start_index + num_messages): msg = kafka_pb2.KeyValuePair() msg.key = i msg.value = str(i) serialized_msg = msg.SerializeToString() data = data + _VarintBytes(len(serialized_msg)) + serialized_msg producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer) producer.send(topic=topic, value=data) producer.flush() logging.debug(("Produced {} messages for topic {}".format(num_messages, topic))) def kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, topic, start_index, num_messages): data = '' producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) for i in range(start_index, start_index + num_messages): msg = kafka_pb2.KeyValuePair() msg.key = i msg.value = str(i) 
serialized_msg = msg.SerializeToString() producer.send(topic=topic, value=serialized_msg) producer.flush() logging.debug("Produced {} messages for topic {}".format(num_messages, topic)) def kafka_produce_protobuf_social(kafka_cluster,topic, start_index, num_messages): data = b'' for i in range(start_index, start_index + num_messages): msg = social_pb2.User() msg.username='John Doe {}'.format(i) msg.timestamp=1000000+i serialized_msg = msg.SerializeToString() data = data + _VarintBytes(len(serialized_msg)) + serialized_msg producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer) producer.send(topic=topic, value=data) producer.flush() logging.debug(("Produced {} messages for topic {}".format(num_messages, topic))) def avro_message(value): schema = avro.schema.make_avsc_object({ 'name': 'row', 'type': 'record', 'fields': [ {'name': 'id', 'type': 'long'}, {'name': 'blockNo', 'type': 'int'}, {'name': 'val1', 'type': 'string'}, {'name': 'val2', 'type': 'float'}, {'name': 'val3', 'type': 'int'} ] }) bytes_writer = io.BytesIO() # writer = avro.io.DatumWriter(schema) # encoder = avro.io.BinaryEncoder(bytes_writer) # writer.write(value, encoder) # DataFileWrite seems to be mandatory to get schema encoded writer = avro.datafile.DataFileWriter(bytes_writer, avro.io.DatumWriter(), schema) if isinstance(value, list): for v in value: writer.append(v) else: writer.append(value) writer.flush() raw_bytes = bytes_writer.getvalue() writer.close() bytes_writer.close() return raw_bytes def avro_confluent_message(schema_registry_client, value): # type: (CachedSchemaRegistryClient, dict) -> str serializer = MessageSerializer(schema_registry_client) schema = avro.schema.make_avsc_object({ 'name': 'row', 'type': 'record', 'fields': [ {'name': 'id', 'type': 'long'}, {'name': 'blockNo', 'type': 'int'}, {'name': 'val1', 'type': 'string'}, {'name': 'val2', 'type': 'float'}, {'name': 'val3', 'type': 'int'} ] }) return 
serializer.encode_record_with_schema('test_subject', schema, value) # Tests def test_kafka_settings_old_syntax(kafka_cluster): assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro", ignore_error=True)) == TSV('''kafka_broker kafka1 kafka_client_id instance kafka_format_json_each_row JSONEachRow kafka_group_name_new new kafka_group_name_old old kafka_topic_new new kafka_topic_old old ''') instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n'); ''') # Don't insert malformed messages since old settings syntax # doesn't support skipping of broken messages. messages = [] for i in range(50): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce(kafka_cluster, 'old', messages) result = '' while True: result += instance.query('SELECT * FROM test.kafka', ignore_error=True) if kafka_check_result(result): break kafka_check_result(result, True) members = describe_consumer_group(kafka_cluster, 'old') assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka' # text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose")) def test_kafka_settings_new_syntax(kafka_cluster): instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = '{kafka_broker}:19092', kafka_topic_list = '{kafka_topic_new}', kafka_group_name = '{kafka_group_name_new}', kafka_format = '{kafka_format_json_each_row}', kafka_row_delimiter = '\\n', kafka_client_id = '{kafka_client_id} test 1234', kafka_skip_broken_messages = 1; ''') messages = [] for i in range(25): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce(kafka_cluster, 'new', messages) # Insert couple of malformed messages. 
kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,']) kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,']) messages = [] for i in range(25, 50): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce(kafka_cluster, 'new', messages) result = '' while True: result += instance.query('SELECT * FROM test.kafka', ignore_error=True) if kafka_check_result(result): break kafka_check_result(result, True) members = describe_consumer_group(kafka_cluster, 'new') assert members[0]['client_id'] == 'instance test 1234' def test_kafka_json_as_string(kafka_cluster): kafka_produce(kafka_cluster, 'kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }', '{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}']) # 'tombstone' record (null value) = marker of deleted record producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer) producer.send(topic='kafka_json_as_string', key='xxx') producer.flush() instance.query(''' CREATE TABLE test.kafka (field String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'kafka_json_as_string', kafka_group_name = 'kafka_json_as_string', kafka_format = 'JSONAsString', kafka_flush_interval_ms=1000; ''') result = instance.query('SELECT * FROM test.kafka;') expected = '''\ {"t": 123, "e": {"x": "woof"} } {"t": 124, "e": {"x": "test"} } {"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"} ''' assert TSV(result) == TSV(expected) assert instance.contains_in_log( "Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows") def test_kafka_formats(kafka_cluster): schema_registry_client = CachedSchemaRegistryClient('http://localhost:{}'.format(kafka_cluster.schema_registry_port)) admin_client = 
KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) # data was dumped from clickhouse itself in a following manner # clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g' all_formats = { ## Text formats ## # dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;' 'JSONEachRow': { 'data_sample': [ '{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', '{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', '{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', ], 'supports_empty_value': True, }, # JSONAsString doesn't fit to that test, and tested separately 'JSONCompactEachRow': { 'data_sample': [ '["0", 0, "AM", 0.5, 1]\n', '["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 
1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n', '["0", 0, "AM", 0.5, 1]\n', ], 'supports_empty_value': True, }, 'JSONCompactEachRowWithNamesAndTypes': { 'data_sample': [ '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n', '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n', '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n', # '' # On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below): # /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse # /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse # /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse ], }, 'TSKV': { 'data_sample': [ 'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', 
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', 'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', # '' # On empty message exception: Unexpected end of stream while reading key name from TSKV format # /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse # /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse # /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse ], }, 'CSV': { 'data_sample': [ '0,0,"AM",0.5,1\n', '1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n', '0,0,"AM",0.5,1\n', ], 'supports_empty_value': True, }, 'TSV': { 'data_sample': [ '0\t0\tAM\t0.5\t1\n', 
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', '0\t0\tAM\t0.5\t1\n', ], 'supports_empty_value': True, }, 'CSVWithNames': { 'data_sample': [ '"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n', '"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n', '"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n', # '', # On empty message exception happens: Attempt to read after eof # /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse # /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse # /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse # /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse # /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse # /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse ], }, 'Values': { 'data_sample': [ "(0,0,'AM',0.5,1)", 
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)", "(0,0,'AM',0.5,1)", ], 'supports_empty_value': True, }, 'TSVWithNames': { 'data_sample': [ 'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n', 'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', 'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n', ], 'supports_empty_value': True, }, 'TSVWithNamesAndTypes': { 'data_sample': [ 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n', 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n', # '', # On empty message exception happens: Cannot parse input: expected '\n' at end of stream. 
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse # /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse # /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse # /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse # /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse ], }, # 'Template' : { # 'data_sample' : [ # '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', # # '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', # # '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', # # '' # tolerates # ], # 'extra_settings': ", format_template_row='template_row.format'" # }, 'Regexp': { 'data_sample': [ '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 
3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', # '' # On empty message exception happens: Line "" doesn't match the regexp.: (at row 1) # /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse ], 'extra_settings': r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'" }, ## BINARY FORMATS # dumped with # clickhouse-client ... 
| xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g' 'Native': { 'data_sample': [ b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01', b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01', 
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01', # '' # On empty message exception happens: DB::Exception: Attempt to read after eof # /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse # /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse # /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse # /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse # /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse # /src/Processors/Formats/Impl/NativeFormat.h:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse # /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse ], }, 'MsgPack': { 'data_sample': [ b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01', b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01', b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01', 
# '' # On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1) # coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170 ], }, 'RowBinary': { 'data_sample': [ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', # '' # On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8. 
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse # /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse # /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse # /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse # /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse ], }, 'RowBinaryWithNamesAndTypes': { 'data_sample': [ b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', 
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', # '' # !!! 
On empty message segfault: Address not mapped to object # /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse # /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse # /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse # /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse # /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse # /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse ], }, 'Protobuf': { 'data_sample': [ b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01', b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01', b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01', # '' # On empty message exception: Attempt to read after eof # /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 
0x15c9699b in /usr/bin/clickhouse # /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse # /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse # /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse # /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse ], 'extra_settings': ", kafka_schema='test:TestMessage'" }, 'ORC': { 'data_sample': [ b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98
\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6
d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', 
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x
1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', # '' # On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below): # /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse # /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse ], }, 'CapnProto': { 'data_sample': [ b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x0
1\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00', # '' # On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4. 
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse # /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse # /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse ], 'extra_settings': ", kafka_schema='test:TestRecordStruct'" }, 'Parquet' : { 'data_sample': [ b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2
\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02
\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31', b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x0
4\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x7
6\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31', 
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x
3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x
1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31', ], }, 'AvroConfluent': { 'data_sample': [ avro_confluent_message(schema_registry_client, {'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}), b''.join([avro_confluent_message(schema_registry_client, {'id': id, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}) for id in range(1, 16)]), avro_confluent_message(schema_registry_client, {'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}), ], 'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format( kafka_cluster.schema_registry_host, 8081 ), 'supports_empty_value': True, }, 'Avro': { # It seems impossible to send more than one avro file per a message # because of nature of Avro: blocks go one after another 'data_sample': [ avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}), avro_message([{'id': id, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1} for id in range(1, 16)]), avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}), ], 'supports_empty_value': False, }, 'Arrow' : { 'data_sample' : [ 
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x
00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31', b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\
x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\
x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\
x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31', b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08
\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31', ], }, 'ArrowStream' : { 'data_sample' : [ b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\
x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00', 
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x
00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00', b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\
x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00', ], }, } for format_name, format_opts in list(all_formats.items()): logging.debug(('Set up {}'.format(format_name))) topic_name = 'format_tests_{}'.format(format_name) data_sample = format_opts['data_sample'] data_prefix = [] # prepend empty value when supported if format_opts.get('supports_empty_value', False): data_prefix = data_prefix + [''] kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample) instance.query(''' DROP TABLE IF EXISTS test.kafka_{format_name}; CREATE TABLE test.kafka_{format_name} ( id Int64, blockNo UInt16, val1 String, val2 Float32, val3 UInt8 ) ENGINE = Kafka() SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = '{topic_name}', kafka_group_name = '{topic_name}_group', kafka_format = '{format_name}', kafka_flush_interval_ms = 1000 {extra_settings}; DROP TABLE IF EXISTS test.kafka_{format_name}_mv; CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv 
Engine=Log AS SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name}; '''.format(topic_name=topic_name, format_name=format_name, extra_settings=format_opts.get('extra_settings') or '')) instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*format_tests_', repetitions=len(all_formats.keys()), look_behind_lines=12000) for format_name, format_opts in list(all_formats.items()): logging.debug(('Checking {}'.format(format_name))) topic_name = f'format_tests_{format_name}' # shift offsets by 1 if format supports empty value offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2] result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name)) expected = '''\ 0 0 AM 0.5 1 {topic_name} 0 {offset_0} 1 0 AM 0.5 1 {topic_name} 0 {offset_1} 2 0 AM 0.5 1 {topic_name} 0 {offset_1} 3 0 AM 0.5 1 {topic_name} 0 {offset_1} 4 0 AM 0.5 1 {topic_name} 0 {offset_1} 5 0 AM 0.5 1 {topic_name} 0 {offset_1} 6 0 AM 0.5 1 {topic_name} 0 {offset_1} 7 0 AM 0.5 1 {topic_name} 0 {offset_1} 8 0 AM 0.5 1 {topic_name} 0 {offset_1} 9 0 AM 0.5 1 {topic_name} 0 {offset_1} 10 0 AM 0.5 1 {topic_name} 0 {offset_1} 11 0 AM 0.5 1 {topic_name} 0 {offset_1} 12 0 AM 0.5 1 {topic_name} 0 {offset_1} 13 0 AM 0.5 1 {topic_name} 0 {offset_1} 14 0 AM 0.5 1 {topic_name} 0 {offset_1} 15 0 AM 0.5 1 {topic_name} 0 {offset_1} 0 0 AM 0.5 1 {topic_name} 0 {offset_2} '''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2]) assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name) kafka_delete_topic(admin_client, topic_name) # Since everything is async and shaky when receiving messages from Kafka, # we may want to try and check results multiple times in a loop. 
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
    """Compare accumulated query output against the on-disk reference file.

    With check=True, assert equality (hard test failure on mismatch);
    otherwise return a bool so callers can poll in a loop until all
    asynchronously-consumed Kafka messages have arrived.
    """
    fpath = p.join(p.dirname(__file__), ref_file)
    with open(fpath) as reference:
        if check:
            assert TSV(result) == TSV(reference)
        else:
            return TSV(result) == TSV(reference)


# https://stackoverflow.com/a/57692111/1555175
def describe_consumer_group(kafka_cluster, name):
    """Return member/assignment details for the consumer group ``name``.

    Talks to the broker directly over a raw BrokerConnection and decodes the
    DescribeGroups response into a list of dicts, one per group member:
    {'member_id', 'client_id', 'client_host', 'assignment': [{'topic', 'partitions'}]}.
    """
    client = BrokerConnection('localhost', kafka_cluster.kafka_port, socket.AF_INET)
    client.connect_blocking()
    list_members_in_groups = DescribeGroupsRequest_v1(groups=[name])
    future = client.send(list_members_in_groups)
    # Pump the connection until the broker answers; recv() hands each
    # response to its pending future.
    while not future.is_done:
        for resp, f in client.recv():
            f.success(resp)
    (error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0]
    res = []
    for member in members:
        (member_id, client_id, client_host, member_metadata, member_assignment) = member
        member_info = {}
        member_info['member_id'] = member_id
        member_info['client_id'] = client_id
        member_info['client_host'] = client_host
        member_topics_assignment = []
        for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment:
            member_topics_assignment.append({'topic': topic, 'partitions': partitions})
        member_info['assignment'] = member_topics_assignment
        res.append(member_info)
    return res


# Fixtures

@pytest.fixture(scope="module")
def kafka_cluster():
    """Start the docker-compose cluster once per module; always tear it down."""
    try:
        cluster.start()
        kafka_id = instance.cluster.kafka_docker_id
        print(("kafka_id is {}".format(kafka_id)))
        yield cluster
    finally:
        cluster.shutdown()


@pytest.fixture(autouse=True)
def kafka_setup_teardown():
    """Give every test a fresh, empty `test` database."""
    instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
    # logging.debug("kafka is available - running test")
    yield  # run test


# Tests

def test_kafka_issue11308(kafka_cluster):
    # Check that matview does respect Kafka SETTINGS
    kafka_produce(kafka_cluster, 'issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }',
                                                '{"t": 124, "e": {"x": "test"} }'])

    instance.query('''
        CREATE TABLE test.persistent_kafka (
            time UInt64,
            some_string String
        )
        ENGINE = MergeTree()
        ORDER BY time;

        CREATE TABLE test.kafka (t UInt64, `e.x` String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'issue11308',
                     kafka_group_name = 'issue11308',
                     kafka_format = 'JSONEachRow',
                     kafka_row_delimiter = '\\n',
                     kafka_flush_interval_ms=1000,
                     input_format_import_nested_json = 1;

        CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
        SELECT
            `t` AS `time`,
            `e.x` AS `some_string`
        FROM test.kafka;
        ''')

    # Consumption is asynchronous — poll until all three rows landed.
    while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
        time.sleep(1)

    result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')

    instance.query('''
        DROP TABLE test.persistent_kafka;
        DROP TABLE test.persistent_kafka_mv;
    ''')

    expected = '''\
123	woof
123	woof
124	test
'''
    assert TSV(result) == TSV(expected)


def test_kafka_issue4116(kafka_cluster):
    # Check that format_csv_delimiter parameter works now - as part of all available format settings.
    kafka_produce(kafka_cluster, 'issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message'])

    instance.query('''
        CREATE TABLE test.kafka (a UInt64, b String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'issue4116',
                     kafka_group_name = 'issue4116',
                     kafka_format = 'CSV',
                     kafka_row_delimiter = '\\n',
                     format_csv_delimiter = '|';
        ''')

    result = instance.query('SELECT * FROM test.kafka ORDER BY a;')

    expected = '''\
1	foo
2	bar
42	answer
100	multi
101	row
103	message
'''
    assert TSV(result) == TSV(expected)


def test_kafka_consumer_hang(kafka_cluster):
    """Regression test: DROP TABLE must not hang after a broker pause/rebalance."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))

    topic_name = "consumer_hang"
    kafka_create_topic(admin_client, topic_name, num_partitions=8)

    instance.query(f'''
        DROP TABLE IF EXISTS test.kafka;
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;

        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = '{topic_name}',
                     kafka_group_name = '{topic_name}',
                     kafka_format = 'JSONEachRow',
                     kafka_num_consumers = 8;
        CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
        ''')

    instance.wait_for_log_line('kafka.*Stalled', repetitions=20)

    # This should trigger heartbeat fail,
    # which will trigger REBALANCE_IN_PROGRESS,
    # and which can lead to consumer hang.
    kafka_cluster.pause_container('kafka1')
    instance.wait_for_log_line('heartbeat error')
    kafka_cluster.unpause_container('kafka1')

    # logging.debug("Attempt to drop")
    instance.query('DROP TABLE test.kafka')

    # kafka_cluster.open_bash_shell('instance')

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    # original problem appearance was a sequence of the following messages in librdkafka logs:
    # BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
    # so it was waiting forever while the application will execute queued rebalance callback

    # from a user perspective: we expect no hanging 'drop' queries
    # 'dr'||'op' to avoid self matching
    assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0

    # cleanup unread messages so kafka will not wait reading consumers to delete topic
    instance.query(f'''
            CREATE TABLE test.kafka (key UInt64)
                ENGINE = Kafka
                SETTINGS kafka_broker_list = 'kafka1:19092',
                         kafka_topic_list = '{topic_name}',
                         kafka_group_name = '{topic_name}',
                         kafka_format = 'JSONEachRow',
                         kafka_num_consumers = 8;
        ''')

    num_read = int(instance.query('SELECT count() FROM test.kafka'))
    logging.debug(f"read {num_read} from {topic_name} before delete")
    instance.query('DROP TABLE test.kafka')
    kafka_delete_topic(admin_client, topic_name)


def test_kafka_consumer_hang2(kafka_cluster):
    """Regression test: DROP TABLE must not hang with a pending rebalance callback."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))

    topic_name = "consumer_hang2"
    kafka_create_topic(admin_client, topic_name)

    instance.query('''
        DROP TABLE IF EXISTS test.kafka;

        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'consumer_hang2',
                     kafka_group_name = 'consumer_hang2',
                     kafka_format = 'JSONEachRow';

        CREATE TABLE test.kafka2 (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'consumer_hang2',
                     kafka_group_name = 'consumer_hang2',
                     kafka_format = 'JSONEachRow';
        ''')

    # first consumer subscribe the topic, try to poll some data, and go to rest
    instance.query('SELECT * FROM test.kafka')

    # second consumer do the same leading to rebalance in the first
    # consumer, try to poll some data
    instance.query('SELECT * FROM test.kafka2')

    # echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
    # kafka_cluster.open_bash_shell('instance')

    # first consumer has pending rebalance callback unprocessed (no poll after select)
    # one of those queries was failing because of
    # https://github.com/edenhill/librdkafka/issues/2077
    # https://github.com/edenhill/librdkafka/issues/2898
    instance.query('DROP TABLE test.kafka')
    instance.query('DROP TABLE test.kafka2')

    # from a user perspective: we expect no hanging 'drop' queries
    # 'dr'||'op' to avoid self matching
    assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
    kafka_delete_topic(admin_client, topic_name)


def test_kafka_csv_with_delimiter(kafka_cluster):
    """Consume 50 CSV-formatted messages and check against the reference file."""
    messages = []
    for i in range(50):
        messages.append('{i}, {i}'.format(i=i))
    kafka_produce(kafka_cluster, 'csv', messages)

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'csv',
                     kafka_group_name = 'csv',
                     kafka_format = 'CSV';
        ''')

    # Poll until the full expected result has been consumed.
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break

    kafka_check_result(result, True)


def test_kafka_tsv_with_delimiter(kafka_cluster):
    """Consume 50 TSV-formatted messages and check against the reference file."""
    messages = []
    for i in range(50):
        messages.append('{i}\t{i}'.format(i=i))
    kafka_produce(kafka_cluster, 'tsv', messages)

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'tsv',
                     kafka_group_name = 'tsv',
                     kafka_format = 'TSV';
        ''')

    # Poll until the full expected result has been consumed.
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break

    kafka_check_result(result, True)


def test_kafka_select_empty(kafka_cluster):
    """SELECT from an empty topic must return zero rows (and not hang)."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_name = "empty"
    kafka_create_topic(admin_client, topic_name)

    instance.query(f'''
        CREATE TABLE test.kafka (key UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = '{topic_name}',
                     kafka_group_name = '{topic_name}',
                     kafka_format = 'TSV',
                     kafka_row_delimiter = '\\n';
        ''')

    assert int(instance.query('SELECT count() FROM test.kafka')) == 0
    kafka_delete_topic(admin_client, topic_name)


def test_kafka_json_without_delimiter(kafka_cluster):
    """Multiple JSONEachRow rows packed into single Kafka messages must parse."""
    messages = ''
    for i in range(25):
        messages += json.dumps({'key': i, 'value': i}) + '\n'
    kafka_produce(kafka_cluster, 'json', [messages])

    messages = ''
    for i in range(25, 50):
        messages += json.dumps({'key': i, 'value': i}) + '\n'
    kafka_produce(kafka_cluster, 'json', [messages])

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'json',
                     kafka_group_name = 'json',
                     kafka_format = 'JSONEachRow';
        ''')

    # Poll until the full expected result has been consumed.
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break

    kafka_check_result(result, True)


def test_kafka_protobuf(kafka_cluster):
    """Consume length-delimited Protobuf messages (3 batches: 20 + 1 + 29)."""
    kafka_produce_protobuf_messages(kafka_cluster, 'pb', 0, 20)
    kafka_produce_protobuf_messages(kafka_cluster, 'pb', 20, 1)
    kafka_produce_protobuf_messages(kafka_cluster, 'pb', 21, 29)

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'pb',
                     kafka_group_name = 'pb',
                     kafka_format = 'Protobuf',
                     kafka_schema = 'kafka.proto:KeyValuePair';
        ''')

    # Poll until the full expected result has been consumed.
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break

    kafka_check_result(result, True)


def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster):
    # https://github.com/ClickHouse/ClickHouse/issues/12615
    kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 0, 20)
    kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 20, 1)
    kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 21, 29)

    instance.query('''
        CREATE TABLE test.kafka (
            username String,
            timestamp Int32
          ) ENGINE = Kafka()
        SETTINGS
            kafka_broker_list = 'kafka1:19092',
            kafka_topic_list = 'string_field_on_first_position_in_protobuf',
            kafka_group_name = 'string_field_on_first_position_in_protobuf',
            kafka_format = 'Protobuf',
            kafka_schema = 'social:User';
        ''')

    result = instance.query('SELECT * FROM test.kafka', ignore_error=True)
    expected = '''\
John Doe 0	1000000
John Doe 1	1000001
John Doe 2	1000002
John Doe 3	1000003
John Doe 4	1000004
John Doe 5	1000005
John Doe 6	1000006
John Doe 7	1000007
John Doe 8	1000008
John Doe 9	1000009
John Doe 10	1000010
John Doe 11	1000011
John Doe 12	1000012
John Doe 13	1000013
John Doe 14	1000014
John Doe 15	1000015
John Doe 16	1000016
John Doe 17	1000017
John Doe 18	1000018
John Doe 19	1000019
John Doe 20	1000020
John Doe 21	1000021
John Doe 22	1000022
John Doe 23	1000023
John Doe 24	1000024
John Doe 25	1000025
John Doe 26	1000026
John Doe 27	1000027
John Doe 28	1000028
John Doe 29	1000029
John Doe 30	1000030
John Doe 31	1000031
John Doe 32	1000032
John Doe 33	1000033
John Doe 34	1000034
John Doe 35	1000035
John Doe 36	1000036
John Doe 37	1000037
John Doe 38	1000038
John Doe 39	1000039
John Doe 40	1000040
John Doe 41	1000041
John Doe 42	1000042
John Doe 43	1000043
John Doe 44	1000044
John Doe 45	1000045
John Doe 46	1000046
John Doe 47	1000047
John Doe 48	1000048
John Doe 49	1000049
'''
    assert TSV(result) == TSV(expected)


def test_kafka_protobuf_no_delimiter(kafka_cluster):
    """ProtobufSingle: one undelimited message per Kafka record, read and write paths."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'pb_no_delimiter',
                     kafka_group_name = 'pb_no_delimiter',
                     kafka_format = 'ProtobufSingle',
                     kafka_schema = 'kafka.proto:KeyValuePair';
        ''')

    kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 0, 20)
    kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 20, 1)
    kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 21, 29)

    # Poll until the full expected result has been consumed.
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break

    kafka_check_result(result, True)

    instance.query('''
    CREATE TABLE test.kafka_writer (key UInt64, value String)
        ENGINE = Kafka
        SETTINGS kafka_broker_list = 'kafka1:19092',
                 kafka_topic_list = 'pb_no_delimiter',
                 kafka_group_name = 'pb_no_delimiter',
                 kafka_format = 'ProtobufSingle',
                 kafka_schema = 'kafka.proto:KeyValuePair';
    ''')

    instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')")

    time.sleep(1)

    result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True)

    expected = '''\
13	Friday
42	Answer to the Ultimate Question of Life, the Universe, and Everything
110	just a number
'''
    assert TSV(result) == TSV(expected)


def test_kafka_materialized_view(kafka_cluster):
    instance.query('''
        DROP TABLE IF EXISTS
test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'mv', kafka_group_name = 'mv', kafka_format = 'JSONEachRow', kafka_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; ''') messages = [] for i in range(50): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce(kafka_cluster, 'mv', messages) while True: result = instance.query('SELECT * FROM test.view') if kafka_check_result(result): break instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') kafka_check_result(result, True) def test_kafka_recreate_kafka_table(kafka_cluster): ''' Checks that materialized view work properly after dropping and recreating the Kafka table. ''' # line for backporting: # admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092") admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) topic_name = "recreate_kafka_table" kafka_create_topic(admin_client, topic_name, num_partitions=6) instance.query(''' DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'recreate_kafka_table', kafka_group_name = 'recreate_kafka_table_group', kafka_format = 'JSONEachRow', kafka_num_consumers = 6, kafka_flush_interval_ms = 1000, kafka_skip_broken_messages = 1048577; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; ''') messages = [] for i in range(120): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce(kafka_cluster,'recreate_kafka_table', messages) instance.wait_for_log_line('kafka.*Committed 
offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100) instance.query(''' DROP TABLE test.kafka; ''') kafka_produce(kafka_cluster,'recreate_kafka_table', messages) instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'recreate_kafka_table', kafka_group_name = 'recreate_kafka_table_group', kafka_format = 'JSONEachRow', kafka_num_consumers = 6, kafka_flush_interval_ms = 1000, kafka_skip_broken_messages = 1048577; ''') instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100) # data was not flushed yet (it will be flushed 7.5 sec after creating MV) assert int(instance.query("SELECT count() FROM test.view")) == 240 instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') kafka_delete_topic(admin_client, topic_name) def test_librdkafka_compression(kafka_cluster): """ Regression for UB in snappy-c (that is used in librdkafka), backport pr is [1]. [1]: https://github.com/ClickHouse-Extras/librdkafka/pull/3 Example of corruption: 2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27. 
DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1) To trigger this regression there should duplicated messages Orignal reproducer is: $ gcc --version |& fgrep gcc gcc (GCC) 10.2.0 $ yes foobarbaz | fold -w 80 | head -n10 >| in-… $ make clean && make CFLAGS='-Wall -g -O2 -ftree-loop-vectorize -DNDEBUG=1 -DSG=1 -fPIC' $ ./verify in final comparision of in failed at 20 of 100 """ supported_compression_types = ['gzip', 'snappy', 'lz4', 'zstd', 'uncompressed'] messages = [] expected = [] value = 'foobarbaz'*10 number_of_messages = 50 for i in range(number_of_messages): messages.append(json.dumps({'key': i, 'value': value})) expected.append(f'{i}\t{value}') expected = '\n'.join(expected) for compression_type in supported_compression_types: logging.debug(('Check compression {}'.format(compression_type))) topic_name = 'test_librdkafka_compression_{}'.format(compression_type) admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) kafka_create_topic(admin_client, topic_name, config={'compression.type': compression_type}) instance.query(''' CREATE TABLE test.kafka (key UInt64, value String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = '{topic_name}', kafka_group_name = '{topic_name}_group', kafka_format = 'JSONEachRow', kafka_flush_interval_ms = 1000; CREATE MATERIALIZED VIEW test.consumer Engine=Log AS SELECT * FROM test.kafka; '''.format(topic_name=topic_name) ) kafka_produce(kafka_cluster, topic_name, messages) instance.wait_for_log_line("Committed offset {}".format(number_of_messages)) result = instance.query('SELECT * FROM test.consumer') assert TSV(result) == TSV(expected) instance.query('DROP TABLE test.kafka SYNC') instance.query('DROP TABLE test.consumer SYNC') kafka_delete_topic(admin_client, topic_name) def test_kafka_materialized_view_with_subquery(kafka_cluster): instance.query(''' DROP TABLE IF EXISTS test.view; DROP 
TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'mvsq', kafka_group_name = 'mvsq', kafka_format = 'JSONEachRow', kafka_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM (SELECT * FROM test.kafka); ''') messages = [] for i in range(50): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce(kafka_cluster, 'mvsq', messages) while True: result = instance.query('SELECT * FROM test.view') if kafka_check_result(result): break instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') kafka_check_result(result, True) def test_kafka_many_materialized_views(kafka_cluster): instance.query(''' DROP TABLE IF EXISTS test.view1; DROP TABLE IF EXISTS test.view2; DROP TABLE IF EXISTS test.consumer1; DROP TABLE IF EXISTS test.consumer2; CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'mmv', kafka_group_name = 'mmv', kafka_format = 'JSONEachRow', kafka_row_delimiter = '\\n'; CREATE TABLE test.view1 (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; CREATE TABLE test.view2 (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS SELECT * FROM test.kafka; CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS SELECT * FROM test.kafka; ''') messages = [] for i in range(50): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce(kafka_cluster, 'mmv', messages) while True: result1 = instance.query('SELECT * FROM test.view1') result2 = instance.query('SELECT * FROM test.view2') if kafka_check_result(result1) and kafka_check_result(result2): break instance.query(''' DROP TABLE test.consumer1; DROP TABLE test.consumer2; DROP TABLE test.view1; DROP TABLE test.view2; 
''') kafka_check_result(result1, True) kafka_check_result(result2, True) def test_kafka_flush_on_big_message(kafka_cluster): # Create batchs of messages of size ~100Kb kafka_messages = 1000 batch_messages = 1000 messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)] kafka_produce(kafka_cluster, 'flush', messages) instance.query(''' DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'flush', kafka_group_name = 'flush', kafka_format = 'JSONEachRow', kafka_max_block_size = 10; CREATE TABLE test.view (key UInt64, value String) ENGINE = MergeTree ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; ''') client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) received = False while not received: try: offsets = client.list_consumer_group_offsets('flush') for topic, offset in list(offsets.items()): if topic.topic == 'flush' and offset.offset == kafka_messages: received = True break except kafka.errors.GroupCoordinatorNotAvailableError: continue while True: result = instance.query('SELECT count() FROM test.view') if int(result) == kafka_messages * batch_messages: break instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result) def test_kafka_virtual_columns(kafka_cluster): instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'virt1', kafka_group_name = 'virt1', kafka_format = 'JSONEachRow'; ''') messages = '' for i in range(25): messages += json.dumps({'key': i, 'value': i}) + '\n' kafka_produce(kafka_cluster, 'virt1', [messages], 0) messages = '' for i in range(25, 50): messages += 
json.dumps({'key': i, 'value': i}) + '\n' kafka_produce(kafka_cluster, 'virt1', [messages], 0) result = '' while True: result += instance.query( '''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''', ignore_error=True) if kafka_check_result(result, False, 'test_kafka_virtual1.reference'): break kafka_check_result(result, True, 'test_kafka_virtual1.reference') def test_kafka_virtual_columns_with_materialized_view(kafka_cluster): instance.query(''' DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'virt2', kafka_group_name = 'virt2', kafka_format = 'JSONEachRow', kafka_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC'))) ENGINE = MergeTree() ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? 
'0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka; ''') messages = [] for i in range(50): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce(kafka_cluster, 'virt2', messages, 0) sql = 'SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key' result = instance.query(sql) iterations = 0 while not kafka_check_result(result, False, 'test_kafka_virtual2.reference') and iterations < 10: time.sleep(3) iterations += 1 result = instance.query(sql) kafka_check_result(result, True, 'test_kafka_virtual2.reference') instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') def test_kafka_insert(kafka_cluster): instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'insert1', kafka_group_name = 'insert1', kafka_format = 'TSV', kafka_row_delimiter = '\\n'; ''') values = [] for i in range(50): values.append("({i}, {i})".format(i=i)) values = ','.join(values) while True: try: instance.query("INSERT INTO test.kafka VALUES {}".format(values)) break except QueryRuntimeException as e: if 'Local: Timed out.' 
in str(e): continue else: raise messages = [] while True: messages.extend(kafka_consume(kafka_cluster, 'insert1')) if len(messages) == 50: break result = '\n'.join(messages) kafka_check_result(result, True) def test_kafka_produce_consume(kafka_cluster): instance.query(''' DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'insert2', kafka_group_name = 'insert2', kafka_format = 'TSV', kafka_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; ''') messages_num = 10000 def insert(): values = [] for i in range(messages_num): values.append("({i}, {i})".format(i=i)) values = ','.join(values) while True: try: instance.query("INSERT INTO test.kafka VALUES {}".format(values)) break except QueryRuntimeException as e: if 'Local: Timed out.' 
in str(e): continue else: raise threads = [] threads_num = 16 for _ in range(threads_num): threads.append(threading.Thread(target=insert)) for thread in threads: time.sleep(random.uniform(0, 1)) thread.start() while True: result = instance.query('SELECT count() FROM test.view') time.sleep(1) if int(result) == messages_num * threads_num: break instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') for thread in threads: thread.join() assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result) def test_kafka_commit_on_block_write(kafka_cluster): instance.query(''' DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'block', kafka_group_name = 'block', kafka_format = 'JSONEachRow', kafka_max_block_size = 100, kafka_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; ''') cancel = threading.Event() i = [0] def produce(): while not cancel.is_set(): messages = [] for _ in range(101): messages.append(json.dumps({'key': i[0], 'value': i[0]})) i[0] += 1 kafka_produce(kafka_cluster, 'block', messages) kafka_thread = threading.Thread(target=produce) kafka_thread.start() while int(instance.query('SELECT count() FROM test.view')) == 0: time.sleep(1) cancel.set() instance.query(''' DROP TABLE test.kafka; ''') instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'block', kafka_group_name = 'block', kafka_format = 'JSONEachRow', kafka_max_block_size = 100, kafka_row_delimiter = '\\n'; ''') while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]: time.sleep(1) result = int(instance.query('SELECT count() == uniqExact(key) FROM 
test.view')) instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') kafka_thread.join() assert result == 1, 'Messages from kafka get duplicated!' def test_kafka_virtual_columns2(kafka_cluster): admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) kafka_create_topic(admin_client, "virt2_0", num_partitions=2) kafka_create_topic(admin_client, "virt2_1", num_partitions=2) instance.query(''' CREATE TABLE test.kafka (value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'virt2_0,virt2_1', kafka_group_name = 'virt2', kafka_num_consumers = 2, kafka_format = 'JSONEachRow'; CREATE MATERIALIZED VIEW test.view Engine=Log AS SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka; ''') producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer) producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001, headers=[('content-encoding', b'base64')]) producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002, headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')]) producer.flush() producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003, headers=[('b', b'b'), ('a', b'a')]) producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004, headers=[('a', b'a'), ('b', b'b')]) producer.flush() producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005) producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006) 
producer.flush() producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007) producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008) producer.flush() instance.wait_for_log_line('kafka.*Committed offset 2.*virt2_[01]', repetitions=4, look_behind_lines=6000) members = describe_consumer_group(kafka_cluster, 'virt2') # pprint.pprint(members) members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0' members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1' result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True) expected = '''\ 1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64'] 2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2'] 3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a'] 4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b'] 5 k5 virt2_1 0 0 1577836805 1577836805005 [] [] 6 k6 virt2_1 0 1 1577836806 1577836806006 [] [] 7 k7 virt2_1 1 0 1577836807 1577836807007 [] [] 8 k8 virt2_1 1 1 1577836808 1577836808008 [] [] ''' assert TSV(result) == TSV(expected) kafka_delete_topic(admin_client, "virt2_0") kafka_delete_topic(admin_client, "virt2_1") def test_kafka_produce_key_timestamp(kafka_cluster): admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) topic_name = "insert3" kafka_create_topic(admin_client, topic_name) instance.query(''' DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC')) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'insert3', kafka_group_name = 'insert3', kafka_format = 'TSV', kafka_row_delimiter = '\\n'; CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC')) 
ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'insert3', kafka_group_name = 'insert3', kafka_format = 'TSV', kafka_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.view Engine=Log AS SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka; ''') instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1, 1, 'k1', 1577836801)) instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2, 2, 'k2', 1577836802)) instance.query( "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3, 3, 'k3', 1577836803, 4, 4, 'k4', 1577836804)) instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5, 5, 'k5', 1577836805)) instance.wait_for_log_line("Committed offset 5") result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True) # logging.debug(result) expected = '''\ 1 1 k1 1577836801 k1 insert3 0 0 1577836801 2 2 k2 1577836802 k2 insert3 0 1 1577836802 3 3 k3 1577836803 k3 insert3 0 2 1577836803 4 4 k4 1577836804 k4 insert3 0 3 1577836804 5 5 k5 1577836805 k5 insert3 0 4 1577836805 ''' assert TSV(result) == TSV(expected) kafka_delete_topic(admin_client, topic_name) def test_kafka_flush_by_time(kafka_cluster): admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) topic_name = "flush_by_time" kafka_create_topic(admin_client, topic_name) instance.query(''' DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'flush_by_time', kafka_group_name = 'flush_by_time', kafka_format = 'JSONEachRow', kafka_max_block_size = 100, kafka_row_delimiter = '\\n'; SELECT * FROM test.kafka; CREATE TABLE test.view (key UInt64, 
value UInt64, ts DateTime64(3) MATERIALIZED now64(3)) ENGINE = MergeTree() ORDER BY key; ''') cancel = threading.Event() def produce(): while not cancel.is_set(): messages = [] messages.append(json.dumps({'key': 0, 'value': 0})) kafka_produce(kafka_cluster, 'flush_by_time', messages) time.sleep(0.8) kafka_thread = threading.Thread(target=produce) kafka_thread.start() instance.query(''' CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; ''') time.sleep(18) result = instance.query('SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view') cancel.set() kafka_thread.join() # kafka_cluster.open_bash_shell('instance') instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') assert TSV(result) == TSV('1 1') kafka_delete_topic(admin_client, topic_name) def test_kafka_flush_by_block_size(kafka_cluster): cancel = threading.Event() def produce(): while not cancel.is_set(): messages = [] messages.append(json.dumps({'key': 0, 'value': 0})) kafka_produce(kafka_cluster, 'flush_by_block_size', messages) kafka_thread = threading.Thread(target=produce) kafka_thread.start() instance.query(''' DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'flush_by_block_size', kafka_group_name = 'flush_by_block_size', kafka_format = 'JSONEachRow', kafka_max_block_size = 100, kafka_poll_max_batch_size = 1, kafka_flush_interval_ms = 120000, /* should not flush by time during test */ kafka_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; ''') # Wait for Kafka engine to consume this data while 1 != int(instance.query( "SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")): time.sleep(0.5) cancel.set() kafka_thread.join() # more 
flushes can happens during test, we need to check only result of first flush (part named all_1_1_0). result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'") # logging.debug(result) instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') # 100 = first poll should return 100 messages (and rows) # not waiting for stream_flush_interval_ms assert int( result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!' def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster): admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) topic_name = "topic_with_multiple_partitions2" kafka_create_topic(admin_client, topic_name, num_partitions=10) instance.query(''' DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'topic_with_multiple_partitions2', kafka_group_name = 'topic_with_multiple_partitions2', kafka_format = 'JSONEachRow', kafka_max_block_size = 211, kafka_flush_interval_ms = 500; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; ''') messages = [] count = 0 for dummy_msg in range(1000): rows = [] for dummy_row in range(random.randrange(3, 10)): count = count + 1 rows.append(json.dumps({'key': count, 'value': count})) messages.append("\n".join(rows)) kafka_produce(kafka_cluster, 'topic_with_multiple_partitions2', messages) instance.wait_for_log_line('kafka.*Stalled', repetitions=5) result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view') logging.debug(result) assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count)) instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') kafka_delete_topic(admin_client, topic_name) def 
def test_kafka_rebalance(kafka_cluster):
    """Exercise consumer-group rebalancing.

    Spins up 11 Kafka-engine tables (one by one) in the same consumer group
    while a background thread keeps producing, then drops 10 of them (again
    one by one, forcing a rebalance each time) and verifies that every
    produced key landed in test.destination exactly once.

    NOTE(review): 'CONSURRENT' is a pre-existing typo for 'CONCURRENT';
    kept as-is in this documentation-only pass.
    """
    NUMBER_OF_CONSURRENT_CONSUMERS = 11

    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination (
            key UInt64,
            value UInt64,
            _topic String,
            _key String,
            _offset UInt64,
            _partition UInt64,
            _timestamp Nullable(DateTime('UTC')),
            _consumed_by LowCardinality(String)
        )
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    # kafka_cluster.open_bash_shell('instance')

    # time.sleep(2)

    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_name = "topic_with_multiple_partitions"
    # 11 partitions so each of the 11 consumers can own at least one.
    kafka_create_topic(admin_client, topic_name, num_partitions=11)

    cancel = threading.Event()

    # Single-element list: a mutable counter the producer closure can update.
    msg_index = [0]

    def produce():
        # Keep producing batches of 59 JSON messages until cancelled, so data
        # flows continuously while consumers join and leave the group.
        while not cancel.is_set():
            messages = []
            for _ in range(59):
                messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
                msg_index[0] += 1
            kafka_produce(kafka_cluster, 'topic_with_multiple_partitions', messages)

    kafka_thread = threading.Thread(target=produce)
    kafka_thread.start()

    for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS):
        table_name = 'kafka_consumer{}'.format(consumer_index)
        logging.debug(("Setting up {}".format(table_name)))

        instance.query('''
            DROP TABLE IF EXISTS test.{0};
            DROP TABLE IF EXISTS test.{0}_mv;
            CREATE TABLE test.{0} (key UInt64, value UInt64)
                ENGINE = Kafka
                SETTINGS kafka_broker_list = 'kafka1:19092',
                         kafka_topic_list = 'topic_with_multiple_partitions',
                         kafka_group_name = 'rebalance_test_group',
                         kafka_format = 'JSONEachRow',
                         kafka_max_block_size = 33,
                         kafka_flush_interval_ms = 500;
            CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
                SELECT
                    key,
                    value,
                    _topic,
                    _key,
                    _offset,
                    _partition,
                    _timestamp,
                    '{0}' as _consumed_by
                FROM test.{0};
        '''.format(table_name))
        # kafka_cluster.open_bash_shell('instance')
        # Waiting for test.kafka_consumerX to start consume ...
        instance.wait_for_log_line('kafka_consumer{}.*Polled offset [0-9]+'.format(consumer_index))

    cancel.set()

    # I leave last one working by intent (to finish consuming after all rebalances)
    for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS - 1):
        logging.debug(("Dropping test.kafka_consumer{}".format(consumer_index)))
        instance.query('DROP TABLE IF EXISTS test.kafka_consumer{} SYNC'.format(consumer_index))

    # logging.debug(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
    # kafka_cluster.open_bash_shell('instance')

    # Wait until the surviving consumer catches up with everything produced.
    while 1:
        messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
        if messages_consumed >= msg_index[0]:
            break
        time.sleep(1)
        logging.debug(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0])))

    logging.debug((instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')))

    # Some queries to debug...
    # SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
    # select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
    # SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
    # select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
    # select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
    # SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;

    # CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
    #             kafka_topic_list = 'topic_with_multiple_partitions',
    #             kafka_group_name = 'rebalance_test_group_reference',
    #             kafka_format = 'JSONEachRow',
    #             kafka_max_block_size = 100000;
    #
    # CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
    #     SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
    # FROM test.reference;
    #
    # select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';

    # 1 iff there are no duplicated keys in the destination table.
    result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))

    for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS):
        logging.debug(("kafka_consumer{}".format(consumer_index)))
        table_name = 'kafka_consumer{}'.format(consumer_index)
        instance.query('''
            DROP TABLE IF EXISTS test.{0};
            DROP TABLE IF EXISTS test.{0}_mv;
        '''.format(table_name))

    instance.query('''
        DROP TABLE IF EXISTS test.destination;
    ''')

    kafka_thread.join()

    assert result == 1, 'Messages from kafka get duplicated!'
    kafka_delete_topic(admin_client, topic_name)


def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
    """ZooKeeper connection is dropped while a polled batch is being pushed
    to a Replicated table (after write-prefix, before write-suffix); after
    healing, the same offsets are retried, so the destination must contain
    keys 1..22 with no holes and no duplicates."""
    messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
    kafka_produce(kafka_cluster, 'no_holes_when_write_suffix_failed', messages)

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;

        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'no_holes_when_write_suffix_failed',
                     kafka_group_name = 'no_holes_when_write_suffix_failed',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 20,
                     kafka_flush_interval_ms = 2000;

        CREATE TABLE test.view (key UInt64, value String)
            ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
            ORDER BY key;
    ''')

    # init PartitionManager (it starts container) earlier
    pm = PartitionManager()

    instance.query('''
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka
            WHERE NOT sleepEachRow(0.25);
    ''')

    instance.wait_for_log_line("Polled batch of 20 messages")
    # the tricky part here is that disconnect should happen after write prefix, but before write suffix
    # we have 0.25 (sleepEachRow) * 20 ( Rows ) = 5 sec window after "Polled batch of 20 messages"
    # while materialized view is working to inject zookeeper failure
    pm.drop_instance_zk_connections(instance)
    instance.wait_for_log_line("Error.*(session has been expired|Connection loss).*while pushing to view")
    pm.heal_all()
    instance.wait_for_log_line("Committed offset 22")

    result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
    logging.debug(result)

    # kafka_cluster.open_bash_shell('instance')

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    # 22 rows, 22 distinct keys, max key 22 -> no holes, no duplicates.
    assert TSV(result) == TSV('22\t22\t22')


def test_exception_from_destructor(kafka_cluster):
    """A failing SELECT from a misconfigured Kafka table (empty group name)
    followed by DROP/CREATE cycles must not crash the server — guards against
    exceptions escaping the storage destructor.  The final SELECT 1 proves
    the instance is still alive."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'xyz',
                     kafka_group_name = '',
                     kafka_format = 'JSONEachRow';
    ''')
    # Expected to fail: the table is intentionally misconfigured.
    instance.query_and_get_error('''
        SELECT * FROM test.kafka;
    ''')
    instance.query('''
        DROP TABLE test.kafka;
    ''')

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'xyz',
                     kafka_group_name = '',
                     kafka_format = 'JSONEachRow';
    ''')
    instance.query('''
        DROP TABLE test.kafka;
    ''')

    # kafka_cluster.open_bash_shell('instance')
    assert TSV(instance.query('SELECT 1')) == TSV('1')


def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
    """DROP of a Kafka table must commit offsets of messages that were polled
    but not yet flushed to the MV; a re-created table in the same consumer
    group continues from those offsets, so test.destination must end up with
    a contiguous key sequence (no losses, no duplicates)."""
    messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
    kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)

    instance.query('''
        DROP TABLE IF EXISTS test.destination SYNC;
        CREATE TABLE test.destination (
            key UInt64,
            value UInt64,
            _topic String,
            _key String,
            _offset UInt64,
            _partition UInt64,
            _timestamp Nullable(DateTime('UTC')),
            _consumed_by LowCardinality(String)
        )
        ENGINE = MergeTree()
        ORDER BY key;

        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
                     kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1000,
                     kafka_flush_interval_ms = 1000;

        CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
            SELECT
                key,
                value,
                _topic,
                _key,
                _offset,
                _partition,
                _timestamp
            FROM test.kafka;
    ''')

    # Waiting for test.kafka_consumer to start consume
    instance.wait_for_log_line('Committed offset [0-9]+')

    cancel = threading.Event()

    # Next key to produce; a list so the producer closure can mutate it.
    i = [2]

    def produce():
        # Batches of 113 messages every 0.5 s until cancelled.
        while not cancel.is_set():
            messages = []
            for _ in range(113):
                messages.append(json.dumps({'key': i[0], 'value': i[0]}))
                i[0] += 1
            kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
            time.sleep(0.5)

    kafka_thread = threading.Thread(target=produce)
    kafka_thread.start()
    time.sleep(4)

    # Drop mid-consumption: unflushed-but-polled offsets must still be committed.
    instance.query('''
        DROP TABLE test.kafka SYNC;
    ''')

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
                     kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 10000,
                     kafka_flush_interval_ms = 1000;
    ''')

    cancel.set()
    # Five 'Stalled' log repetitions => the consumer has fully drained the topic.
    instance.wait_for_log_line('kafka.*Stalled', repetitions=5)

    # kafka_cluster.open_bash_shell('instance')
    # SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;

    result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
    logging.debug(result)

    instance.query('''
        DROP TABLE test.kafka_consumer SYNC;
        DROP TABLE test.destination SYNC;
    ''')

    kafka_thread.join()
    # count == uniq == max == last produced key -> contiguous 1..N sequence.
    assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!'
def test_bad_reschedule(kafka_cluster):
    """Consuming 20k pre-produced messages must not be stretched out by bad
    stream rescheduling: the spread between the first and last consume_ts
    recorded by the MV has to stay under 8 seconds."""
    messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
    kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'test_bad_reschedule',
                     kafka_group_name = 'test_bad_reschedule',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1000,
                     kafka_flush_interval_ms = 1000;

        CREATE MATERIALIZED VIEW test.destination Engine=Log AS
        SELECT
            key,
            now() as consume_ts,
            value,
            _topic,
            _key,
            _offset,
            _partition,
            _timestamp
        FROM test.kafka;
    ''')

    instance.wait_for_log_line("Committed offset 20000")

    # Whole topic must be drained within an 8-second window.
    assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8


def test_kafka_duplicates_when_commit_failed(kafka_cluster):
    """Broker becomes unreachable exactly between flushing a block to the MV
    and committing its offsets; after the broker returns, the batch is
    re-consumed.  The duplicate is expected (see librdkafka#2631) — the test
    asserts we end up with 42 rows over 22 distinct keys and report the
    situation properly rather than losing data."""
    messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
    kafka_produce(kafka_cluster, 'duplicates_when_commit_failed', messages)

    instance.query('''
        DROP TABLE IF EXISTS test.view SYNC;
        DROP TABLE IF EXISTS test.consumer SYNC;

        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'duplicates_when_commit_failed',
                     kafka_group_name = 'duplicates_when_commit_failed',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 20,
                     kafka_flush_interval_ms = 1000;

        CREATE TABLE test.view (key UInt64, value String)
            ENGINE = MergeTree()
            ORDER BY key;
    ''')

    instance.query('''
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka
            WHERE NOT sleepEachRow(0.25);
    ''')

    instance.wait_for_log_line("Polled batch of 20 messages")
    # the tricky part here is that disconnect should happen after write prefix, but before we do commit
    # we have 0.25 (sleepEachRow) * 20 ( Rows ) = 5 sec window after "Polled batch of 20 messages"
    # while materialized view is working to inject zookeeper failure
    kafka_cluster.pause_container('kafka1')

    # if we restore the connection too fast (<30sec) librdkafka will not report any timeout
    # (alternative is to decrease the default session timeouts for librdkafka)
    #
    # when the delay is too long (>50sec) broker will decide to remove us from the consumer group,
    # and will start answering "Broker: Unknown member"
    instance.wait_for_log_line("Exception during commit attempt: Local: Waiting for coordinator", timeout=45)
    instance.wait_for_log_line("All commit attempts failed", look_behind_lines=500)

    kafka_cluster.unpause_container('kafka1')

    # kafka_cluster.open_bash_shell('instance')
    instance.wait_for_log_line("Committed offset 22")

    result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
    logging.debug(result)

    instance.query('''
        DROP TABLE test.consumer SYNC;
        DROP TABLE test.view SYNC;
    ''')

    # After https://github.com/edenhill/librdkafka/issues/2631
    # timeout triggers rebalance, making further commits to the topic after getting back online
    # impossible. So we have a duplicate in that scenario, but we report that situation properly.
    assert TSV(result) == TSV('42\t22\t22')


# if we came to partition end we will repeat polling until reaching kafka_max_block_size or flush_interval
# that behavior is a bit quesionable - we can just take a bigger pauses between polls instead -
# to do more job in a single pass, and give more rest for a thread.
# But in cases of some peaky loads in kafka topic the current contract sounds more predictable and
# easier to understand, so let's keep it as is for now.
# also we can came to eof because we drained librdkafka internal queue too fast
def test_premature_flush_on_eof(kafka_cluster):
    """Reaching topic EOF must not trigger an immediate flush: the stream
    should keep respecting kafka_flush_interval_ms and form a bigger block,
    so two messages separated by an EOF end up in a single part."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'premature_flush_on_eof',
                     kafka_group_name = 'premature_flush_on_eof',
                     kafka_format = 'JSONEachRow';
        SELECT * FROM test.kafka LIMIT 1;
        CREATE TABLE test.destination (
            key UInt64,
            value UInt64,
            _topic String,
            _key String,
            _offset UInt64,
            _partition UInt64,
            _timestamp Nullable(DateTime('UTC')),
            _consumed_by LowCardinality(String)
        )
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    # messages created here will be consumed immedeately after MV creation
    # reaching topic EOF.
    # But we should not do flush immedeately after reaching EOF, because
    # next poll can return more data, and we should respect kafka_flush_interval_ms
    # and try to form bigger block
    messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
    kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)

    instance.query('''
        CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
        SELECT
            key,
            value,
            _topic,
            _key,
            _offset,
            _partition,
            _timestamp
        FROM test.kafka;
    ''')

    # all subscriptions/assignments done during select, so it start sending data to test.destination
    # immediately after creation of MV
    instance.wait_for_log_line("Polled batch of 1 messages")
    instance.wait_for_log_line("Stalled")

    # produce more messages after delay
    kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)

    # data was not flushed yet (it will be flushed 7.5 sec after creating MV)
    assert int(instance.query("SELECT count() FROM test.destination")) == 0

    instance.wait_for_log_line("Committed offset 2")

    # it should be single part, i.e. single insert
    result = instance.query('SELECT _part, count() FROM test.destination group by _part')
    assert TSV(result) == TSV('all_1_1_0\t2')

    instance.query('''
        DROP TABLE test.kafka_consumer;
        DROP TABLE test.destination;
    ''')


def test_kafka_unavailable(kafka_cluster):
    """Kafka broker paused before the table is created; once the broker is
    back, consumption must recover and drain all 20000 messages."""
    messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
    kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)

    kafka_cluster.pause_container('kafka1')

    instance.query('''
        CREATE TABLE test.test_bad_reschedule (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'test_bad_reschedule',
                     kafka_group_name = 'test_bad_reschedule',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1000;

        CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS
        SELECT
            key,
            now() as consume_ts,
            value,
            _topic,
            _key,
            _offset,
            _partition,
            _timestamp
        FROM test.test_bad_reschedule;
    ''')

    instance.query("SELECT * FROM test.test_bad_reschedule")
    instance.query("SELECT count() FROM test.destination_unavailable")

    # enough to trigger issue
    time.sleep(30)
    kafka_cluster.unpause_container('kafka1')

    # Poll until the MV has received everything that was produced.
    while int(instance.query("SELECT count() FROM test.destination_unavailable")) < 20000:
        print("Waiting for consume")
        time.sleep(1)


def test_kafka_issue14202(kafka_cluster):
    """
    INSERT INTO Kafka Engine from an empty SELECT sub query was leading to failure
    """

    instance.query('''
        CREATE TABLE test.empty_table (
            dt Date,
            some_string String
        )
        ENGINE = MergeTree()
        PARTITION BY toYYYYMM(dt)
        ORDER BY some_string;

        CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'issue14202',
                     kafka_group_name = 'issue14202',
                     kafka_format = 'JSONEachRow';
    ''')

    # The subquery yields zero rows; the INSERT must succeed regardless.
    instance.query(
        'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )')
    # check instance is alive
    assert TSV(instance.query('SELECT 1')) == TSV('1')
    instance.query('''
        DROP TABLE test.empty_table;
        DROP TABLE test.kafka_q;
    ''')


def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
    """CSV consumption with kafka_thread_per_consumer=1 and 4 consumers:
    all 50 produced rows must eventually be read back."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'csv_with_thread_per_consumer',
                     kafka_group_name = 'csv_with_thread_per_consumer',
                     kafka_format = 'CSV',
                     kafka_row_delimiter = '\\n',
                     kafka_num_consumers = 4,
                     kafka_thread_per_consumer = 1;
    ''')

    messages = []
    for i in range(50):
        messages.append('{i}, {i}'.format(i=i))
    kafka_produce(kafka_cluster, 'csv_with_thread_per_consumer', messages)

    result = ''
    # Accumulate SELECT output until the expected set of rows is complete.
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break

    kafka_check_result(result, True)


def random_string(size=8):
    # Uniformly random string of uppercase letters and digits, `size` chars long.
    return ''.join(random.choices(string.ascii_uppercase + string.digits, k=size))


def test_kafka_engine_put_errors_to_stream(kafka_cluster):
    """kafka_handle_error_mode='stream': rows that fail to parse are routed
    (with _raw_message/_error) to an errors MV instead of aborting the
    stream.  Half of 128 messages are valid, half are broken -> 64/64."""
    instance.query('''
        DROP TABLE IF EXISTS test.kafka;
        DROP TABLE IF EXISTS test.kafka_data;
        DROP TABLE IF EXISTS test.kafka_errors;
        CREATE TABLE test.kafka (i Int64, s String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'kafka_engine_put_errors_to_stream',
                     kafka_group_name = 'kafka_engine_put_errors_to_stream',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 128,
                     kafka_handle_error_mode = 'stream';
        CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
            ENGINE = MergeTree
            ORDER BY i
            AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
        CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
            ENGINE = MergeTree
            ORDER BY (topic, offset)
            AS SELECT
                _topic AS topic,
                _partition AS partition,
                _offset AS offset,
                _raw_message AS raw,
                _error AS error
                FROM test.kafka WHERE length(_error) > 0;
    ''')

    messages = []
    for i in range(128):
        if i % 2 == 0:
            messages.append(json.dumps({'i': i, 's': random_string(8)}))
        else:
            # Unexpected json content for table test.kafka.
            messages.append(json.dumps({'i': 'n_' + random_string(4), 's': random_string(8)}))

    kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream', messages)
    instance.wait_for_log_line("Committed offset 128")

    assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('64')
    assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')

    instance.query('''
        DROP TABLE test.kafka;
        DROP TABLE test.kafka_data;
        DROP TABLE test.kafka_errors;
    ''')


def gen_normal_json():
    # A JSON object matching the (i Int64, s String) schema.
    return '{"i":1000, "s":"ABC123abc"}'


def gen_malformed_json():
    # 'i' is a string here, so parsing as Int64 fails.
    return '{"i":"n1000", "s":"1000"}'


def gen_message_with_jsons(jsons = 10, malformed = 0):
    """Return one Kafka message containing `jsons` space-separated JSON
    objects; if `malformed` is truthy, exactly one of them (at a random
    position) is broken."""
    s = io.StringIO()

    # we don't care on which position error will be added
    # (we skip whole broken message), but we need to be
    # sure that at least one error will be added,
    # otherwise test will fail.
    error_pos = random.randint(0,jsons-1)

    for i in range (jsons):
        if malformed and i == error_pos:
            s.write(gen_malformed_json())
        else:
            s.write(gen_normal_json())
        s.write(' ')
    return s.getvalue()


def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster):
    """Same as test_kafka_engine_put_errors_to_stream, but each Kafka message
    packs 10 JSON rows and a broken message is skipped as a whole: 64 good
    messages -> 640 data rows, 64 broken messages -> 64 error rows."""
    instance.query('''
        DROP TABLE IF EXISTS test.kafka;
        DROP TABLE IF EXISTS test.kafka_data;
        DROP TABLE IF EXISTS test.kafka_errors;
        CREATE TABLE test.kafka (i Int64, s String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
                     kafka_group_name = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 100,
                     kafka_poll_max_batch_size = 1,
                     kafka_handle_error_mode = 'stream';
        CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
            ENGINE = MergeTree
            ORDER BY i
            AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
        CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
            ENGINE = MergeTree
            ORDER BY (topic, offset)
            AS SELECT
                _topic AS topic,
                _partition AS partition,
                _offset AS offset,
                _raw_message AS raw,
                _error AS error
                FROM test.kafka WHERE length(_error) > 0;
    ''')

    messages = []
    for i in range(128):
        if i % 2 == 0:
            messages.append(gen_message_with_jsons(10, 1))
        else:
            messages.append(gen_message_with_jsons(10, 0))

    kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream_with_random_malformed_json', messages)

    instance.wait_for_log_line("Committed offset 128")
    # 64 good messages, each containing 10 rows
    assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('640')
    # 64 bad messages, each containing some broken row
    assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')

    instance.query('''
        DROP TABLE test.kafka;
        DROP TABLE test.kafka_data;
        DROP TABLE test.kafka_errors;
    ''')
| perl -pe 's/\n/\\n/; s/\t/\\t/g;' 'JSONEachRow': { 'data_sample': [ '{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', '{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', '{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', # broken message '{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}', ], 'expected':'''{"raw_message":"{\\"id\\":\\"0\\",\\"blockNo\\":\\"BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}': (while reading the value of key blockNo)"}''', 'supports_empty_value': True, 'printable': True, }, # JSONAsString doesn't fit to that test, and tested separately 'JSONCompactEachRow': { 'data_sample': [ '["0", 0, "AM", 0.5, 1]\n', '["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n', '["0", 0, "AM", 0.5, 1]\n', # broken 
message '["0", "BAD", "AM", 0.5, 1]', ], 'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse input: expected '\\"' before: 'BAD\\", \\"AM\\", 0.5, 1]': (while reading the value of key blockNo)"}''', 'supports_empty_value': True, 'printable':True, }, 'JSONCompactEachRowWithNamesAndTypes': { 'data_sample': [ '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n', '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n', '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n', # broken message '["0", "BAD", "AM", 0.5, 1]', ], 'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse JSON string: expected opening quote"}''', 'printable':True, }, 'TSKV': { 'data_sample': [ 'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', 
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', 'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', # broken message 'id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n', ], 'expected':'{"raw_message":"id=0\\tblockNo=BAD\\tval1=AM\\tval2=0.5\\tval3=1\\n","error":"Found garbage after field in TSKV format: blockNo: (at row 1)\\n"}', 'printable':True, }, 'CSV': { 'data_sample': [ '0,0,"AM",0.5,1\n', '1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n', '0,0,"AM",0.5,1\n', # broken message '0,"BAD","AM",0.5,1\n', ], 'expected':'''{"raw_message":"0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''', 'printable':True, 'supports_empty_value': True, }, 'TSV': { 'data_sample': [ '0\t0\tAM\t0.5\t1\n', '1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', '0\t0\tAM\t0.5\t1\n', # broken message 
'0\tBAD\tAM\t0.5\t1\n', ], 'expected':'''{"raw_message":"0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''', 'supports_empty_value': True, 'printable':True, }, 'CSVWithNames': { 'data_sample': [ '"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n', '"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n', '"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n', # broken message '"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n', ], 'expected':'''{"raw_message":"\\"id\\",\\"blockNo\\",\\"val1\\",\\"val2\\",\\"val3\\"\\n0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''', 'printable':True, }, 'Values': { 'data_sample': [ "(0,0,'AM',0.5,1)", "(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)", "(0,0,'AM',0.5,1)", # broken message "(0,'BAD','AM',0.5,1)", ], 'expected':r'''{"raw_message":"(0,'BAD','AM',0.5,1)","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. 
Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception"}''', 'supports_empty_value': True, 'printable':True, }, 'TSVWithNames': { 'data_sample': [ 'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n', 'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', 'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n', # broken message 'id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n', ], 'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''', 'supports_empty_value': True, 'printable':True, }, 'TSVWithNamesAndTypes': { 'data_sample': [ 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n', 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n', # broken message 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n', ], 'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\nInt64\\tUInt16\\tString\\tFloat32\\tUInt8\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't 
in buffer (rare case)\\n"}''', 'printable':True, }, 'Native': { 'data_sample': [ b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01', b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01', 
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01', # broken message b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01', ], 'expected':'''{"raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801","error":"Cannot convert: String to UInt16"}''', 'printable':False, }, 'RowBinary': { 'data_sample': [ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', 
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', # broken message b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01', ], 'expected':'{"raw_message":"00000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. 
Bytes expected: 65.: (at row 1)\\n"}', 'printable':False, }, 'RowBinaryWithNamesAndTypes': { 'data_sample': [ b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', 
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', # broken message b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01', ], 'expected':'{"raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}', 'printable':False, }, 'ORC': { 'data_sample': [ b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd
5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', 
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x
50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\
x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', # broken message b'\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x0
8\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', ], 
'expected':r'''{"raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. 
Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception."}''', 'printable':False, } } topic_name_prefix = 'format_tests_4_stream_' for format_name, format_opts in list(all_formats.items()): logging.debug(f'Set up {format_name}') topic_name = f"{topic_name_prefix}{format_name}" data_sample = format_opts['data_sample'] data_prefix = [] raw_message = '_raw_message' # prepend empty value when supported if format_opts.get('supports_empty_value', False): data_prefix = data_prefix + [''] if format_opts.get('printable', False) == False: raw_message = 'hex(_raw_message)' kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample) instance.query(''' DROP TABLE IF EXISTS test.kafka_{format_name}; CREATE TABLE test.kafka_{format_name} ( id Int64, blockNo UInt16, val1 String, val2 Float32, val3 UInt8 ) ENGINE = Kafka() SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = '{topic_name}', kafka_group_name = '{topic_name}', kafka_format = '{format_name}', kafka_handle_error_mode = 'stream', kafka_flush_interval_ms = 1000 {extra_settings}; DROP TABLE IF EXISTS test.kafka_data_{format_name}_mv; CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv Engine=Log AS SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name} WHERE length(_error) = 0; DROP TABLE IF EXISTS test.kafka_errors_{format_name}_mv; CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name} WHERE length(_error) > 0; '''.format(topic_name=topic_name, format_name=format_name, raw_message=raw_message, extra_settings=format_opts.get('extra_settings') or '')) for format_name, format_opts in list(all_formats.items()): logging.debug('Checking {format_name}') topic_name = f"{topic_name_prefix}{format_name}" # shift offsets by 1 if format supports empty value offsets = [1, 2, 
def wait_for_new_data(table_name, prev_count=0, max_retries=120):
    """Poll `table_name` until its row count exceeds `prev_count`.

    Polls roughly twice per second (0.5 s sleep between attempts).

    :param table_name: fully qualified table to count rows in
    :param prev_count: baseline row count; success means strictly more rows
    :param max_retries: number of extra polls before giving up
    :return: the new (larger) row count
    :raises Exception: when no new rows arrive within the retry budget
    """
    # Bounded loop instead of `while True` + manual counter: same number of
    # attempts (initial try plus max_retries retries), same failure message.
    for _ in range(max_retries + 1):
        new_count = int(instance.query("SELECT count() FROM {}".format(table_name)))
        print(new_count)  # kept: visible progress in pytest -s output
        if new_count > prev_count:
            return new_count
        time.sleep(0.5)
    raise Exception("No new data :(")
def test_kafka_consumer_failover(kafka_cluster):
    """Verify that a shared Kafka consumer group keeps delivering messages
    while Kafka-engine tables in the group are detached and re-attached.

    Three Kafka tables share one consumer group over a two-partition topic,
    so at most two of them consume at any time.  Each step detaches or
    attaches a table, produces one message per partition, and waits for the
    rows to land in test.destination.
    """
    # for backporting:
    # admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))

    topic_name = "kafka_consumer_failover"
    kafka_create_topic(admin_client, topic_name, num_partitions=2)

    # NOTE: test.kafka3 is also dropped now — it is created below, so a
    # leftover from a previous run would otherwise fail the CREATE.
    instance.query('''
        DROP TABLE IF EXISTS test.kafka;
        DROP TABLE IF EXISTS test.kafka2;
        DROP TABLE IF EXISTS test.kafka3;

        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'kafka_consumer_failover',
                     kafka_group_name = 'kafka_consumer_failover_group',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1,
                     kafka_poll_timeout_ms = 200;

        CREATE TABLE test.kafka2 (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'kafka_consumer_failover',
                     kafka_group_name = 'kafka_consumer_failover_group',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1,
                     kafka_poll_timeout_ms = 200;

        CREATE TABLE test.kafka3 (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'kafka_consumer_failover',
                     kafka_group_name = 'kafka_consumer_failover_group',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1,
                     kafka_poll_timeout_ms = 200;

        CREATE TABLE test.destination (
            key UInt64,
            value UInt64,
            _consumed_by LowCardinality(String)
        )
        ENGINE = MergeTree()
        ORDER BY key;

        CREATE MATERIALIZED VIEW test.kafka_mv TO test.destination AS
        SELECT key, value, 'kafka' as _consumed_by
        FROM test.kafka;

        CREATE MATERIALIZED VIEW test.kafka2_mv TO test.destination AS
        SELECT key, value, 'kafka2' as _consumed_by
        FROM test.kafka2;

        CREATE MATERIALIZED VIEW test.kafka3_mv TO test.destination AS
        SELECT key, value, 'kafka3' as _consumed_by
        FROM test.kafka3;
    ''')

    # FIX: the producer previously used the module-global `cluster.kafka_port`
    # while the admin client above used the `kafka_cluster` fixture — use the
    # fixture consistently.
    producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port),
                             value_serializer=producer_serializer,
                             key_serializer=producer_serializer)

    def send_pair(key):
        # One message per partition so every currently-active consumer
        # (at most one per partition) receives data.
        for partition in (0, 1):
            producer.send(topic=topic_name,
                          value=json.dumps({'key': key, 'value': key}),
                          partition=partition)
        producer.flush()

    ## all 3 attached, 2 working
    send_pair(1)
    prev_count = wait_for_new_data('test.destination')

    ## 2 attached, 2 working
    instance.query('DETACH TABLE test.kafka')
    send_pair(2)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 1 attached, 1 working
    instance.query('DETACH TABLE test.kafka2')
    send_pair(3)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 2 attached, 2 working
    instance.query('ATTACH TABLE test.kafka')
    send_pair(4)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 1 attached, 1 working
    instance.query('DETACH TABLE test.kafka3')
    send_pair(5)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 2 attached, 2 working
    instance.query('ATTACH TABLE test.kafka2')
    send_pair(6)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 3 attached, 2 working
    instance.query('ATTACH TABLE test.kafka3')
    send_pair(7)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 2 attached, same 2 working
    instance.query('DETACH TABLE test.kafka3')
    send_pair(8)
    prev_count = wait_for_new_data('test.destination', prev_count)

    kafka_delete_topic(admin_client, topic_name)


if __name__ == '__main__':
    cluster.start()
    input("Cluster created, press any key to destroy...")
    cluster.shutdown()
# ==== rnode.py — start of a separate source file concatenated here ====
import re import os import queue import shlex import string import shutil import logging from logging import (Logger) import threading from threading import Event import contextlib from multiprocessing import Queue, Process from collections import defaultdict from typing import ( Dict, List, Tuple, Optional, Generator, AbstractSet, Set ) from rchain.crypto import PrivateKey from rchain.certificate import get_node_id_raw from rchain.vault import DEFAULT_PHLO_LIMIT, DEFAULT_PHLO_PRICE from cryptography.hazmat.primitives.serialization import load_pem_private_key from cryptography.hazmat.backends import default_backend from docker.client import DockerClient from docker.models.containers import Container from docker.models.containers import ExecResult from .common import ( make_tempdir, make_tempfile, TestingContext, NonZeroExitCodeError, GetBlockError, ParsingError, SynchronyConstraintError ) from .wait import ( wait_for_node_started, wait_for_approved_block_received_handler_state, ) DEFAULT_IMAGE = os.environ.get("DEFAULT_IMAGE", "rchain-integration-tests:latest") _PB_REPEATED_STR_SEP = "#$" rnode_binary = '/opt/docker/bin/rnode' rnode_directory = "/var/lib/rnode" rnode_deploy_dir = "{}/deploy".format(rnode_directory) rnode_bonds_file = '{}/genesis/bonds.txt'.format(rnode_directory) rnode_wallets_file = '{}/genesis/wallets.txt'.format(rnode_directory) rnode_certificate_path = '{}/node.certificate.pem'.format(rnode_directory) rnode_key_path = '{}/node.key.pem'.format(rnode_directory) class RNodeAddressNotFoundError(Exception): def __init__(self, regex: str) -> None: super().__init__() self.regex = regex class CommandTimeoutError(Exception): def __init__(self, command: Tuple[str, ...], timeout: int) -> None: super().__init__() self.command = command self.timeout = timeout class UnexpectedShowBlocksOutputFormatError(Exception): def __init__(self, output: str) -> None: super().__init__() self.output = output class UnexpectedProposeOutputFormatError(Exception): def 
class UnexpectedDeployOutputFormatError(Exception):
    """Raised when `rnode deploy` output cannot be parsed."""

    def __init__(self, output: str) -> None:
        super().__init__()
        self.output = output


def extract_block_count_from_show_blocks(show_blocks_output: str) -> int:
    """Return the value of the single "count: N" line in show-blocks output.

    Raises UnexpectedShowBlocksOutputFormatError unless exactly one such
    line exists and its payload parses as an integer.
    """
    prefix = 'count: '
    candidates = [ln for ln in show_blocks_output.splitlines() if ln.startswith(prefix)]
    if len(candidates) != 1:
        raise UnexpectedShowBlocksOutputFormatError(show_blocks_output)
    try:
        return int(candidates[0][len(prefix):])
    except ValueError:
        raise UnexpectedShowBlocksOutputFormatError(show_blocks_output)


def parse_show_blocks_key_value_line(line: str) -> Tuple[str, str]:
    """Split one `key: value` line; a surrounding double quote on the value
    is stripped.  Raises UnexpectedShowBlocksOutputFormatError on mismatch."""
    match = re.match(r'(?P<key>[^:]*): "?(?P<value>.*(?<!"))', line.strip())
    if match is None:
        raise UnexpectedShowBlocksOutputFormatError(line)
    return match.group('key', 'value')


def parse_show_blocks_output(show_blocks_output: str) -> List[Dict[str, str]]:
    """Parse `rnode show-blocks` output into one key/value dict per block.

    A block section starts at a '------------- block ' header and runs
    until the next blank line (or end of output).
    """
    lines = show_blocks_output.splitlines()
    total = len(lines)
    blocks: List[Dict[str, str]] = []
    pos = 0
    while pos < total:
        if not lines[pos].startswith('------------- block '):
            pos += 1
            continue
        fields: Dict[str, str] = {}
        cursor = pos + 1
        while cursor < total and lines[cursor].strip() != "":
            key, value = parse_show_blocks_key_value_line(lines[cursor])
            fields[key] = value
            cursor += 1
        blocks.append(fields)
        pos = cursor
    return blocks
def extract_validator_stake_from_bonds_validator_str(out_put: str) -> Dict[str, float]:
    """Build a validator -> stake map from a '#$'-joined "validator: stake" string."""
    pairs = (chunk.split(': ') for chunk in out_put.split(_PB_REPEATED_STR_SEP))
    return {validator: float(stake) for validator, stake in pairs}


def extract_block_hash_from_propose_output(propose_output: str) -> str:
    """Pull the block hash out of a successful `rnode propose` response.

    Expected shape:

        Response: Success! Block a91208047c... created and added.

    Raises UnexpectedProposeOutputFormatError when the output does not match.
    """
    match = re.match(r'Response: Success! Block (?P<hash>[0-9a-f]+) created and added.',
                     propose_output.strip())
    if match is None:
        raise UnexpectedProposeOutputFormatError(propose_output)
    return match.group('hash')


def extract_validator_stake_from_deploy_cost_str(output: str) -> Dict[str, float]:
    """Build a user -> deploy-cost map from a '#$'-joined deployCost string.

    Chunks that do not match the expected shape are silently skipped;
    absent users read as 0 through the defaultdict.
    """
    pattern = re.compile(r'User: (?P<user>[a-zA-Z0-9]*), Cost: (?P<cost>[0-9]*) DeployData \#(?P<timestamp>[0-9]*) -- .')
    costs: Dict[str, float] = defaultdict(lambda: 0)
    for chunk in output.split(_PB_REPEATED_STR_SEP):
        found = pattern.match(chunk)
        if found is not None:
            costs[found.group('user')] = int(found.group('cost'))
    return costs
'<Node(name={})>'.format(repr(self.name)) def get_node_pem_cert(self) -> bytes: return self.shell_out("cat", rnode_certificate_path).encode('utf8') def get_node_pem_key(self) -> bytes: return self.shell_out("cat", rnode_key_path).encode('utf8') def get_node_id_raw(self) -> bytes: key = load_pem_private_key(self.get_node_pem_key(), None, default_backend()) return get_node_id_raw(key) def logs(self) -> str: return self.container.logs().decode('utf-8') def get_rnode_address(self) -> str: log_content = self.logs() regex = "Listening for traffic on (rnode://.+@{name}\\?protocol=\\d+&discovery=\\d+)\\.$".format(name=self.container.name) match = re.search(regex, log_content, re.MULTILINE | re.DOTALL) if match is None: raise RNodeAddressNotFoundError(regex) address = match.group(1) return address def get_metrics(self) -> str: return self.shell_out('curl', '-s', 'http://localhost:40403/metrics') def get_connected_peers_metric_value(self) -> str: try: return self.shell_out('sh', '-c', 'curl -s http://localhost:40403/metrics | grep ^rchain_comm_rp_connect_peers\\ ') except NonZeroExitCodeError as e: if e.exit_code == 1: return '' raise def cleanup(self) -> None: self.container.remove(force=True, v=True) self.terminate_background_logging_event.set() self.background_logging.join() def show_blocks_with_depth(self, depth: int) -> str: return self.rnode_command('show-blocks', '--depth', str(depth)) def show_block(self, hash: str) -> str: return self.rnode_command('show-block', hash) def get_blocks_count(self, depth: int) -> int: show_blocks_output = self.show_blocks_with_depth(depth) return extract_block_count_from_show_blocks(show_blocks_output) def show_blocks_parsed(self, depth: int) -> List[Dict[str, str]]: show_blocks_output = self.show_blocks_with_depth(depth) return parse_show_blocks_output(show_blocks_output) def show_block_parsed(self, hash: str) -> Dict[str, str]: show_block_output = self.show_block(hash) block_info = parse_show_block_output(show_block_output) return 
block_info def get_block(self, block_hash: str) -> str: try: return self.rnode_command('show-block', block_hash, stderr=False) except NonZeroExitCodeError as e: raise GetBlockError(command=e.command, exit_code=e.exit_code, output=e.output) # Too low level -- do not use directly. Prefer shell_out() instead. def _exec_run_with_timeout(self, cmd: Tuple[str, ...], stderr: bool = True) -> Tuple[int, str]: control_queue: queue.Queue = Queue(1) def command_process() -> None: exec_result: ExecResult = self.container.exec_run(cmd, stderr=stderr) control_queue.put((exec_result.exit_code, exec_result.output.decode('utf-8'))) process = Process(target=command_process) logging.info("COMMAND {} {}".format(self.name, cmd)) process.start() try: exit_code, output = control_queue.get(True, self.command_timeout) except queue.Empty: raise CommandTimeoutError(cmd, self.command_timeout) finally: process.terminate() if exit_code != 0: for line in output.splitlines(): logging.info('{}: {}'.format(self.name, line)) logging.warning("EXITED {} {} {}".format(self.name, cmd, exit_code)) else: for line in output.splitlines(): logging.debug('{}: {}'.format(self.name, line)) logging.debug("EXITED {} {} {}".format(self.name, cmd, exit_code)) return exit_code, output def shell_out(self, *cmd: str, stderr: bool = True) -> str: exit_code, output = self._exec_run_with_timeout(cmd, stderr=stderr) if exit_code != 0: raise NonZeroExitCodeError(command=cmd, exit_code=exit_code, output=output) return output def rnode_command(self, *node_args: str, stderr: bool = True) -> str: return self.shell_out(rnode_binary, *node_args, stderr=stderr) def eval(self, rho_file_path: str) -> str: return self.rnode_command('eval', rho_file_path) def deploy(self, rho_file_path: str, private_key: PrivateKey, phlo_limit:int = DEFAULT_PHLO_LIMIT, phlo_price: int = DEFAULT_PHLO_PRICE) -> str: try: output = self.rnode_command('deploy', '--private-key={}'.format(private_key.to_hex()), '--phlo-limit={}'.format(phlo_limit), 
'--phlo-price={}'.format(phlo_price), rho_file_path, stderr=False) deploy_id = extract_deploy_id_from_deploy_output(output) return deploy_id except NonZeroExitCodeError as e: if "Parsing error" in e.output: raise ParsingError(command=e.command, exit_code=e.exit_code, output=e.output) # TODO out of phlogiston error raise e def get_vdag(self) -> str: return self.rnode_command('vdag') def get_mvdag(self) -> str: return self.rnode_command('mvdag', stderr=False) def get_parsed_mvdag(self) -> Dict[str, Set[str]]: return parse_mvdag_str(self.get_mvdag()) def deploy_string(self, rholang_code: str, private_key: str, phlo_limit:int = DEFAULT_PHLO_LIMIT, phlo_price: int = DEFAULT_PHLO_PRICE) -> str: quoted_rholang = shlex.quote(rholang_code) deploy_out = self.shell_out('sh', '-c', 'echo {quoted_rholang} >/tmp/deploy_string.rho && {rnode_binary} deploy --private-key={private_key} --phlo-limit={phlo_limit} --phlo-price={phlo_price} /tmp/deploy_string.rho'.format( rnode_binary=rnode_binary, quoted_rholang=quoted_rholang, private_key=private_key, phlo_limit=phlo_limit, phlo_price=phlo_price ), stderr=False) return extract_deploy_id_from_deploy_output(deploy_out) def find_deploy(self, deploy_id: str) -> Dict[str, str]: output = self.rnode_command("find-deploy", "--deploy-id", deploy_id, stderr=False) block_info = parse_show_block_output(output) return block_info def propose(self) -> str: try: output = self.rnode_command('propose', stderr=False) block_hash = extract_block_hash_from_propose_output(output) return block_hash except NonZeroExitCodeError as e: if "Must wait for more blocks from other validators" in e.output: raise SynchronyConstraintError(command=e.command, exit_code=e.exit_code, output=e.output) raise e def last_finalized_block(self) -> Dict[str, str]: output = self.rnode_command('last-finalized-block') block_info = parse_show_block_output(output) return block_info def repl(self, rholang_code: str, stderr: bool = False) -> str: quoted_rholang_code = 
shlex.quote(rholang_code)  # continues the assignment begun in the previous chunk
        output = self.shell_out(
            'sh',
            '-c',
            'echo {quoted_rholang_code} | {rnode_binary} repl'.format(quoted_rholang_code=quoted_rholang_code,rnode_binary=rnode_binary),
            stderr=stderr,
        )
        return output

    def cat_forward_file(self, public_key: str) -> str:
        # Read back the generated forward contract for *public_key*.
        return self.shell_out('cat', '/opt/docker/forward_{}.rho'.format(public_key))

    def cat_bond_file(self, public_key: str) -> str:
        # Read back the generated bond contract for *public_key*.
        return self.shell_out('cat', '/opt/docker/bond_{}.rho'.format(public_key))

    # Regex used to split a container log into individual timestamped messages.
    __timestamp_rx = "\\d\\d:\\d\\d:\\d\\d\\.\\d\\d\\d"
    __log_message_rx = re.compile("^{timestamp_rx} (.*?)(?={timestamp_rx})".format(timestamp_rx=__timestamp_rx), re.MULTILINE | re.DOTALL)

    def log_lines(self) -> List[str]:
        """Split the node's log output into per-message chunks."""
        log_content = self.logs()
        return Node.__log_message_rx.split(log_content)

    def deploy_contract_with_substitution(self, substitute_dict: Dict[str, str], rho_file_path: str, private_key: PrivateKey, phlo_limit: int = DEFAULT_PHLO_LIMIT, phlo_price: int = DEFAULT_PHLO_PRICE) -> str:
        """
        Supposed that you have a contract with content like below.

        new x in { x!("#DATA") }

        If you pass a dict {'#DATA': "123456"} as substitute_dict args in this
        func, this method would substitute the string #DATA in the contract
        with 123456, which turns out to be

        new x in { x!("123456") }

        And then deploy the contract in the node
        """
        # Stage a copy of the contract in the shared deploy directory so the
        # container can see (and sed can edit) it.
        shutil.copyfile(rho_file_path, os.path.join(self.local_deploy_dir, os.path.basename(rho_file_path)))
        container_contract_file_path = os.path.join(self.remote_deploy_dir, os.path.basename(rho_file_path))
        # Build one sed script; '/' in keys/values must be escaped for sed.
        substitute_rules = ';'.join([r's/{}/{}/g'.format(key.replace(r'/', r'\/'), value.replace(r'/', r'\/')) for key, value in substitute_dict.items()])
        self.shell_out(
            'sed',
            '-i',
            '-e',
            substitute_rules,
            container_contract_file_path,
        )
        self.deploy(container_contract_file_path, private_key, phlo_limit, phlo_price)
        block_hash = self.propose()
        return block_hash


class LoggingThread(threading.Thread):
    """Streams a container's log lines into *logger* until told to stop."""

    def __init__(self, terminate_thread_event: Event, container: Container, logger: Logger) -> None:
        super().__init__()
        self.terminate_thread_event = terminate_thread_event
        self.container = container
        self.logger = logger

    def run(self) -> None:
        containers_log_lines_generator = self.container.logs(stream=True, follow=True)
        try:
            while True:
                # Poll the stop flag between lines so shutdown is prompt.
                if self.terminate_thread_event.is_set():
                    break
                line = next(containers_log_lines_generator)
                self.logger.info('{}: {}'.format(self.container.name, line.decode('utf-8').rstrip()))
        except StopIteration:
            # Container stream closed; nothing more to forward.
            pass


class DeployThread(threading.Thread):
    """Deploys *contract* and proposes a block, repeated *count* times."""

    def __init__(self, name: str, node: Node, contract: str, count: int, private_key: PrivateKey) -> None:
        threading.Thread.__init__(self)
        self.name = name
        self.node = node
        self.contract = contract
        self.count = count
        self.private_key = private_key

    def run(self) -> None:
        for _ in range(self.count):
            self.node.deploy(self.contract, self.private_key)
            self.node.propose()


def make_container_command(container_command: str, container_command_flags: AbstractSet, container_command_options: Dict) -> str:
    """Render an rnode CLI invocation string from a command, flags and options."""
    opts = ['{} {}'.format(option, argument) for option,
argument in container_command_options.items()]
    flags = ' '.join(container_command_flags)
    result = '{} {} {}'.format(container_command, flags, ' '.join(opts))
    return result


def make_node(
    *,
    docker_client: DockerClient,
    name: str,
    network: str,
    bonds_file: str,
    container_command: str,
    container_command_flags: AbstractSet,
    container_command_options: Dict,
    command_timeout: int,
    extra_volumes: Optional[List[str]],
    allowed_peers: Optional[List[str]],
    image: str = DEFAULT_IMAGE,
    mem_limit: Optional[str] = None,
    wallets_file: Optional[str] = None,
) -> Node:
    """Start an rnode Docker container and wrap it in a Node helper."""
    assert isinstance(name, str)
    assert '_' not in name, 'Underscore is not allowed in host name'
    deploy_dir = make_tempdir("rchain-integration-test")

    # TCP-wrapper style allow/deny files restrict which peers may connect.
    hosts_allow_file_content = \
        "ALL:ALL" if allowed_peers is None else "\n".join("ALL: {}".format(peer) for peer in allowed_peers)
    hosts_allow_file = make_tempfile("hosts-allow-{}".format(name), hosts_allow_file_content)
    hosts_deny_file = make_tempfile("hosts-deny-{}".format(name), "ALL: ALL")

    command = make_container_command(container_command, container_command_flags, container_command_options)

    env = {}
    # Propagate JVM tuning from the host environment into the container.
    java_options = os.environ.get('_JAVA_OPTIONS')
    if java_options is not None:
        env['_JAVA_OPTIONS'] = java_options
        logging.debug('Using _JAVA_OPTIONS: {}'.format(java_options))

    volumes = [
        "{}:/etc/hosts.allow".format(hosts_allow_file),
        "{}:/etc/hosts.deny".format(hosts_deny_file),
        "{}:{}".format(bonds_file, rnode_bonds_file),
        "{}:{}".format(deploy_dir, rnode_deploy_dir),
    ]
    if wallets_file is not None:
        volumes.append('{}:{}'.format(wallets_file, rnode_wallets_file))
    if extra_volumes:
        all_volumes = volumes + extra_volumes
    else:
        all_volumes = volumes

    logging.info('STARTING %s %s', name, command)
    container = docker_client.containers.run(
        image,
        name=name,
        user='root',
        detach=True,
        mem_limit=mem_limit,
        network=network,
        volumes=all_volumes,
        command=command,
        hostname=name,
        environment=env,
    )
    node = Node(
        container=container,
        deploy_dir=deploy_dir,
        command_timeout=command_timeout,
        network=network,
    )
    return node


def get_absolute_path_for_mounting(relative_path: str, mount_dir: Optional[str]=None)-> str:
    """Drone runs each job in a new Docker container FOO. That Docker
    container has a new filesystem. Anything in that container can read
    anything in that filesystem. To read files from HOST, it has to be shared
    though, so let's share /tmp:/tmp. You also want to start new Docker
    containers, so you share /var/run/docker.sock:/var/run/docker.sock. When
    you start a new Docker container from FOO, it's not in any way nested. You
    just contact the Docker daemon running on HOST via the shared docker.sock.
    So when you start a new image from FOO, the HOST creates a new Docker
    container BAR with brand new filesystem. So if you tell Docker from FOO to
    mount /MOUNT_DIR:/MOUNT_DIR from FOO to BAR, the Docker daemon will
    actually mount /MOUNT_DIR from HOST to BAR, and not from FOO to BAR.
    """
    if mount_dir is not None:
        return os.path.join(mount_dir, relative_path)
    return os.path.abspath(os.path.join('resources', relative_path))


def make_bootstrap_node(
    *,
    docker_client: DockerClient,
    network: str,
    bonds_file: str,
    private_key: PrivateKey,
    command_timeout: int,
    allowed_peers: Optional[List[str]] = None,
    mem_limit: Optional[str] = None,
    cli_flags: Optional[AbstractSet] = None,
    cli_options: Optional[Dict] = None,
    wallets_file: Optional[str] = None,
    extra_volumes: Optional[List[str]] = None,
    synchrony_constraint_threshold: float = 0.0,
    max_peer_queue_size: int = 10,
    give_up_after_skipped: int = 0,
    drop_peer_after_retries: int = 0
) -> Node:
    """Start the standalone bootstrap rnode for a test network."""
    container_name = make_bootstrap_name(network)
    container_command_flags = set([
        "--standalone",
        "--prometheus",
        "--no-upnp",
        "--allow-private-addresses"
    ])
    container_command_options = {
        "--port": 40400,
        "--validator-private-key": private_key.to_hex(),
        "--validator-public-key": private_key.get_public_key().to_hex(),
        "--host": container_name,
        "--synchrony-constraint-threshold": synchrony_constraint_threshold,
        "--max-peer-queue-size":
max_peer_queue_size, "--give-up-after-skipped": give_up_after_skipped, "--drop-peer-after-retries": drop_peer_after_retries } if cli_flags is not None: container_command_flags.update(cli_flags) if cli_options is not None: container_command_options.update(cli_options) container = make_node( docker_client=docker_client, name=container_name, network=network, bonds_file=bonds_file, container_command='run', container_command_flags=container_command_flags, container_command_options=container_command_options, command_timeout=command_timeout, extra_volumes=extra_volumes, allowed_peers=allowed_peers, mem_limit=mem_limit if mem_limit is not None else '4G', wallets_file=wallets_file, ) return container def make_container_name(network_name: str, name: str) -> str: return "{network_name}.{name}".format(network_name=network_name, name=name) def make_bootstrap_name(network_name: str) -> str: return make_container_name(network_name=network_name, name='bootstrap') def make_peer_name(network_name: str, name: str) -> str: if name.isdigit(): actual_name = 'peer{}'.format(name) else: actual_name = name return make_container_name(network_name=network_name, name=actual_name) def make_peer( *, docker_client: DockerClient, network: str, name: str, bonds_file: str, command_timeout: int, bootstrap: Node, private_key: PrivateKey, allowed_peers: Optional[List[str]] = None, mem_limit: Optional[str] = None, wallets_file: Optional[str] = None, cli_flags: Optional[AbstractSet] = None, cli_options: Optional[Dict] = None, extra_volumes: Optional[List[str]] = None, synchrony_constraint_threshold: float = 0.0, max_peer_queue_size: int = 10, give_up_after_skipped: int = 0, drop_peer_after_retries: int = 0 ) -> Node: assert isinstance(name, str) assert '_' not in name, 'Underscore is not allowed in host name' name = make_peer_name(network, name) bootstrap_address = bootstrap.get_rnode_address() container_command_flags = set([ "--prometheus", "--no-upnp", "--allow-private-addresses" ]) if cli_flags is 
not None: container_command_flags.update(cli_flags) container_command_options = { "--bootstrap": bootstrap_address, "--validator-private-key": private_key.to_hex(), "--validator-public-key": private_key.get_public_key().to_hex(), "--host": name, "--synchrony-constraint-threshold": synchrony_constraint_threshold, "--max-peer-queue-size": max_peer_queue_size, "--give-up-after-skipped": give_up_after_skipped, "--drop-peer-after-retries": drop_peer_after_retries } if cli_options is not None: container_command_options.update(cli_options) container = make_node( docker_client=docker_client, name=name, network=network, bonds_file=bonds_file, container_command='run', container_command_flags=container_command_flags, container_command_options=container_command_options, command_timeout=command_timeout, extra_volumes=extra_volumes, allowed_peers=allowed_peers, mem_limit=mem_limit if not None else '4G', wallets_file=wallets_file, ) return container @contextlib.contextmanager def started_peer( *, context: TestingContext, network: str, name: str, bootstrap: Node, private_key: PrivateKey, cli_flags: Optional[AbstractSet] = None, cli_options: Optional[Dict] = None, extra_volumes: Optional[List[str]] = None, synchrony_constraint_threshold: float = 0.0 ) -> Generator[Node, None, None]: peer = make_peer( docker_client=context.docker, network=network, name=name, bonds_file=context.bonds_file, bootstrap=bootstrap, private_key=private_key, command_timeout=context.command_timeout, wallets_file=context.wallets_file, cli_flags=cli_flags, cli_options=cli_options, extra_volumes=extra_volumes, synchrony_constraint_threshold=synchrony_constraint_threshold ) try: wait_for_node_started(context, peer) yield peer finally: peer.cleanup() @contextlib.contextmanager def bootstrap_connected_peer( *, context: TestingContext, bootstrap: Node, name: str, private_key: PrivateKey, cli_options: Optional[Dict[str, str]] = None, synchrony_constraint_threshold: float = 0.0 ) -> Generator[Node, None, None]: with 
started_peer(
        context=context,
        network=bootstrap.network,
        name=name,
        bootstrap=bootstrap,
        private_key=private_key,
        cli_options=cli_options,
        synchrony_constraint_threshold=synchrony_constraint_threshold
    ) as peer:
        # Unlike started_peer(), require the approved block before yielding.
        wait_for_approved_block_received_handler_state(context, peer)
        yield peer


def create_peer_nodes(
    *,
    docker_client: DockerClient,
    bootstrap: Node,
    network: str,
    bonds_file: str,
    private_keys: List[PrivateKey],
    command_timeout: int,
    allowed_peers: Optional[List[str]] = None,
    mem_limit: Optional[str] = None,
) -> List[Node]:
    """Start one peer per private key; tear down already-started peers on failure."""
    assert len(set(private_keys)) == len(private_keys), "There shouldn't be any duplicates in the key pairs"

    if allowed_peers is None:
        allowed_peers = [bootstrap.name] + [make_peer_name(network, str(i)) for i in range(0, len(private_keys))]

    result = []
    try:
        for i, private_key in enumerate(private_keys):
            peer_node = make_peer(
                docker_client=docker_client,
                network=network,
                name=str(i),
                bonds_file=bonds_file,
                command_timeout=command_timeout,
                bootstrap=bootstrap,
                private_key=private_key,
                allowed_peers=allowed_peers,
                mem_limit=mem_limit if mem_limit is not None else '4G',
            )
            result.append(peer_node)
    except:
        # Best-effort cleanup of partially started peers, then re-raise.
        for node in result:
            node.cleanup()
        raise
    return result


def make_random_network_name(context: TestingContext, length: int) -> str:
    # Random lowercase suffix so concurrent test runs get distinct networks.
    return ''.join(context.random_generator.choice(string.ascii_lowercase) for m in range(length))


@contextlib.contextmanager
def docker_network(context: TestingContext, docker_client: DockerClient) -> Generator[str, None, None]:
    """Create a throwaway bridge network; remove it on exit."""
    network_name = "rchain-{}".format(make_random_network_name(context, 5))
    docker_client.networks.create(network_name, driver="bridge")
    try:
        yield network_name
    finally:
        for network in docker_client.networks.list():
            if network_name == network.name:
                network.remove()


@contextlib.contextmanager
def started_bootstrap(
    *,
    context: TestingContext,
    network: str,
    cli_flags: Optional[AbstractSet] = None,
    cli_options: Optional[Dict[str, str]] = None,
    extra_volumes: Optional[List[str]] = None,
    synchrony_constraint_threshold: float = 0.0
) -> Generator[Node, None, None]:
    """Start the bootstrap node, wait for it to come up, clean up on exit."""
    bootstrap_node = make_bootstrap_node(
        docker_client=context.docker,
        network=network,
        bonds_file=context.bonds_file,
        private_key=context.bootstrap_key,
        command_timeout=context.command_timeout,
        cli_flags=cli_flags,
        cli_options=cli_options,
        wallets_file=context.wallets_file,
        extra_volumes=extra_volumes,
        synchrony_constraint_threshold=synchrony_constraint_threshold
    )
    try:
        wait_for_node_started(context, bootstrap_node)
        yield bootstrap_node
    finally:
        bootstrap_node.cleanup()


@contextlib.contextmanager
def docker_network_with_started_bootstrap(
    context: TestingContext,
    cli_flags: Optional[AbstractSet] = None,
    cli_options: Optional[Dict] = None,
    synchrony_constraint_threshold: float = 0.0
) -> Generator[Node, None, None]:
    """Fresh network plus a bootstrap that has received the approved block."""
    with docker_network(context, context.docker) as network:
        with started_bootstrap(
            context=context,
            network=network,
            cli_flags=cli_flags,
            cli_options=cli_options,
            synchrony_constraint_threshold=synchrony_constraint_threshold
        ) as bootstrap:
            wait_for_approved_block_received_handler_state(context, bootstrap)
            yield bootstrap


@contextlib.contextmanager
def ready_bootstrap(
    context: TestingContext,
    cli_flags: Optional[AbstractSet] = None,
    cli_options: Optional[Dict] = None,
    extra_volumes: Optional[List[str]] = None
) -> Generator[Node, None, None]:
    """Fresh network plus a started bootstrap (no approved-block wait)."""
    with docker_network(context, context.docker) as network:
        with started_bootstrap(context=context, network=network, cli_flags=cli_flags, cli_options=cli_options, extra_volumes=extra_volumes) as node:
            yield node


def parse_mvdag_str(mvdag_output: str) -> Dict[str, Set[str]]:
    """Parse `rnode mvdag` output ("parent child" per line) into parent -> children."""
    dag_dict: Dict[str, Set[str]] = defaultdict(set)
    lines = mvdag_output.splitlines()
    for line in lines:
        parent_hash, child_hash = line.split(' ')
        dag_dict[parent_hash].add(child_hash)
    return dag_dict


def extract_deploy_id_from_deploy_output(deploy_output: str) -> str:
    # Pull the hex deploy id out of rnode's "Response: Success!" message.
    match = re.match(r'Response: Success!\nDeployId is: ([0-9a-f]+)', deploy_output.strip())
    if match is None:
        raise UnexpectedDeployOutputFormatError(deploy_output)
    return match.group(1)
dokku-installer.py
#!/usr/bin/env python2.7 import cgi import json import os import SimpleHTTPServer import SocketServer import subprocess import sys import threading VERSION = 'v0.5.4' hostname = '' try: command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'" hostname = subprocess.check_output(command, shell=True) if ':' in hostname: hostname = '' except subprocess.CalledProcessError: pass key_file = os.getenv('KEY_FILE', '/root/.ssh/authorized_keys') admin_keys = [] if os.path.isfile(key_file): try: command = "cat {0}".format(key_file) admin_keys = subprocess.check_output(command, shell=True).strip().split("\n") except subprocess.CalledProcessError: pass def check_boot(): if 'onboot' not in sys.argv: return init_dir = os.getenv('INIT_DIR', '/etc/init') systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system') nginx_dir = os.getenv('NGINX_DIR', '/etc/nginx/conf.d') if os.path.exists(init_dir): with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f: f.write("start on runlevel [2345]\n") f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__))) if os.path.exists(systemd_dir): with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f: f.write("[Unit]\n") f.write("Description=Dokku web-installer\n") f.write("\n") f.write("[Service]\n") f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__))) f.write("\n") f.write("[Install]\n") f.write("WantedBy=multi-user.target\n") f.write("WantedBy=graphical.target\n") if os.path.exists(nginx_dir): with open('{0}/dokku-installer.conf'.format(nginx_dir), 'w') as f: f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n") f.write("server {\n") f.write(" listen 80;\n") f.write(" location / {\n") f.write(" proxy_pass http://dokku-installer;\n") f.write(" }\n") f.write("}\n") subprocess.call('rm -f /etc/nginx/sites-enabled/*', shell=True) sys.exit(0) class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_GET(self): content = 
PAGE.replace('{VERSION}', VERSION) content = content.replace('{HOSTNAME}', hostname) content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys)) self.send_response(200) self.end_headers() self.wfile.write(content) def do_POST(self): if self.path not in ['/setup', '/setup/']: return params = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={ 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']}) dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku') if 'vhost' in params and params['vhost'].value == 'true': with open('{0}/VHOST'.format(dokku_root), 'w') as f: f.write(params['hostname'].value) else: try: os.remove('{0}/VHOST'.format(dokku_root)) except OSError: pass with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f: f.write(params['hostname'].value) command = ['sshcommand', 'acl-add', 'dokku', 'admin'] for key in params['keys'].value.split("\n"): proc = subprocess.Popen(command, stdin=subprocess.PIPE) proc.stdin.write(key) proc.stdin.close() proc.wait() if 'selfdestruct' in sys.argv: DeleteInstallerThread() self.send_response(200) self.end_headers() self.wfile.write(json.dumps({'status': 'ok'})) class DeleteInstallerThread(object): def __init__(self, interval=1): thread = threading.Thread(target=self.run, args=()) thread.daemon = True thread.start() def run(self): command = "rm /etc/nginx/conf.d/dokku-installer.conf && /etc/init.d/nginx stop && /etc/init.d/nginx start" try: subprocess.call(command, shell=True) except: pass command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && stop dokku-installer" try: subprocess.call(command, shell=True) except: pass def main(): check_boot() port = int(os.getenv('PORT', 2000)) httpd = SocketServer.TCPServer(("", port), GetHandler) print "Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port) httpd.serve_forever() PAGE = """ <html> <head> <title>Dokku Setup</title> <link rel="stylesheet" href="//netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.min.css" 
/> <script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script> </head> <body> <div class="container" style="width: 640px;"> <form id="form" role="form"> <h1>Dokku Setup <small>{VERSION}</small></h1> <div class="form-group"> <h3><small style="text-transform: uppercase;">Admin Access</small></h3> <label for="key">Public Key</label><br /> <textarea class="form-control" name="keys" rows="7" id="key">{ADMIN_KEYS}</textarea> </div> <div class="form-group"> <h3><small style="text-transform: uppercase;">Hostname Configuration</small></h3> <div class="form-group"> <label for="hostname">Hostname</label> <input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" /> </div> <div class="checkbox"> <label><input id="vhost" name="vhost" type="checkbox" value="true"> Use <abbr title="Nginx will be run on port 80 and backend to your apps based on hostname">virtualhost naming</abbr> for apps</label> </div> <p>Your app URLs will look like:</p> <pre id="example">http://hostname:port</pre> </div> <button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span style="padding-left: 20px;" id="result"></span> </form> </div> <div id="error-output"></div> <script> function setup() { if ($.trim($("#key").val()) == "") { alert("Your admin public key cannot be blank.") return } if ($.trim($("#hostname").val()) == "") { alert("Your hostname cannot be blank.") return } data = $("#form").serialize() $("input,textarea,button").prop("disabled", true); $.post('/setup', data) .done(function() { $("#result").html("Success!") window.location.href = "http://progrium.viewdocs.io/dokku/application-deployment"; }) .fail(function(data) { $("#result").html("Something went wrong...") $("#error-output").html(data.responseText) }); } function update() { if ($("#vhost").is(":checked") && $("#hostname").val().match(/^(\d{1,3}\.){3}\d{1,3}$/)) { alert("In order to use virtualhost naming, the hostname must not be an IP but a valid 
domain name.") $("#vhost").prop('checked', false); } if ($("#vhost").is(':checked')) { $("#example").html("http://&lt;app-name&gt;."+$("#hostname").val()) } else { $("#example").html("http://"+$("#hostname").val()+":&lt;app-port&gt;") } } $("#vhost").change(update); $("#hostname").change(update); update(); </script> </body> </html> """ if __name__ == "__main__": main()
rotaprint.py
#!/usr/bin/env python
"""
Copyright (c) 2020
This file is part of the rotaprint project.

Main script for running rotaprint.

Written by Callum Morrison <callum.morrison@mac.com>
and Hélène Verhaeghe <hv236@bath.ac.uk>, 2020

Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
"""

import http.server
import socketserver
import serial
import time
import re
import asyncio
import websockets
import threading
import concurrent.futures
import logging
import sqlite3
import os
import io
import sys
import traceback
import math

import numpy as np
from skimage.metrics import structural_similarity
import cv2
from json import dumps, loads


class rotaprint:
    # Setup threading pool
    pool = concurrent.futures.ThreadPoolExecutor(max_workers=10)

    # Initialise variables
    # Enables check mode to test gcode first
    check_mode = ""
    # Enable scanning functionality
    scan_mode = ""
    # Radius of part to print on / mm
    radius = ""
    # Length of part to print on / mm
    length = ""
    # Number of parts in batch / int
    batch = ""
    # Current batch part / int
    batch_current = ""
    # Current offset / float
    offset = ""
    # Fraction of total print remaining (where 0 is complete, 1 is not started)
    remaining = 1
    # Quality control override
    qc_override = False

    # Machine status reported to the GUI over the websocket.
    status = {
        "time_elapsed": 0,
        "parts_complete": 0,
        "time_remaining": 0,
        "operation": "Idle",
        "grbl_operation": "Idle",
        "grbl_x": "",
        "grbl_y": "",
        "grbl_z": "",
        "grbl_lockout": 1,
        "print_progress": 0
    }

    def setup_log(self):
        """Configure the 'rotaprint' logger plus an in-memory stream the GUI can poll."""
        # Create normal logger
        log = logging.getLogger("rotaprint")
        log.setLevel(logging.DEBUG)

        # Number of characters already sent
        self.log_history = 0

        # Create variable logger for GUI
        log_capture_string = io.StringIO()
        ch = logging.StreamHandler(log_capture_string)
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '<*>%(asctime)s<~>%(levelname)s<~>%(message)s', '%H:%M:%S')
        ch.setFormatter(formatter)
        log.addHandler(ch)

        # Decrease output of external modules
        logging.getLogger("websockets").setLevel(logging.WARNING)
        logging.getLogger("werkzeug").setLevel(logging.WARNING)

        # if args.debug:
        logging.basicConfig(format='%(name)-12s: %(levelname)-8s %(message)s')

        log.info("Successfully setup logging")
        return log, log_capture_string

    def except_logger(self):
        """Log the currently-handled exception's traceback, line by line."""
        exc_type, exc_value, tb = sys.exc_info()
        error = traceback.format_exception(exc_type, exc_value, tb)
        for line in error:
            # Replace newline to prevent issues when displaying at frontend
            log.error(line)

    def timer(self):
        """Track elapsed time and estimate time remaining while self.active."""
        # Initialise start time
        start_time = time.time()

        while self.active:
            # Do not update if on hold status
            if re.match("Hold", self.status["grbl_operation"]):
                start_time += 1

            # Update time elapsed
            time_elapsed = time.time() - start_time
            time_format = str(round(time_elapsed / 60)) + \
                "m " + str(round(time_elapsed % 60)) + "s"
            self.status["time_elapsed"] = time_format

            # Estimate time remaining
            try:
                time_remaining = self.remaining * \
                    (time_elapsed / (1 - self.remaining))
            except ZeroDivisionError:
                time_remaining = 0
            time_format = "~" + str(round(time_remaining / 60)) + \
                "m " + str(round(time_remaining % 60)) + "s"
            self.status["time_remaining"] = time_format

            time.sleep(1)

    def print_sequence(self):
        """Run a full print job: correct gcode, handle check/scan modes, start batch."""
        log.info("Starting print sequence!")

        # Allow existing timers to end
        self.active = False
        time.sleep(2)

        # Indicate machine is active
        self.active = True

        # Submit timer task
        r.pool.submit(self.timer)

        # Modify gcode as required for colour change and dimensions
        log.info("Correcting GCODE dimensions")
        gc.correct()

        # Change check mode on grbl if required
        if self.check_mode != g.check:
            g.check_mode()

        # Start batch at part 0
        self.batch_current = 0

        # If check mode is enabled
        if self.check_mode:
            # Setup batch size of 1
            self.batch = 1

            # Send gcode once
            self.batch_new_part()
        else:
            if r.scan_mode:
                # Move first part under camera for initial alignment
                g.change_batch(self.batch_current, True)
                log.info(
                    "Scan mode is enabled, performing initial alignment scan.")

                # Setup WCS for correct scan
start position g.offset_y(self.offset) # Scan for reference images v.initial_alignment_scan() self.batch_new_part() time.sleep(5) def batch_new_part(self): # If there are more parts to do if self.batch_current < self.batch: log.info("Starting a new batch part") # Update parts complete status self.status["parts_complete"] = str( self.batch_current) + " of " + str(self.batch) # If not first part if self.batch_current > 0 and r.scan_mode: log.info("Scan mode is enabled, starting scan sequence") # If QC is required if not self.qc_override: log.info( "Scanning part for quality assurance...") # Go back to scanner g.change_batch(self.batch_current - 1, True) # Only one part has completed, no reference for comparison if self.batch_current == 1: v.initial_quality_scan() else: # Scan part for quality checking score = v.quality_scan() log.debug(f"Quality score :{score}") # If score received is lower than needed if score < db.settings["quality_score"]: r.status["grbl_operation"] == "Failed QC" log.error("Quality check failed!") return else: log.info("Quality check passed!") else: # Reset QC override and continue as normal log.info("Quality check overridden. 
Continuing to next part") self.qc_override = False # Go to scanner g.change_batch(self.batch_current, True) log.info("Scanning part for alignment...") # Scan part for alignment self.offset = v.alignment_scan() log.debug(f"Alignment offset: {self.offset}") # Setup WCS for correct print start position g.offset_y(self.offset) # Go to printer g.change_batch(self.batch_current) # Send gcode g.send(gc.gcode, batch=True) elif self.batch_current == self.batch: log.info("All parts complete!") # Update parts complete status self.status["parts_complete"] = str( self.batch_current) + " of " + str(self.batch) self.status["print_progress"] = 100 self.status["time_remaining"] = "0m 0s" # Check for final status update try: g.s.write('?\n'.encode()) g.read() except: r.except_logger() self.active = False self.status["grbl_operation"] = "Done" class gcode: gcode = "" def correct(self): for idx, line in enumerate(self.gcode): # Correct Y and Z commands self.correct_dims(idx, line) # Add correct colour change commands self.correct_colours(idx, line) def correct_dims(self, idx, line): # Correct y and z values to take radius into account # Alter Y value m = re.search("Y([\d.]+)", line) # If command contains Y if m: y = m.string[m.start(1):m.end(1)] y = float(y) # deg = mm * (360 / 2πr) y = y * 360 / (2 * math.pi * r.radius) y = round(y, 2) new_line = line[:m.start(1)] + str(y) + line[m.end(1):] self.gcode[idx] = new_line # Alter Z value m = re.search("Z([\d.]+)", line) # If command contains Z if m: z = m.string[m.start(1):m.end(1)] z = float(z) # if drawing if z == 1: # set Z to part outer radius, plus the offset z = db.settings["z_height"] - \ r.radius + db.settings["z_offset"] else: # set Z to part outer radius, plus the lift height z = db.settings["z_height"] - \ r.radius - db.settings["z_lift"] z = round(z, 2) new_line = line[:m.start(1)] + str(z) + line[m.end(1):] self.gcode[idx] = new_line def correct_colours(self, idx, line): # Replace generic colour change commands with correct 
GCODE m = re.search("<C(\d+?)>", line) # If a colour change command if m: colour = m.string[m.start(1): m.end(1)] colour = float(colour) # Update command correctly based on requested colour command = "G0B" + \ str(db.settings["colour_origin"] + colour * db.settings["colour_offset"]) self.gcode[idx] = command class webserver: def start(self): log.info("Initialising webserver") # Create thread to run webserver.run as a daemon (in background) webserverThread = threading.Thread(target=self.run) webserverThread.daemon = True webserverThread.start() log.info( "Webserver initialised, Accessible at http://localhost:8080/web/index.html") def run(self): PORT = 8080 Handler = http.server.SimpleHTTPRequestHandler with socketserver.TCPServer(("", PORT), Handler) as httpd: log.info(f"serving at port {PORT}") httpd.serve_forever() class database: # General class for connection with backend database settings = { # --- GRBL specific --- "$0": 10, # Length of step pulse, microseconds "$1": 255, # Step idle delay, milliseconds "$2": 0, # Step port invert, mask "$3": 0, # Direction port invert, mask "$4": 0, # Step enable invert, boolean "$5": 0, # Limit pins invert, boolean "$6": 0, # Probe pin invert, boolean "$10": 1, # Status report, mask "$11": 0.010, # Junction deviation, mm "$12": 0.002, # Arc tolerance, mm "$13": 0, # Report inches, boolean "$20": 0, # Soft limits, boolean "$21": 0, # Hard limits, boolean "$22": 1, # Homing cycle, boolean "$23": 0, # Homing dir invert, mask "$24": 25, # Homing feed, mm/min "$25": 500, # Homing seek, mm/min "$26": 25, # Homing debounce, milliseconds "$27": 1, # Homing pull-off, mm "$30": 1000, # Max spindle speed, RPM "$31": 0, # Min spindle speed, RPM "$32": 1, # Laser mode, boolean "$33": 0, # Camera pin invert, boolean "$100": 250, # X steps/mm "$101": 250, # Y steps/° "$102": 250, # Z steps/mm "$103": 250, # A steps/mm "$104": 250, # B steps/° "$110": 500, # X Max rate, mm/min "$111": 500, # Y Max rate, °/min "$112": 500, # Z Max rate, 
class database:
    """SQLite-backed store for machine (GRBL) and application settings.

    Two tables: 'settings' (live values) and 'default_settings'
    (factory values), both (parameter STRING, value REAL) pairs.
    """

    # Default settings, used to seed both tables on first run.
    settings = {
        # --- GRBL specific ---
        "$0": 10,      # Length of step pulse, microseconds
        "$1": 255,     # Step idle delay, milliseconds
        "$2": 0,       # Step port invert, mask
        "$3": 0,       # Direction port invert, mask
        "$4": 0,       # Step enable invert, boolean
        "$5": 0,       # Limit pins invert, boolean
        "$6": 0,       # Probe pin invert, boolean
        "$10": 1,      # Status report, mask
        "$11": 0.010,  # Junction deviation, mm
        "$12": 0.002,  # Arc tolerance, mm
        "$13": 0,      # Report inches, boolean
        "$20": 0,      # Soft limits, boolean
        "$21": 0,      # Hard limits, boolean
        "$22": 1,      # Homing cycle, boolean
        "$23": 0,      # Homing dir invert, mask
        "$24": 25,     # Homing feed, mm/min
        "$25": 500,    # Homing seek, mm/min
        "$26": 25,     # Homing debounce, milliseconds
        "$27": 1,      # Homing pull-off, mm
        "$30": 1000,   # Max spindle speed, RPM
        "$31": 0,      # Min spindle speed, RPM
        "$32": 1,      # Laser mode, boolean
        "$33": 0,      # Camera pin invert, boolean
        "$100": 250,   # X steps/mm
        "$101": 250,   # Y steps/deg
        "$102": 250,   # Z steps/mm
        "$103": 250,   # A steps/mm
        "$104": 250,   # B steps/deg
        "$110": 500,   # X Max rate, mm/min
        "$111": 500,   # Y Max rate, deg/min
        "$112": 500,   # Z Max rate, mm/min
        "$113": 500,   # A Max rate, mm/min
        "$114": 500,   # B Max rate, deg/min
        "$120": 10,    # X Acceleration, mm/sec^2
        "$121": 10,    # Y Acceleration, deg/sec^2
        "$122": 10,    # Z Acceleration, mm/sec^2
        "$123": 10,    # A Acceleration, mm/sec^2
        "$124": 10,    # B Acceleration, deg/sec^2
        "$130": 200,   # X Max travel, mm
        "$131": 720,   # Y Max travel, deg
        "$132": 200,   # Z Max travel, mm
        "$133": 200,   # A Max travel, mm
        "$134": 360,   # B Max travel, deg
        # --- Printer specific ---
        "port": "/dev/ttyS3",
        # --- General settings ---
        "warning_percentage": 10,
        "report_interval": 1,
        "polling_interval": 100,
        "log_history": 500,
        "z_height": 20,
        "z_offset": 0,
        "z_lift": 10,
        # --- Batch settings ---
        "batch_origin": 110,
        "batch_offset": 100,
        "scanner_offset": 50,
        "video_device": 0,
        "reference_images": 4,
        "comparison_images": 20,
        "qc_images": 8,
        "quality_score": 0.5,
        # --- Colour settings ---
        "colour_origin": 10,
        "colour_offset": 90,
        # --- Option defaults ---
        "length": 100,
        "radius": 10,
        "batch": 5,
        "check": False,
        "scan": True,
    }

    # (parameter, value) pairs for executemany when seeding the tables.
    settings_tuple = settings.items()

    def connect(self):
        """Open the settings database, creating and seeding it on first run.

        Refactor: the original duplicated the sqlite3.connect/cursor setup
        in both branches; the connection is now established once.
        """
        log.info("Connecting to database...")
        db_location = 'rotaprint.db'
        first_run = not os.path.isfile(db_location)
        if first_run:
            log.info("Database not found...")
        else:
            log.info("Database already found, using that one")
        # check_same_thread=False: the connection is also used from the
        # websocket handler thread, not just the main thread.
        self.connection = sqlite3.connect(db_location, check_same_thread=False)
        self.cursor = self.connection.cursor()
        if first_run:
            # Create and populate database tables
            self.create_databases()
        else:
            # Update settings using database values
            self.get_settings()

    def create_databases(self):
        """Create both settings tables and seed them with the defaults."""
        log.info("Creating new databases...")
        self.cursor.execute(
            'CREATE TABLE IF NOT EXISTS \'settings\' (parameter STRING, value REAL)')
        self.cursor.execute(
            'CREATE TABLE IF NOT EXISTS \'default_settings\' (parameter STRING, value REAL)')
        # Insert the default settings values into both tables.
        log.debug("Inserting default settings values...")
        self.cursor.executemany(
            'INSERT INTO \'settings\' VALUES(?, ?)', self.settings_tuple)
        self.cursor.executemany(
            'INSERT INTO \'default_settings\' VALUES(?, ?)', self.settings_tuple)
        self.connection.commit()
        log.debug("Settings updated successfully")

    def get_settings(self):
        """Reload self.settings from the DB; return (current, defaults)."""
        log.debug("Fetching settings from database...")
        self.cursor.execute('SELECT * FROM \'settings\'')
        self.settings = dict(self.cursor.fetchall())
        self.cursor.execute('SELECT * FROM \'default_settings\'')
        default_settings = dict(self.cursor.fetchall())
        return self.settings, default_settings

    def set_settings(self, settings):
        """Write an iterable of (value, parameter) pairs back to 'settings'."""
        log.debug("Updating database settings...")
        self.cursor.executemany(
            'UPDATE \'settings\' SET value=? WHERE parameter=?', settings)
        self.connection.commit()
        log.debug("Database successfully updated")
def payloader(self, command, payload):
    """Combine a command and payload into the JSON envelope used on the wire."""
    data = {
        "command": str(command),
        "payload": str(payload)
    }
    # Convert to JSON string and return
    data = dumps(data)
    return data

def handler(self, data):
    """Decode an incoming websocket message and dispatch to its command.

    The message is a JSON object {"command": ..., "payload": ...}; the
    3-letter command selects a nested handler via the switcher table.
    Returns the JSON-encoded response envelope.
    """

    def print_settings(self, payload):
        # Assign per-print options sent by the frontend to the coordinator.
        try:
            settings = loads(payload)
            r.check_mode = settings["check_mode"]
            r.scan_mode = settings["scan_mode"]
            r.radius = float(settings["radius"])
            r.length = float(settings["length"])
            r.batch = int(settings["batch"])
            r.offset = float(settings["offset"])
            return "DONE"
        except:
            log.error("Could not assign print settings")
            r.except_logger()
            return "ERROR"

    def database_set(self, payload):
        # Update a database setting.
        # Load JSON string input to Python dict.
        settings = loads(payload)
        # Convert to (value, parameter) tuples for the SQL UPDATE.
        db_settings = [(v, k) for k, v in settings.items()]
        db.set_settings(db_settings)
        if g.connected:
            g.send_settings()
        else:
            log.error(
                "Not connected to printer - could not update settings. Restart rotaprint!")
        return "DONE"

    def send_manual(self, payload):
        # Send a manual command straight to grbl.
        if g.connected:
            try:
                data = payload + "\n"
                g.s.write(data.encode())
                log.info(f"GRBL > {g.read()}")
                return "DONE"
            except:
                r.except_logger()
                return "ERROR"
        else:
            log.error(
                "Not connected to printer - could not update settings. Restart rotaprint!")
            return "ERROR"

    def receive_gcode(self, payload):
        # Receive gcode and load into the shared gcode object,
        # stripping blank padding and '#' comment lines.
        gc.gcode = [line.strip()
                    for line in payload.splitlines()
                    if not line.startswith('#')]
        return "DONE"

    def print_now(self, payload):
        # Send all supplied GCODE to the printer on a worker thread.
        if gc.gcode == "":
            log.error("No GCODE supplied; cannot print")
            return "ERROR"
        else:
            log.info("Sending gcode to printer...")
            # BUG FIX: the original passed target=r.print_sequence() — it
            # invoked the print synchronously and handed Thread the None
            # result. Pass the callable itself.
            thread = threading.Thread(target=r.print_sequence)
            thread.start()
            return "DONE"

    def home(self, payload):
        g.home()
        return "DONE"

    def fetch_settings(self, payload):
        # Return current + default database settings, '~<>~'-separated.
        log.debug("Retrieving database settings")
        current_settings, default_settings = db.get_settings()
        return dumps(current_settings) + "~<>~" + dumps(default_settings)

    def fetch_value(self, payload):
        # Get the current value of a named status variable.
        variable = {
            "grbl": g.connected,
            "websocket": w.connected,
        }
        return self.payloader(payload, str(variable[payload]))

    def reconnect_printer(self, payload):
        # Reconnect to printer in case of issue.
        g.reconnect()
        return "ERROR" if False else "DONE"

    def return_logs(self, payload):
        # Return only log data produced since the last fetch.
        log_contents = logs.getvalue()
        new_logs = log_contents[r.log_history:]
        r.log_history += len(new_logs)
        return new_logs

    def reset_logs_counter(self, payload):
        r.log_history = 0
        return "DONE"

    def get_current_status(self, payload):
        data = dumps(r.status)
        return data

    def toggle_lighting(self, payload):
        g.toggle_lighting()
        return "DONE"

    def change_batch(self, payload):
        # Move a batch position (0-4) into place; -1 returns A to origin.
        payload = int(payload)
        if payload in range(5):
            g.change_batch(payload, True)
        elif payload == -1:
            g.send(["G0A0"], True)
        else:
            return "ERROR"
        return "DONE"

    def feed_hold(self, payload):
        # grbl realtime feed-hold character.
        g.s.write("!".encode())
        return "DONE"

    def feed_release(self, payload):
        # grbl realtime cycle-start/resume character.
        g.s.write("~".encode())
        return "DONE"

    def quality_control_override(self, payload):
        log.warning("Quality control check overridden!")
        r.qc_override = True
        # BUG FIX: the original wrote `r.batch_new_part` without calling it,
        # so the next part was never started.
        r.batch_new_part()
        return "DONE"

    # Dispatch table: 3-letter wire command -> handler.
    switcher = {
        "SET": print_settings,
        "DBS": database_set,
        "GRB": send_manual,
        "GCD": receive_gcode,
        "PRN": print_now,
        "HME": home,
        "FTS": fetch_settings,
        "RQV": fetch_value,
        "RCN": reconnect_printer,
        "LOG": return_logs,
        "RLC": reset_logs_counter,
        "GCS": get_current_status,
        "LGT": toggle_lighting,
        "BTC": change_batch,
        "FHD": feed_hold,
        "FRL": feed_release,
        "QCO": quality_control_override,
    }

    # Separate JSON string into command and payload.
    data = loads(data)
    command = data["command"].upper()
    payload = data["payload"]
    # LOG/GCS poll constantly; don't spam the debug log with them.
    if not (command == "LOG" or command == "GCS"):
        if len(payload) < 50:
            log.debug(f'WSKT > {command} \"{payload}\"')
        else:
            log.debug(f'WSKT > {command} (long payload)')
    # Call respective command using switcher.
    try:
        response = switcher[command](self, payload)
    except:
        r.except_logger()
        response = "ERROR"
    if not (command == "LOG" or command == "GCS"):
        if len(response) < 50:
            log.debug(f'WSKT < {command} \"{response}\"')
        else:
            log.debug(f'WSKT < {command} (long payload)')
    return self.payloader(command, response)
class vision:
    """Camera capture and image-comparison routines for alignment and QC."""

    def connect(self):
        """Open the capture device configured in the database."""
        log.debug("Activating the desired camera...")
        self.cap = cv2.VideoCapture(int(db.settings["video_device"]))
        # Check if camera opened successfully
        if not self.cap.isOpened():
            log.error(
                "Error opening video stream! Attempted use of vision system will fail!")
        else:
            log.info("Camera connected!")

    def take_picture(self):
        """Return a single frame (image array) from the camera."""
        _, picture_data = self.cap.read()
        return picture_data

    def rotate_and_picture(self, n):
        """Take n pictures evenly spaced around the part, starting at y = 0.

        Delays between shots are derived from the Y-axis acceleration and
        max speed so each picture is taken after motion has stopped.
        """
        # Turn on lighting
        log.debug("Turning lights on")
        g.toggle_lighting(True)
        log.info(f"Taking {n} picture(s) of the current part...")
        if n == 0:
            log.error("The number of pictures specified must be above zero!")
            return
        pictures_list = []
        angle_step = 360 / n
        # Go to start rotation angle
        g.send(["G0Y0"], settings_mode=True)
        y_acceleration = db.settings["$121"]    # Acceleration in deg / s^2
        y_max_speed = db.settings["$111"] / 60  # Max speed in deg / s
        # Distance at which the triangular velocity profile becomes a
        # trapezium. BUG FIX: the original used '^' (bitwise XOR) where
        # '**' was intended, which raises TypeError on floats.
        s_limit = 2 * y_max_speed ** 2 / y_acceleration
        # Worst-case delay for the initial alignment move (up to 180 deg).
        if s_limit > 180:
            # Triangle profile
            delay = 2 * (180 / y_acceleration) ** 0.5
        else:
            # Trapezium profile
            delay = 2 * (s_limit / y_acceleration) ** 0.5 + \
                (180 - s_limit) / y_max_speed
        # Wait for delay with 1s buffer time
        time.sleep(delay + 1)
        # Delay between images to ensure the machine has stopped moving.
        if s_limit > angle_step:
            # Triangle profile
            delay = 2 * (angle_step / y_acceleration) ** 0.5
        else:
            # Trapezium profile
            delay = 2 * (s_limit / y_acceleration) ** 0.5 + \
                (angle_step - s_limit) / y_max_speed
        log.debug(f"Calculated picture delay as {delay}s")
        for x in range(n):
            # Angle is the degrees you want the part to go to
            angle = angle_step * x
            log.debug(f"Taking a picture at {angle}°")
            # Convert requested angle to GCODE command
            gcode = "G0Y" + str(angle)
            g.send([gcode], settings_mode=True)
            # Wait for machine to reach position
            time.sleep(delay)
            # BUG FIX: the original called bare take_picture(); it is an
            # instance method.
            picture_data = self.take_picture()
            pictures_list.append(picture_data)
        log.info(f"{n} picture(s) were sucessfully taken")
        # Turn off lighting
        log.debug("Turning lights off")
        g.toggle_lighting(False)
        return pictures_list

    def initial_alignment_scan(self):
        """Capture and store the alignment reference images (from y = 0)."""
        self.ref_images = self.rotate_and_picture(
            db.settings["reference_images"])

    def initial_quality_scan(self):
        """Capture and store the quality reference images (from y = 0)."""
        self.quality_images = self.rotate_and_picture(
            db.settings["qc_images"])

    def alignment_scan(self):
        """Scan the part and return its angular offset from the references."""
        log.info("Running initial alignment scan")
        comparison_images = self.rotate_and_picture(
            db.settings["comparison_images"])
        offset, _ = self.scan(self.ref_images, comparison_images)
        return offset

    def quality_scan(self):
        """Scan an (already aligned) part and return its similarity score."""
        log.info("Running initial quality scan")
        comparison_images = self.rotate_and_picture(
            db.settings["qc_images"])
        _, score = self.scan(self.quality_images, comparison_images, True)
        return score

    def scan(self, reference_images, comparison_images, aligned=False):
        """Score comparison_images against reference_images.

        Each candidate starting index is tried (or only index 0 when
        aligned=True) and per-channel SSIM scores are averaged.
        Returns (offset_degrees, best_score).
        """
        r = len(reference_images)
        c = len(comparison_images)
        scores = []  # Mean score per candidate alignment
        # Ideal number of comparison images to skip per reference image
        ideal_step = c / r
        if aligned:
            # For quality check scanning where part already aligned
            tests = [0]
        else:
            # For alignment scanning only
            tests = range(c)
        for start in tests:
            # Increment start comparison image: next candidate set.
            log.debug(f"Comparing image {start} of {tests}")
            score_set = []
            for j, i in zip(np.arange(start, start + c, ideal_step, int), range(r)):
                # Compare each comparison image against its supposedly
                # matching (by angle) reference image.
                # If index runs past the end, wrap around to the start,
                # keeping the given angle step.
                if j >= c:
                    j = j - c
                for colour in range(3):
                    # Loop over B,G,R channels - in that order
                    score = structural_similarity(
                        reference_images[i][:, :, colour],
                        comparison_images[j][:, :, colour])
                    score_set.append(score)
            # BUG FIX: the original did `scores[start] = ...` on an empty
            # list (IndexError). Starts are visited in ascending order, so
            # appending preserves the start->index correspondence.
            scores.append(np.mean(score_set))
        # Highest-scoring set wins; ties take the first occurrence.
        max_score = max(scores)
        maxi = scores.index(max_score)
        # Angle corresponding to the best comparison image set.
        offset = 360 * maxi / c
        log.debug(f"Score: {max_score}\nOffset: {offset}")
        return offset, max_score
class grbl:
    """
    Object for control and configuration of grbl firmware and connection.
    """
    startup = {
        "$N0": ""  # GCODE to run on every startup of grbl TODO
    }
    # Checkmode toggle
    check = False
    # Connected flag
    connected = False
    # Lighting toggle
    lighting = False

    def __init__(self):
        """Load port and firmware ($-prefixed) settings from the database."""
        # Get GRBL settings
        self.settings, _ = db.get_settings()
        self.port = self.settings["port"]
        # Keep only firmware parameters (keys containing '$').
        self.settings = {x: self.settings[x]
                         for x in self.settings if x.find("$") >= 0}

    def reconnect(self):
        """Close the serial link (best effort) and connect again."""
        log.debug("Reconnecting to printer...")
        try:
            self.s.close()
            self.connected = False
        except:
            r.except_logger()
            log.warning("Could not disconnect")
        self.connect()

    def connect(self):
        """Open the serial port, wake grbl and push settings."""
        log.info("Connecting to printer...")
        try:
            # Connect to serial
            log.debug(f"Connecting on port {self.port}...")
            self.s = serial.Serial(self.port, 115200, timeout=10)
            log.info("Connection success!")
            # Wake up grbl
            log.debug("GRBL < \"\\r\\n\\r\\n\"")
            self.s.write("\r\n\r\n".encode())
            time.sleep(2)  # Wait for grbl to initialize
            for i in range(3):
                log.debug(self.read())
            self.s.flushInput()  # Flush startup text in serial input
            self.connected = True
            self.send_settings()
        except:
            log.error("Unable to connect to printer!")
            r.except_logger()

    def send_settings(self):
        """Compare firmware settings against the database; send differences.

        Falls back to forcing a full settings send on timeout, lockout
        (error:9) or unexpected data.
        """
        log.info("Checking if firmware settings need updating...")
        log.debug("GRBL <* $$")
        self.s.write("$$\n".encode())
        # Wait until current settings are received
        temp_out = ""
        force_settings = False
        timeout_counter = 0
        while not temp_out.startswith("$"):
            # Wait for settings to start receiving
            if timeout_counter > 15:
                # Timeout condition
                log.error("Printer communication timeout while reading settings")
                log.info("Will reconnect in an attempt to fix")
                self.reconnect()
                log.warning(
                    "Attempting to continue by forcing settings update, if this doesn't work restart the machine and try again!")
                force_settings = True
                break
            if temp_out.find('error:9') >= 0:
                log.warning(
                    "Lockout error detected while attempting to send settings!")
                force_settings = True
            elif temp_out.find('error') >= 0:
                log.debug(f"GRBL > {temp_out}")
                log.error(
                    "Error occured with printer communication during settings setup")
                log.warning("Forcing settings send...")
                force_settings = True
                break
            temp_out = self.read()
            log.debug(f"GRBL > {temp_out}")
            timeout_counter += 1
        current_settings = dict()
        if not force_settings:
            # Read all current settings
            if temp_out.startswith("$"):
                while temp_out.startswith("$"):
                    log.debug(f"GRBL > {temp_out}")
                    # Convert received "$key=value" line to dictionary format
                    dict_out = {str(temp_out.split("=")[0]): float(
                        temp_out.split("=")[1])}
                    current_settings.update(dict_out)
                    # Update read
                    temp_out = self.read()
            else:
                log.debug(f"GRBL > {temp_out}")
                log.error("Unexpected data received from GRBL")
                log.info("All settings will be forced instead")
                force_settings = True
        send_settings = list()
        # Get required settings from Database
        self.settings, _ = db.get_settings()
        # Keep only firmware parameters.
        self.settings = {x: self.settings[x]
                         for x in self.settings if x.find("$") >= 0}
        # Iterate through received data and find outdated settings
        for key in self.settings:
            if self.settings[key] != current_settings.get(key):
                log.debug(f"Out of date setting: {key}")
                send_settings.append(key + "=" + str(self.settings[key]))
            else:
                log.debug(f"Up to date setting: {key}")
        # Send new settings if required
        if len(send_settings) > 0 or force_settings:
            log.info(f"{len(send_settings)} setting(s) need updating!")
            log.info("Sending updated settings...")
            self.send(send_settings, True)
        else:
            log.info("No settings need updating")

    def home(self):
        """Run grbl's built-in homing cycle ($H)."""
        log.info("Homing machine...")
        self.send(["$H"], True)

    def toggle_lighting(self, manual=None):
        """Turn lights and laser on or off; manual=True/False forces a state."""
        log.info("Toggling the lights...")
        I = db.settings["$33"]  # Camera pin invert
        X = manual == None      # No manual override supplied
        L = self.lighting       # Current lighting state
        M = manual
        # Turn lights on if:
        # - lighting is off already, and manual is not true or
        # - manual is true
        # (each case also considered with the invert flag I)
        # NOTE(review): in grbl convention M8 = coolant ON, M9 = OFF; the
        # on/off mapping below looks inverted — confirm against the wiring.
        if (M and not I) or (X and not I and not L) or (M == 0 and I) or (X and I and not L):
            # Turn lights on
            self.send(["M9"], True)
            self.lighting = True
        else:
            # Turn lights off
            self.send(["M8"], True)
            self.lighting = False

    def change_batch(self, batch, scan=False):
        """Move the requested batch part under the printhead (or scanner)."""
        if not scan:
            # Generate GCODE to move requested part under printhead
            command = "G0A" + \
                str(db.settings["batch_origin"] +
                    batch * db.settings["batch_offset"])
        else:
            # Generate GCODE to move requested part under scanner
            command = "G0A" + \
                str(db.settings["batch_origin"] +
                    batch * db.settings["batch_offset"] +
                    db.settings["scanner_offset"])
        # Send to grbl
        self.send([command], True)

    def offset_y(self, offset):
        """Apply a Y work-coordinate offset (G10 L2 P1)."""
        log.debug(f"Setting Y offset to {offset}")
        command = "G10 L2 P1 Y" + str(offset)
        self.send([command], True)

    def send_status_query(self):
        """Request a grbl status report, e.g.
        <Idle|MPos:0.000,0.000,0.000|FS:0.0,0>.
        Recommended query frequency is no more than 5Hz."""
        try:
            self.s.write('?\n'.encode())
        except:
            r.except_logger()

    def periodic_timer(self):
        """Poll grbl status at report_interval while a job is running."""
        while True:
            if self.is_run:
                self.send_status_query()
            time.sleep(db.settings["report_interval"])

    def monitor(self):
        """Start the status-polling daemon thread."""
        self.is_run = False
        log.info("Starting firmware log daemon...")
        timerThread = threading.Thread(target=self.periodic_timer)
        timerThread.daemon = True
        timerThread.start()

    def check_mode(self):
        """Toggle grbl's $C check-mode (dry-run g-code verification)."""
        log.info('Toggling grbl check-mode...')
        self.send(['$C'], True)
        # BUG FIX: the original wrote `self.check != self.check` — a
        # comparison whose result was discarded, so the flag never toggled.
        self.check = not self.check
        if self.check:
            log.info("Check mode enabled!")
            log.info("Inspect logs once complete if any errors occur.")
        else:
            log.info("Check mode disabled!")
        log.info(self.read())
def read(self):
    """Read one line from grbl; fold status reports into r.status.

    Returns "REMOVE" for consumed status reports (callers filter these),
    otherwise the raw response line.
    """
    output = self.s.readline().strip().decode()
    # Contains status report
    if re.search("^<[\\w\\W]+?>$", output):
        # Current grbl operation
        r.status["grbl_operation"] = re.findall(
            "<([\\w\\W]+?)\\|", output)[0]
        # Current grbl position
        MPos = re.findall("MPos:([\\w\\W]+?)\\|", output)
        MPos = MPos[0].split(",")
        r.status["grbl_x"] = "<b>X</b>{:.2f}".format(float(MPos[0]))
        r.status["grbl_y"] = "<b>Y</b>{:.2f}".format(float(MPos[1]))
        r.status["grbl_z"] = "<b>Z</b>{:.2f}".format(float(MPos[2]))
        return "REMOVE"
    # Lockout message warning
    if re.search("\$X", output) or re.search("error:9", output):
        r.status["grbl_lockout"] = 1
    return output

def send(self, data, settings_mode=False, batch=False):
    """Stream a list of g-code lines to grbl.

    settings_mode=True uses simple call-response streaming (required for
    EEPROM-writing commands); otherwise the character-counting protocol
    keeps grbl's 128-byte RX buffer full. batch=True runs on the worker
    pool and advances the batch afterwards.
    """
    def _sender(self, **args):
        l_count = 0
        error_count = 0
        start_time = time.time()
        if settings_mode:
            # Send settings via simple call-response streaming. Settings
            # must be streamed this way since the EEPROM write cycles
            # shut off the serial interrupt.
            self.is_run = True
            for line in data:
                l_count += 1  # Iterate the line counter
                l_block = line.strip()  # Strip all EOL characters for consistency
                # Asterisk indicates code is sent using settings mode
                log.debug(f"GRBL <* {str(l_count)}: {l_block}")
                # Send g-code block to grbl
                self.s.write((l_block + '\n').encode())
                while True:
                    # Wait for grbl response with carriage return
                    out = self.read()
                    if out.find('ok') >= 0:
                        log.debug(f"GRBL > {str(l_count)}: {out}")
                        break
                    elif out.find('error') >= 0:
                        log.warning(f"GRBL > {str(l_count)}: {out}")
                        error_count += 1
                        break
                    elif out.find('ALARM') >= 0:
                        log.error(f"GRBL > {str(l_count)}: {out}")
                    else:
                        if out.find('REMOVE') < 0:
                            log.debug(f"GRBL > {out}")
        else:
            # Aggressive streaming: count characters in flight so grbl's
            # serial read buffer is kept full without overflowing, instead
            # of waiting for call-response on every line.
            log.debug("Stream mode")
            self.is_run = True
            g_count = 0
            c_line = []
            rx_buffer_size = 128
            gcode_length = len(data)
            for line in data:
                l_count += 1  # Iterate line counter
                # Calculate percentage complete
                r.status["print_progress"] = (g_count / gcode_length) * 100
                # Estimate time remaining
                r.remaining = ((r.batch - 1 - r.batch_current) +
                               (gcode_length - g_count) / gcode_length) / r.batch
                # Strip comments/spaces/new line and capitalize
                l_block = re.sub(r'\s|\(.*?\)', '', line).upper()
                # Track number of characters in grbl serial read buffer
                c_line.append(len(l_block) + 1)
                out = ''
                # BUG FIX: the original condition was
                #   sum(c_line) >= rx_buffer_size - 1 | self.s.inWaiting()
                # where '|' binds tighter than '>=', i.e. it compared
                # against (127 | inWaiting()). The character-counting
                # protocol intends: drain while the buffer would overflow
                # OR there is a response waiting.
                while sum(c_line) >= rx_buffer_size - 1 or self.s.inWaiting():
                    out_temp = self.read()  # Wait for grbl response
                    if out_temp.find('ok') < 0 and out_temp.find('error') < 0:
                        if out_temp.find('REMOVE') < 0:
                            # Debug response
                            log.debug(f"GRBL > {out_temp}")
                    else:
                        if out_temp.find('error') >= 0:
                            error_count += 1
                        g_count += 1  # Iterate g-code counter
                        log.debug(f"GRBL > {str(g_count)}: {out_temp}")
                        # Delete the block character count corresponding
                        # to the last 'ok'
                        del c_line[0]
                data_to_send = l_block + '\n'
                # Send g-code block to grbl
                self.s.write(data_to_send.encode())
                log.debug(f"GRBL < {str(l_count)}: {l_block}")
            # Wait until all responses have been received.
            while l_count > g_count:
                out_temp = self.read()  # Wait for grbl response
                if out_temp.find('ok') < 0 and out_temp.find('error') < 0:
                    if out_temp.find('REMOVE') < 0:
                        log.debug(f"GRBL > {out_temp}")  # Debug response
                else:
                    if out_temp.find('error') >= 0:
                        error_count += 1
                    g_count += 1  # Iterate g-code counter
                    log.debug(f"GRBL > {str(g_count)}: {out_temp}")
                    # Delete the block character count corresponding to
                    # the last 'ok'
                    del c_line[0]
        end_time = time.time()
        log.info(f"Time elapsed: {str(end_time-start_time)}")
        self.is_run = False
        if self.check:
            log.info("Checking response...")
            if error_count > 0:
                log.error(
                    f"Check failed: {error_count} errors found! See output for details.")
                quit()
            else:
                log.info("Check passed: No errors found in g-code program.")
        # Request next batch if required
        if batch:
            r.batch_current += 1
            r.batch_new_part()

    # Submit task to pool (batch jobs run asynchronously)
    if batch:
        r.pool.submit(_sender, self, data=data,
                      settings_mode=settings_mode, batch=batch)
    else:
        _sender(self, data=data, settings_mode=settings_mode)

if __name__ == "__main__":
    # --- SETUP SEQUENCE ---
    # Connect general use class
    r = rotaprint()
    # Setup logging
    log, logs = r.setup_log()
    # Connect backend database
    db = database()
    db.connect()
    # Connect gcode class
    gc = gcode()
    # Start GUI websocket
    w = websocket()
    w.connect()
    # Start webserver
    webserver().start()
    # Connect grbl
    g = grbl()   # GRBL object
    g.connect()  # Serial connection
    # Connect vision class
    v = vision()
    v.connect()
    # Set up monitoring thread
    g.monitor()
# ---- CVPlatform.py (start of second concatenated source file) ----
import os import sys import threading import numpy as np import cv2 import huicui from PyQt5 import QtCore, QtGui from PyQt5.QtGui import * from PyQt5.QtWidgets import * from huicui import Image, Contour from huicui.lib import helper from huicui.operation import ImgOperation, ImgSample from helperPackage.affineTransformMatrixDialog import AffineTransformMatrixDialog from helperPackage.perspectiveTransformMatrixDialog import PerspectiveTransformMatrixDialog from helperPackage.structuringElementDialog import StructuringElementDialog from operationPackage.contourSelectDialog import ContourSelectDialog from operationPackage.foregroundExtractionDialog import ForegroundExtractionDialog from operationPackage.FourierTFDialog import FourierTFDialog from operationPackage.geometricTFDialog import GeometricTFDialog from operationPackage.histDialog import HistDialog from helperPackage.rotationMatrix2DDialog import RotationMatrix2DDialog from operationPackage.segmentationDialog import SegmentationDialog from operationPackage.addDialog import AddDialog from operationPackage.cannyDialog import CannyDialog from helperPackage.commonHelper import CommonHelper from operationPackage.contoursDialog import ContoursDialog from operationPackage.gradientDialog import GradientDialog from operationPackage.houghTFDialog import HoughTFDialog from operationPackage.logicDialog import LogicDialog from operationPackage.matchTemplateDialog import MatchTemplateDialog from operationPackage.morphDialog import morphDialog from operationPackage.samplingDialog import samplingDialog from operationPackage.smoothDialog import SmoothDialog from operationPackage.thresholdDialog import ThresholdDialog from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas import matplotlib.pyplot as plt class MainWindow(QMainWindow): isClose = False isCapture = False num = 1 selection = True currentIndex = 0 LaplacianSample = [] def __init__(self, parent=None): super(MainWindow, self).__init__(parent) 
self.cwd = os.getcwd() self.imageList = [Image()] self.image = Image() self.imageOperation = ImgOperation(copy=False) self.imgSample = ImgSample() self.contours = None self.channelMat = [] self.initUI() def initUI(self): self.resize(1500, 700) self.setWindowTitle("汇萃计算机视觉教学平台") self.center() """ 定义顶部导航栏 """ projectBar = self.menuBar() projectFile = projectBar.addMenu("项目") projectFile.addAction("新建工程") projectFile.addAction("导入工程") projectFile.addAction("导出工程") projectFile.triggered[QAction].connect(self.projectAction) operationBar = self.menuBar() operationFile = operationBar.addMenu("图片操作") singleImageOperationMenu = operationFile.addMenu("单图片操作") multiImageOperationMenu = operationFile.addMenu("多图片操作") grayTF = singleImageOperationMenu.addMenu("灰度转换") grayTF.addAction("转成灰度图像") grayTF.addAction("转成彩色图像") threshMenu = singleImageOperationMenu.addMenu("阈值处理") threshMenu.addAction("自定义阈值处理") threshMenu.addAction("自适应阈值处理") smoothMenu = singleImageOperationMenu.addMenu("平滑处理") smoothMenu.addAction("滤波") smoothMenu.addAction("2D卷积") singleImageOperationMenu.addAction("加法运算") houghMenu = singleImageOperationMenu.addMenu("霍夫变换") houghMenu.addAction("霍夫直线变换") houghMenu.addAction("霍夫圆环变换") singleImageOperationMenu.addAction("模板匹配") ##数据显示 addMenu = multiImageOperationMenu.addMenu("多图片加法运算") addMenu.addAction("多图片相加") addMenu.addAction("图片加权和") multiImageOperationMenu.addAction("逻辑操作") geometricTFMenu = multiImageOperationMenu.addMenu("几何变换") geometricTFMenu.addAction("简单几何变换") geometricTFMenu.addAction("图片重映射") multiImageOperationMenu.addAction("形态学操作") multiImageOperationMenu.addAction("梯度呈现") multiImageOperationMenu.addAction("边缘检测") multiImageOperationMenu.addAction("图像取样") multiImageOperationMenu.addAction("图像轮廓") multiImageOperationMenu.addAction("图片灰度统计") ##数据显示 multiImageOperationMenu.addAction("傅里叶滤波") multiImageOperationMenu.addAction("图像分割") multiImageOperationMenu.addAction("前景提取") operationFile.triggered[QAction].connect(self.operationAction) HelpBar = 
self.menuBar() HelpFile = HelpBar.addMenu("帮助") HelpFile.addAction("仿射变换旋转矩阵") HelpFile.addAction("仿射变换矩阵") HelpFile.addAction("透视变换矩阵") HelpFile.addAction("形态学操作核结构") HelpFile.triggered[QAction].connect(self.helpAction) """ 定义layout内总体布局 """ self.vLayout = QVBoxLayout(self) """ 定义头部布局 """ self.headBox = QGroupBox(self) self.headHLayout = QHBoxLayout(self) self.openCamera = QPushButton(self) self.openCamera.setText("打开相机") self.captureCamera = QPushButton(self) self.captureCamera.setText("截取视频") self.cameraSet = QPushButton(self) self.cameraSet.setText("相机设置") self.functionAdd = QPushButton(self) self.functionAdd.setText("功能追加") self.functionAdd.setStyleSheet("width:80px; height:100px;") self.functionAdd.setProperty("name", "functionAdd") self.openCamera.clicked.connect(self.cameraOpen) self.captureCamera.clicked.connect(self.captureVideo) self.headHLayout.addWidget(self.openCamera) self.headHLayout.addWidget(self.captureCamera) self.headHLayout.addWidget(self.cameraSet) self.headHLayout.addWidget(self.functionAdd) self.headHLayout.addStretch() self.headBox.setGeometry(0, 0, 1500, 100) self.headBox.setLayout(self.headHLayout) self.headBox.setProperty("name", "headBox") # self.headBox.setStyleSheet("border-bottom:2px solid #0c0e11 ;") self.vLayout.addWidget(self.headBox) self.bottomLayout = QHBoxLayout(self) self.bottomLeftLayout = QVBoxLayout(self) self.functionBox = QGroupBox(self) self.functionHLayout = QHBoxLayout(self) self.projectCB = QComboBox(self) font = QtGui.QFont() font.setPointSize(15) self.projectCB.setFont(font) self.projectCB.addItem("未处理图像") # self.projectCB.setGeometry(10, 100, 120, 55) self.projectCB.currentIndexChanged.connect(self.selectionchange) self.projectCB.setProperty("name", "projectCB") self.functionHLayout.addWidget(self.projectCB) self.functionHLayout.addStretch() self.functionBox.setLayout(self.functionHLayout) self.functionBox.setGeometry(0, 100, 820, 55) self.functionBox.setProperty("name", "functionBox") 
self.bottomLeftLayout.addWidget(self.functionBox) self.videoWidget = QWidget(self) self.videoWidget.setGeometry(QtCore.QRect(0, 150, 820, 500)) self.videoLayout = QVBoxLayout(self) self.figure = plt.figure() self.canvas = FigureCanvas(self.figure) plt.subplot(111) self.figure.patch.set_facecolor('#111111') self.canvas.draw() self.videoLayout.addWidget(self.canvas) self.videoWidget.setLayout(self.videoLayout) self.bottomLeftLayout.addWidget(self.videoWidget) self.bottomLayout.addLayout(self.bottomLeftLayout) self.bottomRightBox = QGroupBox(self) self.bottomRightLayout = QVBoxLayout(self) self.dataBox = QGroupBox(self) self.dataForm = QGridLayout(self) self.dataBox.setLayout(self.dataForm) self.bottomRightLayout.addWidget(self.dataBox) # self.relatedDataLabel = QLabel(self) # font = QtGui.QFont() # font.setPointSize(35) # self.relatedDataLabel.setFont(font) # self.relatedDataLabel.setText("相关数据显示") # self.relatedDataLabel.setProperty("name", "relatedDataLabel") # self.relatedDataLabel.setStyleSheet("width:400px;height:300px") # self.relatedDataLabel.resize(400,300) # self.relatedDataLabel.setGeometry(QtCore.QRect(0, 200, 400, 300)) # self.bottomRightLayout.addWidget(self.relatedDataLabel) # self.bottomRightLayout.setGeometry(QtCore.QRect(820, 100, 680, 555)) self.dataWidget = QWidget(self) # self.dataWidget.setGeometry(QtCore.QRect(0, 150, 820, 500)) self.dataLayout = QVBoxLayout(self) # self.dataFigure = plt.figure() # self.dataCanvas = FigureCanvas(self.dataFigure) # self.figure = plt.figure() # self.canvas = FigureCanvas(self.figure) # # plt.subplot(111) # self.dataFigure.patch.set_facecolor('#111111') # self.dataCanvas.draw() # self.dataLayout.addWidget(self.dataCanvas) self.dataWidget.setLayout(self.dataLayout) self.bottomRightLayout.addWidget(self.dataWidget) # self.bottomRightLayout.addStretch() self.bottomRightBox.setLayout(self.bottomRightLayout) self.bottomRightBox.setGeometry(820, 100, 680, 555) self.bottomRightBox.setProperty("name", "bottomRightBox") 
self.bottomLayout.addWidget(self.bottomRightBox) self.bottomLayout.addStretch() self.vLayout.addLayout(self.bottomLayout) self.vLayout.addStretch() # self.setLayout(self.vLayout) def center(self): """ 把窗口位置移到中心 :return: """ qr = self.frameGeometry() cp = QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def projectAction(self, q): """ 项目菜单栏操作监听 :param q: :return: """ if q.text() == "新建工程": self.image = Image() MainWindow.selection = False name = "未处理图像" + str(MainWindow.num) self.projectCB.addItem(name) MainWindow.currentIndex = self.projectCB.findText(name) self.imageList.append(self.image) self.projectCB.setCurrentIndex(MainWindow.currentIndex) self.figure.clear() plt.subplot(111) self.figure.patch.set_facecolor('#111111') self.canvas.draw() elif q.text() == "导入工程": print("导入") imgName, imgType = QFileDialog.getOpenFileName(self, "上传图片", self.cwd, "*.jpg;;*.png;;All Files(*)") if imgName == "": return self.image = Image(fileName=imgName) start = imgName.rindex("/") end = imgName.rindex(".") name = imgName[start + 1:end] MainWindow.selection = False self.projectCB.addItem(name) MainWindow.currentIndex = self.projectCB.findText(name) self.imageList.append(self.image) self.projectCB.setCurrentIndex(MainWindow.currentIndex) self.displayImage() elif q.text() == "导出工程": print("导出") savePath, savType = QFileDialog.getSaveFileName(self, "保存图片", self.cwd, "*.jpg;;*.png;;All Files(*)") if savePath == "": return self.image.writeImg(savePath=savePath) # elif q.text() == "打开摄像头": # MainWindow.isClose = False # self.isCamera = True # self.cap = cv2.VideoCapture(0) # th = threading.Thread(target=self.Display) # th.start() else: MainWindow.isClose = False self.isCamera = False imgName, imgType = QFileDialog.getOpenFileName(self, "上传图片", self.cwd, "*.mp4;;*.avi;;All Files(*)") self.cap = cv2.VideoCapture(imgName) self.frameRate = self.cap.get(cv2.CAP_PROP_FPS) th = threading.Thread(target=self.Display) th.start() def helpAction(self, q): if 
q.text() == "仿射变换旋转矩阵": dialog = RotationMatrix2DDialog() result = dialog.exec_() if result == 0: return centerValLine, angleValLine, scaleLine, downloadLine = dialog.getData() print(centerValLine) center = np.fromstring(centerValLine[1:-1], sep=',') if is_number(angleValLine) and is_number(scaleLine): print(tuple(center)) M = helper.getRotationMatrix2D(tuple(center), float(angleValLine), float(scaleLine)) np.savetxt(downloadLine, M, fmt='%f', delimiter=' ') elif q.text() == "仿射变换矩阵": dialog = AffineTransformMatrixDialog() result = dialog.exec_() if result == 0: return srcLine, dstLine, downloadLine = dialog.getData() pt1 = self.transform(srcLine, (3, 2)) pt2 = self.transform(dstLine, (3, 2)) M = helper.getAffineTransform(pt1, pt2) np.savetxt(downloadLine, M, fmt='%f', delimiter=' ') elif q.text() == "透视变换矩阵": dialog = PerspectiveTransformMatrixDialog() result = dialog.exec_() if result == 0: return srcLine, dstLine, downloadLine = dialog.getData() pt1 = self.transform(srcLine, (4, 2)) pt2 = self.transform(dstLine, (4, 2)) M = helper.getPerspectiveTransform(pt1, pt2) np.savetxt(downloadLine, M, fmt='%f', delimiter=' ') elif q.text() == "形态学操作核结构": dialog = StructuringElementDialog() result = dialog.exec_() if result == 0: return shapeTypeCB, ksizeLine, downloadLine = dialog.getData() if shapeTypeCB == "MORPH_RECT": shapeType = huicui.MORPH_RECT elif shapeTypeCB == "MORPH_CROSS": shapeType = huicui.MORPH_CROSS else: shapeType = huicui.MORPH_ELLIPSE ksize = np.fromstring(ksizeLine[1:-1], sep=',') M = helper.getStructuringElement(shapeType, tuple(ksize)) np.savetxt(downloadLine, M, fmt='%f', delimiter=' ') def operationAction(self, q): """ 操作菜单栏操作监听 :param q: :return: """ try: frame = self.image.getMat() except Exception: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("图片对象为空 ") msg.setInformativeText("请先导入一个图片项目") msg.setWindowTitle("出错提示") msg.setStandardButtons(QMessageBox.Ok) msg.exec_() return if q.text() == "转成灰度图像": print("转换成灰度图像") if 
len(self.image.getMat().shape) == huicui.THREE_CHANNEL: self.channelMat = self.image.split() self.image.cvtColor(huicui.COLOR_BGR2GRAY, copy=False) self.imageList[MainWindow.currentIndex] = self.image self.displayImage() elif q.text() == "转成彩色图像": print("转换成彩色图像") if len(self.channelMat) == 0: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("图片对象为空 ") msg.setInformativeText("请先导入一个图片项目") msg.setWindowTitle("出错提示") msg.setStandardButtons(QMessageBox.Ok) msg.exec_() return self.image = self.imageOperation.merge(self.channelMat) self.displayImage() elif q.text() == "自定义阈值处理": dialog = ThresholdDialog(self, flag=0) result = dialog.exec_() if result == 0: return threshType, globalThreshType, threshLine, maxValueLine = dialog.getForm() if threshType == "THRESH_BINARY": threshTypeValue = 0 elif threshType == "THRESH_BINARY_INV": threshTypeValue = 1 elif threshType == "THRESH_TRUNC": threshTypeValue = 2 elif threshType == "THRESH_TOZERO": threshTypeValue = 3 else: threshTypeValue = 4 if globalThreshType == " ": globalThreshTypeValue = 0 elif globalThreshType == "THRESH_OTSU": if len(self.image.getMat().shape) == huicui.THREE_CHANNEL: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("图片对象必须为灰度图像 ") msg.setInformativeText("使用全局阈值时图片对象必须是灰度图像") msg.setWindowTitle("出错提示") msg.setStandardButtons(QMessageBox.Ok) msg.exec_() return globalThreshTypeValue = 8 else: globalThreshTypeValue = 16 if is_number(threshLine) and is_number(maxValueLine): self.image.threshold(float(threshLine), float(maxValueLine), threshTypeValue + globalThreshTypeValue, copy=False) self.displayImage() elif q.text() == "自适应阈值处理": dialog = ThresholdDialog(self, flag=1) result = dialog.exec_() if result == 0: return threshTypeCB, adaptiveMethodCB, blockSizeLine, CLine, maxValueLine = dialog.getForm() if threshTypeCB == "THRESH_BINARY": threshType = 0 else: threshType = 1 if adaptiveMethodCB == "ADAPTIVE_THRESH_GAUSSIAN_C": adaptiveMethod = 1 else: adaptiveMethod = 0 if 
is_number(blockSizeLine) and is_number(maxValueLine) and is_number(CLine): self.image.adaptiveThreshold(float(maxValueLine), adaptiveMethod, threshType, int(blockSizeLine), float(CLine), copy=False) self.displayImage() elif q.text() == "加法运算": dialog = AddDialog(self, flag=1) result = dialog.exec_() if result == 0: return numberText = dialog.getData() if is_number(numberText): self.image.addNumber(float(numberText), copy=False) self.displayImage() elif q.text() == "多图片相加": dialog = AddDialog(self, flag=2) result = dialog.exec_() if result == 0: return imgName = dialog.getData() img = Image(fileName=imgName) self.imageOperation.add(self.image, img) self.displayImage() elif q.text() == "图片加权和": dialog = AddDialog(self, flag=3) result = dialog.exec_() if result == 0: return imgName, alphaText, betaText, gammaText = dialog.getData() if is_number(alphaText) and is_number(betaText) and is_number(gammaText): img = Image(fileName=imgName) alpha = float(alphaText) beta = float(betaText) gamma = float(gammaText) self.imageOperation.addWeighted(self.image, img, alpha=alpha, beta=beta, gamma=gamma) self.displayImage() elif q.text() == "滤波": dialog = SmoothDialog(self, flag=0) result = dialog.exec_() if result == 0: return data = dialog.getData() if data[0] == "均值滤波": smoothType = huicui.SMOOTH_BLUR ksize = np.fromstring(data[1][1:-1], sep=',') self.image.smooth(tuple(ksize), smoothType, copy=False) elif data[0] == "方框滤波": smoothType = huicui.SMOOTH_BOXFILTER ksize = np.fromstring(data[1][1:-1], sep=',') if is_number(data[2]): ddepth = int(data[2]) self.image.smooth(tuple(ksize), smoothType, ddepth=ddepth, copy=False) elif data[0] == "高斯滤波": smoothType = huicui.SMOOTH_GAUSSIANBLUR ksize = np.fromstring(data[1][1:-1], sep=',') if data[2] == "": sigmaX = 0 else: sigmaX = data[2] if data[3] == "": sigmaY = 0 else: sigmaY = data[3] if is_number(sigmaX) and is_number(sigmaY): self.image.smooth(tuple(ksize), smoothType, sigma1=float(sigmaX), sigma2=float(sigmaY), copy=False) elif 
data[0] == "中值滤波": smoothType = huicui.SMOOTH_MEDIANBLUR ksize = data[1] if is_number(ksize): self.image.smooth(int(ksize), smoothType, copy=False) else: smoothType = huicui.SMOOTH_BILATERALFILTER d = data[1] if is_number(d): d = int(d) if data[2] == "": sigmaColor = 0 else: sigmaColor = data[2] if data[3] == "": sigmaSpace = 0 else: sigmaSpace = data[3] if is_number(sigmaColor) and is_number(sigmaSpace): self.image.smooth(d, smoothType, sigma1=float(sigmaColor), sigma2=float(sigmaSpace), copy=False) self.displayImage() elif q.text() == "2D卷积": dialog = SmoothDialog(self, flag=1) result = dialog.exec_() if result == 0: return kernelPath = dialog.getData() if kernelPath == "": return kernel = np.loadtxt(kernelPath, delimiter=' ') kernel = kernel.astype(np.float32) print(kernelPath) # if is_number(deltaLine): self.image.filter2D(kernel, -1, copy=False) self.displayImage() elif q.text() == "逻辑操作": dialog = LogicDialog(self) result = dialog.exec_() if result == 0: return logicTypeCB, imageLine, = dialog.getData() if logicTypeCB == "BITWISE_AND": logicType = huicui.BITWISE_AND image2 = Image(fileName=imageLine) self.imageOperation.bitwise(self.image, image2, logicType) elif logicTypeCB == "BITWISE_OR": logicType = huicui.BITWISE_OR image2 = Image(fileName=imageLine) self.imageOperation.bitwise(self.image, image2, logicType) elif logicTypeCB == "BITWISE_NOT": logicType = huicui.BITWISE_NOT self.imageOperation.bitwise(self.image, None, logicType) else: logicType = huicui.BITWISE_XOR image2 = Image(fileName=imageLine) self.imageOperation.bitwise(self.image, image2, logicType) self.displayImage() elif q.text() == "简单几何变换": dialog = GeometricTFDialog(self, flag=0) result = dialog.exec_() if result == 0: return data = dialog.getData() if data[0] == "缩放": geometricTF_type = huicui.GEOMETRICTF_RESIZE ksize = np.fromstring(data[1][1:-1], sep=',') self.imageOperation.simpleGeometricTF(geometricTF_type, self.image, tuple(ksize)) self.displayImage() elif data[0] == "翻转": 
geometricTF_type = huicui.GEOMETRICTF_FLIP flipCodeCB = data[1] if flipCodeCB == "绕x轴旋转": flipCode = 0 elif flipCodeCB == "绕y轴旋转": flipCode = 1 else: flipCode = -1 self.imageOperation.simpleGeometricTF(geometricTF_type, self.image, flipCode=flipCode) self.displayImage() elif data[0] == "仿射": geometricTF_type = huicui.GEOMETRICTF_WARPAFFINE ksize = np.fromstring(data[1][1:-1], sep=',') if data[2] is True: pt1 = data[3] pt2 = data[4] pt1 = self.transform(pt1, (3, 2)) pt2 = self.transform(pt2, (3, 2)) M = helper.getAffineTransform(pt1, pt2) else: M = np.loadtxt(data[3], delimiter=' ') ksize = ksize.astype(np.float32) self.imageOperation.simpleGeometricTF(geometricTF_type, self.image, tuple(ksize), M=M) self.displayImage() else: geometricTF_type = huicui.GEOMETRICTF_WARPPERSPECTIVE ksize = np.fromstring(data[1][1:-1], sep=',') ksize = ksize.astype(np.float32) if data[2] is True: pt1 = data[3] pt2 = data[4] pt1 = self.transform(pt1, (4, 2)) pt2 = self.transform(pt2, (4, 2)) M = helper.getPerspectiveTransform(pt1, pt2) else: M = np.loadtxt(data[2], delimiter=' ') self.imageOperation.simpleGeometricTF(geometricTF_type, self.image, tuple(ksize), M=M) self.displayImage() elif q.text() == "图片重映射": dialog = GeometricTFDialog(self, flag=1) result = dialog.exec_() if result == 0: return mapXLine, mapYLine, interpolationTypeCB = dialog.getData() if interpolationTypeCB == "INTER_NEAREST": interpolationType = huicui.INTER_NEAREST elif interpolationTypeCB == "INTER_LINEAR": interpolationType = huicui.INTER_LINEAR elif interpolationTypeCB == "INTER_CUBIC": interpolationType = huicui.INTER_CUBIC elif interpolationTypeCB == "INTER_LANCZOS4": interpolationType = huicui.INTER_LANCZOS4 elif interpolationTypeCB == "INTER_LINEAR_EXACT": interpolationType = huicui.INTER_LINEAR_EXACT elif interpolationTypeCB == "WARP_FILL_OUTLIERS": interpolationType = huicui.WARP_FILL_OUTLIERS else: interpolationType = huicui.WARP_INVERSE_MAP mapX = np.loadtxt(mapXLine, delimiter=' ') mapY = 
np.loadtxt(mapYLine, delimiter=' ') mapX = mapX.astype(np.float32) mapY = mapY.astype(np.float32) self.imageOperation.remap(self.image, mapX, mapY, interpolationType) self.displayImage() elif q.text() == "形态学操作": dialog = morphDialog(self) result = dialog.exec_() if result == 0: return data = dialog.getData() morphType = data[0] iterationsLine = data[1] if is_number(iterationsLine): if morphType == "MORPH_ERODE": morph = huicui.MORPH_ERODE elif morphType == "MORPH_DILATE": morph = huicui.MORPH_DILATE elif morphType == "MORPH_OPEN": morph = huicui.MORPH_OPEN elif morphType == "MORPH_CLOSE": morph = huicui.MORPH_CLOSE elif morphType == "MORPH_GRADIENT": morph = huicui.MORPH_GRADIENT elif morphType == "MORPH_TOPHAT": morph = huicui.MORPH_TOPHAT elif morphType == "MORPH_BLACKHAT": morph = huicui.MORPH_BLACKHAT else: morph = huicui.MORPH_HITMISS """ 读取文件内容,转换成核 """ if data[2] is False: kernelPath = data[3] M = np.loadtxt(kernelPath, delimiter=' ') M = M.astype(np.uint8) else: shapeTypeCB = data[3] ksizeLine = data[4] if shapeTypeCB == "MORPH_RECT": shapeType = huicui.MORPH_RECT elif shapeTypeCB == "MORPH_CROSS": shapeType = huicui.MORPH_CROSS else: shapeType = huicui.MORPH_ELLIPSE ksize = np.fromstring(ksizeLine[1:-1], sep=',') M = helper.getStructuringElement(shapeType, tuple(ksize)) self.imageOperation.morphologyEx(morph, self.image, M, iterations=int(iterationsLine)) self.displayImage() elif q.text() == "边缘检测": dialog = CannyDialog(self) result = dialog.exec_() if result == 0: return minThreshValLine, maxThreshValLine, apertureSizeLine = dialog.getData() if is_number(minThreshValLine) and is_number(maxThreshValLine) and is_number(apertureSizeLine): self.imageOperation.canny(self.image, float(minThreshValLine), float(maxThreshValLine), float(apertureSizeLine)) self.displayImage() elif q.text() == "图像取样": if len(MainWindow.LaplacianSample) == 0: dialog = samplingDialog(self, flag=False) else: dialog = samplingDialog(self, flag=True) result = dialog.exec_() if result == 
0: return data = dialog.getData() if data[0] == "向下取样": iterationLine = data[1] isLaplacianButton = data[2] if is_number(iterationLine): LaplacianSample, image = self.imgSample.pyrDown(self.image, int(iterationLine), isLaplacianButton) self.image = image self.LaplacianSample.append(LaplacianSample) else: if len(MainWindow.LaplacianSample) == 0: iterationLine = data[1] isLaplacian = data[2] if is_number(iterationLine): image = self.imgSample.pyrUp(self.image, int(iterationLine), isLaplacian, LaplacianSample=self.LaplacianSample) if isLaplacian: for i in range(int(iterationLine)): self.LaplacianSample.pop() self.image = image else: iterationLine = data[1] if is_number(iterationLine): image = self.imgSample.pyrUp(self.image, int(iterationLine)) self.image = image self.displayImage() elif q.text() == "图像轮廓": dialog = ContoursDialog(self) # dialog._signal.connect(self.slotShowTransThreadStatus) result = dialog.exec_() if result == 0: return data = dialog.getData() if data[1] == "RETR_CCOMP": mode = huicui.RETR_CCOMP elif data[1] == "RETR_EXTERNAL": mode = huicui.RETR_EXTERNAL elif data[1] == "RETR_LIST": mode = huicui.RETR_LIST elif data[1] == "RETR_TREE": mode = huicui.RETR_TREE else: mode = huicui.RETR_FLOODFILL if data[2] == "CHAIN_APPROX_NONE": method = huicui.CHAIN_APPROX_NONE elif data[2] == "CHAIN_APPROX_SIMPLE": method = huicui.CHAIN_APPROX_SIMPLE elif data[2] == "CHAIN_APPROX_TC89_KCOS": method = huicui.CHAIN_APPROX_TC89_KCOS else: method = huicui.CHAIN_APPROX_TC89_L1 # print(len(data)) if data[0] is True: if data[3] == "THRESH_BINARY": thresholdType = huicui.THRESH_BINARY elif data[3] == "THRESH_BINARY_INV": thresholdType = huicui.THRESH_BINARY_INV elif data[3] == "THRESH_TRUNC": thresholdType = huicui.THRESH_TRUNC elif data[3] == "THRESH_TOZERO": thresholdType = huicui.THRESH_TOZERO else: thresholdType = huicui.THRESH_TOZERO_INV if data[4] == " ": globalThreshold = 0 elif data[4] == "THRESH_OTSU": globalThreshold = huicui.THRESH_OTSU else: globalThreshold = 
huicui.THRESH_TRIANGLE threshLine = data[5] maxValueLine = data[6] if is_number(threshLine) and is_number(maxValueLine): contours, hierarchy = self.imageOperation.findContours(self.image, mode, method, thresh=float(threshLine), maxval=float(maxValueLine), type=thresholdType + globalThreshold, threshold=True) else: contours, hierarchy = self.imageOperation.findContours(self.image, mode, method, threshold=False) self.contours = contours contourDialog = ContourSelectDialog(self, contourLen=len(contours)) contourDialog._signal.connect(self.previewContour) result = contourDialog.exec_() if result == 0: return contourIndex = contourDialog.getData() contour = Contour(img=self.image, contour=self.contours[contourIndex]) image = contour.drawContour(color=(255, 255, 255), thickness=5) self.image = image self.displayImage() elif q.text() == "图片灰度统计": dialog = HistDialog(self) result = dialog.exec_() if result == 0: return channelsLine, maskLine, histSizeLine, rangeStartLine, rangeEndLine, accumulate, equal = dialog.getData() if maskLine != "": maskImage = Image(fileName=maskLine) else: maskImage = None if is_number(channelsLine) and is_number(histSizeLine) and is_number(rangeStartLine) and is_number( rangeEndLine): hist = self.imageOperation.calcHist(self.image, int(channelsLine), int(histSizeLine), (int(rangeStartLine), int(rangeEndLine)), mask=maskImage, accumulate=accumulate, equal=equal) print(hist) elif q.text() == "傅里叶滤波": dialog = FourierTFDialog(self) result = dialog.exec_() if result == 0: return maskPath = dialog.getData() maskImg = Image(fileName=maskPath, flag=0) mask = self.imageOperation.merge([maskImg.getMat(), maskImg.getMat()]) # plt.subplot(121) # plt.imshow(self.image.getMat(), cmap="gray") # print(1) self.imageOperation.filter(self.image, mask) self.displayImage() print(1) elif q.text() == "模板匹配": dialog = MatchTemplateDialog(self) result = dialog.exec_() if result == 0: return templatePath, methodCB = dialog.getData() if methodCB == "TM_SQDIFF": method = 
huicui.TM_SQDIFF elif methodCB == "TM_SQDIFF_NORMED": method = huicui.TM_SQDIFF_NORMED elif methodCB == "TM_CCORR": method = huicui.TM_CCORR elif methodCB == "TM_CCORR_NORMED": method = huicui.TM_CCORR_NORMED elif methodCB == "TM_CCOEFF": method = huicui.TM_CCOEFF else: method = huicui.TM_CCOEFF_NORMED image = Image(fileName=templatePath) rst = self.image.matchTemplate(image, method) print(rst) elif q.text() == "霍夫直线变换": if len(self.image.getMat().shape) == huicui.THREE_CHANNEL: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("图片对象必须为灰度图像 ") msg.setInformativeText("使用霍夫直线变换时图片对象必须是灰度图像") msg.setWindowTitle("出错提示") msg.setStandardButtons(QMessageBox.Ok) msg.exec_() return dialog = HoughTFDialog(self, flag=0) result = dialog.exec_() if result == 0: return rhoLine, thetaLine, thresholdLine, optimizeButton, minLineLengthLine, maxLineGapLine = dialog.getData() if is_number(rhoLine) and is_number(thetaLine) and is_number(thresholdLine) and is_number( minLineLengthLine) and is_number(maxLineGapLine): lines = self.image.HoughLine(int(thresholdLine), float(minLineLengthLine), float(maxLineGapLine), float(rhoLine), np.pi / 180, optimizeButton) for line in lines: x1, y1, x2, y2 = line[0] helper.draw(self.image,(x1,y1),(x2,y2),(255,255,255),5,drawType=huicui.DRAW_LINE,iscopy=False) self.displayImage() elif q.text() == "霍夫圆环变换": if len(self.image.getMat().shape) == huicui.THREE_CHANNEL: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("图片对象必须为灰度图像 ") msg.setInformativeText("使用霍夫圆环变换时图片对象必须是灰度图像") msg.setWindowTitle("出错提示") msg.setStandardButtons(QMessageBox.Ok) msg.exec_() return dialog = HoughTFDialog(self, flag=1) result = dialog.exec_() if result == 0: return dpLine, minDistLine, param1Line, param2Line, minRadiusLine, maxRadiusLine = dialog.getData() if is_number(dpLine) and is_number(minDistLine) and is_number(param1Line) and is_number( param2Line) and is_number(minRadiusLine) and is_number(maxRadiusLine): circles = 
self.image.HoughCircle(huicui.HOUGH_GRADIENT, float(dpLine), float(minDistLine), float(param1Line), float(param2Line), int(minRadiusLine), int(maxRadiusLine)) print(circles) if circles is None: return circles = np.uint16(np.around(circles)) for i in circles[0, :]: helper.draw(self.image,(i[0],i[1]),i[2],(255,255,255),12,huicui.DRAW_CIRCLE,iscopy=False) self.displayImage() elif q.text() == "图像分割": dialog = SegmentationDialog(self) result = dialog.exec_() if result == 0: return distanceTypeCB, maskSizeCB, thresholdRatioLine, kernelPath, openIterationsLine, dilateIterationsLine = dialog.getData() if distanceTypeCB == "DIST_L1": distanceType = huicui.DIST_L1 elif distanceTypeCB == "DIST_L2": distanceType = huicui.DIST_L2 elif distanceTypeCB == "DIST_C": distanceType = huicui.DIST_C elif distanceTypeCB == "DIST_L12": distanceType = huicui.DIST_L12 elif distanceTypeCB == "DIST_FAIR": distanceType = huicui.DIST_FAIR elif distanceTypeCB == "DIST_WELSCH": distanceType = huicui.DIST_WELSCH else: distanceType = huicui.DIST_HUBER if maskSizeCB == "DIST_MASK_3": maskSize = huicui.DIST_MASK_3 elif maskSizeCB == "DIST_MASK_5": maskSize = huicui.DIST_MASK_5 else: maskSize = huicui.DIST_MASK_PRECISE if is_number(thresholdRatioLine) and is_number(openIterationsLine) and is_number(dilateIterationsLine): kernel = np.loadtxt(kernelPath, delimiter=' ') self.imageOperation.imageSegmentation(self.image, distanceType, maskSize, float(thresholdRatioLine), kernel, int(openIterationsLine), int(dilateIterationsLine)) self.displayImage() elif q.text() == "前景提取": dialog = ForegroundExtractionDialog(self) result = dialog.exec_() if result == 0: return maskPath, rectLine, iterCountLine = dialog.getData() maskImg = Image(fileName=maskPath) rect = np.fromstring(rectLine[1:-1], sep=',') if is_number(iterCountLine): self.imageOperation.GrabCut(self.image, maskImg, rect, int(iterCountLine)) self.displayImage() elif q.text() == "梯度呈现": dialog = GradientDialog(self) result = dialog.exec_() if result == 
0: return gradientTypeCB, ddepthCB, dxLine, dyLine = dialog.getData() if gradientTypeCB == "Sobel算子": gradientType = huicui.GRADIENT_SOBEL elif gradientTypeCB == "Scharr算子": gradientType = huicui.GRADIENT_SCHARR else: gradientType = huicui.GRADIENT_LAPLACIAN if ddepthCB == "CV_8U": ddepth = huicui.CV_8U elif ddepthCB == "CV_16U": ddepth = huicui.CV_16U elif ddepthCB == "CV_32F": ddepth = huicui.CV_32F else: ddepth = huicui.CV_64F if is_number(dxLine) and is_number(dyLine): self.imageOperation.gradient(gradientType, self.image, ddepth, int(dxLine), int(dyLine)) self.displayImage() else: print("最后了") pass def cameraOpen(self): """ 打开摄像头 :return: """ MainWindow.isClose = False self.isCamera = True self.cap = cv2.VideoCapture(0) self.Display() # th = threading.Thread(target=self.Display) # th.start() def captureVideo(self): """ 捕获摄像头图片 :return: """ MainWindow.isCapture = True def closeCamera(self): """ 关闭/开启摄像头 :return: """ if MainWindow.isClose is False: self.closeButton.setText("开始播放") MainWindow.isClose = True else: self.closeButton.setText("暂停播放") MainWindow.isClose = False def Display(self): """ 播放视频 :return: """ self.figure.clear() plt.subplot(111) while self.cap.isOpened(): success, frame = self.cap.read() # RGB转BGR if success: # frameCopy = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # img = QImage(frameCopy.data, frameCopy.shape[1], frameCopy.shape[0], QImage.Format_RGB888) # self.videoLabel.setPixmap(QPixmap.fromImage(img).scaled(800, 500)) # plt.imshow(frameCopy) # plt.axis('off') # self.canvas.draw() # time.sleep(0.01) cv2.imshow('frame', frame) if self.isCamera: c = cv2.waitKey(1) if c == 27: break else: cv2.waitKey(int(1000 / self.frameRate)) if MainWindow.isCapture is True: self.image.setAttr(mat=frame) self.imageList[MainWindow.currentIndex] = self.image frameCopy = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) plt.imshow(frameCopy) plt.axis('off') self.canvas.draw() MainWindow.isCapture = False break # 判断关闭事件是否已触发 if MainWindow.isClose is True: # 
关闭事件置为未触发,清空显示label print("摄像头即将关闭") # self.stopEvent.clear() # self.ui.DispalyLabel.clear() break self.cap.release() cv2.destroyAllWindows() def selectionchange(self, index): """ ComboBox下拉栏值改变引发操作 :param index: :return: """ MainWindow.currentIndex = index if MainWindow.selection is False: MainWindow.selection = True return image = self.imageList[index] self.image = image self.displayImage() def displayImage(self): try: frame = self.image.getMat() if len(frame.shape) == huicui.THREE_CHANNEL: imageCopy = self.image.cvtColor(huicui.COLOR_BGR2RGB) frame = imageCopy.getMat() plt.imshow(frame) else: plt.imshow(frame, cmap='gray') plt.axis('off') self.canvas.draw() except Exception: self.figure.clear() plt.subplot(111) self.figure.patch.set_facecolor('#111111') self.canvas.draw() def transform(self, data, shape): data = data.replace("[", ",") data = data.replace("]", ",") datalist = data.split(",") while "" in datalist: datalist.remove("") x = np.array(datalist, dtype=np.float32) x = x.reshape(shape) return x def previewContour(self, contourIndex): contour = Contour(img=self.image, contour=self.contours[contourIndex]) image = contour.drawContour(color=(255, 255, 255), thickness=5) image.showImg("contourDemo") helper.waitKey() def is_number(s): """ 是否为数字 :param s: :return: """ try: float(s) return True except ValueError: pass return False def main(): app = QApplication(sys.argv) qssStyle = CommonHelper.readQSS('/Users/zhoujiahao/Downloads/graduate/VisualPlatform/style.qss') ex = MainWindow() ex.setStyleSheet(qssStyle) ex.show() sys.exit(app.exec_()) if __name__ == '__main__': main()
task.py
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import datetime
import subprocess
import psutil
import signal
from time import sleep, time
from threading import Timer, Thread

from ..log import create_logger, get_log_dir


class Task(object):
    """A named shell command executed and supervised as a process tree.

    The command is started through ``psutil.Popen`` (``shell=True``) and
    the whole resulting process tree (shell plus all recursive children,
    e.g. the nodes started by ``roslaunch``) is tracked so the task can
    be monitored, killed on timeout and cleaned up as a unit.
    """

    def __init__(self, task_name, command, params, config, has_gazebo=False,
                 type=None, required=False, process_timeout=None,
                 task_killed_callback=None, stage=None, output_log_dir=None,
                 simulation_timeout=None):
        """Configure the task; the process is only started by ``run()``.

        Args:
            task_name: non-empty identifier, also used for log file names.
            command: shell command line to execute.
            params: dict of parameters; for ``roslaunch`` commands they
                are appended as ``name:=value``, otherwise ``{name}``
                placeholders in ``command`` are substituted.
            config: process/ROS network configuration object; must
                provide ``get_env_variables()`` / ``set_env_variables()``.
            has_gazebo: whether this task runs a Gazebo instance.
            type: task type; auto-detected for ``roslaunch``/``rosrun``.
            required: whether this task is required by the stage.
            process_timeout: wall-clock seconds after which the process
                tree is killed (ignored unless ``> 0``).
            task_killed_callback: callable invoked with ``task_name``
                once the process tree has been torn down.
            stage: name of the stage this task belongs to.
            output_log_dir: directory for log files (defaults to
                ``get_log_dir()``).
            simulation_timeout: simulation-clock timeout in seconds
                (must be ``>= 0`` when given).
        """
        assert isinstance(task_name, str)
        assert len(task_name) > 0
        assert callable(task_killed_callback), 'Callback function is not ' \
            'callable'
        assert isinstance(has_gazebo, bool), 'Invalid has_gazebo flag'
        if simulation_timeout is not None:
            assert simulation_timeout >= 0, 'Simulation timeout must ' \
                'be equal or greater than zero'
        self._stage = stage
        self._task_name = task_name
        self._logger = create_logger(
            'task_{}'.format(task_name), output_dir=output_log_dir)
        self._has_gazebo = has_gazebo
        self._process_config = config
        self._command = command
        self._type = type
        self._is_running = False
        self._log_filename = None
        self._required = required
        self._task_killed_callback = task_killed_callback
        self._process_timeout = None
        self._process_timer = None
        # Thread object running _monitor_process (see run()); previously
        # this attribute shadowed a method of the same name, which broke
        # the monitor entirely.
        self._process_monitor = None
        self._simulation_timeout = simulation_timeout
        # BUG FIX: was never initialized, only set inside kill().
        self._process_timeout_triggered = False
        # BUG FIX: file handle for the process log, kept so that kill()
        # can close it (it used to leak).
        self._logfile = None

        # Create the log directory, if it doesn't exist already
        self._log_dir = \
            get_log_dir() if output_log_dir is None else output_log_dir
        if not os.path.isdir(self._log_dir):
            os.makedirs(self._log_dir)

        cmd_elems = self._command.split()
        if cmd_elems[0] == 'roslaunch':
            self._type = 'roslaunch'
        elif cmd_elems[0] == 'rosrun':
            self._type = 'rosrun'

        if self._type == 'roslaunch' and len(params):
            # For roslaunch no string substitution is necessary, the
            # parameters included in the params dict will be added to the
            # command line as
            #   param_key:=param_value
            for param_name in params:
                self._command += ' {}:='.format(param_name)
                if isinstance(params[param_name], bool):
                    self._command += '{}'.format(
                        'true' if params[param_name] else 'false')
                else:
                    self._command += '{}'.format(params[param_name])
        else:
            # For all other cases, if there are any parameters, it is
            # expected that the parameter can be found in the command
            # string as
            #   {param_name}
            # and they will then be replaced by the assigned value
            for param_name in params:
                self._command = self._command.replace(
                    '{' + param_name + '}', str(params[param_name]))

        self._logger.info('Task created')
        self._logger.info('\tName: {}'.format(self._task_name))
        self._logger.info('\tCommand: {}'.format(self._command))
        self._logger.info('\tROS network configuration: {}'.format(
            self._process_config))
        self._logger.info('\tRuns Gazebo?: {}'.format(self._has_gazebo))
        self._logger.info('\tType: {}'.format(self._type))
        self._logger.info('\tRequired task? {}'.format(self._required))

        if process_timeout is not None:
            if process_timeout > 0:
                self._process_timeout = process_timeout
                self._logger.info('\tProcess timeout: {} seconds'.format(
                    self._process_timeout))

        self._process = None
        self._process_children = None

    def __del__(self):
        # Best-effort teardown of the process tree on garbage collection.
        self._logger.info('Deleting task <{}>'.format(self._task_name))
        self.kill()

    @property
    def has_gazebo(self):
        return self._has_gazebo

    @property
    def log_filename(self):
        return self._log_filename

    @property
    def log_dir(self):
        return self._log_dir

    @property
    def required(self):
        return self._required

    @property
    def process_timeout(self):
        return self._process_timeout

    @property
    def simulation_timeout(self):
        return self._simulation_timeout

    def _monitor_process(self):
        """Poll the process tree until it stops, then run kill() cleanup.

        BUG FIX: renamed from ``_process_monitor``, which collided with
        the instance attribute of the same name assigned in ``__init__``;
        the Thread in ``run()`` therefore got ``target=None`` and the
        monitor never executed.
        """
        while self.is_running() and self._process is not None:
            self._logger.info('process monitor...')
            sleep(1)
        # Process tree ended on its own; kill() resets state and fires
        # the task-killed callback.
        self.kill()

    def _apply_simulation_timeout(self):
        """Watch the simulation clock for the simulation timeout.

        BUG FIX: renamed from ``_simulation_timeout``, which was
        permanently shadowed by the attribute assigned in ``__init__``
        and therefore unreachable. NOTE(review): the body looks like an
        unfinished stub (``clock_time`` is never updated) — confirm
        intended behavior before relying on it.
        """
        self._logger.info(
            'Applying network configuration for simulation timeout')
        self._process_config.set_env_variables()
        start_process_timeout = time()
        clock_time = 0
        while self.is_running() and self._process is not None:
            self._logger.info('Simulation timeout')

    def _on_terminate(self, process):
        """Callback for psutil.wait_procs(): log each process' exit."""
        try:
            if psutil.pid_exists(process.pid):
                self._logger.warning(
                    'Process {} <{}> terminated with exit'
                    ' code {}'.format(
                        process.pid, process.name(), process.returncode))
            else:
                self._logger.warning('Process {} already '
                                     'terminated'.format(process.pid))
        except Exception as e:
            self._logger.error(
                'Error in on_terminate function, message=' + str(e))

    def is_running(self):
        """Return True if every process in the tree is still alive."""
        if self._process is None:
            return False
        procs = self.get_all_processes()
        # BUG FIX: if the root process vanished, get_all_processes()
        # returns an empty list; the old code would have raised (or,
        # with an empty loop, reported "running").
        if not procs:
            return False
        for proc in procs:
            if not psutil.pid_exists(proc.pid):
                self._logger.info(
                    'Task {} is not running'.format(self._task_name))
                return False
        return True

    def kill(self):
        """SIGINT the whole process tree, reset state, fire the callback."""
        if self._process is None:
            self._logger.warning(
                'Task <{}> - Process object is invalid'.format(
                    self._task_name))
            return
        if not self._process_children:
            self._logger.warning(
                'Task <{}> - No children processes found'.format(
                    self._task_name))
            return
        try:
            self._logger.warning(
                'Task <{}> - Killing process tree...'.format(
                    self._task_name))
            for p in self.get_all_processes():
                if psutil.pid_exists(p.pid):
                    self._logger.warning('Sending SIGINT to child '
                                         'process id=%d', p.pid)
                    p.send_signal(signal.SIGINT)
                    if not psutil.pid_exists(p.pid):
                        self._logger.warning('Child process %d '
                                             'successfully terminated',
                                             p.pid)
                    else:
                        self._logger.warning('Child process %d still '
                                             'running', p.pid)
                else:
                    self._logger.warning('Child process %d is not alive',
                                         p.pid)

            # Block until all processes are gone, logging each exit
            gone, alive = psutil.wait_procs(
                self.get_all_processes(),
                timeout=None,
                callback=self._on_terminate)
            self._logger.warning(
                'Kill processes=\n\t - Gone={}\n\t - '
                'Alive{}'.format(str(gone), str(alive)))
            self._process_timeout_triggered = True
            self._logger.info(
                'Task <{}> - PROCESS TIMEOUT - finishing process...'.format(
                    self._task_name))
        except Exception as ex:
            self._logger.warning('Error occurred while killing processes, '
                                 'message=%s' % str(ex))

        # BUG FIX: close the process log file handle (previously leaked)
        if self._logfile is not None:
            try:
                self._logfile.close()
            except OSError:
                pass
            self._logfile = None

        self._process = None
        self._process_children = None
        self._logger.info(
            'Task <{}> - Process objects were reset'.format(self._task_name))

        if callable(self._task_killed_callback):
            self._logger.info(
                'Calling task <{}> end callback function'.format(
                    self._task_name))
            self._task_killed_callback(self._task_name)
            self._logger.info(
                'Task <{}> - Callback finished'.format(self._task_name))
        self._logger.info(
            'Task <{}> - Processes finished'.format(self._task_name))

    def wait(self, timeout=None):
        """Wait for the root process; True if it exited within timeout."""
        if self._process is not None:
            try:
                exit_code = self._process.wait(timeout)
                self._logger.info(
                    'Task <{}> finished, exit_code={}'.format(
                        self._task_name, exit_code))
                return True
            except psutil.TimeoutExpired:
                self._logger.info(
                    'Task <{}> still running'.format(self._task_name))
                return False
        return False

    def get_all_processes(self):
        """Return the root process plus all recursive children.

        Returns an empty list when the root process no longer exists
        (BUG FIX: previously raised ``psutil.NoSuchProcess``).
        """
        try:
            proc = psutil.Process(self._process.pid)
        except (psutil.NoSuchProcess, AttributeError):
            return []
        process_children = proc.children(recursive=True)
        process_children.append(proc)
        return process_children

    def has_terminated(self):
        """Return True if every process of the tree has stopped.

        BUG FIX: the original returned True when all processes were
        still RUNNING (inverted condition) and implicitly returned
        ``None`` when no process had been started.
        """
        if self._process is None:
            return True
        for p in self.get_all_processes():
            if p.is_running():
                return False
        return True

    def run(self):
        """Start the command and set up per-task logging and supervision.

        Creates a task-specific log directory, redirects the process'
        stdout/stderr to a timestamped log file, points ``ROS_HOME`` at
        the task log directory, then either arms the process-timeout
        timer or starts the background monitor thread.
        """
        if self.is_running():
            self._logger.warning('Task is already running')
            return

        # Create a log directory for this task's logs
        task_log_dir = os.path.join(self._log_dir, self._task_name)
        if not os.path.isdir(task_log_dir):
            os.makedirs(task_log_dir)

        # Create file for log
        timestamp = datetime.datetime.now().isoformat()
        timestamp = timestamp.replace(':', '_')
        self._log_filename = os.path.join(
            task_log_dir,
            '{}_process_log_{}.log'.format(timestamp, self._task_name))
        # Kept on the instance so kill() can close it (BUG FIX: leak)
        self._logfile = open(self._log_filename, 'a')

        env_variables = self._process_config.get_env_variables()
        # Set ROS_HOME to store the log files from the ROS processes
        env_variables['ROS_HOME'] = os.path.join(task_log_dir, 'ros')
        if not os.path.isdir(env_variables['ROS_HOME']):
            os.makedirs(env_variables['ROS_HOME'])

        self._logger.info('Running command=' + self._command)
        self._logger.info(
            'ROS network configuration=' + str(self._process_config))
        self._logger.info('Process log file=' + self._log_filename)
        self._logger.info('Directory for log files=' + self._log_dir)

        if self._process_timeout is not None:
            self._logger.info(
                'Process timeout={} seconds'.format(self._process_timeout))
            self._process_timer = Timer(self._process_timeout, self.kill)

        self._process = psutil.Popen(
            self._command,
            shell=True,
            stdout=self._logfile,
            stderr=self._logfile,
            env=env_variables)

        # Get the process instance
        proc = psutil.Process(self._process.pid)
        # Get all the children processes
        self._process_children = proc.children(recursive=True)
        self._process_children.append(proc)

        if self._process_timer is not None:
            self._process_timer.start()
        else:
            self._logger.info('Starting process monitor')
            # BUG FIX: target was the (shadowed) attribute instead of the
            # monitor method, and the thread was launched with run(),
            # which executes synchronously in the caller. Use the renamed
            # method and start() so the monitor runs in the background.
            self._process_monitor = Thread(target=self._monitor_process)
            self._process_monitor.daemon = True
            self._process_monitor.start()

        self._logger.info('Process created (Name={}, PID={})'.format(
            proc.name(), proc.pid))
# test_urllib.py -- regression tests for urllib (content begins below)
"""Regression tests for urllib"""

import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
try:
    import ssl
except ImportError:
    # ssl is optional; HTTPS-specific tests are skipped when unavailable.
    ssl = None
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections


def hexescape(char):
    """Escape char as RFC 2396 specifies (``%`` + two uppercase hex digits)."""
    hex_repr = hex(ord(char))[2:].upper()
    if len(hex_repr) == 1:
        # Pad single-digit codepoints to two hex characters.
        hex_repr = "0%s" % hex_repr
    return "%" + hex_repr


# Shortcut for testing FancyURLopener
_urlopener = None


def urlopen(url, data=None, proxies=None):
    """urlopen(url [, data]) -> open file-like object

    Test helper that routes requests through FancyURLopener.  A single
    opener instance is lazily created and cached in the module-global
    ``_urlopener`` (the proxies path builds a fresh opener and does NOT
    cache it).  The DeprecationWarning emitted by FancyURLopener is
    asserted via check_warnings.
    """
    global _urlopener
    if proxies is not None:
        opener = urllib.request.FancyURLopener(proxies=proxies)
    elif not _urlopener:
        with support.check_warnings(
                ('FancyURLopener style of invoking requests is deprecated.',
                DeprecationWarning)):
            opener = urllib.request.FancyURLopener()
        _urlopener = opener
    else:
        opener = _urlopener
    if data is None:
        return opener.open(url)
    else:
        return opener.open(url, data)


def fakehttp(fakedata):
    """Return an HTTPConnection subclass whose socket is a BytesIO
    preloaded with *fakedata*, so tests never touch the network."""

    class FakeSocket(io.BytesIO):
        # Reference count of makefile() handles; close() only really closes
        # the BytesIO once every handle has been closed.
        io_refs = 1

        def sendall(self, data):
            # Capture outgoing request bytes for later verification.
            FakeHTTPConnection.buf = data

        def makefile(self, *args, **kwds):
            self.io_refs += 1
            return self

        def read(self, amt=None):
            if self.closed:
                return b""
            return io.BytesIO.read(self, amt)

        def readline(self, length=None):
            if self.closed:
                return b""
            return io.BytesIO.readline(self, length)

        def close(self):
            self.io_refs -= 1
            if self.io_refs == 0:
                io.BytesIO.close(self)

    class FakeHTTPConnection(http.client.HTTPConnection):
        # buffer to store data for verification in urlopen tests.
buf = None fakesock = FakeSocket(fakedata) def connect(self): self.sock = self.fakesock return FakeHTTPConnection class FakeHTTPMixin(object): def fakehttp(self, fakedata): self._connection_class = http.client.HTTPConnection http.client.HTTPConnection = fakehttp(fakedata) def unfakehttp(self): http.client.HTTPConnection = self._connection_class class FakeFTPMixin(object): def fakeftp(self): class FakeFtpWrapper(object): def __init__(self, user, passwd, host, port, dirs, timeout=None, persistent=True): pass def retrfile(self, file, type): return io.BytesIO(), 0 def close(self): pass self._ftpwrapper_class = urllib.request.ftpwrapper urllib.request.ftpwrapper = FakeFtpWrapper def unfakeftp(self): urllib.request.ftpwrapper = self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase): """Test urlopen() opening a temporary file. Try to test as much functionality as possible so as to cut down on reliance on connecting to the Net for testing. """ def setUp(self): # Create a temp file to use for testing self.text = bytes("test_urllib: %s\n" % self.__class__.__name__, "ascii") f = open(support.TESTFN, 'wb') try: f.write(self.text) finally: f.close() self.pathname = support.TESTFN self.returned_obj = urlopen("file:%s" % self.pathname) def tearDown(self): """Shut down the open object""" self.returned_obj.close() os.remove(support.TESTFN) def test_interface(self): # Make sure object returned by urlopen() has the specified methods for attr in ("read", "readline", "readlines", "fileno", "close", "info", "geturl", "getcode", "__iter__"): self.assertTrue(hasattr(self.returned_obj, attr), "object returned by urlopen() lacks %s attribute" % attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(), "calling readline() after exhausting the file did not" " return an empty string") def test_readlines(self): lines_list = 
self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, "readlines() returned the wrong number of lines") self.assertEqual(lines_list[0], self.text, "readlines() returned improper text") def test_fileno(self): file_num = self.returned_obj.fileno() self.assertIsInstance(file_num, int, "fileno() did not return an int") self.assertEqual(os.read(file_num, len(self.text)), self.text, "Reading on the file descriptor returned by fileno() " "did not return the expected text") def test_close(self): # Test close() by calling it here and then having it be called again # by the tearDown() method for the test self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def test_iter(self): # Test iterator # Don't need to count number of iterations since test would fail the # instant it returned anything beyond the first line from the # comparison. # Use the iterator in the usual implicit way to test for ticket #4608. for line in self.returned_obj: self.assertEqual(line, self.text) def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname) class ProxyTests(unittest.TestCase): def setUp(self): # Records changes to env vars self.env = support.EnvironmentVarGuard() # Delete all proxy related env vars for k in list(os.environ): if 'proxy' in k.lower(): self.env.unset(k) def tearDown(self): # Restore all proxy related env vars self.env.__exit__() del self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies = urllib.request.getproxies_environment() # getproxies_environment use lowered case truncated (no '_proxy') keys self.assertEqual('localhost', proxies['no']) # List of no_proxies with space. 
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin): """Test urlopen() opening a fake http connection.""" def check_read(self, ver): self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!") try: fp = urlopen("http://python.org/") self.assertEqual(fp.readline(), b"Hello!") self.assertEqual(fp.readline(), b"") self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_url_fragment(self): # Issue #11703: geturl() omits fragments in the original URL. url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!") try: fp = urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url) finally: self.unfakehttp() def test_willclose(self): self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!") try: resp = urlopen("http://www.python.org") self.assertTrue(resp.fp.will_close) finally: self.unfakehttp() def test_read_0_9(self): # "0.9" response accepted (but not "simple responses" without # a status line) self.check_read(b"0.9") def test_read_1_0(self): self.check_read(b"1.0") def test_read_1_1(self): self.check_read(b"1.1") def test_read_bogus(self): # urlopen() should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 401 Authentication Required Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type: text/html; charset=iso-8859-1 ''') try: self.assertRaises(OSError, urlopen, "http://python.org/") finally: self.unfakehttp() def test_invalid_redirect(self): # urlopen() should raise OSError for many error codes. 
self.fakehttp(b'''HTTP/1.1 302 Found Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection: close Content-Type: text/html; charset=iso-8859-1 ''') try: self.assertRaises(urllib.error.HTTPError, urlopen, "http://python.org/") finally: self.unfakehttp() def test_empty_socket(self): # urlopen() raises OSError if the underlying socket does not send any # data. (#1680230) self.fakehttp(b'') try: self.assertRaises(OSError, urlopen, "http://something") finally: self.unfakehttp() def test_missing_localfile(self): # Test for #10836 with self.assertRaises(urllib.error.URLError) as e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def test_file_notexists(self): fd, tmp_file = tempfile.mkstemp() tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl) as fobj: self.assertTrue(fobj) finally: os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def test_ftp_nohost(self): test_ftp_url = 'ftp:///path' with self.assertRaises(urllib.error.URLError) as e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError) as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE', 0) def test_ftp_cache_pruning(self): self.fakeftp() try: urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, []) urlopen('ftp://localhost') finally: self.unfakeftp() def test_userpass_inurl(self): self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!") try: fp = 
urlopen("http://user:pass@python.org/") self.assertEqual(fp.readline(), b"Hello!") self.assertEqual(fp.readline(), b"") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_userpass_inurl_w_spaces(self): self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!") try: userpass = "a b:c d" url = "http://{}@python.org/".format(userpass) fakehttp_wrapper = http.client.HTTPConnection authorization = ("Authorization: Basic %s\r\n" % b64encode(userpass.encode("ASCII")).decode("ASCII")) fp = urlopen(url) # The authorization header must be in place self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8")) self.assertEqual(fp.readline(), b"Hello!") self.assertEqual(fp.readline(), b"") # the spaces are quoted in URL so no match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_URLopener_deprecation(self): with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl, "ssl module required") def test_cafile_and_context(self): context = ssl.create_default_context() with self.assertRaises(ValueError): urllib.request.urlopen( "https://localhost", cafile="/nonexistent/path", context=context ) class urlopen_DataTests(unittest.TestCase): """Test urlopen() opening a data URL.""" def setUp(self): # text containing URL special- and unicode-characters self.text = "test data URLs :;,%=& \u00f6 \u00c4 " # 2x1 pixel RGB PNG image with one black and one white pixel self.image = ( b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00' b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae' b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00' b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82') self.text_url = ( "data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3" "D%26%20%C3%B6%20%C3%84%20") self.text_url_base64 = ( 
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs" "sJT0mIPYgxCA%3D") # base64 encoded data URL that contains ignorable spaces, # such as "\n", " ", "%0A", and "%20". self.image_url = ( "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n" "QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 " "vHgAAAABJRU5ErkJggg%3D%3D%0A%20") self.text_url_resp = urllib.request.urlopen(self.text_url) self.text_url_base64_resp = urllib.request.urlopen( self.text_url_base64) self.image_url_resp = urllib.request.urlopen(self.image_url) def test_interface(self): # Make sure object returned by urlopen() has the specified methods for attr in ("read", "readline", "readlines", "close", "info", "geturl", "getcode", "__iter__"): self.assertTrue(hasattr(self.text_url_resp, attr), "object returned by urlopen() lacks %s attribute" % attr) def test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''), ('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) self.assertEqual(urllib.request.urlopen("data:,").info().get_params(), [('text/plain', ''), ('charset', 'US-ASCII')]) def test_geturl(self): self.assertEqual(self.text_url_resp.geturl(), self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url) def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text) def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image) def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def 
test_invalid_base64_data(self): # missing padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase): """Test urllib.urlretrieve() on local files""" def setUp(self): # Create a list of temporary files. Each item in the list is a file # name (absolute path or relative to the current working directory). # All files in this list will be deleted in the tearDown method. Note, # this only helps to makes sure temporary files get deleted, but it # does nothing about trying to close files that may still be open. It # is the responsibility of the developer to properly close files even # when exceptional conditions occur. self.tempFiles = [] # Create a temporary file. self.registerFileForCleanUp(support.TESTFN) self.text = b'testing urllib.urlretrieve' try: FILE = open(support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally: try: FILE.close() except: pass def tearDown(self): # Delete the temporary files. for each in self.tempFiles: try: os.remove(each) except: pass def constructLocalFileUrl(self, filePath): filePath = os.path.abspath(filePath) try: filePath.encode("utf-8") except UnicodeEncodeError: raise unittest.SkipTest("filePath is not encodable to utf8") return "file://%s" % urllib.request.pathname2url(filePath) def createNewTempFile(self, data=b""): """Creates a new temporary file containing the specified data, registers the file for deletion during the test fixture tear down, and returns the absolute path of the file.""" newFd, newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd, "wb") newFile.write(data) newFile.close() finally: try: newFile.close() except: pass return newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self): # Make sure that a local file just gets its own location returned and # a headers value is returned. 
result = urllib.request.urlretrieve("file:%s" % support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message, "did not get a email.message.Message instance " "as second returned value") def test_copy(self): # Test that setting the filename argument works. second_temp = "%s.2" % support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), "copy of the file was not " "made") FILE = open(second_temp, 'rb') try: text = FILE.read() FILE.close() finally: try: FILE.close() except: pass self.assertEqual(self.text, text) def test_reporthook(self): # Make sure that the reporthook works. def hooktester(block_count, block_read_size, file_size, count_holder=[0]): self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size, int) self.assertEqual(block_count, count_holder[0]) count_holder[0] = count_holder[0] + 1 second_temp = "%s.2" % support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self): # Test on zero length file. Should call reporthook only 1 time. report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): # Test on 5 byte file. Should call reporthook only 2 times (once when # the "network connection" is established and once when the block is # read). 
report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b"x" * 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2], 5) def test_reporthook_8193_bytes(self): # Test on 8193 byte file. Should call reporthook only 3 times (once # when the "network connection" is established, once for the next 8192 # bytes, and once for the last byte). report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b"x" * 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1], 8192) class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): """Test urllib.urlretrieve() using fake http connections""" def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') def _reporthook(par1, par2, par3): pass with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve('http://example.com/', reporthook=_reporthook) finally: self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') 
with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve('http://example.com/') finally: self.unfakehttp() class QuotingTests(unittest.TestCase): """Tests for urllib.quote() and urllib.quote_plus() According to RFC 2396 (Uniform Resource Identifiers), to escape a character you write it as '%' + <2 character US-ASCII hex value>. The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly. Case does not matter on the hex letters. The various character sets specified are: Reserved characters : ";/?:@&=+$," Have special meaning in URIs and must be escaped if not being used for their special meaning Data characters : letters, digits, and "-_.!~*'()" Unreserved and do not need to be escaped; can be, though, if desired Control characters : 0x00 - 0x1F, 0x7F Have no use in URIs so must be escaped space : 0x20 Must be escaped Delimiters : '<>#%"' Must be escaped Unwise : "{}|\^[]`" Must be escaped """ def test_never_quote(self): # Make sure quote() does not quote letters, digits, and "_,.-" do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz", "0123456789", "_.-"]) result = urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result, "using quote(): %r != %r" % (do_not_quote, result)) result = urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, "using quote_plus(): %r != %r" % (do_not_quote, result)) def test_default_safe(self): # Test '/' is default value for 'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def test_safe(self): # Test setting 'safe' parameter does what it should do quote_by_default = "<>" result = urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, "using quote(): %r != %r" % (quote_by_default, result)) result = urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, "using quote_plus(): %r != %r" % 
(quote_by_default, result)) # Safe expressed as bytes rather than str result = urllib.parse.quote(quote_by_default, safe=b"<>") self.assertEqual(quote_by_default, result, "using quote(): %r != %r" % (quote_by_default, result)) # "Safe" non-ASCII characters should have no effect # (Since URIs are not allowed to have non-ASCII characters) result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc") expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Same as above, but using a bytes rather than str result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc") expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) def test_default_quoting(self): # Make sure all characters that should be quoted are by default sans # space (separate test for that). should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F should_quote.append('<>#%"{}|\^[]`') should_quote.append(chr(127)) # For 0x7F should_quote = ''.join(should_quote) for char in should_quote: result = urllib.parse.quote(char) self.assertEqual(hexescape(char), result, "using quote(): " "%s should be escaped to %s, not %s" % (char, hexescape(char), result)) result = urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result, "using quote_plus(): " "%s should be escapes to %s, not %s" % (char, hexescape(char), result)) del should_quote partial_quote = "ab[]cd" expected = "ab%5B%5Dcd" result = urllib.parse.quote(partial_quote) self.assertEqual(expected, result, "using quote(): %r != %r" % (expected, result)) result = urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result, "using quote_plus(): %r != %r" % (expected, result)) def test_quoting_space(self): # Make sure quote() and quote_plus() handle spaces as specified in # their unique way result = urllib.parse.quote(' ') 
self.assertEqual(result, hexescape(' '), "using quote(): %r != %r" % (result, hexescape(' '))) result = urllib.parse.quote_plus(' ') self.assertEqual(result, '+', "using quote_plus(): %r != +" % result) given = "a b cd e f" expect = given.replace(' ', hexescape(' ')) result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) expect = given.replace(' ', '+') result = urllib.parse.quote_plus(given) self.assertEqual(expect, result, "using quote_plus(): %r != %r" % (expect, result)) def test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma') # Test with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma') # Test with safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'), 'alpha+beta+gamma') def test_quote_bytes(self): # Bytes should quote directly to percent-encoded values given = b"\xa2\xd8ab\xff" expect = "%A2%D8ab%FF" result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Encoding argument should raise type error on bytes input self.assertRaises(TypeError, urllib.parse.quote, given, encoding="latin-1") # quote_from_bytes should work the same result = urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result, "using quote_from_bytes(): %r != %r" % (expect, result)) def test_quote_with_unicode(self): # Characters in Latin-1 range, encoded by default in UTF-8 given = "\xa2\xd8ab\xff" expect = "%C2%A2%C3%98ab%C3%BF" result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in Latin-1 range, encoded by with None (default) result = urllib.parse.quote(given, encoding=None, errors=None) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in 
Latin-1 range, encoded with Latin-1 given = "\xa2\xd8ab\xff" expect = "%A2%D8ab%FF" result = urllib.parse.quote(given, encoding="latin-1") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in BMP, encoded by default in UTF-8 given = "\u6f22\u5b57" # "Kanji" expect = "%E6%BC%A2%E5%AD%97" result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in BMP, encoded with Latin-1 given = "\u6f22\u5b57" self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given, encoding="latin-1") # Characters in BMP, encoded with Latin-1, with replace error handling given = "\u6f22\u5b57" expect = "%3F%3F" # "??" result = urllib.parse.quote(given, encoding="latin-1", errors="replace") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in BMP, Latin-1, with xmlcharref error handling given = "\u6f22\u5b57" expect = "%26%2328450%3B%26%2323383%3B" # "&#28450;&#23383;" result = urllib.parse.quote(given, encoding="latin-1", errors="xmlcharrefreplace") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) def test_quote_plus_with_unicode(self): # Encoding (latin-1) test for quote_plus given = "\xa2\xd8 \xff" expect = "%A2%D8+%FF" result = urllib.parse.quote_plus(given, encoding="latin-1") self.assertEqual(expect, result, "using quote_plus(): %r != %r" % (expect, result)) # Errors test for quote_plus given = "ab\u6f22\u5b57 cd" expect = "ab%3F%3F+cd" result = urllib.parse.quote_plus(given, encoding="latin-1", errors="replace") self.assertEqual(expect, result, "using quote_plus(): %r != %r" % (expect, result)) class UnquotingTests(unittest.TestCase): """Tests for unquote() and unquote_plus() See the doc string for quoting_Tests for details on quoting and such. 
""" def test_unquoting(self): # Make sure unquoting of all ASCII values works escape_list = [] for num in range(128): given = hexescape(chr(num)) expect = chr(num) result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %r != %r" % (expect, result)) escape_list.append(given) escape_string = ''.join(escape_list) del escape_list result = urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1, "using unquote(): not all characters escaped: " "%s" % result) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ()) with support.check_warnings(('', BytesWarning), quiet=True): self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'') def test_unquoting_badpercent(self): # Test unquoting on bad percent-escapes given = '%xab' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) given = '%x' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) given = '%' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # unquote_to_bytes given = '%xab' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) given = '%x' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) given = '%' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) 
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ()) def test_unquoting_mixed_case(self): # Test unquoting on mixed-case hex digits in the percent-escapes given = '%Ab%eA' expect = b'\xab\xea' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) def test_unquoting_parts(self): # Make sure unquoting works when have non-quoted characters # interspersed given = 'ab%sd' % hexescape('c') expect = "abcd" result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %r != %r" % (expect, result)) def test_unquoting_plus(self): # Test difference between unquote() and unquote_plus() given = "are+there+spaces..." expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) expect = given.replace('+', ' ') result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %r != %r" % (expect, result)) def test_unquote_to_bytes(self): given = 'br%C3%BCckner_sapporo_20050930.doc' expect = b'br\xc3\xbcckner_sapporo_20050930.doc' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) # Test on a string with unescaped non-ASCII characters # (Technically an invalid URI; expect those characters to be UTF-8 # encoded). 
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC") expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc" self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) # Test with a bytes as input given = b'%A2%D8ab%FF' expect = b'\xa2\xd8ab\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) # Test with a bytes as input, with unescaped non-ASCII bytes # (Technically an invalid URI; expect those bytes to be preserved) given = b'%A2\xd8ab%FF' expect = b'\xa2\xd8ab\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) def test_unquote_with_unicode(self): # Characters in the Latin-1 range, encoded with UTF-8 given = 'br%C3%BCckner_sapporo_20050930.doc' expect = 'br\u00fcckner_sapporo_20050930.doc' result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Characters in the Latin-1 range, encoded with None (default) result = urllib.parse.unquote(given, encoding=None, errors=None) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Characters in the Latin-1 range, encoded with Latin-1 result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding="latin-1") expect = 'br\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Characters in BMP, encoded with UTF-8 given = "%E6%BC%A2%E5%AD%97" expect = "\u6f22\u5b57" # "Kanji" result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Decode with UTF-8, invalid sequence given = "%F3%B1" expect = "\ufffd" # Replacement character result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Decode with UTF-8, invalid sequence, replace 
errors result = urllib.parse.unquote(given, errors="replace") self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Decode with UTF-8, invalid sequence, ignoring errors given = "%F3%B1" expect = "" result = urllib.parse.unquote(given, errors="ignore") self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # A mix of non-ASCII and percent-encoded characters, UTF-8 result = urllib.parse.unquote("\u6f22%C3%BC") expect = '\u6f22\u00fc' self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # A mix of non-ASCII and percent-encoded characters, Latin-1 # (Note, the string contains non-Latin-1-representable characters) result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1") expect = '\u6f22\u00fc' self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) class urlencode_Tests(unittest.TestCase): """Tests for urlencode()""" def help_inputtype(self, given, test_type): """Helper method for testing different input types. 'given' must lead to only the pairs: * 1st, 1 * 2nd, 2 * 3rd, 3 Test cannot assume anything about order. Docs make no guarantee and have possible dictionary input. 
""" expect_somewhere = ["1st=1", "2nd=2", "3rd=3"] result = urllib.parse.urlencode(given) for expected in expect_somewhere: self.assertIn(expected, result, "testing %s: %s not found in %s" % (test_type, expected, result)) self.assertEqual(result.count('&'), 2, "testing %s: expected 2 '&'s; got %s" % (test_type, result.count('&'))) amp_location = result.index('&') on_amp_left = result[amp_location - 1] on_amp_right = result[amp_location + 1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), "testing %s: '&' not located in proper place in %s" % (test_type, result)) self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps "testing %s: " "unexpected number of characters: %s != %s" % (test_type, len(result), (5 * 3) + 2)) def test_using_mapping(self): # Test passing in a mapping object as an argument. self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'}, "using dict as input type") def test_using_sequence(self): # Test passing in a sequence of two-item sequences as an argument. 
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')], "using sequence of two-item tuples as input") def test_quoting(self): # Make sure keys and values are quoted using quote_plus() given = {"&":"="} expect = "%s=%s" % (hexescape('&'), hexescape('=')) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) given = {"key name":"A bunch of pluses"} expect = "key+name=A+bunch+of+pluses" result = urllib.parse.urlencode(given) self.assertEqual(expect, result) def test_doseq(self): # Test that passing True for 'doseq' parameter works correctly given = {'sequence':['1', '2', '3']} expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3'])) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) for value in given["sequence"]: expect = "sequence=%s" % value self.assertIn(expect, result) self.assertEqual(result.count('&'), 2, "Expected 2 '&'s, got %s" % result.count('&')) def test_empty_sequence(self): self.assertEqual("", urllib.parse.urlencode({})) self.assertEqual("", urllib.parse.urlencode([])) def test_nonstring_values(self): self.assertEqual("a=1", urllib.parse.urlencode({"a": 1})) self.assertEqual("a=None", urllib.parse.urlencode({"a": None})) def test_nonstring_seq_values(self): self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True)) self.assertEqual("a=None&a=a", urllib.parse.urlencode({"a": [None, "a"]}, True)) data = collections.OrderedDict([("a", 1), ("b", 1)]) self.assertEqual("a=a&a=b", urllib.parse.urlencode({"a": data}, True)) def test_urlencode_encoding(self): # ASCII encoding. Expect %3F with errors="replace' given = (('\u00a0', '\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace") self.assertEqual(expect, result) # Default is UTF-8 encoding. given = (('\u00a0', '\u00c1'),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) # Latin-1 encoding. 
given = (('\u00a0', '\u00c1'),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, encoding="latin-1") self.assertEqual(expect, result) def test_urlencode_encoding_doseq(self): # ASCII Encoding. Expect %3F with errors="replace' given = (('\u00a0', '\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, doseq=True, encoding="ASCII", errors="replace") self.assertEqual(expect, result) # ASCII Encoding. On a sequence of values. given = (("\u00a0", (1, "\u00c1")),) expect = '%3F=1&%3F=%3F' result = urllib.parse.urlencode(given, True, encoding="ASCII", errors="replace") self.assertEqual(expect, result) # Utf-8 given = (("\u00a0", "\u00c1"),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) given = (("\u00a0", (42, "\u00c1")),) expect = '%C2%A0=42&%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # latin-1 given = (("\u00a0", "\u00c1"),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, True, encoding="latin-1") self.assertEqual(expect, result) given = (("\u00a0", (42, "\u00c1")),) expect = '%A0=42&%A0=%C1' result = urllib.parse.urlencode(given, True, encoding="latin-1") self.assertEqual(expect, result) def test_urlencode_bytes(self): given = ((b'\xa0\x24', b'\xc1\x24'),) expect = '%A0%24=%C1%24' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # Sequence of values given = ((b'\xa0\x24', (42, b'\xc1\x24')),) expect = '%A0%24=42&%A0%24=%C1%24' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) def test_urlencode_encoding_safe_parameter(self): # Send '$' (\x24) as safe character # Default utf-8 encoding given = ((b'\xa0\x24', b'\xc1\x24'),) result = urllib.parse.urlencode(given, safe=":$") expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\xa0\x24', b'\xc1\x24'),) result = urllib.parse.urlencode(given, 
doseq=True, safe=":$") expect = '%A0$=%C1$' self.assertEqual(expect, result) # Safe parameter in sequence given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=":$") self.assertEqual(expect, result) # Test all above in latin-1 encoding given = ((b'\xa0\x24', b'\xc1\x24'),) result = urllib.parse.urlencode(given, safe=":$", encoding="latin-1") expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\xa0\x24', b'\xc1\x24'),) expect = '%A0$=%C1$' result = urllib.parse.urlencode(given, doseq=True, safe=":$", encoding="latin-1") given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=":$", encoding="latin-1") self.assertEqual(expect, result) class Pathname_Tests(unittest.TestCase): """Test pathname2url() and url2pathname()""" def test_basic(self): # Make sure simple tests pass expected_path = os.path.join("parts", "of", "a", "path") expected_url = "parts/of/a/path" result = urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result, "pathname2url() failed; %s != %s" % (result, expected_url)) result = urllib.request.url2pathname(expected_url) self.assertEqual(expected_path, result, "url2pathame() failed; %s != %s" % (result, expected_path)) def test_quoting(self): # Test automatic quoting and unquoting works for pathnam2url() and # url2pathname() respectively given = os.path.join("needs", "quot=ing", "here") expect = "needs/%s/here" % urllib.parse.quote("quot=ing") result = urllib.request.pathname2url(given) self.assertEqual(expect, result, "pathname2url() failed; %s != %s" % (expect, result)) expect = given result = urllib.request.url2pathname(result) self.assertEqual(expect, result, "url2pathname() failed; %s != %s" % (expect, result)) given = os.path.join("make sure", "using_quote") expect = "%s/using_quote" % urllib.parse.quote("make sure") result = 
urllib.request.pathname2url(given) self.assertEqual(expect, result, "pathname2url() failed; %s != %s" % (expect, result)) given = "make+sure/using_unquote" expect = os.path.join("make+sure", "using_unquote") result = urllib.request.url2pathname(given) self.assertEqual(expect, result, "url2pathname() failed; %s != %s" % (expect, result)) @unittest.skipUnless(sys.platform == 'win32', 'test specific to the urllib.url2path function.') def test_ntpath(self): given = ('/C:/', '///C:/', '/C|//') expect = 'C:\\' for url in given: result = urllib.request.url2pathname(url) self.assertEqual(expect, result, 'urllib.request..url2pathname() failed; %s != %s' % (expect, result)) given = '///C|/path' expect = 'C:\\path' result = urllib.request.url2pathname(given) self.assertEqual(expect, result, 'urllib.request.url2pathname() failed; %s != %s' % (expect, result)) class Utility_Tests(unittest.TestCase): """Testcase to test the various utility functions in the urllib.""" def test_splitpasswd(self): """Some of password examples are not sensible, but it is added to confirming to RFC2617 and addressing issue4675. 
""" self.assertEqual(('user', 'ab'),urllib.parse.splitpasswd('user:ab')) self.assertEqual(('user', 'a\nb'),urllib.parse.splitpasswd('user:a\nb')) self.assertEqual(('user', 'a\tb'),urllib.parse.splitpasswd('user:a\tb')) self.assertEqual(('user', 'a\rb'),urllib.parse.splitpasswd('user:a\rb')) self.assertEqual(('user', 'a\fb'),urllib.parse.splitpasswd('user:a\fb')) self.assertEqual(('user', 'a\vb'),urllib.parse.splitpasswd('user:a\vb')) self.assertEqual(('user', 'a:b'),urllib.parse.splitpasswd('user:a:b')) self.assertEqual(('user', 'a b'),urllib.parse.splitpasswd('user:a b')) self.assertEqual(('user 2', 'ab'),urllib.parse.splitpasswd('user 2:ab')) self.assertEqual(('user+1', 'a+b'),urllib.parse.splitpasswd('user+1:a+b')) def test_thishost(self): """Test the urllib.request.thishost utility function returns a tuple""" self.assertIsInstance(urllib.request.thishost(), tuple) class URLopener_Tests(unittest.TestCase): """Testcase to test the open method of URLopener class.""" def test_quoted_open(self): class DummyURLopener(urllib.request.URLopener): def open_spam(self, url): return url with support.check_warnings( ('DummyURLopener style of invoking requests is deprecated.', DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') # test the safe characters are not quoted by urlopen self.assertEqual(DummyURLopener().open( "spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"), "//c:|windows%/:=&?~#+!$,;'@()*[]|/path/") # Just commented them out. # Can't really tell why keep failing in windows and sparc. # Everywhere else they work ok, but on those machines, sometimes # fail in one of the tests, sometimes in other. I have a linux, and # the tests go ok. # If anybody has one of the problematic environments, please help! # . 
Facundo # # def server(evt): # import socket, time # serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # serv.bind(("", 9093)) # serv.listen() # try: # conn, addr = serv.accept() # conn.send("1 Hola mundo\n") # cantdata = 0 # while cantdata < 13: # data = conn.recv(13-cantdata) # cantdata += len(data) # time.sleep(.3) # conn.send("2 No more lines\n") # conn.close() # except socket.timeout: # pass # finally: # serv.close() # evt.set() # # class FTPWrapperTests(unittest.TestCase): # # def setUp(self): # import ftplib, time, threading # ftplib.FTP.port = 9093 # self.evt = threading.Event() # threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1) # # def tearDown(self): # self.evt.wait() # # def testBasic(self): # # connects # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # ftp.close() # # def testTimeoutNone(self): # # global default timeout is ignored # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutDefault(self): # # global default timeout is used # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutValue(self): # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [], # timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() class RequestTests(unittest.TestCase): """Unit tests for urllib.request.Request.""" def test_default_values(self): Request = urllib.request.Request request = Request("http://www.python.org") 
self.assertEqual(request.get_method(), 'GET') request = Request("http://www.python.org", {}) self.assertEqual(request.get_method(), 'POST') def test_with_method_arg(self): Request = urllib.request.Request request = Request("http://www.python.org", method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request("http://www.python.org", {}, method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request("http://www.python.org", method='GET') self.assertEqual(request.get_method(), 'GET') request.method = 'HEAD' self.assertEqual(request.get_method(), 'HEAD') class URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(url2pathname("///C|"), 'C:') self.assertEqual(url2pathname("///C:"), 'C:') self.assertEqual(url2pathname("///C|/"), 'C:\\') def test_converting_when_no_drive_letter(self): # cannot end a raw string in \ self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\') self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\') def test_simple_compare(self): self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"), r'C:\foo\bar\spam.foo') def test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname, "///\u00e8|/") def test_roundtrip_url2pathname(self): list_of_paths = ['C:', r'\\\C\test\\', r'C:\foo\bar\spam.foo' ] for path in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path) class PathName2URLTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(pathname2url("C:"), '///C:') self.assertEqual(pathname2url("C:\\"), '///C:') def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r"\\\folder\test" "\\"), '/////folder/test/') self.assertEqual(pathname2url(r"\\folder\test" "\\"), '////folder/test/') self.assertEqual(pathname2url(r"\folder\test" "\\"), '/folder/test/') def test_simple_compare(self): 
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'), "///C:/foo/bar/spam.foo" ) def test_long_drive_letter(self): self.assertRaises(IOError, pathname2url, "XX:\\") def test_roundtrip_pathname2url(self): list_of_paths = ['///C:', '/////folder/test/', '///C:/foo/bar/spam.foo'] for path in list_of_paths: self.assertEqual(pathname2url(url2pathname(path)), path) if __name__ == '__main__': unittest.main()
oscsrv.py
import liblo
import threading


class OSCsrv(object):
    """Threaded OSC server that forwards received messages onto a queue.

    One handler is registered per OSC address; each handler stores the
    received arguments on the instance and pushes ``(path, args)`` onto
    ``queue`` for the consumer thread to pick up.
    """

    def __init__(self, port=1234, queue=None):
        """Bind a liblo server on *port* and start the receive loop thread.

        If the port cannot be bound (e.g. already in use), ``isrunning`` is
        left ``False`` and no handlers or thread are set up.
        """
        self.server = None
        self.port = port
        self.queue = queue
        self.callbacks = []
        self.st = None
        try:
            self.server = liblo.Server(self.port)
            self.isrunning = True
        except liblo.ServerError as err:
            # Bug fix: previously execution fell through after a failed bind
            # and crashed on self.server.add_method (self.server is None).
            # Report the error and leave the instance inert instead.
            print(err)
            self.isrunning = False
            return
        # Type strings describe the expected argument signature:
        # i = int32, f = float, s = string.
        self.server.add_method("/address", 'if', self.cb_address)
        self.server.add_method("/perspective", 'f', self.cb_perspective)
        self.server.add_method("/translate", 'fff', self.cb_translate)
        self.server.add_method("/scale", 'fff', self.cb_scale)
        self.server.add_method("/vert", 'f' * 13, self.cb_vert)
        self.server.add_method("/load", 's', self.cb_load)
        self.server.add_method("/facecolor", 'ifff', self.cb_facecolor)
        self.st = threading.Thread(target=self.run)
        self.st.start()

    def cb_address(self, path, args):
        """Handle /address (int, float): record and enqueue."""
        self.address = args
        self.queue.put((path, args))

    def cb_perspective(self, path, args):
        """Handle /perspective (float): record and enqueue."""
        print('cb_perspective received args {0}'.format(args))
        self.perspective = args
        self.queue.put((path, args))

    def cb_translate(self, path, args):
        """Handle /translate (3 floats): record and enqueue."""
        print('cb_translate received args {0}'.format(args))
        self.translate = args
        self.queue.put((path, args))

    def cb_scale(self, path, args):
        """Handle /scale (3 floats): record and enqueue."""
        print('cb_scale received args {0}'.format(args))
        self.scale = args
        self.queue.put((path, args))

    def cb_vert(self, path, args):
        """Handle /vert (13 floats): record and enqueue."""
        self.vert = args
        self.queue.put((path, args))

    def cb_facecolor(self, path, args):
        """Handle /facecolor (int, 3 floats): record and enqueue."""
        self.facecolor = args
        self.queue.put((path, args))

    def cb_load(self, path, args):
        """Handle /load (string): record and enqueue."""
        print('cb_load received args {0}'.format(args))
        self.load = args
        self.queue.put((path, args))

    def run(self):
        """Receive loop: dispatch pending messages every 100 ms until
        ``isrunning`` is cleared."""
        while self.isrunning:
            self.server.recv(100)
        print('oscsrv run terminating')

    def join(self):
        """Wait for the receive thread to finish (no-op if it never started)."""
        if self.st is not None:
            self.st.join()
test_workflow_event_processor.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for WorkflowEventProcessor.

The store, scheduler and per-rule event handler are all mocked; the tests
drive the processor directly and assert on the calls it makes to those
collaborators.
"""
import multiprocessing
import unittest
import unittest.mock as mock
from multiprocessing import Queue, Value
from multiprocessing.connection import Pipe

import cloudpickle

from ai_flow.meta.project_meta import ProjectMeta
from ai_flow.workflow.status import Status
from ai_flow.api.context_extractor import ContextExtractor, EventContext, Broadcast, ContextList
from ai_flow.meta.workflow_meta import WorkflowMeta
from ai_flow.scheduler_service.service.workflow_event_handler import WorkflowEventHandler
from ai_flow.scheduler_service.service.workflow_execution_event_handler_state import WorkflowContextEventHandlerState
from ai_flow.workflow.control_edge import WorkflowSchedulingRule, MeetAllEventCondition, WorkflowAction
from notification_service.base_notification import BaseEvent
from ai_flow.plugin_interface.scheduler_interface import Scheduler, WorkflowExecutionInfo
from ai_flow.store.abstract_store import AbstractStore
from ai_flow.scheduler_service.service.workflow_event_processor import WorkflowEventProcessor, Poison


class MyContextExtractor(ContextExtractor):
    """Test extractor: event_type selects the behavior.

    'exception' raises, 'broadcast' returns Broadcast(), anything else
    returns a ContextList holding the event's own context string.
    """

    def extract_context(self, event: BaseEvent) -> EventContext:
        if event.event_type == 'exception':
            raise Exception()
        if event.event_type == 'broadcast':
            return Broadcast()
        context_list = ContextList()
        context_list.add_context(event.context)
        return context_list


class TestWorkflowEventProcessor(unittest.TestCase):

    def setUp(self) -> None:
        # All collaborators are mocks; the factory always hands back the
        # same mocked event handler regardless of the scheduling rule.
        self.mock_store: AbstractStore = mock.Mock()
        self.mock_scheduler: Scheduler = mock.Mock()
        self.mock_event_handler: WorkflowEventHandler = mock.Mock()

        def mock_event_handler_factory(scheduler_rule):
            return self.mock_event_handler

        # c1 is the processor's end of the pipe, c2 the test's sending end.
        self.c1, self.c2 = Pipe()
        self.processor = WorkflowEventProcessor(self.c1, self.mock_store, self.mock_scheduler,
                                                workflow_event_handler_factory=mock_event_handler_factory)
        self._prepare_workflows()

    def test_run_and_stop(self):
        # The processor runs in a child process; count _process_event calls
        # via a shared Value and stop the loop by sending a Poison pill.
        import time
        self.call_cnt = Value('i', 0)

        def mock__process_event(*args, **kwargs):
            self.call_cnt.value += 1

        self.processor._process_event = mock__process_event
        process = multiprocessing.Process(target=self.processor.run)
        process.start()
        event = BaseEvent('k', 'v', namespace='test_namespace')
        self.c2.send(event)
        time.sleep(1)
        self.assertEqual(1, self.call_cnt.value)
        self.c2.send(Poison())
        process.join()

    def test__process_event(self):
        # A matcher is needed because the WorkflowMeta objects passed to the
        # handler are re-read from the (mock) store, not identical instances.
        class WorkflowMetaMatcher:
            def __init__(self, project_id, name):
                self.project_id = project_id
                self.name = name

            def __eq__(self, other):
                if not isinstance(other, WorkflowMeta):
                    return False
                return self.project_id == other.project_id and self.name == other.name

        with mock.patch.object(self.processor, '_handle_event_for_workflow') as handle_method, \
                mock.patch.object(self.processor, '_update_workflow_last_event_version') as update_last_event_version:
            e = BaseEvent('k', 'v', namespace='test_namespace')
            self.processor._process_event(e)
            # Every workflow of every project must be offered the event...
            handle_method.assert_has_calls([mock.call('test_project1', WorkflowMetaMatcher(0, 'workflow1'), e),
                                            mock.call('test_project2', WorkflowMetaMatcher(1, 'workflow2'), e),
                                            mock.call('test_project2', WorkflowMetaMatcher(1, 'workflow3'), e)])
            # ...and have its last-seen event version bumped.
            update_last_event_version.assert_has_calls(
                [mock.call('test_project1', WorkflowMetaMatcher(0, 'workflow1'), e),
                 mock.call('test_project2', WorkflowMetaMatcher(1, 'workflow2'), e),
                 mock.call('test_project2', WorkflowMetaMatcher(1, 'workflow3'), e)])

    def _prepare_workflows(self):
        # Wire the mock store with two projects: test_project1 -> workflow1,
        # test_project2 -> workflow2 and workflow3, each carrying a pickled
        # MyContextExtractor and event-based scheduling rules.
        context_extractor = MyContextExtractor()
        rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
                                      WorkflowAction.STOP)
        rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
                                       WorkflowAction.START)
        rule2 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k2', 'v2', namespace='test_namespace'),
                                       WorkflowAction.START)
        w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
                          scheduling_rules=[rule, rule1])
        w2 = WorkflowMeta('workflow2', 1, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
                          scheduling_rules=[rule, rule2])
        w3 = WorkflowMeta('workflow3', 1, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
                          scheduling_rules=[rule])

        def mock_list_workflows(project_name):
            if project_name == 'test_project1':
                return [w1]
            elif project_name == 'test_project2':
                return [w2, w3]
            else:
                return None

        self.mock_store.list_workflows = mock_list_workflows
        p1 = ProjectMeta(name='test_project1', uri='dummy')
        p2 = ProjectMeta(name='test_project2', uri='dummy')
        self.mock_store.list_projects.return_value = [p1, p2]

    def test__get_workflow_execution_state_register_state_if_not_exist(self):
        # A context with no stored state must be registered on first sight.
        state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
        self.mock_store.get_workflow_context_event_handler_state.return_value = None
        self.mock_store.register_workflow_context_event_handler_state.return_value = state

        context_list = ContextList()
        context_list.add_context('context_1')
        states = self.processor._get_workflow_execution_state(context_list, 'project', 'workflow1')
        self.assertEqual(1, len(states))
        self.assertEqual(state, states[0])
        self.mock_store.register_workflow_context_event_handler_state.assert_called_with('project', 'workflow1',
                                                                                         'context_1')

    def test__get_workflow_execution_state_with_context(self):
        # One state lookup per context in the list.
        state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
        self.mock_store.get_workflow_context_event_handler_state.return_value = state

        context_list = ContextList()
        context_list.add_context('context_1')
        context_list.add_context('context_2')
        states = self.processor._get_workflow_execution_state(context_list, 'project', 'workflow1')
        calls = [mock.call('project', 'workflow1', 'context_1'), mock.call('project', 'workflow1', 'context_2')]
        self.mock_store.get_workflow_context_event_handler_state.assert_has_calls(calls, any_order=True)
        self.assertEqual(2, len(states))

    def test__get_workflow_execution_state_with_broadcast(self):
        # Broadcast fans out to every stored state of the workflow.
        state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
        self.mock_store.list_workflow_context_event_handler_states.return_value = [state]

        states = self.processor._get_workflow_execution_state(Broadcast(), 'project', 'workflow1')
        self.mock_store.list_workflow_context_event_handler_states.assert_called_with('project', 'workflow1')
        self.assertEqual(1, len(states))
        self.assertEqual(state, states[0])

    def test__handler_event_for_workflow_none_action(self):
        # NONE action: only the handler-state version is persisted.
        context_extractor = MyContextExtractor()
        rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
                                      WorkflowAction.START)
        rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
                                       WorkflowAction.START)
        w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
                          scheduling_rules=[rule, rule1])
        state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
        self.mock_store.get_workflow_context_event_handler_state.return_value = state
        self.mock_event_handler.handle_event.return_value = (WorkflowAction.NONE, 1)
        e = BaseEvent('k1', 'v1', namespace='test_namespace')
        self.processor._handle_event_for_workflow('project', w1, e)
        self.mock_store.update_workflow_context_event_handler_state \
            .assert_called_with('project', 'workflow1', 'context_1', None, 1)

    def test__handler_event_for_workflow_start_action(self):
        # START action with no existing execution: a new execution is started
        # and its id recorded in the handler state.
        context_extractor = MyContextExtractor()
        rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
                                      WorkflowAction.START)
        rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
                                       WorkflowAction.START)
        w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
                          scheduling_rules=[rule, rule1])
        state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
        self.mock_store.get_workflow_context_event_handler_state.return_value = state

        # Start Action
        self.mock_scheduler.start_new_workflow_execution.return_value = WorkflowExecutionInfo('execution_id')
        self.mock_event_handler.handle_event.return_value = (WorkflowAction.START, 1)
        e = BaseEvent('k1', 'v1', namespace='test_namespace')
        self.processor._handle_event_for_workflow('project', w1, e)
        self.mock_scheduler.start_new_workflow_execution.assert_called_with('project', 'workflow1', 'context_1')
        self.mock_store.update_workflow_context_event_handler_state \
            .assert_called_with('project', 'workflow1', 'context_1', 'execution_id', 1)

    def test__handler_event_for_workflow_start_with_running_workflow_execution(self):
        # START action while the tracked execution is still RUNNING: do not
        # start a second execution, only update the version.
        context_extractor = MyContextExtractor()
        rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
                                      WorkflowAction.START)
        rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
                                       WorkflowAction.START)
        w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
                          scheduling_rules=[rule, rule1])
        state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1', '1')
        self.mock_store.get_workflow_context_event_handler_state.return_value = state

        # Start Action
        self.mock_scheduler.get_workflow_execution.return_value = WorkflowExecutionInfo('1', status=Status.RUNNING)
        self.mock_event_handler.handle_event.return_value = (WorkflowAction.START, 1)
        e = BaseEvent('k1', 'v1', namespace='test_namespace')
        self.processor._handle_event_for_workflow('project', w1, e)
        self.mock_scheduler.start_new_workflow_execution.assert_not_called()
        self.mock_store.update_workflow_context_event_handler_state \
            .assert_called_with('project', 'workflow1', 'context_1', '1', 1)

    def test__handler_event_for_workflow_start_with_non_running_workflow_execution(self):
        # START action when the tracked execution already finished: a fresh
        # execution is started and recorded.
        context_extractor = MyContextExtractor()
        rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
                                      WorkflowAction.START)
        rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
                                       WorkflowAction.START)
        w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
                          scheduling_rules=[rule, rule1])
        state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1', '1')
        self.mock_store.get_workflow_context_event_handler_state.return_value = state

        # Start Action
        self.mock_scheduler.get_workflow_execution.return_value = WorkflowExecutionInfo('1', status=Status.FINISHED)
        self.mock_scheduler.start_new_workflow_execution.return_value = WorkflowExecutionInfo('execution_id')
        self.mock_event_handler.handle_event.return_value = (WorkflowAction.START, 1)
        e = BaseEvent('k1', 'v1', namespace='test_namespace')
        self.processor._handle_event_for_workflow('project', w1, e)
        self.mock_scheduler.start_new_workflow_execution.assert_called_with('project', 'workflow1', 'context_1')
        self.mock_store.update_workflow_context_event_handler_state \
            .assert_called_with('project', 'workflow1', 'context_1', 'execution_id', 1)

    def test__handler_event_for_workflow_stop_action(self):
        # STOP action: a no-op when no execution is tracked; stops the
        # tracked execution when one exists.
        context_extractor = MyContextExtractor()
        rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
                                      WorkflowAction.START)
        rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
                                       WorkflowAction.START)
        w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
                          scheduling_rules=[rule, rule1])
        state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
        self.mock_store.get_workflow_context_event_handler_state.return_value = state
        self.mock_event_handler.handle_event.return_value = (WorkflowAction.STOP, 1)
        e = BaseEvent('k1', 'v1', namespace='test_namespace')
        self.processor._handle_event_for_workflow('project', w1, e)
        self.mock_store.update_workflow_context_event_handler_state \
            .assert_called_with('project', 'workflow1', 'context_1', None, 1)

        state.workflow_execution_id = 'execution_id'
        self.processor._handle_event_for_workflow('project', w1, e)
        self.mock_scheduler.stop_workflow_execution.assert_called_with('execution_id')
        self.mock_store.update_workflow_context_event_handler_state \
            .assert_called_with('project', 'workflow1', 'context_1', 'execution_id', 1)

    def test__handler_event_for_workflow_exception(self):
        # The extractor raises on event_type 'exception'; the processor is
        # expected not to propagate it (the call below must not blow up).
        context_extractor = MyContextExtractor()
        rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
                                      WorkflowAction.START)
        rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
                                       WorkflowAction.START)
        w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
                          scheduling_rules=[rule, rule1])
        state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
        self.mock_store.get_workflow_context_event_handler_state.return_value = state

        # Start Action
        e = BaseEvent('k1', 'v1', namespace='test_namespace', event_type='exception')
        self.processor._handle_event_for_workflow('project', w1, e)

    def test__update_workflow_last_event_version(self):
        # The workflow meta is re-persisted with last_event_version taken
        # from the event.
        e = BaseEvent('k1', 'v1', namespace='test_project', version=15213)
        workflow = WorkflowMeta('w', 0)
        self.processor._update_workflow_last_event_version('test_project', workflow, e)
        self.mock_store.update_workflow.assert_called_once_with('w', 'test_project',
                                                                context_extractor_in_bytes=None,
                                                                scheduling_rules=[],
                                                                last_event_version=15213)
osc.py
import logging
import os
import threading
from collections import OrderedDict
from tempfile import mkstemp
from time import sleep, time
from uuid import uuid4

import rv.api
import sunvox
from pythonosc import osc_server, udp_client
from pythonosc.osc_message_builder import OscMessageBuilder

from sails.api import c
from sunvosc.dispatcher import PeerDispatcher


class OscPlayback(object):
    """Drive a remote sunvosc instance over OSC and keep playback state."""

    def __init__(self, interface='localhost', port=9001,
                 sunvosc_host='localhost', sunvosc_port=9000):
        self.interface = interface
        self.port = port
        self.sunvosc_host = sunvosc_host
        self.sunvosc_port = sunvosc_port
        self.client = udp_client.UDPClient(sunvosc_host, sunvosc_port)
        self.dispatcher = PeerDispatcher()
        server = osc_server.ThreadingOSCUDPServer(
            (interface, port), self.dispatcher)
        logging.info('Peer serving on %s:%s', *server.server_address)
        self.server_thread = threading.Thread(target=server.serve_forever)
        self.server_thread.start()
        self.engine = None
        self.generators = OrderedDict()
        self.default_velocity = 128
        # Tell the remote peer where to send feedback messages.
        b = OscMessageBuilder('/slot0/inform/start')
        b.add_arg(interface, 's')
        b.add_arg(port, 'i')
        msg = b.build()
        self.client.send(msg)

    def advance_generators(self, session, pos):
        """Advance every started generator that has not yet reached ``pos``.

        Yields each generator that was advanced.
        """
        with session[pos]:
            for gen, last_pos in self.generators.items():
                if gen.started and last_pos < pos:
                    with session[pos]:
                        gen.advance(session.cursor())
                    self.generators[gen] = pos
                    yield gen

    def process(self, pos, command):
        """Translate one sails command into the corresponding OSC message."""
        if isinstance(command, c.ConnectModules):
            module_numbers = self.dispatcher.module_numbers[0]
            src = command.src
            if hasattr(src, 'index'):
                # Busy-wait until the peer has reported the module number.
                while src.index is None and src.tag not in module_numbers:
                    sleep(0)
                if src.index is None:
                    src.index = module_numbers[src.tag]
                src = src.index
            dest = command.dest
            if hasattr(dest, 'index'):
                while (
                    dest.index is None
                    and dest.tag not in self.dispatcher.module_numbers[0]
                ):
                    sleep(0)
                if dest.index is None:
                    dest.index = module_numbers[dest.tag]
                dest = dest.index
            b = OscMessageBuilder('/slot0/connect')
            b.add_arg(src, 'i')
            b.add_arg(dest, 'i')
            msg = b.build()
            self.client.send(msg)
        elif isinstance(command, c.Engine):
            if self.engine is not None:
                logging.warning(
                    'Only one engine is supported for OSC playback')
            else:
                self.engine = command
                b = OscMessageBuilder('/slot0/init')
                b.add_arg(1, 'i')
                b.add_arg(128, 'i')
                msg = b.build()
                self.client.send(msg)
        elif isinstance(command, c.Generator):
            # (-1, 0) sorts before any real (beat, tick) position.
            self.generators[command] = (-1, 0)
            command.start()
        elif isinstance(command, c.GeneratorStop):
            del self.generators[command.parent]
            command.parent.generator = None
        elif isinstance(command, c.Module):
            data = rv.Synth(command.module).read()
            command.module.tag = uuid4().hex
            b = OscMessageBuilder('/slot0/load_module')
            b.add_arg(command.module.tag, 's')
            fd, name = mkstemp('.sunsynth')
            os.write(fd, data)
            os.close(fd)
            b.add_arg(name, 's')
            # TODO: delete later after we know it's loaded
            msg = b.build()
            self.client.send(msg)
        elif isinstance(command, c.NoteOff):
            beat, tick = pos
            row = beat * 24 + tick
            b = OscMessageBuilder('/slot0/queue')
            b.add_arg(row, 'i')
            b.add_arg(0, 'i')
            b.add_arg(command.track.index, 'i')
            b.add_arg(sunvox.NOTECMD.NOTE_OFF, 'i')
            b.add_arg(False, 'F')
            b.add_arg(False, 'F')
            b.add_arg(False, 'F')
            b.add_arg(False, 'F')
            b.add_arg(False, 'F')
            msg = b.build()
            self.client.send(msg)
        elif isinstance(command, c.NoteOn):
            beat, tick = pos
            row = beat * 24 + tick
            vel = getattr(command, 'vel', None)
            vel = self.default_velocity if vel is None else vel
            b = OscMessageBuilder('/slot0/queue')
            b.add_arg(row, 'i')
            b.add_arg(0, 'i')
            b.add_arg(command.track.index, 'i')
            b.add_arg(command.note, 'i')
            b.add_arg(vel, 'i')
            b.add_arg(getattr(command.module, 'index', command.module), 'i')
            b.add_arg(False, 'F')
            b.add_arg(False, 'F')
            b.add_arg(False, 'F')
            msg = b.build()
            self.client.send(msg)
        command.processed = True


def play_sunvosc(session, bpm=125, forever=False, writeahead=12):
    """Play a session through a remote sunvosc peer.

    :type session: sails.session.Session
    :type forever: bool

    NOTE(review): ``bpm`` is currently unused by this implementation —
    confirm whether tempo is meant to be sent to the peer.
    """
    playback = OscPlayback()
    pos = (-1, 0)
    last_ctl_pos = max(session._ctl_timelines)
    with session[last_ctl_pos]:
        last_cmd_pos = max(session.cmd_timeline)
    start_time = None
    while forever or pos <= last_cmd_pos:
        with session[pos] as cpos:
            processed = 0
            last_played = playback.dispatcher.last_played[0]
            if last_played is not None:
                last_played, frames = last_played
            # Throttle: never queue further than `writeahead` ticks past
            # the position the peer has reported playing.
            while cpos.ticks >= (last_played or 0) + writeahead:
                if start_time is None:
                    start_time = time()
                    b = OscMessageBuilder('/slot0/start')
                    msg = b.build()
                    playback.client.send(msg)
                else:
                    sleep(0.1)
                last_played = playback.dispatcher.last_played[0]
                if last_played is not None:
                    last_played, frames = last_played
            # Keep processing until advancing generators produces no new
            # work; generators may append new commands at this position.
            keep_processing = True
            while keep_processing:
                cmds = session.cmd_timeline.get(pos, [])
                processed = 0
                for cmd in cmds:
                    if not cmd.processed:
                        print('pos={!r} cmd={!r}'.format(pos, cmd))
                        playback.process(pos, cmd)
                        processed += 1
                advanced = list(playback.advance_generators(session, pos))
                keep_processing = len(advanced) > 0
                cmds = session.cmd_timeline.get(pos, [])
                for cmd in cmds:
                    if not cmd.processed:
                        print('pos={!r} cmd={!r}'.format(pos, cmd))
                        playback.process(pos, cmd)
                        processed += 1
            if pos[0] >= 0 and processed == 0:
                print('pos={!r}'.format(pos), end='\r')
            pos = (cpos + 1).pos
# ---- file: test_change_stream.py ----
# Copyright 2017 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test the change_stream module."""

import random
import os
import re
import sys
import string
import threading
import time
import uuid

from itertools import product

sys.path[0:0] = ['']

from bson import ObjectId, SON, Timestamp, encode, json_util
from bson.binary import (ALL_UUID_REPRESENTATIONS, Binary, STANDARD,
                         PYTHON_LEGACY)
from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument
from pymongo import MongoClient
from pymongo.command_cursor import CommandCursor
from pymongo.errors import (InvalidOperation, OperationFailure,
                            ServerSelectionTimeoutError)
from pymongo.message import _CursorAddress
from pymongo.read_concern import ReadConcern
from pymongo.write_concern import WriteConcern

from test import client_context, unittest, IntegrationTest
from test.utils import (EventListener, WhiteListEventListener,
                        rs_or_single_client, wait_until)


class TestChangeStreamBase(IntegrationTest):
    def change_stream_with_client(self, client, *args, **kwargs):
        """Create a change stream using the given client and return it."""
        raise NotImplementedError

    def change_stream(self, *args, **kwargs):
        """Create a change stream using the default client and return it."""
        return self.change_stream_with_client(self.client, *args, **kwargs)

    def client_with_listener(self, *commands):
        """Return a client with a WhiteListEventListener."""
        listener = WhiteListEventListener(*commands)
        client = rs_or_single_client(event_listeners=[listener])
        self.addCleanup(client.close)
        return client, listener

    def watched_collection(self, *args, **kwargs):
        """Return a collection that is watched by self.change_stream()."""
        # Construct a unique collection for each test.
        collname = '.'.join(self.id().rsplit('.', 2)[1:])
        return self.db.get_collection(collname, *args, **kwargs)

    def generate_invalidate_event(self, change_stream):
        """Cause a change stream invalidate event."""
        raise NotImplementedError

    def generate_unique_collnames(self, numcolls):
        """Generate numcolls collection names unique to a test."""
        collnames = []
        for idx in range(1, numcolls + 1):
            collnames.append(self.id() + '_' + str(idx))
        return collnames

    def get_resume_token(self, invalidate=False):
        """Get a resume token to use for starting a change stream."""
        # Ensure targeted collection exists before starting.
        coll = self.watched_collection(write_concern=WriteConcern('majority'))
        coll.insert_one({})
        if invalidate:
            with self.change_stream(
                    [{'$match': {'operationType': 'invalidate'}}]) as cs:
                if isinstance(cs._target, MongoClient):
                    self.skipTest(
                        "cluster-level change streams cannot be invalidated")
                self.generate_invalidate_event(cs)
                return cs.next()['_id']
        else:
            with self.change_stream() as cs:
                coll.insert_one({'data': 1})
                return cs.next()['_id']

    def get_start_at_operation_time(self):
        """Get an operationTime. Advances the operation clock beyond the most
        recently returned timestamp."""
        optime = self.client.admin.command("ping")["operationTime"]
        return Timestamp(optime.time, optime.inc + 1)

    def insert_one_and_check(self, change_stream, doc):
        """Insert a document and check that it shows up in the change
        stream."""
        raise NotImplementedError

    def kill_change_stream_cursor(self, change_stream):
        """Cause a cursor not found error on the next getMore."""
        cursor = change_stream._cursor
        address = _CursorAddress(cursor.address, cursor._CommandCursor__ns)
        client = self.watched_collection().database.client
        client._close_cursor_now(cursor.cursor_id, address)


class APITestsMixin(object):
    def test_watch(self):
        with self.change_stream(
                [{'$project': {'foo': 0}}], full_document='updateLookup',
                max_await_time_ms=1000, batch_size=100) as change_stream:
            self.assertEqual([{'$project': {'foo': 0}}],
                             change_stream._pipeline)
            self.assertEqual('updateLookup', change_stream._full_document)
            self.assertEqual(1000, change_stream._max_await_time_ms)
            self.assertEqual(100, change_stream._batch_size)
            self.assertIsInstance(change_stream._cursor, CommandCursor)
            self.assertEqual(
                1000, change_stream._cursor._CommandCursor__max_await_time_ms)
            self.watched_collection(
                write_concern=WriteConcern("majority")).insert_one({})
            _ = change_stream.next()
            resume_token = change_stream.resume_token
        with self.assertRaises(TypeError):
            self.change_stream(pipeline={})
        with self.assertRaises(TypeError):
            self.change_stream(full_document={})
        # No Error.
        with self.change_stream(resume_after=resume_token):
            pass

    def test_try_next(self):
        # ChangeStreams only read majority committed data so use w:majority.
        coll = self.watched_collection().with_options(
            write_concern=WriteConcern("majority"))
        coll.drop()
        coll.insert_one({})
        self.addCleanup(coll.drop)
        with self.change_stream(max_await_time_ms=250) as stream:
            self.assertIsNone(stream.try_next())  # No changes initially.
            coll.insert_one({})  # Generate a change.
            # On sharded clusters, even majority-committed changes only show
            # up once an event that sorts after it shows up on the other
            # shard. So, we wait on try_next to eventually return changes.
            wait_until(lambda: stream.try_next() is not None,
                       "get change from try_next")

    def test_try_next_runs_one_getmore(self):
        listener = EventListener()
        client = rs_or_single_client(event_listeners=[listener])
        # Connect to the cluster.
        client.admin.command('ping')
        listener.results.clear()
        # ChangeStreams only read majority committed data so use w:majority.
        coll = self.watched_collection().with_options(
            write_concern=WriteConcern("majority"))
        coll.drop()
        # Create the watched collection before starting the change stream to
        # skip any "create" events.
        coll.insert_one({'_id': 1})
        self.addCleanup(coll.drop)
        with self.change_stream_with_client(
                client, max_await_time_ms=250) as stream:
            self.assertEqual(listener.started_command_names(), ["aggregate"])
            listener.results.clear()
            # Confirm that only a single getMore is run even when no
            # documents are returned.
            self.assertIsNone(stream.try_next())
            self.assertEqual(listener.started_command_names(), ["getMore"])
            listener.results.clear()
            self.assertIsNone(stream.try_next())
            self.assertEqual(listener.started_command_names(), ["getMore"])
            listener.results.clear()
            # Get at least one change before resuming.
            coll.insert_one({'_id': 2})
            wait_until(lambda: stream.try_next() is not None,
                       "get change from try_next")
            listener.results.clear()
            # Cause the next request to initiate the resume process.
            self.kill_change_stream_cursor(stream)
            listener.results.clear()
            # The sequence should be:
            # - getMore, fail
            # - resume with aggregate command
            # - no results, return immediately without another getMore
            self.assertIsNone(stream.try_next())
            self.assertEqual(
                listener.started_command_names(), ["getMore", "aggregate"])
            listener.results.clear()
            # Stream still works after a resume.
            coll.insert_one({'_id': 3})
            wait_until(lambda: stream.try_next() is not None,
                       "get change from try_next")
            self.assertEqual(set(listener.started_command_names()),
                             set(["getMore"]))
            self.assertIsNone(stream.try_next())

    def test_batch_size_is_honored(self):
        listener = EventListener()
        client = rs_or_single_client(event_listeners=[listener])
        # Connect to the cluster.
        client.admin.command('ping')
        listener.results.clear()
        # ChangeStreams only read majority committed data so use w:majority.
        coll = self.watched_collection().with_options(
            write_concern=WriteConcern("majority"))
        coll.drop()
        # Create the watched collection before starting the change stream to
        # skip any "create" events.
        coll.insert_one({'_id': 1})
        self.addCleanup(coll.drop)
        # Expected batchSize.
        expected = {'batchSize': 23}
        with self.change_stream_with_client(
                client, max_await_time_ms=250, batch_size=23) as stream:
            # Confirm that batchSize is honored for initial batch.
            cmd = listener.results['started'][0].command
            self.assertEqual(cmd['cursor'], expected)
            listener.results.clear()
            # Confirm that batchSize is honored by getMores.
            self.assertIsNone(stream.try_next())
            cmd = listener.results['started'][0].command
            key = next(iter(expected))
            self.assertEqual(expected[key], cmd[key])

    # $changeStream.startAtOperationTime was added in 4.0.0.
@client_context.require_version_min(4, 0, 0) def test_start_at_operation_time(self): optime = self.get_start_at_operation_time() coll = self.watched_collection( write_concern=WriteConcern("majority")) ndocs = 3 coll.insert_many([{"data": i} for i in range(ndocs)]) with self.change_stream(start_at_operation_time=optime) as cs: for i in range(ndocs): cs.next() def _test_full_pipeline(self, expected_cs_stage): client, listener = self.client_with_listener("aggregate") results = listener.results with self.change_stream_with_client( client, [{'$project': {'foo': 0}}]) as _: pass self.assertEqual(1, len(results['started'])) command = results['started'][0] self.assertEqual('aggregate', command.command_name) self.assertEqual([ {'$changeStream': expected_cs_stage}, {'$project': {'foo': 0}}], command.command['pipeline']) def test_full_pipeline(self): """$changeStream must be the first stage in a change stream pipeline sent to the server. """ self._test_full_pipeline({}) def test_iteration(self): with self.change_stream(batch_size=2) as change_stream: num_inserted = 10 self.watched_collection().insert_many( [{} for _ in range(num_inserted)]) inserts_received = 0 for change in change_stream: self.assertEqual(change['operationType'], 'insert') inserts_received += 1 if inserts_received == num_inserted: break self._test_invalidate_stops_iteration(change_stream) def _test_next_blocks(self, change_stream): inserted_doc = {'_id': ObjectId()} changes = [] t = threading.Thread( target=lambda: changes.append(change_stream.next())) t.start() # Sleep for a bit to prove that the call to next() blocks. time.sleep(1) self.assertTrue(t.is_alive()) self.assertFalse(changes) self.watched_collection().insert_one(inserted_doc) # Join with large timeout to give the server time to return the change, # in particular for shard clusters. 
t.join(30) self.assertFalse(t.is_alive()) self.assertEqual(1, len(changes)) self.assertEqual(changes[0]['operationType'], 'insert') self.assertEqual(changes[0]['fullDocument'], inserted_doc) def test_next_blocks(self): """Test that next blocks until a change is readable""" # Use a short await time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: self._test_next_blocks(change_stream) def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.watched_collection().aggregate( [{'$changeStream': {}}], maxAwaitTimeMS=250) as change_stream: self._test_next_blocks(change_stream) def test_concurrent_close(self): """Ensure a ChangeStream can be closed from another thread.""" # Use a short await time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: def iterate_cursor(): for _ in change_stream: pass t = threading.Thread(target=iterate_cursor) t.start() self.watched_collection().insert_one({}) time.sleep(1) change_stream.close() t.join(3) self.assertFalse(t.is_alive()) def test_unknown_full_document(self): """Must rely on the server to raise an error on unknown fullDocument. """ try: with self.change_stream(full_document='notValidatedByPyMongo'): pass except OperationFailure: pass def test_change_operations(self): """Test each operation type.""" expected_ns = {'db': self.watched_collection().database.name, 'coll': self.watched_collection().name} with self.change_stream() as change_stream: # Insert. inserted_doc = {'_id': ObjectId(), 'foo': 'bar'} self.watched_collection().insert_one(inserted_doc) change = change_stream.next() self.assertTrue(change['_id']) self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['ns'], expected_ns) self.assertEqual(change['fullDocument'], inserted_doc) # Update. 
update_spec = {'$set': {'new': 1}, '$unset': {'foo': 1}} self.watched_collection().update_one(inserted_doc, update_spec) change = change_stream.next() self.assertTrue(change['_id']) self.assertEqual(change['operationType'], 'update') self.assertEqual(change['ns'], expected_ns) self.assertNotIn('fullDocument', change) expected_update_description = { 'updatedFields': {'new': 1}, 'removedFields': ['foo']} if client_context.version.at_least(4, 5, 0): expected_update_description['truncatedArrays'] = [] self.assertEqual(expected_update_description, change['updateDescription']) # Replace. self.watched_collection().replace_one({'new': 1}, {'foo': 'bar'}) change = change_stream.next() self.assertTrue(change['_id']) self.assertEqual(change['operationType'], 'replace') self.assertEqual(change['ns'], expected_ns) self.assertEqual(change['fullDocument'], inserted_doc) # Delete. self.watched_collection().delete_one({'foo': 'bar'}) change = change_stream.next() self.assertTrue(change['_id']) self.assertEqual(change['operationType'], 'delete') self.assertEqual(change['ns'], expected_ns) self.assertNotIn('fullDocument', change) # Invalidate. self._test_get_invalidate_event(change_stream) @client_context.require_version_min(4, 1, 1) def test_start_after(self): resume_token = self.get_resume_token(invalidate=True) # resume_after cannot resume after invalidate. with self.assertRaises(OperationFailure): self.change_stream(resume_after=resume_token) # start_after can resume after invalidate. 
with self.change_stream(start_after=resume_token) as change_stream: self.watched_collection().insert_one({'_id': 2}) change = change_stream.next() self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['fullDocument'], {'_id': 2}) @client_context.require_version_min(4, 1, 1) def test_start_after_resume_process_with_changes(self): resume_token = self.get_resume_token(invalidate=True) with self.change_stream(start_after=resume_token, max_await_time_ms=250) as change_stream: self.watched_collection().insert_one({'_id': 2}) change = change_stream.next() self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['fullDocument'], {'_id': 2}) self.assertIsNone(change_stream.try_next()) self.kill_change_stream_cursor(change_stream) self.watched_collection().insert_one({'_id': 3}) change = change_stream.next() self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['fullDocument'], {'_id': 3}) @client_context.require_no_mongos # Remove after SERVER-41196 @client_context.require_version_min(4, 1, 1) def test_start_after_resume_process_without_changes(self): resume_token = self.get_resume_token(invalidate=True) with self.change_stream(start_after=resume_token, max_await_time_ms=250) as change_stream: self.assertIsNone(change_stream.try_next()) self.kill_change_stream_cursor(change_stream) self.watched_collection().insert_one({'_id': 2}) change = change_stream.next() self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['fullDocument'], {'_id': 2}) class ProseSpecTestsMixin(object): def _client_with_listener(self, *commands): listener = WhiteListEventListener(*commands) client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) return client, listener def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3): self.watched_collection().insert_many( [{"data": k} for k in range(batch_size)]) for _ in range(batch_size): change = next(change_stream) return 
change def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that don't support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None.""" if previous_change is None: agg_cmd = listener.results['started'][0] stage = agg_cmd.command["pipeline"][0]["$changeStream"] return stage.get("resumeAfter") or stage.get("startAfter") return previous_change['_id'] def _get_expected_resume_token(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None. Assumes listener is a WhiteListEventListener that listens for aggregate and getMore commands.""" if previous_change is None or stream._cursor._has_next(): token = self._get_expected_resume_token_legacy( stream, listener, previous_change) if token is not None: return token response = listener.results['succeeded'][-1].reply return response['cursor']['postBatchResumeToken'] def _test_raises_error_on_missing_id(self, expected_exception): """ChangeStream will raise an exception if the server response is missing the resume token. """ with self.change_stream([{'$project': {'_id': 0}}]) as change_stream: self.watched_collection().insert_one({}) with self.assertRaises(expected_exception): next(change_stream) # The cursor should now be closed. 
with self.assertRaises(StopIteration): next(change_stream) def _test_update_resume_token(self, expected_rt_getter): """ChangeStream must continuously track the last seen resumeToken.""" client, listener = self._client_with_listener("aggregate", "getMore") coll = self.watched_collection(write_concern=WriteConcern('majority')) with self.change_stream_with_client(client) as change_stream: self.assertEqual( change_stream.resume_token, expected_rt_getter(change_stream, listener)) for _ in range(3): coll.insert_one({}) change = next(change_stream) self.assertEqual( change_stream.resume_token, expected_rt_getter(change_stream, listener, change)) # Prose test no. 1 @client_context.require_version_min(4, 0, 7) def test_update_resume_token(self): self._test_update_resume_token(self._get_expected_resume_token) # Prose test no. 1 @client_context.require_version_max(4, 0, 7) def test_update_resume_token_legacy(self): self._test_update_resume_token(self._get_expected_resume_token_legacy) # Prose test no. 2 @client_context.require_version_max(4, 3, 3) # PYTHON-2120 @client_context.require_version_min(4, 1, 8) def test_raises_error_on_missing_id_418plus(self): # Server returns an error on 4.1.8+ self._test_raises_error_on_missing_id(OperationFailure) # Prose test no. 2 @client_context.require_version_max(4, 1, 8) def test_raises_error_on_missing_id_418minus(self): # PyMongo raises an error self._test_raises_error_on_missing_id(InvalidOperation) # Prose test no. 3 def test_resume_on_error(self): with self.change_stream() as change_stream: self.insert_one_and_check(change_stream, {'_id': 1}) # Cause a cursor not found error on the next getMore. self.kill_change_stream_cursor(change_stream) self.insert_one_and_check(change_stream, {'_id': 2}) # Prose test no. 4 @client_context.require_failCommand_fail_point def test_no_resume_attempt_if_aggregate_command_fails(self): # Set non-retryable error on aggregate command. 
fail_point = {'mode': {'times': 1}, 'data': {'errorCode': 2, 'failCommands': ['aggregate']}} client, listener = self._client_with_listener("aggregate", "getMore") with self.fail_point(fail_point): try: _ = self.change_stream_with_client(client) except OperationFailure: pass # Driver should have attempted aggregate command only once. self.assertEqual(len(listener.results['started']), 1) self.assertEqual(listener.results['started'][0].command_name, 'aggregate') # Prose test no. 5 - REMOVED # Prose test no. 6 - SKIPPED # Reason: readPreference is not configurable using the watch() helpers # so we can skip this test. Also, PyMongo performs server selection for # each operation which ensure compliance with this prose test. # Prose test no. 7 def test_initial_empty_batch(self): with self.change_stream() as change_stream: # The first batch should be empty. self.assertFalse(change_stream._cursor._has_next()) cursor_id = change_stream._cursor.cursor_id self.assertTrue(cursor_id) self.insert_one_and_check(change_stream, {}) # Make sure we're still using the same cursor. self.assertEqual(cursor_id, change_stream._cursor.cursor_id) # Prose test no. 8 def test_kill_cursors(self): def raise_error(): raise ServerSelectionTimeoutError('mock error') with self.change_stream() as change_stream: self.insert_one_and_check(change_stream, {'_id': 1}) # Cause a cursor not found error on the next getMore. cursor = change_stream._cursor self.kill_change_stream_cursor(change_stream) cursor.close = raise_error self.insert_one_and_check(change_stream, {'_id': 2}) # Prose test no. 
9 @client_context.require_version_min(4, 0, 0) @client_context.require_version_max(4, 0, 7) def test_start_at_operation_time_caching(self): # Case 1: change stream not started with startAtOperationTime client, listener = self.client_with_listener("aggregate") with self.change_stream_with_client(client) as cs: self.kill_change_stream_cursor(cs) cs.try_next() cmd = listener.results['started'][-1].command self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get( "startAtOperationTime")) # Case 2: change stream started with startAtOperationTime listener.results.clear() optime = self.get_start_at_operation_time() with self.change_stream_with_client( client, start_at_operation_time=optime) as cs: self.kill_change_stream_cursor(cs) cs.try_next() cmd = listener.results['started'][-1].command self.assertEqual(cmd["pipeline"][0]["$changeStream"].get( "startAtOperationTime"), optime, str([k.command for k in listener.results['started']])) # Prose test no. 10 - SKIPPED # This test is identical to prose test no. 3. # Prose test no. 11 @client_context.require_version_min(4, 0, 7) def test_resumetoken_empty_batch(self): client, listener = self._client_with_listener("getMore") with self.change_stream_with_client(client) as change_stream: self.assertIsNone(change_stream.try_next()) resume_token = change_stream.resume_token response = listener.results['succeeded'][0].reply self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 11 @client_context.require_version_min(4, 0, 7) def test_resumetoken_exhausted_batch(self): client, listener = self._client_with_listener("getMore") with self.change_stream_with_client(client) as change_stream: self._populate_and_exhaust_change_stream(change_stream) resume_token = change_stream.resume_token response = listener.results['succeeded'][-1].reply self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 
12 @client_context.require_version_max(4, 0, 7) def test_resumetoken_empty_batch_legacy(self): resume_point = self.get_resume_token() # Empty resume token when neither resumeAfter or startAfter specified. with self.change_stream() as change_stream: change_stream.try_next() self.assertIsNone(change_stream.resume_token) # Resume token value is same as resumeAfter. with self.change_stream(resume_after=resume_point) as change_stream: change_stream.try_next() resume_token = change_stream.resume_token self.assertEqual(resume_token, resume_point) # Prose test no. 12 @client_context.require_version_max(4, 0, 7) def test_resumetoken_exhausted_batch_legacy(self): # Resume token is _id of last change. with self.change_stream() as change_stream: change = self._populate_and_exhaust_change_stream(change_stream) self.assertEqual(change_stream.resume_token, change["_id"]) resume_point = change['_id'] # Resume token is _id of last change even if resumeAfter is specified. with self.change_stream(resume_after=resume_point) as change_stream: change = self._populate_and_exhaust_change_stream(change_stream) self.assertEqual(change_stream.resume_token, change["_id"]) # Prose test no. 13 def test_resumetoken_partially_iterated_batch(self): # When batch has been iterated up to but not including the last element. # Resume token should be _id of previous change document. with self.change_stream() as change_stream: self.watched_collection( write_concern=WriteConcern('majority')).insert_many( [{"data": k} for k in range(3)]) for _ in range(2): change = next(change_stream) resume_token = change_stream.resume_token self.assertEqual(resume_token, change["_id"]) def _test_resumetoken_uniterated_nonempty_batch(self, resume_option): # When the batch is not empty and hasn't been iterated at all. # Resume token should be same as the resume option used. resume_point = self.get_resume_token() # Insert some documents so that firstBatch isn't empty. 
self.watched_collection( write_concern=WriteConcern("majority")).insert_many( [{'a': 1}, {'b': 2}, {'c': 3}]) # Resume token should be same as the resume option. with self.change_stream( **{resume_option: resume_point}) as change_stream: self.assertTrue(change_stream._cursor._has_next()) resume_token = change_stream.resume_token self.assertEqual(resume_token, resume_point) # Prose test no. 14 @client_context.require_no_mongos def test_resumetoken_uniterated_nonempty_batch_resumeafter(self): self._test_resumetoken_uniterated_nonempty_batch("resume_after") # Prose test no. 14 @client_context.require_no_mongos @client_context.require_version_min(4, 1, 1) def test_resumetoken_uniterated_nonempty_batch_startafter(self): self._test_resumetoken_uniterated_nonempty_batch("start_after") # Prose test no. 17 @client_context.require_version_min(4, 1, 1) def test_startafter_resume_uses_startafter_after_empty_getMore(self): # Resume should use startAfter after no changes have been returned. resume_point = self.get_resume_token() client, listener = self._client_with_listener("aggregate") with self.change_stream_with_client( client, start_after=resume_point) as change_stream: self.assertFalse(change_stream._cursor._has_next()) # No changes change_stream.try_next() # No changes self.kill_change_stream_cursor(change_stream) change_stream.try_next() # Resume attempt response = listener.results['started'][-1] self.assertIsNone( response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) self.assertIsNotNone( response.command["pipeline"][0]["$changeStream"].get("startAfter")) # Prose test no. 18 @client_context.require_version_min(4, 1, 1) def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): # Resume should use resumeAfter after some changes have been returned. 
resume_point = self.get_resume_token() client, listener = self._client_with_listener("aggregate") with self.change_stream_with_client( client, start_after=resume_point) as change_stream: self.assertFalse(change_stream._cursor._has_next()) # No changes self.watched_collection().insert_one({}) next(change_stream) # Changes self.kill_change_stream_cursor(change_stream) change_stream.try_next() # Resume attempt response = listener.results['started'][-1] self.assertIsNotNone( response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) self.assertIsNone( response.command["pipeline"][0]["$changeStream"].get("startAfter")) class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): @classmethod @client_context.require_version_min(4, 0, 0, -1) @client_context.require_no_mmap @client_context.require_no_standalone def setUpClass(cls): super(TestClusterChangeStream, cls).setUpClass() cls.dbs = [cls.db, cls.client.pymongo_test_2] @classmethod def tearDownClass(cls): for db in cls.dbs: cls.client.drop_database(db) super(TestClusterChangeStream, cls).tearDownClass() def change_stream_with_client(self, client, *args, **kwargs): return client.watch(*args, **kwargs) def generate_invalidate_event(self, change_stream): self.skipTest("cluster-level change streams cannot be invalidated") def _test_get_invalidate_event(self, change_stream): # Cluster-level change streams don't get invalidated. pass def _test_invalidate_stops_iteration(self, change_stream): # Cluster-level change streams don't get invalidated. 
pass def _insert_and_check(self, change_stream, db, collname, doc): coll = db[collname] coll.insert_one(doc) change = next(change_stream) self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['ns'], {'db': db.name, 'coll': collname}) self.assertEqual(change['fullDocument'], doc) def insert_one_and_check(self, change_stream, doc): db = random.choice(self.dbs) collname = self.id() self._insert_and_check(change_stream, db, collname, doc) def test_simple(self): collnames = self.generate_unique_collnames(3) with self.change_stream() as change_stream: for db, collname in product(self.dbs, collnames): self._insert_and_check( change_stream, db, collname, {'_id': collname} ) def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.client.admin.aggregate( [{'$changeStream': {'allChangesForCluster': True}}], maxAwaitTimeMS=250) as change_stream: self._test_next_blocks(change_stream) def test_full_pipeline(self): """$changeStream must be the first stage in a change stream pipeline sent to the server. """ self._test_full_pipeline({'allChangesForCluster': True}) class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): @classmethod @client_context.require_version_min(4, 0, 0, -1) @client_context.require_no_mmap @client_context.require_no_standalone def setUpClass(cls): super(TestDatabaseChangeStream, cls).setUpClass() def change_stream_with_client(self, client, *args, **kwargs): return client[self.db.name].watch(*args, **kwargs) def generate_invalidate_event(self, change_stream): # Dropping the database invalidates the change stream. change_stream._client.drop_database(self.db.name) def _test_get_invalidate_event(self, change_stream): # Cache collection names. dropped_colls = self.db.list_collection_names() # Drop the watched database to get an invalidate event. 
self.generate_invalidate_event(change_stream) change = change_stream.next() # 4.1+ returns "drop" events for each collection in dropped database # and a "dropDatabase" event for the database itself. if change['operationType'] == 'drop': self.assertTrue(change['_id']) for _ in range(len(dropped_colls)): ns = change['ns'] self.assertEqual(ns['db'], change_stream._target.name) self.assertIn(ns['coll'], dropped_colls) change = change_stream.next() self.assertEqual(change['operationType'], 'dropDatabase') self.assertTrue(change['_id']) self.assertEqual(change['ns'], {'db': change_stream._target.name}) # Get next change. change = change_stream.next() self.assertTrue(change['_id']) self.assertEqual(change['operationType'], 'invalidate') self.assertNotIn('ns', change) self.assertNotIn('fullDocument', change) # The ChangeStream should be dead. with self.assertRaises(StopIteration): change_stream.next() def _test_invalidate_stops_iteration(self, change_stream): # Drop the watched database to get an invalidate event. change_stream._client.drop_database(self.db.name) # Check drop and dropDatabase events. for change in change_stream: self.assertIn(change['operationType'], ( 'drop', 'dropDatabase', 'invalidate')) # Last change must be invalidate. self.assertEqual(change['operationType'], 'invalidate') # Change stream must not allow further iteration. 
with self.assertRaises(StopIteration): change_stream.next() with self.assertRaises(StopIteration): next(change_stream) def _insert_and_check(self, change_stream, collname, doc): coll = self.db[collname] coll.insert_one(doc) change = next(change_stream) self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['ns'], {'db': self.db.name, 'coll': collname}) self.assertEqual(change['fullDocument'], doc) def insert_one_and_check(self, change_stream, doc): self._insert_and_check(change_stream, self.id(), doc) def test_simple(self): collnames = self.generate_unique_collnames(3) with self.change_stream() as change_stream: for collname in collnames: self._insert_and_check( change_stream, collname, {'_id': uuid.uuid4()}) def test_isolation(self): # Ensure inserts to other dbs don't show up in our ChangeStream. other_db = self.client.pymongo_test_temp self.assertNotEqual( other_db, self.db, msg="Isolation must be tested on separate DBs") collname = self.id() with self.change_stream() as change_stream: other_db[collname].insert_one({'_id': uuid.uuid4()}) self._insert_and_check( change_stream, collname, {'_id': uuid.uuid4()}) self.client.drop_database(other_db) class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecTestsMixin): @classmethod @client_context.require_version_min(3, 5, 11) @client_context.require_no_mmap @client_context.require_no_standalone def setUpClass(cls): super(TestCollectionChangeStream, cls).setUpClass() def setUp(self): # Use a new collection for each test. self.watched_collection().drop() self.watched_collection().insert_one({}) def change_stream_with_client(self, client, *args, **kwargs): return client[self.db.name].get_collection( self.watched_collection().name).watch(*args, **kwargs) def generate_invalidate_event(self, change_stream): # Dropping the collection invalidates the change stream. 
change_stream._target.drop() def _test_invalidate_stops_iteration(self, change_stream): self.generate_invalidate_event(change_stream) # Check drop and dropDatabase events. for change in change_stream: self.assertIn(change['operationType'], ('drop', 'invalidate')) # Last change must be invalidate. self.assertEqual(change['operationType'], 'invalidate') # Change stream must not allow further iteration. with self.assertRaises(StopIteration): change_stream.next() with self.assertRaises(StopIteration): next(change_stream) def _test_get_invalidate_event(self, change_stream): # Drop the watched database to get an invalidate event. change_stream._target.drop() change = change_stream.next() # 4.1+ returns a "drop" change document. if change['operationType'] == 'drop': self.assertTrue(change['_id']) self.assertEqual(change['ns'], { 'db': change_stream._target.database.name, 'coll': change_stream._target.name}) # Last change should be invalidate. change = change_stream.next() self.assertTrue(change['_id']) self.assertEqual(change['operationType'], 'invalidate') self.assertNotIn('ns', change) self.assertNotIn('fullDocument', change) # The ChangeStream should be dead. 
with self.assertRaises(StopIteration): change_stream.next() def insert_one_and_check(self, change_stream, doc): self.watched_collection().insert_one(doc) change = next(change_stream) self.assertEqual(change['operationType'], 'insert') self.assertEqual( change['ns'], {'db': self.watched_collection().database.name, 'coll': self.watched_collection().name}) self.assertEqual(change['fullDocument'], doc) def test_raw(self): """Test with RawBSONDocument.""" raw_coll = self.watched_collection( codec_options=DEFAULT_RAW_BSON_OPTIONS) with raw_coll.watch() as change_stream: raw_doc = RawBSONDocument(encode({'_id': 1})) self.watched_collection().insert_one(raw_doc) change = next(change_stream) self.assertIsInstance(change, RawBSONDocument) self.assertEqual(change['operationType'], 'insert') self.assertEqual( change['ns']['db'], self.watched_collection().database.name) self.assertEqual( change['ns']['coll'], self.watched_collection().name) self.assertEqual(change['fullDocument'], raw_doc) def test_uuid_representations(self): """Test with uuid document _ids and different uuid_representation.""" for uuid_representation in ALL_UUID_REPRESENTATIONS: for id_subtype in (STANDARD, PYTHON_LEGACY): options = self.watched_collection().codec_options.with_options( uuid_representation=uuid_representation) coll = self.watched_collection(codec_options=options) with coll.watch() as change_stream: coll.insert_one( {'_id': Binary(uuid.uuid4().bytes, id_subtype)}) _ = change_stream.next() resume_token = change_stream.resume_token # Should not error. 
coll.watch(resume_after=resume_token) def test_document_id_order(self): """Test with document _ids that need their order preserved.""" random_keys = random.sample(string.ascii_letters, len(string.ascii_letters)) random_doc = {'_id': SON([(key, key) for key in random_keys])} for document_class in (dict, SON, RawBSONDocument): options = self.watched_collection().codec_options.with_options( document_class=document_class) coll = self.watched_collection(codec_options=options) with coll.watch() as change_stream: coll.insert_one(random_doc) _ = change_stream.next() resume_token = change_stream.resume_token # The resume token is always a document. self.assertIsInstance(resume_token, document_class) # Should not error. coll.watch(resume_after=resume_token) coll.delete_many({}) def test_read_concern(self): """Test readConcern is not validated by the driver.""" # Read concern 'local' is not allowed for $changeStream. coll = self.watched_collection(read_concern=ReadConcern('local')) with self.assertRaises(OperationFailure): coll.watch() # Does not error. 
coll = self.watched_collection(read_concern=ReadConcern('majority')) with coll.watch(): pass class TestAllScenarios(unittest.TestCase): @classmethod @client_context.require_connection def setUpClass(cls): cls.listener = WhiteListEventListener("aggregate", "getMore") cls.client = rs_or_single_client(event_listeners=[cls.listener]) @classmethod def tearDownClass(cls): cls.client.close() def setUp(self): self.listener.results.clear() def setUpCluster(self, scenario_dict): assets = [(scenario_dict["database_name"], scenario_dict["collection_name"]), (scenario_dict.get("database2_name", "db2"), scenario_dict.get("collection2_name", "coll2"))] for db, coll in assets: self.client.drop_database(db) self.client[db].create_collection(coll) def setFailPoint(self, scenario_dict): fail_point = scenario_dict.get("failPoint") if fail_point is None: return elif not client_context.test_commands_enabled: self.skipTest("Test commands must be enabled") fail_cmd = SON([('configureFailPoint', 'failCommand')]) fail_cmd.update(fail_point) client_context.client.admin.command(fail_cmd) self.addCleanup( client_context.client.admin.command, 'configureFailPoint', fail_cmd['configureFailPoint'], mode='off') def assert_list_contents_are_subset(self, superlist, sublist): """Check that each element in sublist is a subset of the corresponding element in superlist.""" self.assertEqual(len(superlist), len(sublist)) for sup, sub in zip(superlist, sublist): if isinstance(sub, dict): self.assert_dict_is_subset(sup, sub) continue if isinstance(sub, (list, tuple)): self.assert_list_contents_are_subset(sup, sub) continue self.assertEqual(sup, sub) def assert_dict_is_subset(self, superdict, subdict): """Check that subdict is a subset of superdict.""" exempt_fields = ["documentKey", "_id", "getMore"] for key, value in subdict.items(): if key not in superdict: self.fail('Key %s not found in %s' % (key, superdict)) if isinstance(value, dict): self.assert_dict_is_subset(superdict[key], value) continue if 
isinstance(value, (list, tuple)): self.assert_list_contents_are_subset(superdict[key], value) continue if key in exempt_fields: # Only check for presence of these exempt fields, but not value. self.assertIn(key, superdict) else: self.assertEqual(superdict[key], value) def check_event(self, event, expectation_dict): if event is None: self.fail() for key, value in expectation_dict.items(): if isinstance(value, dict): self.assert_dict_is_subset(getattr(event, key), value) else: self.assertEqual(getattr(event, key), value) def tearDown(self): self.listener.results.clear() _TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'change_streams' ) def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() def get_change_stream(client, scenario_def, test): # Get target namespace on which to instantiate change stream target = test["target"] if target == "collection": db = client.get_database(scenario_def["database_name"]) cs_target = db.get_collection(scenario_def["collection_name"]) elif target == "database": cs_target = client.get_database(scenario_def["database_name"]) elif target == "client": cs_target = client else: raise ValueError("Invalid target in spec") # Construct change stream kwargs dict cs_pipeline = test["changeStreamPipeline"] options = test["changeStreamOptions"] cs_options = {} for key, value in options.items(): cs_options[camel_to_snake(key)] = value # Create and return change stream return cs_target.watch(pipeline=cs_pipeline, **cs_options) def run_operation(client, operation): # Apply specified operations opname = camel_to_snake(operation["name"]) arguments = operation.get("arguments", {}) if opname == 'rename': # Special case for rename operation. 
        arguments = {'new_name': arguments["to"]}
    # Look up the snake_cased method on the target collection and invoke it
    # with the arguments taken from the spec file.
    cmd = getattr(client.get_database(
        operation["database"]).get_collection(
        operation["collection"]), opname
    )
    return cmd(**arguments)


def create_test(scenario_def, test):
    """Return a test method that runs one change-streams spec scenario."""
    def run_scenario(self):
        # Set up
        self.setUpCluster(scenario_def)
        self.setFailPoint(test)
        is_error = test["result"].get("error", False)
        try:
            with get_change_stream(
                    self.client, scenario_def, test) as change_stream:
                for operation in test["operations"]:
                    # Run specified operations
                    run_operation(self.client, operation)
                num_expected_changes = len(test["result"].get("success", []))
                changes = [
                    change_stream.next() for _ in range(num_expected_changes)]
                # Run a next() to induce an error if one is expected and
                # there are no changes.
                if is_error and not changes:
                    change_stream.next()
        except OperationFailure as exc:
            # Only swallow the failure when the spec declares this scenario
            # errors; then pin the exact server error code.
            if not is_error:
                raise
            expected_code = test["result"]["error"]["code"]
            self.assertEqual(exc.code, expected_code)
        else:
            # Check for expected output from change streams
            if test["result"].get("success"):
                for change, expected_changes in zip(
                        changes, test["result"]["success"]):
                    self.assert_dict_is_subset(change, expected_changes)
                self.assertEqual(
                    len(changes), len(test["result"]["success"]))
        finally:
            # Check for expected events
            results = self.listener.results
            for idx, expectation in enumerate(test.get("expectations", [])):
                for event_type, event_desc in expectation.items():
                    # e.g. "command_started_event" -> results["started"]
                    results_key = event_type.split("_")[1]
                    event = (results[results_key][idx]
                             if len(results[results_key]) > idx else None)
                    self.check_event(event, event_desc)
    return run_scenario


def create_tests():
    """Generate one TestAllScenarios method per test case in the JSON specs."""
    for dirpath, _, filenames in os.walk(_TEST_PATH):
        dirname = os.path.split(dirpath)[-1]
        for filename in filenames:
            with open(os.path.join(dirpath, filename)) as scenario_stream:
                scenario_def = json_util.loads(scenario_stream.read())
            test_type = os.path.splitext(filename)[0]
            for test in scenario_def['tests']:
                new_test = create_test(scenario_def, test)
                # Apply server-version / topology requirements from the spec.
                new_test = client_context.require_no_mmap(new_test)
                if 'minServerVersion' in test:
                    min_ver = tuple(
                        int(elt) for elt in test['minServerVersion'].split('.'))
                    new_test = client_context.require_version_min(*min_ver)(
                        new_test)
                if 'maxServerVersion' in test:
                    max_ver = tuple(
                        int(elt) for elt in test['maxServerVersion'].split('.'))
                    new_test = client_context.require_version_max(*max_ver)(
                        new_test)
                topologies = test['topology']
                new_test = client_context.require_cluster_type(topologies)(
                    new_test)
                test_name = 'test_%s_%s_%s' % (
                    dirname, test_type.replace("-", "_"),
                    str(test['description'].replace(" ", "_")))
                new_test.__name__ = test_name
                setattr(TestAllScenarios, new_test.__name__, new_test)


create_tests()

if __name__ == '__main__':
    unittest.main()
# ==== file: test_key_cache_v2.py (concatenation boundary; was a bare name, a NameError at import) ====
"""Tests for ``sap.xssec.key_cache_v2`` (IAS JWKS fetching and caching)."""
import threading
from time import sleep
from typing import List, Callable

import pytest
from httpx import Response, HTTPStatusError

from sap.xssec.key_tools import jwk_to_pem
from tests.ias.ias_configs import JWKS, WELL_KNOWN
from tests.ias.ias_tokens import PAYLOAD, HEADER


def test_thread_safe_decorator():
    """``thread_safe_by_args`` must serialize calls that share identical args.

    Uses a deliberately racy read-sleep-write counter: without serialization,
    concurrent increments overwrite each other and the final total is wrong.
    NOTE(review): the two "not thread-safe" assertions rely on the race
    actually firing inside the 0.1 s sleep window — in principle flaky, in
    practice reliable because every thread sleeps before writing back.
    """
    sum = 0  # NOTE(review): shadows the builtin ``sum``; kept (doc-only pass)

    def add_to_sum(x: int):
        # Racy read-modify-write: read the counter, yield for 0.1 s, write back.
        nonlocal sum
        local_sum = sum
        sleep(0.1)
        sum = local_sum + x

    def run_func_in_threads(func: Callable[[int], None], func_args: List[int]):
        # Start one thread per argument, then wait for all of them to finish.
        threads = []
        for arg in func_args:
            t = threading.Thread(target=func, args=[arg])
            threads.append(t)
            t.start()
        for t in threads:
            t.join()

    # not thread-safe without decorator: lost updates leave sum below 10
    sum = 0
    run_func_in_threads(add_to_sum, [1]*10)
    assert 10 != sum

    # thread-safe when args are same: the decorator serializes same-arg calls
    sum = 0
    from sap.xssec.key_cache_v2 import thread_safe_by_args
    run_func_in_threads(thread_safe_by_args(add_to_sum), [1]*10)
    assert 10 == sum

    # not thread-safe when args are different: per the decorator's contract
    # (as asserted here), distinct args are not serialized against each other
    sum = 0
    run_func_in_threads(thread_safe_by_args(add_to_sum), list(range(1, 11)))
    assert 55 != sum


@pytest.fixture
def well_known_endpoint_mock(respx_mock):
    # Mock the OIDC discovery document of the token issuer.
    return respx_mock.get(PAYLOAD["iss"] + '/.well-known/openid-configuration').mock(
        return_value=Response(200, json=WELL_KNOWN))


@pytest.fixture
def jwk_endpoint_mock(respx_mock):
    # Mock the JWKS endpoint advertised by the discovery document.
    return respx_mock.get(WELL_KNOWN["jwks_uri"]).mock(return_value=Response(200, json=JWKS))


def test_get_verification_key_ias_should_return_key(well_known_endpoint_mock, jwk_endpoint_mock):
    """Fetches well-known + JWKS and returns the PEM for the token's ``kid``."""
    from sap.xssec.key_cache_v2 import get_verification_key_ias, key_cache
    key_cache.clear()
    pem_key = get_verification_key_ias(PAYLOAD["iss"], PAYLOAD["zone_uuid"], HEADER["kid"])
    assert well_known_endpoint_mock.called
    assert jwk_endpoint_mock.called
    # The returned PEM must match the JWKS entry with the same kid.
    jwk = next(filter(lambda k: k["kid"] == HEADER["kid"], JWKS["keys"]))
    assert jwk_to_pem(jwk) == pem_key


def test_get_verification_key_ias_should_cache_key(well_known_endpoint_mock, jwk_endpoint_mock):
    """Repeated lookups of the same key hit the cache, not the network."""
    from sap.xssec.key_cache_v2 import get_verification_key_ias, key_cache
    key_cache.clear()
    for _ in range(0, 10):
        get_verification_key_ias(PAYLOAD["iss"], PAYLOAD["zone_uuid"], HEADER["kid"])
    assert 1 == well_known_endpoint_mock.call_count
    assert 1 == jwk_endpoint_mock.call_count


def test_get_verification_key_ias_should_raise_http_error(respx_mock):
    """A 5xx from the discovery endpoint surfaces as ``HTTPStatusError``."""
    respx_mock.get(PAYLOAD["iss"] + '/.well-known/openid-configuration').mock(
        return_value=Response(500))
    from sap.xssec.key_cache_v2 import get_verification_key_ias, key_cache
    key_cache.clear()
    with pytest.raises(HTTPStatusError):
        get_verification_key_ias(PAYLOAD["iss"], PAYLOAD["zone_uuid"], HEADER["kid"])
# ==== file: impala_lite.py (concatenation boundary; was a bare name, a NameError at import) ====
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple, single-process IMPALA in JAX with Haiku.

This implementation is a simple, minimal implementation of IMPALA.
For a more full-fledged implementation, see examples/impala/README.md.

See: https://arxiv.org/abs/1802.01561
"""

import functools
import queue
import threading
from typing import Any, Callable, NamedTuple, Tuple

from absl import app
from absl import logging
from bsuite.environments import catch
import dm_env
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax


class Transition(NamedTuple):
  """One actor step: the timestep acted on, the action, and the actor logits."""
  timestep: dm_env.TimeStep
  action: int
  agent_out: Any


class SimpleNet(hk.Module):
  """A simple network."""

  def __init__(self, num_actions: int):
    super().__init__()
    self._num_actions = num_actions

  def __call__(
      self,
      timestep: dm_env.TimeStep,
  ) -> Tuple[jnp.ndarray, jnp.ndarray]:
    """Process a batch of observations; returns (policy_logits, baseline)."""
    torso = hk.Sequential([hk.Flatten(),
                           hk.Linear(128), jax.nn.relu,
                           hk.Linear(64), jax.nn.relu])
    hidden = torso(timestep.observation)
    policy_logits = hk.Linear(self._num_actions)(hidden)
    baseline = hk.Linear(1)(hidden)
    baseline = jnp.squeeze(baseline, axis=-1)
    return policy_logits, baseline


class Agent:
  """A simple, feed-forward agent."""

  def __init__(self, net_apply):
    self._net = net_apply
    self._discount = 0.99

  @functools.partial(jax.jit, static_argnums=0)
  def step(
      self,
      params: hk.Params,
      rng: jnp.ndarray,
      timestep: dm_env.TimeStep,
  ) -> Tuple[jnp.ndarray, jnp.ndarray]:
    """Steps on a single observation; returns (sampled action, logits)."""
    # Add a leading batch axis, run the net, then strip the axis again.
    timestep = jax.tree_map(lambda t: jnp.expand_dims(t, 0), timestep)
    logits, _ = self._net(params, timestep)
    logits = jnp.squeeze(logits, axis=0)
    action = hk.multinomial(rng, logits, num_samples=1)
    action = jnp.squeeze(action, axis=-1)
    return action, logits

  def loss(self, params: hk.Params, trajs: Transition) -> jnp.ndarray:
    """Computes a loss of trajs wrt params."""
    # Re-run the agent over the trajectories.
    # Due to https://github.com/google/jax/issues/1459, we use hk.BatchApply
    # instead of vmap.
    # BatchApply turns the input tensors from [T, B, ...] into [T*B, ...].
    # We `functools.partial` params in so it does not get transformed.
    net_curried = hk.BatchApply(functools.partial(self._net, params))
    learner_logits, baseline_with_bootstrap = net_curried(trajs.timestep)

    # Separate the bootstrap from the value estimates.
    baseline = baseline_with_bootstrap[:-1]
    baseline_tp1 = baseline_with_bootstrap[1:]

    # Remove bootstrap timestep from non-observations.
    _, actions, behavior_logits = jax.tree_map(lambda t: t[:-1], trajs)
    learner_logits = learner_logits[:-1]

    # Shift step_type/reward/discount back by one, so that actions match the
    # timesteps caused by the action.
    timestep = jax.tree_map(lambda t: t[1:], trajs.timestep)
    discount = timestep.discount * self._discount
    # The step is uninteresting if we transitioned LAST -> FIRST.
    mask = jnp.not_equal(timestep.step_type, int(dm_env.StepType.FIRST))
    mask = mask.astype(jnp.float32)

    # Compute v-trace returns.
    vtrace_td_error_and_advantage = jax.vmap(
        rlax.vtrace_td_error_and_advantage, in_axes=1, out_axes=1)
    rhos = rlax.categorical_importance_sampling_ratios(
        learner_logits, behavior_logits, actions)
    vtrace_returns = vtrace_td_error_and_advantage(
        baseline, baseline_tp1, timestep.reward, discount, rhos)

    # Note that we use mean here, rather than sum as in canonical IMPALA.
    # Compute policy gradient loss.
    pg_advantage = jax.lax.stop_gradient(vtrace_returns.pg_advantage)
    tb_pg_loss_fn = jax.vmap(rlax.policy_gradient_loss, in_axes=1, out_axes=0)
    pg_loss = tb_pg_loss_fn(learner_logits, actions, pg_advantage, mask)
    pg_loss = jnp.mean(pg_loss)

    # Baseline loss.
    bl_loss = 0.5 * jnp.mean(jnp.square(vtrace_returns.errors) * mask)

    # Entropy regularization.
    ent_loss_fn = jax.vmap(rlax.entropy_loss, in_axes=1, out_axes=0)
    ent_loss = ent_loss_fn(learner_logits, mask)
    ent_loss = jnp.mean(ent_loss)

    total_loss = pg_loss + 0.5 * bl_loss + 0.01 * ent_loss
    return total_loss


def preprocess_step(ts: dm_env.TimeStep) -> dm_env.TimeStep:
  """Canonicalizes a timestep so it can be stacked and fed to JAX.

  reward: None -> 0, discount: None -> 1,
  scalar -> np.array(), and StepType -> int.
  """
  if ts.reward is None:
    ts = ts._replace(reward=0.)
  if ts.discount is None:
    ts = ts._replace(discount=1.)
  return jax.tree_map(np.asarray, ts)


def run_actor(
    agent: Agent,
    rng_key: jnp.ndarray,
    get_params: Callable[[], hk.Params],
    enqueue_traj: Callable[[Transition], None],
    unroll_len: int,
    num_trajectories: int,
):
  """Runs an actor to produce num_trajectories trajectories."""
  env = catch.Catch()
  state = env.reset()
  traj = []

  for i in range(num_trajectories):
    params = get_params()
    # The first rollout is one step longer.
    for _ in range(unroll_len + int(i == 0)):
      rng_key, step_key = jax.random.split(rng_key)
      state = preprocess_step(state)
      action, logits = agent.step(params, step_key, state)
      transition = Transition(state, action, logits)
      traj.append(transition)
      state = env.step(action)

      if state.step_type == dm_env.StepType.LAST:
        logging.log_every_n(logging.INFO, 'Episode ended with reward: %s', 5,
                            state.reward)

    # Stack and send the trajectory.
    # FIX: `jax.tree_multimap` was deprecated and then removed from JAX;
    # `jax.tree_map` accepts multiple trees and is the drop-in replacement.
    stacked_traj = jax.tree_map(lambda *ts: np.stack(ts), *traj)
    enqueue_traj(stacked_traj)
    # Reset the trajectory, keeping the last timestep.
    traj = traj[-1:]


class Learner:
  """Slim wrapper around an agent/optimizer pair."""

  def __init__(self, agent: Agent, opt_update):
    self._agent = agent
    self._opt_update = opt_update

  @functools.partial(jax.jit, static_argnums=0)
  def update(
      self,
      params: hk.Params,
      opt_state: optax.OptState,
      trajs: Transition,
  ) -> Tuple[hk.Params, optax.OptState]:
    """One gradient step on the IMPALA loss; returns (params, opt_state)."""
    g = jax.grad(self._agent.loss)(params, trajs)
    updates, new_opt_state = self._opt_update(g, opt_state)
    return optax.apply_updates(params, updates), new_opt_state


def run(*, trajectories_per_actor, num_actors, unroll_len):
  """Runs the example."""

  # Construct the agent network. We need a sample environment for its spec.
  env = catch.Catch()
  num_actions = env.action_spec().num_values
  net = hk.without_apply_rng(
      hk.transform(lambda ts: SimpleNet(num_actions)(ts)))  # pylint: disable=unnecessary-lambda

  # Construct the agent and learner.
  agent = Agent(net.apply)
  opt = optax.rmsprop(5e-3, decay=0.99, eps=1e-7)
  learner = Learner(agent, opt.update)

  # Initialize the optimizer state.
  sample_ts = env.reset()
  sample_ts = preprocess_step(sample_ts)
  ts_with_batch = jax.tree_map(lambda t: np.expand_dims(t, 0), sample_ts)
  params = jax.jit(net.init)(jax.random.PRNGKey(428), ts_with_batch)
  opt_state = opt.init(params)

  # Create accessor and queueing functions.
  current_params = lambda: params
  batch_size = 2
  q = queue.Queue(maxsize=batch_size)

  def dequeue():
    """Pops batch_size trajectories and stacks them onto a new batch axis."""
    batch = []
    for _ in range(batch_size):
      batch.append(q.get())
    # FIX: multi-tree `jax.tree_map` replaces the removed `jax.tree_multimap`;
    # stacking on axis=1 yields [T, B, ...] tensors for the learner.
    batch = jax.tree_map(lambda *ts: np.stack(ts, axis=1), *batch)
    return jax.device_put(batch)

  # Start the actors.
  for i in range(num_actors):
    key = jax.random.PRNGKey(i)
    args = (agent, key, current_params, q.put, unroll_len,
            trajectories_per_actor)
    threading.Thread(target=run_actor, args=args).start()

  # Run the learner.
  num_steps = num_actors * trajectories_per_actor // batch_size
  for i in range(num_steps):
    traj = dequeue()
    params, opt_state = learner.update(params, opt_state, traj)


def main(_):
  """Entry point: train on Catch with two actors."""
  run(trajectories_per_actor=500, num_actors=2, unroll_len=20)


if __name__ == '__main__':
  app.run(main)
# ==== file: qt.py (concatenation boundary; was a bare name, a NameError at import) ====
"""Qt GUI layer for a TrustedCoin-style two-factor (2FA) co-signing plugin."""
from .tc_plugins import TCPlugin
from .tools import TOS, HandlerTwoFactor
from .tc_requests import tc_requests

import os
import sys
import threading
from functools import partial

from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QVBoxLayout, QLabel, QGridLayout, QHBoxLayout,
                             QRadioButton, QCheckBox, QLineEdit)

from electrum.plugin import hook
from electrum.i18n import _
from electrum.util import is_valid_email
from electrum.gui.qt.util import (read_QIcon, WindowModalDialog, WaitingDialog,
                                  OkButton, CancelButton, Buttons, icon_path,
                                  WWLabel, CloseButton, EnterButton, HelpLabel)
from electrum.gui.qt.amountedit import AmountEdit
from electrum.gui.qt.qrcodewidget import QRCodeWidget
from electrum.gui.qt.installwizard import InstallWizard
from electrum.gui.qt.main_window import StatusBarButton
from electrum.base_wizard import GoBack


class Plugin(TCPlugin):
    """Qt front-end for the two-factor co-signing plugin.

    Non-GUI logic (key creation, OTP checks, billing requests) lives in the
    ``TCPlugin`` base class; this class only builds dialogs / wizard pages
    and wires them into the Electrum Qt GUI.
    """

    def __init__(self, parent, config, name):
        super().__init__(parent, config, name)

    def requires_settings(self):
        # Return True to add a Settings button.
        return True

    def settings_widget(self, window):
        # Return a button that when pressed presents a settings dialog.
        return EnterButton(_('Settings'), partial(self.settings_dialog, window))

    def settings_dialog(self, window):
        # Show a (currently mostly empty) modal dialog.
        # NOTE(review): despite the legacy comment "Return a settings dialog",
        # this shows the dialog and returns None.
        d = WindowModalDialog(window, _("Email settings"))
        vbox = QVBoxLayout(d)
        d.setMinimumSize(500, 200)
        vbox.addStretch()
        vbox.addLayout(Buttons(CloseButton(d), OkButton(d)))
        d.show()

    def accept_terms_of_use(self, window):
        """Wizard page: show the ToS, collect an e-mail, create the remote key.

        Blocks in ``window.exec_layout`` until the user accepts; the Accept
        button is enabled only once the ToS text has arrived AND the e-mail
        address validates.
        """
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Terms of Service")))
        tos_e = TOS()
        tos_e.setReadOnly(True)
        vbox.addWidget(tos_e)
        tos_received = False
        vbox.addWidget(QLabel(_("Please enter your e-mail address")))
        email_e = QLineEdit()
        vbox.addWidget(email_e)
        next_button = window.next_button
        prior_button_text = next_button.text()
        next_button.setText(_('Accept'))

        def request_TOS():
            # Runs on a worker thread; publishes the result via tos_e's
            # signal so the GUI update happens on the Qt main thread.
            # NOTE(review): placeholder ToS text — presumably fetched from the
            # server in the real flow; confirm before shipping.
            self.TOS = '====================================ABC'
            tos_e.tos_signal.emit()

        def on_result():
            tos_e.setText(self.TOS)
            nonlocal tos_received
            tos_received = True
            set_enabled()

        def on_error(msg):
            window.show_error(str(msg))
            window.terminate()

        def set_enabled():
            # Accept requires both the ToS and a syntactically valid e-mail.
            next_button.setEnabled(tos_received and is_valid_email(email_e.text()))

        # Connect the signals BEFORE starting the worker thread.
        tos_e.tos_signal.connect(on_result)
        tos_e.error_signal.connect(on_error)
        t = threading.Thread(target=request_TOS)
        # NOTE(review): setDaemon() is deprecated; prefer `t.daemon = True`
        # (left unchanged in this documentation-only pass).
        t.setDaemon(True)
        t.start()
        email_e.textChanged.connect(set_enabled)
        email_e.setFocus(True)
        window.exec_layout(vbox, next_enabled=False)
        next_button.setText(prior_button_text)
        email = str(email_e.text())
        self.create_remote_key(email, window)

    def request_otp_dialog(self, window, short_id, otp_secret, xpub3):
        """Wizard page asking for the Google Authenticator (HOTP) code.

        When ``otp_secret`` is given, also shows the otpauth:// provisioning
        QR code; when it is None the wallet is already registered and only
        the code (or the "lost authenticator" checkbox) is requested.
        Finishes by delegating verification to ``self.check_otp``.
        """
        vbox = QVBoxLayout()
        if otp_secret is not None:
            uri = "otpauth://hotp/%s?secret=%s" % ('探诚科技', otp_secret)
            l = QLabel(
                "Please scan the following QR code in Google Authenticator. You may as well use the following key: %s" % otp_secret)
            l.setWordWrap(True)
            vbox.addWidget(l)
            qrw = QRCodeWidget(uri)
            vbox.addWidget(qrw, 1)
            msg = _('Then, enter your Google Authenticator code:')
        else:
            label = QLabel(
                "This wallet is already registered with TrustedCoin. "
                "To finalize wallet creation, please enter your Google Authenticator Code. "
            )
            label.setWordWrap(1)
            vbox.addWidget(label)
            msg = _('Google Authenticator code:')
        hbox = QHBoxLayout()
        hbox.addWidget(WWLabel(msg))
        pw = AmountEdit(None, is_int=True)
        pw.setFocus(True)
        pw.setMaximumWidth(50)
        hbox.addWidget(pw)
        vbox.addLayout(hbox)
        cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
        cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
        vbox.addWidget(cb_lost)
        cb_lost.setVisible(otp_secret is None)

        def set_enabled():
            # Next is allowed for a full 6-digit code, or when recovery is
            # requested via the "lost authenticator" checkbox.
            b = True if cb_lost.isChecked() else len(pw.text()) == 6
            window.next_button.setEnabled(b)

        pw.textChanged.connect(set_enabled)
        cb_lost.toggled.connect(set_enabled)
        window.exec_layout(vbox, next_enabled=False, raise_on_cancel=False)
        self.check_otp(window, short_id, otp_secret, xpub3, pw.get_amount(), cb_lost.isChecked())

    @hook
    def abort_send(self, window):
        """
        Called when the abort dialog is displayed prior to broadcasting a
        transaction.

        Returns True (abort the send) when billing info still has to be
        fetched from the server; returns False/None when sending may proceed.

        Args:
            window: electrum_gui.qt.main_window.ElectrumWindow
        """
        wallet = window.wallet
        if not isinstance(wallet, self.wallet_class):
            return
        if wallet.can_sign_without_server():
            return
        if wallet.billing_info is None:
            # Billing info is fetched asynchronously; abort this send.
            self.waiting_dialog_for_billing_info(window)
            return True
        return False

    def waiting_dialog_for_billing_info(self, window, *, on_finished=None):
        """Fetch billing info in the background behind a waiting dialog."""
        def task():
            return self.request_billing_info(window.wallet, suppress_connection_error=False)

        def on_error(exc_info):
            e = exc_info[1]
            window.show_error("{header}\n{exc}\n\n{tor}"
                              .format(header=_('Error getting TrustedCoin account info.'),
                                      exc=str(e),
                                      tor=_('If you keep experiencing network problems, try using a Tor proxy.')))

        return WaitingDialog(parent=window,
                             message=_('Requesting account info from TrustedCoin server...'),
                             task=task,
                             on_success=on_finished,
                             on_error=on_error)

    def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
        # Delegate OTP prompting for a pending tx to the wallet's 2FA handler.
        wallet.handler_2fa.prompt_user_for_otp(wallet, tx, on_success, on_failure)

    @hook
    def on_new_window(self, window):
        """Hook: attach the 2FA handler and status-bar button to a new window."""
        wallet = window.wallet
        if not isinstance(wallet, self.wallet_class):
            return
        wallet.handler_2fa = HandlerTwoFactor(self, window)
        if wallet.can_sign_without_server():
            msg = ' '.join([
                _('This wallet was restored from seed, and it contains two master private keys.'),
                _('Therefore, two-factor authentication is disabled.')
            ])
            action = lambda: window.show_message(msg)
        else:
            action = partial(self.settings_dialog, window)
        button = StatusBarButton(read_QIcon("tc.jpeg"), _("TC"), action)
        window.statusBar().addPermanentWidget(button)
        self.start_request_thread(window.wallet)

    def auth_dialog(self, window):
        """Modal prompt for the authenticator code.

        Returns the entered code as an int, or None if the user cancelled.
        """
        d = WindowModalDialog(window, _("Authorization"))
        vbox = QVBoxLayout(d)
        pw = AmountEdit(None, is_int=True)
        msg = _('Please enter your Google Authenticator code')
        vbox.addWidget(QLabel(msg))
        grid = QGridLayout()
        grid.setSpacing(8)
        grid.addWidget(QLabel(_('Code')), 1, 0)
        grid.addWidget(pw, 1, 1)
        vbox.addLayout(grid)
        msg = _(
            'If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.')
        label = QLabel(msg)
        label.setWordWrap(1)
        vbox.addWidget(label)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        return pw.get_amount()

    def go_online_dialog(self, wizard: InstallWizard):
        """Wizard page: require the user to be online before key creation.

        On confirm, continues to 'accept_terms_of_use'; on GoBack (Cancel),
        persists the wallet file so the user can move it manually.
        """
        msg = [
            # NOTE(review): 'wallet123456789' looks like an accidental edit of
            # 'wallet'; string left unchanged in this documentation-only pass.
            _("Your wallet123456789 file is : {}.").format(os.path.abspath(wizard.path)),
            _("You need to be online in order to complete the creation of "
              "your wallet. If you generated your seed on an offline "
              'computer, click on "{}" to close this window, move your '
              "wallet file to an online computer, and reopen it with "
              "Electrum.").format(_('Cancel')),
            _('If you are online, click on "{}" to continue.').format(_('Next'))
        ]
        msg = '\n\n'.join(msg)
        wizard.reset_stack()
        try:
            wizard.confirm_dialog(title='', message=msg,
                                  run_next = lambda x: wizard.run('accept_terms_of_use'))
        except GoBack:
            # user clicked 'Cancel' and decided to move wallet file manually
            wizard.create_storage(wizard.path)
            raise

    def show_settings_dialog(self, window, success):
        """Show account/billing info and the prepaid-transactions selector.

        ``success`` is the result of the preceding server request; on failure
        only an error message is shown.
        """
        if not success:
            window.show_message(_('Server not reachable.'))
            return
        wallet = window.wallet
        d = WindowModalDialog(window, _("TrustedCoin Information"))
        d.setMinimumSize(500, 200)
        vbox = QVBoxLayout(d)
        hbox = QHBoxLayout()
        logo = QLabel()
        logo.setPixmap(QPixmap(icon_path("tc.jpeg")))
        msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>' \
              + _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
        label = QLabel(msg)
        label.setOpenExternalLinks(1)
        hbox.addStretch(10)
        hbox.addWidget(logo)
        hbox.addStretch(10)
        hbox.addWidget(label)
        hbox.addStretch(10)
        vbox.addLayout(hbox)
        vbox.addStretch(10)
        msg = _(
            'TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>'
        label = QLabel(msg)
        label.setWordWrap(1)
        vbox.addWidget(label)
        vbox.addStretch(10)
        grid = QGridLayout()
        vbox.addLayout(grid)
        price_per_tx = wallet.price_per_tx
        n_prepay = wallet.num_prepay(self.config)
        i = 0
        # One radio row per prepay plan (plan k == pay every k transactions);
        # k == 1 (pay-per-tx) is not offered as a prepay option.
        for k, v in sorted(price_per_tx.items()):
            if k == 1:
                continue
            grid.addWidget(QLabel("Pay every %d transactions:" % k), i, 0)
            grid.addWidget(QLabel(window.format_amount(v / k) + ' ' + window.base_unit() + "/tx"), i, 1)
            b = QRadioButton()
            b.setChecked(k == n_prepay)
            # Bind k as a default arg so each radio stores its own plan size.
            b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
            grid.addWidget(b, i, 2)
            i += 1
        n = wallet.billing_info.get('tx_remaining', 0)
        grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()
image_publisher_node.py
#!/usr/bin/env python
"""ROS node that publishes images either from a camera or from a directory.

Camera gain/exposure can be adjusted through (currently commented-out)
services and a dynamic_reconfigure server.
"""
from __future__ import print_function
import roslib
import sys
import rospy
import cv2
import os
from std_msgs.msg import String
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
from cv_bridge import CvBridge, CvBridgeError
#from camera_calibration_parsers import readCalibration
from subprocess import call
import subprocess
#from image_publisher.srv import set_gain, set_exposure
import threading
import time
from dynamic_reconfigure.server import Server
from image_publisher.cfg import camera_cfgsConfig
from utils import *


def cfgs_callback(config, level):
    # dynamic_reconfigure callback: applies gain/exposure to the camera.
    # NOTE(review): `camera_id` is not assigned anywhere in this file; it is
    # presumably provided by `utils` via the wildcard import — confirm,
    # otherwise this raises NameError when the (commented-out) Server runs.
    # NOTE(review): set_manual is given `camera_id` twice; the second argument
    # looks like it should be a mode value — TODO confirm against utils.
    set_manual(camera_id,camera_id)
    set_gain(camera_id,config.gain)
    set_exposure(camera_id,config.exposure)
    return config


def handle_set_gain(req):
    # Service handler (service registration is currently commented out).
    set_gain(camera_id,req.gain)
    print('The gain for the camera'+str(camera_id)+' has been set to:'+str(req.gain))
    return True


def handle_set_exposure(req):
    # Service handler (service registration is currently commented out).
    set_exposure(camera_id,req.exposure)
    print('The exposure for the camera'+str(camera_id)+' has been set to:'+str(req.exposure))
    return True


rospy.init_node('general_image_publisher')
print("Node Initialized")

# Load the node parameters
param_manager=paramManager()
params=param_manager.load_params()

# Image Publisher Subsystem
# calib_load=(True if params['~path_to_calib_file'] is not None else False)
ipm = imgPubliherMachine(params)

# Image Publisher Thread: frames come either from a live camera or from a
# directory of images, at the configured fps.
# NOTE(review): if ~publisher_mode is neither 'camera' nor 'dir', `img_loop`
# is never bound and the Thread() line below raises NameError — confirm the
# parameter is validated upstream.
if params['~publisher_mode']=='camera':
    img_loop=cameraLoop(params['~camera_id'],params['~fps'], ipm)
elif params['~publisher_mode']=='dir':
    img_loop=directoryLoop(params['~img_dir_path'], params['~fps'], ipm, replay=params['~replay'])

publisher_thread=threading.Thread(target=img_loop.loop)

# Initialize the Service Server
# gain_service=rospy.Service('set_gain',set_gain,handle_set_gain)
# gain_service=rospy.Service('set_exposure',set_exposure,handle_set_exposure)
# server=Server(camera_cfgsConfig,cfgs_callback)

try:
    publisher_thread.start()
    rospy.spin()
    # if ROS goes down, the image capturing thread will have to terminate
    img_loop.kill=True
    publisher_thread.join()
except KeyboardInterrupt:
    print("Shutting down")
observer.py
"""ARTEMIS fileobserver: watches the config file and POSTs diffs upstream."""
import difflib
import multiprocessing as mp
import time

import requests
import ujson as json
from artemis_utils import get_logger
from artemis_utils.constants import CONFIGURATION_HOST
from artemis_utils.envvars import REST_PORT
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.web import RequestHandler
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer as WatchObserver

# logger
log = get_logger()

# shared memory object locks (guard the manager dict across processes)
shared_memory_locks = {"data_worker": mp.Lock()}

# global vars
SERVICE_NAME = "fileobserver"


class ConfigHandler(RequestHandler):
    """ REST request handler for configuration. """

    def initialize(self, shared_memory_manager_dict):
        # Tornado calls this per-request with the kwargs given at routing.
        self.shared_memory_manager_dict = shared_memory_manager_dict

    def get(self):
        """
        Provides current configuration primitives (in the form of a JSON dict)
        to the requester. Note that fileobserver does not have any actual
        configuration, but we return the following information that it uses
        to operate:
        {
            "dirname": <string>,
            "filename": <string>
        }
        """
        self.write(
            {
                "dirname": self.shared_memory_manager_dict["dirname"],
                "filename": self.shared_memory_manager_dict["filename"],
            }
        )

    def post(self):
        """
        Pseudo-configures fileobserver and responds with a success message.
        :return: {"success": True | False, "message": < message >}
        """
        self.write({"success": True, "message": "configured"})


class HealthHandler(RequestHandler):
    """ REST request handler for health checks. """

    def initialize(self, shared_memory_manager_dict):
        self.shared_memory_manager_dict = shared_memory_manager_dict

    def get(self):
        """
        Extract the status of a service via a GET request.
        :return: {"status" : <unconfigured|running|stopped><,reconfiguring>}
        """
        status = "stopped"
        shared_memory_locks["data_worker"].acquire()
        if self.shared_memory_manager_dict["data_worker_running"]:
            status = "running"
        shared_memory_locks["data_worker"].release()
        # NOTE(review): service_reconfiguring is read without holding a lock,
        # unlike data_worker_running above — presumably acceptable for a
        # health probe; confirm.
        if self.shared_memory_manager_dict["service_reconfiguring"]:
            status += ",reconfiguring"
        self.write({"status": status})


class ControlHandler(RequestHandler):
    """ REST request handler for control commands. """

    def initialize(self, shared_memory_manager_dict):
        self.shared_memory_manager_dict = shared_memory_manager_dict

    def start_data_worker(self):
        # Spawns the watchdog worker process unless one is already running.
        shared_memory_locks["data_worker"].acquire()
        if self.shared_memory_manager_dict["data_worker_running"]:
            log.info("data worker already running")
            shared_memory_locks["data_worker"].release()
            return "already running"
        shared_memory_locks["data_worker"].release()
        mp.Process(target=self.run_data_worker_process).start()
        return "instructed to start"

    def run_data_worker_process(self):
        # Runs in a child process: watches the config dir until the
        # data_worker_running flag is cleared via /control stop.
        shared_memory_locks["data_worker"].acquire()
        observer = WatchObserver()
        try:
            event_handler = Handler(
                self.shared_memory_manager_dict["dirname"],
                self.shared_memory_manager_dict["filename"],
            )
            observer.schedule(
                event_handler,
                self.shared_memory_manager_dict["dirname"],
                recursive=False,
            )
            observer.start()
            self.shared_memory_manager_dict["data_worker_running"] = True
            shared_memory_locks["data_worker"].release()
            log.info("data worker started")
            # Poll the stop flag; watchdog delivers events on its own thread.
            while True:
                time.sleep(5)
                if not self.shared_memory_manager_dict["data_worker_running"]:
                    break
        except Exception:
            log.exception("exception")
            # NOTE(review): if the exception occurred after the release()
            # inside the try body (e.g. in the sleep loop), this releases a
            # lock the process no longer holds — confirm this path is only
            # expected for failures before observer.start().
            shared_memory_locks["data_worker"].release()
        finally:
            observer.stop()
            observer.join()
            shared_memory_locks["data_worker"].acquire()
            self.shared_memory_manager_dict["data_worker_running"] = False
            shared_memory_locks["data_worker"].release()
            log.info("data worker stopped")

    def stop_data_worker(self):
        # Clearing the flag makes the worker's poll loop exit within ~5s.
        shared_memory_locks["data_worker"].acquire()
        self.shared_memory_manager_dict["data_worker_running"] = False
        shared_memory_locks["data_worker"].release()
        message = "instructed to stop"
        return message

    def post(self):
        """
        Instruct a service to start or stop by posting a command.
        Sample request body
        {
            "command": <start|stop>
        }
        :return: {"success": True|False, "message": <message>}
        """
        try:
            msg = json.loads(self.request.body)
            command = msg["command"]
            # start/stop data_worker
            if command == "start":
                message = self.start_data_worker()
                self.write({"success": True, "message": message})
            elif command == "stop":
                message = self.stop_data_worker()
                self.write({"success": True, "message": message})
            else:
                self.write({"success": False, "message": "unknown command"})
        except Exception:
            log.exception("Exception")
            self.write({"success": False, "message": "error during control"})


class FileObserver:
    """ FileObserver REST Service. """

    def __init__(self):
        # initialize shared memory (a Manager dict so the worker process and
        # the REST handlers see the same state)
        shared_memory_manager = mp.Manager()
        self.shared_memory_manager_dict = shared_memory_manager.dict()
        self.shared_memory_manager_dict["data_worker_running"] = False
        self.shared_memory_manager_dict["service_reconfiguring"] = False
        self.shared_memory_manager_dict["dirname"] = "/etc/artemis"
        self.shared_memory_manager_dict["filename"] = "config.yaml"
        log.info("service initiated")

    def make_rest_app(self):
        # Each handler receives the shared dict via initialize() kwargs.
        return Application(
            [
                (
                    "/config",
                    ConfigHandler,
                    dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
                ),
                (
                    "/control",
                    ControlHandler,
                    dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
                ),
                (
                    "/health",
                    HealthHandler,
                    dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
                ),
            ]
        )

    def start_rest_app(self):
        # Blocks: runs the tornado IOLoop until the process exits.
        app = self.make_rest_app()
        app.listen(REST_PORT)
        log.info("REST worker started and listening to port {}".format(REST_PORT))
        IOLoop.current().start()


class Handler(FileSystemEventHandler):
    """Watchdog handler: diffs the watched file and forwards new content."""

    def __init__(self, d, fn):
        super().__init__()
        self.response = None
        self.path = "{}/{}".format(d, fn)
        try:
            with open(self.path, "r") as f:
                self.content = f.readlines()
        except Exception:
            # NOTE(review): on failure self.content is never set, so the
            # first check_changes() raises AttributeError — confirm the
            # config file is guaranteed to exist at startup.
            log.exception("exception")

    def on_modified(self, event):
        # In-place edits of the watched file.
        if event.is_directory:
            return None
        if event.src_path == self.path:
            self.check_changes()

    def on_moved(self, event):
        # Atomic-rename style updates (write temp file, rename over target).
        if event.is_directory:
            return None
        if event.dest_path == self.path:
            self.check_changes()

    def check_changes(self):
        # POST the new file content to the configuration service only when it
        # actually differs from the last accepted content.
        with open(self.path, "r") as f:
            content = f.readlines()
        changes = "".join(difflib.unified_diff(self.content, content))
        if changes:
            try:
                r = requests.post(
                    url="http://{}:{}/config".format(CONFIGURATION_HOST, REST_PORT),
                    data=json.dumps(
                        {"type": "yaml", "content": content, "origin": "fileobserver"}
                    ),
                )
                response = r.json()
                if response["success"]:
                    if response["message"] == "ignored":
                        text = "new configuration ok but ignored (no need to change)"
                    else:
                        text = "new configuration accepted:\n{}".format(changes)
                    log.info(text)
                    # Only advance the baseline on acceptance, so a later fix
                    # of a rejected config still shows as a change.
                    self.content = content
                else:
                    log.error(
                        "invalid configuration due to error '{}':\n{}".format(
                            response["message"], content
                        )
                    )
            except Exception:
                log.error(
                    "could not send configuration to service '{}'".format(
                        CONFIGURATION_HOST
                    )
                )


def make_app():
    # NOTE(review): apparently dead code — main() uses
    # FileObserver.make_rest_app() instead. As written, these handlers are
    # routed WITHOUT the shared_memory_manager_dict initialize() kwarg, so
    # any request served by this app would fail; confirm before reuse.
    return Application(
        [
            ("/config", ConfigHandler),
            ("/control", ControlHandler),
            ("/health", HealthHandler),
        ]
    )


def main():
    # initiate file observer service with REST
    fileObserverService = FileObserver()

    # start REST within main process (blocks on the IOLoop)
    fileObserverService.start_rest_app()


if __name__ == "__main__":
    main()
geoprocessing.py
# coding=UTF-8 """A collection of raster and vector algorithms and utilities.""" import collections import functools import logging import math import os import pprint import queue import shutil import sys import tempfile import threading import time from . import geoprocessing_core from .geoprocessing_core import DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS from .geoprocessing_core import DEFAULT_OSR_AXIS_MAPPING_STRATEGY from osgeo import gdal from osgeo import ogr from osgeo import osr import numpy import numpy.ma import rtree import scipy.interpolate import scipy.ndimage import scipy.signal import scipy.signal.signaltools import scipy.sparse import shapely.ops import shapely.prepared import shapely.wkb # This is used to efficiently pass data to the raster stats worker if available if sys.version_info >= (3, 8): import multiprocessing.shared_memory class ReclassificationMissingValuesError(Exception): """Raised when a raster value is not a valid key to a dictionary. Attributes: msg (str) - error message missing_values (list) - a list of the missing values from the raster that are not keys in the dictionary """ def __init__(self, msg, missing_values): """See Attributes for args docstring.""" self.msg = msg self.missing_values = missing_values super().__init__(msg, missing_values) LOGGER = logging.getLogger(__name__) # Used in joining finished TaskGraph Tasks. 
_MAX_TIMEOUT = 60.0 _VALID_GDAL_TYPES = ( set([getattr(gdal, x) for x in dir(gdal.gdalconst) if 'GDT_' in x])) _LOGGING_PERIOD = 5.0 # min 5.0 seconds per update log message for the module _LARGEST_ITERBLOCK = 2**16 # largest block for iterblocks to read in cells _GDAL_TYPE_TO_NUMPY_LOOKUP = { gdal.GDT_Byte: numpy.uint8, gdal.GDT_Int16: numpy.int16, gdal.GDT_Int32: numpy.int32, gdal.GDT_UInt16: numpy.uint16, gdal.GDT_UInt32: numpy.uint32, gdal.GDT_Float32: numpy.float32, gdal.GDT_Float64: numpy.float64, gdal.GDT_CFloat32: numpy.csingle, gdal.GDT_CFloat64: numpy.complex64, } def raster_calculator( base_raster_path_band_const_list, local_op, target_raster_path, datatype_target, nodata_target, calc_raster_stats=True, use_shared_memory=False, largest_block=_LARGEST_ITERBLOCK, max_timeout=_MAX_TIMEOUT, raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS): """Apply local a raster operation on a stack of rasters. This function applies a user defined function across a stack of rasters' pixel stack. The rasters in ``base_raster_path_band_list`` must be spatially aligned and have the same cell sizes. Args: base_raster_path_band_const_list (sequence): a sequence containing: * ``(str, int)`` tuples, referring to a raster path/band index pair to use as an input. * ``numpy.ndarray`` s of up to two dimensions. These inputs must all be broadcastable to each other AND the size of the raster inputs. * ``(object, 'raw')`` tuples, where ``object`` will be passed directly into the ``local_op``. All rasters must have the same raster size. If only arrays are input, numpy arrays must be broadcastable to each other and the final raster size will be the final broadcast array shape. A value error is raised if only "raw" inputs are passed. local_op (function): a function that must take in as many parameters as there are elements in ``base_raster_path_band_const_list``. 
The parameters in ``local_op`` will map 1-to-1 in order with the values in ``base_raster_path_band_const_list``. ``raster_calculator`` will call ``local_op`` to generate the pixel values in ``target_raster`` along memory block aligned processing windows. Note any particular call to ``local_op`` will have the arguments from ``raster_path_band_const_list`` sliced to overlap that window. If an argument from ``raster_path_band_const_list`` is a raster/path band tuple, it will be passed to ``local_op`` as a 2D numpy array of pixel values that align with the processing window that ``local_op`` is targeting. A 2D or 1D array will be sliced to match the processing window and in the case of a 1D array tiled in whatever dimension is flat. If an argument is a scalar it is passed as as scalar. The return value must be a 2D array of the same size as any of the input parameter 2D arrays and contain the desired pixel values for the target raster. target_raster_path (string): the path of the output raster. The projection, size, and cell size will be the same as the rasters in ``base_raster_path_const_band_list`` or the final broadcast size of the constant/ndarray values in the list. datatype_target (gdal datatype; int): the desired GDAL output type of the target raster. nodata_target (numerical value): the desired nodata value of the target raster. calc_raster_stats (boolean): If True, calculates and sets raster statistics (min, max, mean, and stdev) for target raster. use_shared_memory (boolean): If True, uses Python Multiprocessing shared memory to calculate raster stats for faster performance. This feature is available for Python >= 3.8 and will otherwise be ignored for earlier versions of Python. largest_block (int): Attempts to internally iterate over raster blocks with this many elements. Useful in cases where the blocksize is relatively small, memory is available, and the function call overhead dominates the iteration. Defaults to 2**20. 
A value of anything less than the original blocksize of the raster will result in blocksizes equal to the original size. max_timeout (float): amount of time in seconds to wait for stats worker thread to join. Default is _MAX_TIMEOUT. raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver name string as the first element and a GDAL creation options tuple/list as the second. Defaults to geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS. Return: None Raises: ValueError: invalid input provided """ if not base_raster_path_band_const_list: raise ValueError( "`base_raster_path_band_const_list` is empty and " "should have at least one value.") # It's a common error to not pass in path/band tuples, so check for that # and report error if so bad_raster_path_list = False if not isinstance(base_raster_path_band_const_list, (list, tuple)): bad_raster_path_list = True else: for value in base_raster_path_band_const_list: if (not _is_raster_path_band_formatted(value) and not isinstance(value, numpy.ndarray) and not (isinstance(value, tuple) and len(value) == 2 and value[1] == 'raw')): bad_raster_path_list = True break if bad_raster_path_list: raise ValueError( "Expected a sequence of path / integer band tuples, " "ndarrays, or (value, 'raw') pairs for " "`base_raster_path_band_const_list`, instead got: " "%s" % pprint.pformat(base_raster_path_band_const_list)) # check that any rasters exist on disk and have enough bands not_found_paths = [] gdal.PushErrorHandler('CPLQuietErrorHandler') base_raster_path_band_list = [ path_band for path_band in base_raster_path_band_const_list if _is_raster_path_band_formatted(path_band)] for value in base_raster_path_band_list: if gdal.OpenEx(value[0], gdal.OF_RASTER) is None: not_found_paths.append(value[0]) gdal.PopErrorHandler() if not_found_paths: raise ValueError( "The following files were expected but do not exist on the " "filesystem: " + str(not_found_paths)) # check that band index exists in raster invalid_band_index_list 
= [] for value in base_raster_path_band_list: raster = gdal.OpenEx(value[0], gdal.OF_RASTER) if not (1 <= value[1] <= raster.RasterCount): invalid_band_index_list.append(value) raster = None if invalid_band_index_list: raise ValueError( "The following rasters do not contain requested band " "indexes: %s" % invalid_band_index_list) # check that the target raster is not also an input raster if target_raster_path in [x[0] for x in base_raster_path_band_list]: raise ValueError( "%s is used as a target path, but it is also in the base input " "path list %s" % ( target_raster_path, str(base_raster_path_band_const_list))) # check that raster inputs are all the same dimensions raster_info_list = [ get_raster_info(path_band[0]) for path_band in base_raster_path_band_const_list if _is_raster_path_band_formatted(path_band)] geospatial_info_set = set() for raster_info in raster_info_list: geospatial_info_set.add(raster_info['raster_size']) if len(geospatial_info_set) > 1: raise ValueError( "Input Rasters are not the same dimensions. 
The " "following raster are not identical %s" % str( geospatial_info_set)) numpy_broadcast_list = [ x for x in base_raster_path_band_const_list if isinstance(x, numpy.ndarray)] stats_worker_thread = None try: # numpy.broadcast can only take up to 32 arguments, this loop works # around that restriction: while len(numpy_broadcast_list) > 1: numpy_broadcast_list = ( [numpy.broadcast(*numpy_broadcast_list[:32])] + numpy_broadcast_list[32:]) if numpy_broadcast_list: numpy_broadcast_size = numpy_broadcast_list[0].shape except ValueError: # this gets raised if numpy.broadcast fails raise ValueError( "Numpy array inputs cannot be broadcast into a single shape %s" % numpy_broadcast_list) if numpy_broadcast_list and len(numpy_broadcast_list[0].shape) > 2: raise ValueError( "Numpy array inputs must be 2 dimensions or less %s" % numpy_broadcast_list) # if there are both rasters and arrays, check the numpy shape will # be broadcastable with raster shape if raster_info_list and numpy_broadcast_list: # geospatial lists x/y order and numpy does y/x so reverse size list raster_shape = tuple(reversed(raster_info_list[0]['raster_size'])) invalid_broadcast_size = False if len(numpy_broadcast_size) == 1: # if there's only one dimension it should match the last # dimension first, in the raster case this is the columns # because of the row/column order of numpy. No problem if # that value is ``1`` because it will be broadcast, otherwise # it should be the same as the raster. 
if (numpy_broadcast_size[0] != raster_shape[1] and numpy_broadcast_size[0] != 1): invalid_broadcast_size = True else: for dim_index in range(2): # no problem if 1 because it'll broadcast, otherwise must # be the same value if (numpy_broadcast_size[dim_index] != raster_shape[dim_index] and numpy_broadcast_size[dim_index] != 1): invalid_broadcast_size = True if invalid_broadcast_size: raise ValueError( "Raster size %s cannot be broadcast to numpy shape %s" % ( raster_shape, numpy_broadcast_size)) # create a "canonical" argument list that's bands, 2d numpy arrays, or # raw values only base_canonical_arg_list = [] base_raster_list = [] base_band_list = [] for value in base_raster_path_band_const_list: # the input has been tested and value is either a raster/path band # tuple, 1d ndarray, 2d ndarray, or (value, 'raw') tuple. if _is_raster_path_band_formatted(value): # it's a raster/path band, keep track of open raster and band # for later so we can `None` them. base_raster_list.append(gdal.OpenEx(value[0], gdal.OF_RASTER)) base_band_list.append( base_raster_list[-1].GetRasterBand(value[1])) base_canonical_arg_list.append(base_band_list[-1]) elif isinstance(value, numpy.ndarray): if value.ndim == 1: # easier to process as a 2d array for writing to band base_canonical_arg_list.append( value.reshape((1, value.shape[0]))) else: # dimensions are two because we checked earlier. base_canonical_arg_list.append(value) elif isinstance(value, tuple): base_canonical_arg_list.append(value) else: raise ValueError( "An unexpected ``value`` occurred. This should never happen. 
" "Value: %r" % value) # create target raster if raster_info_list: # if rasters are passed, the target is the same size as the raster n_cols, n_rows = raster_info_list[0]['raster_size'] elif numpy_broadcast_list: # numpy arrays in args and no raster result is broadcast shape # expanded to two dimensions if necessary if len(numpy_broadcast_size) == 1: n_rows, n_cols = 1, numpy_broadcast_size[0] else: n_rows, n_cols = numpy_broadcast_size else: raise ValueError( "Only (object, 'raw') values have been passed. Raster " "calculator requires at least a raster or numpy array as a " "parameter. This is the input list: %s" % pprint.pformat( base_raster_path_band_const_list)) if datatype_target not in _VALID_GDAL_TYPES: raise ValueError( 'Invalid target type, should be a gdal.GDT_* type, received ' '"%s"' % datatype_target) # create target raster raster_driver = gdal.GetDriverByName(raster_driver_creation_tuple[0]) try: os.makedirs(os.path.dirname(target_raster_path)) except OSError: pass target_raster = raster_driver.Create( target_raster_path, n_cols, n_rows, 1, datatype_target, options=raster_driver_creation_tuple[1]) target_band = target_raster.GetRasterBand(1) if nodata_target is not None: target_band.SetNoDataValue(nodata_target) if base_raster_list: # use the first raster in the list for the projection and geotransform target_raster.SetProjection(base_raster_list[0].GetProjection()) target_raster.SetGeoTransform(base_raster_list[0].GetGeoTransform()) target_band.FlushCache() target_raster.FlushCache() try: last_time = time.time() block_offset_list = list(iterblocks( (target_raster_path, 1), offset_only=True, largest_block=largest_block)) if calc_raster_stats: # if this queue is used to send computed valid blocks of # the raster to an incremental statistics calculator worker stats_worker_queue = queue.Queue() exception_queue = queue.Queue() if sys.version_info >= (3, 8): # The stats worker keeps running variables as a float64, so # all input rasters are dtype float64 
-- make the shared memory # size equivalent. block_size_bytes = ( numpy.dtype(numpy.float64).itemsize * block_offset_list[0]['win_xsize'] * block_offset_list[0]['win_ysize']) shared_memory = multiprocessing.shared_memory.SharedMemory( create=True, size=block_size_bytes) else: stats_worker_queue = None if calc_raster_stats: # To avoid doing two passes on the raster to calculate standard # deviation, we implement a continuous statistics calculation # as the raster is computed. This computational effort is high # and benefits from running in parallel. This queue and worker # takes a valid block of a raster and incrementally calculates # the raster's statistics. When ``None`` is pushed to the queue # the worker will finish and return a (min, max, mean, std) # tuple. LOGGER.info('starting stats_worker') stats_worker_thread = threading.Thread( target=geoprocessing_core.stats_worker, args=(stats_worker_queue, len(block_offset_list))) stats_worker_thread.daemon = True stats_worker_thread.start() LOGGER.info('started stats_worker %s', stats_worker_thread) pixels_processed = 0 n_pixels = n_cols * n_rows # iterate over each block and calculate local_op for block_offset in block_offset_list: # read input blocks offset_list = (block_offset['yoff'], block_offset['xoff']) blocksize = (block_offset['win_ysize'], block_offset['win_xsize']) data_blocks = [] for value in base_canonical_arg_list: if isinstance(value, gdal.Band): data_blocks.append(value.ReadAsArray(**block_offset)) # I've encountered the following error when a gdal raster # is corrupt, often from multiple threads writing to the # same file. This helps to catch the error early rather # than lead to confusing values of ``data_blocks`` later. 
if not isinstance(data_blocks[-1], numpy.ndarray): raise ValueError( f"got a {data_blocks[-1]} when trying to read " f"{value.GetDataset().GetFileList()} at " f"{block_offset}, expected numpy.ndarray.") elif isinstance(value, numpy.ndarray): # must be numpy array and all have been conditioned to be # 2d, so start with 0:1 slices and expand if possible slice_list = [slice(0, 1)] * 2 tile_dims = list(blocksize) for dim_index in [0, 1]: if value.shape[dim_index] > 1: slice_list[dim_index] = slice( offset_list[dim_index], offset_list[dim_index] + blocksize[dim_index],) tile_dims[dim_index] = 1 data_blocks.append( numpy.tile(value[tuple(slice_list)], tile_dims)) else: # must be a raw tuple data_blocks.append(value[0]) target_block = local_op(*data_blocks) if (not isinstance(target_block, numpy.ndarray) or target_block.shape != blocksize): raise ValueError( "Expected `local_op` to return a numpy.ndarray of " "shape %s but got this instead: %s" % ( blocksize, target_block)) target_band.WriteArray( target_block, yoff=block_offset['yoff'], xoff=block_offset['xoff']) # send result to stats calculator if stats_worker_queue: # guard against an undefined nodata target if nodata_target is not None: target_block = target_block[target_block != nodata_target] target_block = target_block.astype(numpy.float64).flatten() if sys.version_info >= (3, 8) and use_shared_memory: shared_memory_array = numpy.ndarray( target_block.shape, dtype=target_block.dtype, buffer=shared_memory.buf) shared_memory_array[:] = target_block[:] stats_worker_queue.put(( shared_memory_array.shape, shared_memory_array.dtype, shared_memory)) else: stats_worker_queue.put(target_block) pixels_processed += blocksize[0] * blocksize[1] last_time = _invoke_timed_callback( last_time, lambda: LOGGER.info( '%.1f%% complete', float(pixels_processed) / n_pixels * 100.0), _LOGGING_PERIOD) LOGGER.info('100.0% complete') if calc_raster_stats: LOGGER.info("Waiting for raster stats worker result.") 
        # tail of raster_calculator (function body continues from above):
        # join the stats worker and commit its min/max/mean/stddev result.
        stats_worker_thread.join(max_timeout)
        if stats_worker_thread.is_alive():
            LOGGER.error("stats_worker_thread.join() timed out")
            raise RuntimeError("stats_worker_thread.join() timed out")
        payload = stats_worker_queue.get(True, max_timeout)
        if payload is not None:
            target_min, target_max, target_mean, target_stddev = payload
            target_band.SetStatistics(
                float(target_min), float(target_max), float(target_mean),
                float(target_stddev))
            target_band.FlushCache()
    except Exception:
        LOGGER.exception('exception encountered in raster_calculator')
        raise
    finally:
        # This block ensures that rasters are destroyed even if there's an
        # exception raised.
        base_band_list[:] = []
        base_raster_list[:] = []
        target_band.FlushCache()
        target_band = None
        target_raster.FlushCache()
        target_raster = None
        if calc_raster_stats and stats_worker_thread:
            if stats_worker_thread.is_alive():
                # a None payload tells the worker to terminate
                stats_worker_queue.put(None, True, max_timeout)
                LOGGER.info("Waiting for raster stats worker result.")
                stats_worker_thread.join(max_timeout)
                if stats_worker_thread.is_alive():
                    LOGGER.error("stats_worker_thread.join() timed out")
                    raise RuntimeError(
                        "stats_worker_thread.join() timed out")
        if sys.version_info >= (3, 8) and use_shared_memory:
            LOGGER.debug(
                f'unlink shared memory for process {os.getpid()}')
            shared_memory.close()
            shared_memory.unlink()
            LOGGER.debug(
                f'unlinked shared memory for process {os.getpid()}')
        # check for an exception in the workers, otherwise get result
        # and pass to writer
        try:
            exception = exception_queue.get_nowait()
            LOGGER.error("Exception encountered at termination.")
            raise exception
        except queue.Empty:
            pass


def align_and_resize_raster_stack(
        base_raster_path_list, target_raster_path_list, resample_method_list,
        target_pixel_size, bounding_box_mode, base_vector_path_list=None,
        raster_align_index=None, base_projection_wkt_list=None,
        target_projection_wkt=None, vector_mask_options=None,
        gdal_warp_options=None,
        raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS,
        osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY):
    """Generate rasters from a base such that they align geospatially.

    This function resizes base rasters that are in the same geospatial
    projection such that the result is an aligned stack of rasters that have
    the same cell size, dimensions, and bounding box. This is achieved by
    clipping or resizing the rasters to intersected, unioned, or equivocated
    bounding boxes of all the raster and vector input.

    Args:
        base_raster_path_list (sequence): a sequence of base raster paths
            that will be transformed and will be used to determine the
            target bounding box.
        target_raster_path_list (sequence): a sequence of raster paths that
            will be created to one-to-one map with
            ``base_raster_path_list`` as aligned versions of those original
            rasters. If there are duplicate paths in this list, the function
            will raise a ValueError.
        resample_method_list (sequence): a sequence of resampling methods
            which one to one map each path in ``base_raster_path_list``
            during resizing. Each element must be one of
            "near|bilinear|cubic|cubicspline|lanczos|mode".
        target_pixel_size (list/tuple): the target raster's x and y pixel
            size example: (30, -30).
        bounding_box_mode (string): one of "union", "intersection", or a
            sequence of floats of the form [minx, miny, maxx, maxy] in the
            target projection coordinate system. Depending on the value,
            output extents are defined as the union, intersection, or the
            explicit bounding box.
        base_vector_path_list (sequence): a sequence of base vector paths
            whose bounding boxes will be used to determine the final bounding
            box of the raster stack if mode is 'union' or 'intersection'. If
            mode is 'bb=[...]' then these vectors are not used in any
            calculation.
        raster_align_index (int): indicates the index of a raster in
            ``base_raster_path_list`` that the target rasters' bounding boxes
            pixels should align with. This feature allows rasters whose
            raster dimensions are the same, but bounding boxes slightly
            shifted less than a pixel size to align with a desired grid
            layout. If ``None`` then the bounding box of the target rasters
            is calculated as the precise intersection, union, or bounding
            box.
        base_projection_wkt_list (sequence): if not None, this is a sequence
            of base projections of the rasters in ``base_raster_path_list``.
            If a value is ``None`` the ``base_sr`` is assumed to be whatever
            is defined in that raster. This value is useful if there are
            rasters with no projection defined, but otherwise known.
        target_projection_wkt (string): if not None, this is the desired
            projection of all target rasters in Well Known Text format. If
            None, the base SRS will be passed to the target.
        vector_mask_options (dict): optional, if not None, this is a
            dictionary of options to use an existing vector's geometry to
            mask out pixels in the target raster that do not overlap the
            vector's geometry. Keys to this dictionary are:

            * ``'mask_vector_path'`` (str): path to the mask vector file.
              This vector will be automatically projected to the target
              projection if its base coordinate system does not match the
              target.
            * ``'mask_layer_name'`` (str): the layer name to use for masking.
              If this key is not in the dictionary the default is to use the
              layer at index 0.
            * ``'mask_vector_where_filter'`` (str): an SQL WHERE string.
              This will be used to filter the geometry in the mask. Ex:
              ``'id > 10'`` would use all features whose field value of 'id'
              is > 10.

        gdal_warp_options (sequence): if present, the contents of this list
            are passed to the ``warpOptions`` parameter of ``gdal.Warp``. See
            the `GDAL Warp documentation
            <https://gdal.org/api/gdalwarp_cpp.html#_CPPv415GDALWarpOptions>`_
            for valid options.
        raster_driver_creation_tuple (tuple): a tuple containing a GDAL
            driver name string as the first element and a GDAL creation
            options tuple/list as the second. Defaults to a GTiff driver
            tuple defined at
            geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.
        osr_axis_mapping_strategy (int): OSR axis mapping strategy for
            ``SpatialReference`` objects. Defaults to
            ``geoprocessing.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. This
            parameter should not be changed unless you know what you are
            doing.

    Return:
        None

    Raises:
        ValueError
            If any combination of the raw bounding boxes, raster bounding
            boxes, vector bounding boxes, and/or vector_mask bounding box
            does not overlap to produce a valid target.
        ValueError
            If any of the input or target lists are of different lengths.
        ValueError
            If there are duplicate paths on the target list which would risk
            corrupted output.
        ValueError
            If some combination of base, target, and embedded source
            reference systems results in an ambiguous target coordinate
            system.
        ValueError
            If ``vector_mask_options`` is not None but the
            ``mask_vector_path`` is undefined or doesn't point to a valid
            file.
        ValueError
            If ``pixel_size`` is not a 2 element sequence of numbers.

    """
    # make sure that the input lists are of the same length
    list_lengths = [
        len(base_raster_path_list), len(target_raster_path_list),
        len(resample_method_list)]
    if len(set(list_lengths)) != 1:
        raise ValueError(
            "base_raster_path_list, target_raster_path_list, and "
            "resample_method_list must be the same length "
            " current lengths are %s" % (str(list_lengths)))

    # duplicate targets would silently overwrite each other's output
    unique_targets = set(target_raster_path_list)
    if len(unique_targets) != len(target_raster_path_list):
        seen = set()
        duplicate_list = []
        for path in target_raster_path_list:
            if path not in seen:
                seen.add(path)
            else:
                duplicate_list.append(path)
        raise ValueError(
            "There are duplicated paths on the target list. This is an "
            "invalid state of ``target_path_list``. Duplicates: %s" % (
                duplicate_list))

    # we can accept 'union', 'intersection', or a 4 element list/tuple
    if bounding_box_mode not in ["union", "intersection"] and (
            not isinstance(bounding_box_mode, (list, tuple)) or
            len(bounding_box_mode) != 4):
        raise ValueError("Unknown bounding_box_mode %s" % (
            str(bounding_box_mode)))

    n_rasters = len(base_raster_path_list)
    if ((raster_align_index is not None) and
            ((raster_align_index < 0) or (raster_align_index >= n_rasters))):
        raise ValueError(
            "Alignment index is out of bounds of the datasets index: %s"
            " n_elements %s" % (raster_align_index, n_rasters))

    _assert_is_valid_pixel_size(target_pixel_size)

    # used to get bounding box, projection, and possible alignment info
    raster_info_list = [
        get_raster_info(path) for path in base_raster_path_list]

    # get the literal or intersecting/unioned bounding box
    if isinstance(bounding_box_mode, (list, tuple)):
        # if it's a sequence or tuple, it must be a manual bounding box
        LOGGER.debug(
            "assuming manual bounding box mode of %s", bounding_box_mode)
        target_bounding_box = bounding_box_mode
    else:
        # either intersection or union, get list of bounding boxes, reproject
        # if necessary, and reduce to a single box
        if base_vector_path_list is not None:
            # vectors are only interesting for their bounding boxes, that's
            # why this construction is inside an else.
            vector_info_list = [
                get_vector_info(path) for path in base_vector_path_list]
        else:
            vector_info_list = []

        raster_bounding_box_list = []
        for raster_index, raster_info in enumerate(raster_info_list):
            # this block calculates the base projection of ``raster_info`` if
            # ``target_projection_wkt`` is defined, thus implying a
            # reprojection will be necessary.
            if target_projection_wkt:
                if base_projection_wkt_list and \
                        base_projection_wkt_list[raster_index]:
                    # a base is defined, use that
                    base_raster_projection_wkt = \
                        base_projection_wkt_list[raster_index]
                else:
                    # otherwise use the raster's projection and there must
                    # be one since we're reprojecting
                    base_raster_projection_wkt = (
                        raster_info['projection_wkt'])
                    if not base_raster_projection_wkt:
                        raise ValueError(
                            "no projection for raster %s" %
                            base_raster_path_list[raster_index])
                # since the base spatial reference is potentially different
                # than the target, we need to transform the base bounding
                # box into target coordinates so later we can calculate
                # accurate bounding box overlaps in the target coordinate
                # system
                raster_bounding_box_list.append(
                    transform_bounding_box(
                        raster_info['bounding_box'],
                        base_raster_projection_wkt, target_projection_wkt))
            else:
                raster_bounding_box_list.append(raster_info['bounding_box'])

        # include the vector bounding box information to make a global list
        # of target bounding boxes
        bounding_box_list = [
            vector_info['bounding_box']
            if target_projection_wkt is None else
            transform_bounding_box(
                vector_info['bounding_box'],
                vector_info['projection_wkt'], target_projection_wkt)
            for vector_info in vector_info_list] + raster_bounding_box_list

        target_bounding_box = merge_bounding_box_list(
            bounding_box_list, bounding_box_mode)

    if vector_mask_options:
        # ensure the mask exists and intersects with the target bounding box
        if 'mask_vector_path' not in vector_mask_options:
            raise ValueError(
                'vector_mask_options passed, but no value for '
                '"mask_vector_path": %s', vector_mask_options)

        mask_vector_info = get_vector_info(
            vector_mask_options['mask_vector_path'])
        if 'mask_vector_where_filter' in vector_mask_options:
            # the bounding box only exists for the filtered features
            mask_vector = gdal.OpenEx(
                vector_mask_options['mask_vector_path'], gdal.OF_VECTOR)
            mask_layer = mask_vector.GetLayer()
            mask_layer.SetAttributeFilter(
                vector_mask_options['mask_vector_where_filter'])
            # envelope reorder [0, 2, 1, 3] converts OGR's
            # [minx, maxx, miny, maxy] to [minx, miny, maxx, maxy]
            mask_bounding_box = merge_bounding_box_list(
                [[feature.GetGeometryRef().GetEnvelope()[i]
                  for i in [0, 2, 1, 3]] for feature in mask_layer],
                'union')
            mask_layer = None
            mask_vector = None
        else:
            # if no where filter then use the raw vector bounding box
            mask_bounding_box = mask_vector_info['bounding_box']

        mask_vector_projection_wkt = mask_vector_info['projection_wkt']
        if mask_vector_projection_wkt is not None and \
                target_projection_wkt is not None:
            mask_vector_bb = transform_bounding_box(
                mask_bounding_box, mask_vector_info['projection_wkt'],
                target_projection_wkt)
        else:
            mask_vector_bb = mask_vector_info['bounding_box']
        # Calling `merge_bounding_box_list` will raise an ValueError if the
        # bounding box of the mask and the target do not intersect. The
        # result is otherwise not used.
        _ = merge_bounding_box_list(
            [target_bounding_box, mask_vector_bb], 'intersection')

    if raster_align_index is not None and raster_align_index >= 0:
        # bounding box needs alignment
        align_bounding_box = (
            raster_info_list[raster_align_index]['bounding_box'])
        align_pixel_size = (
            raster_info_list[raster_align_index]['pixel_size'])
        # adjust bounding box so lower left corner aligns with a pixel in
        # raster[raster_align_index]
        for index in [0, 1]:
            n_pixels = int(
                (target_bounding_box[index] - align_bounding_box[index]) /
                float(align_pixel_size[index]))
            target_bounding_box[index] = (
                n_pixels * align_pixel_size[index] +
                align_bounding_box[index])

    for index, (base_path, target_path, resample_method) in enumerate(zip(
            base_raster_path_list, target_raster_path_list,
            resample_method_list)):
        warp_raster(
            base_path, target_pixel_size,
            target_path, resample_method,
            target_bb=target_bounding_box,
            raster_driver_creation_tuple=(raster_driver_creation_tuple),
            target_projection_wkt=target_projection_wkt,
            base_projection_wkt=(
                None if not base_projection_wkt_list else
                base_projection_wkt_list[index]),
            vector_mask_options=vector_mask_options,
            gdal_warp_options=gdal_warp_options)
        LOGGER.info(
            '%d of %d aligned: %s', index+1, n_rasters,
            os.path.basename(target_path))

    LOGGER.info("aligned all %d rasters.", n_rasters)


def new_raster_from_base(
        base_path, target_path, datatype, band_nodata_list,
        fill_value_list=None, n_rows=None, n_cols=None,
        raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):
    """Create new raster by copying spatial reference/geotransform of base.

    A convenience function to simplify the creation of a new raster from the
    basis of an existing one. Depending on the input mode, one can create a
    new raster of the same dimensions, geotransform, and georeference as the
    base. Other options are provided to change the raster dimensions, number
    of bands, nodata values, data type, and core raster creation options.

    Args:
        base_path (string): path to existing raster.
        target_path (string): path to desired target raster.
        datatype: the pixel datatype of the output raster, for example
            gdal.GDT_Float32. See the following header file for supported
            pixel types:
            http://www.gdal.org/gdal_8h.html#22e22ce0a55036a96f652765793fb7a4
        band_nodata_list (sequence): list of nodata values, one for each
            band, to set on target raster. If value is 'None' the nodata
            value is not set for that band. The number of target bands is
            inferred from the length of this list.
        fill_value_list (sequence): list of values to fill each band with.
            If None, no filling is done.
        n_rows (int): if not None, defines the number of target raster rows.
        n_cols (int): if not None, defines the number of target raster
            columns.
        raster_driver_creation_tuple (tuple): a tuple containing a GDAL
            driver name string as the first element and a GDAL creation
            options tuple/list as the second. Defaults to a GTiff driver
            tuple defined at
            geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.
    Return:
        None

    """
    base_raster = gdal.OpenEx(base_path, gdal.OF_RASTER)
    if n_rows is None:
        n_rows = base_raster.RasterYSize
    if n_cols is None:
        n_cols = base_raster.RasterXSize
    driver = gdal.GetDriverByName(raster_driver_creation_tuple[0])

    local_raster_creation_options = list(raster_driver_creation_tuple[1])
    # PIXELTYPE is sometimes used to define signed vs. unsigned bytes and
    # the only place that is stored is in the IMAGE_STRUCTURE metadata
    # copy it over if it exists and it not already defined by the input
    # creation options. It's okay to get this info from the first band since
    # all bands have the same datatype
    base_band = base_raster.GetRasterBand(1)
    metadata = base_band.GetMetadata('IMAGE_STRUCTURE')
    if 'PIXELTYPE' in metadata and not any(
            ['PIXELTYPE' in option for option in
             local_raster_creation_options]):
        local_raster_creation_options.append(
            'PIXELTYPE=' + metadata['PIXELTYPE'])

    block_size = base_band.GetBlockSize()
    # It's not clear how or IF we can determine if the output should be
    # striped or tiled. Here we leave it up to the default inputs or if its
    # obviously not striped we tile.
    if not any(
            ['TILED' in option for option in local_raster_creation_options]):
        # TILED not set, so lets try to set it to a reasonable value
        if block_size[0] != n_cols:
            # if x block is not the width of the raster it *must* be tiled
            # otherwise okay if it's striped or tiled, I can't construct a
            # test case to cover this, but there is nothing in the spec that
            # restricts this so I have it just in case.
            local_raster_creation_options.append('TILED=YES')

    if not any(
            ['BLOCK' in option for option in local_raster_creation_options]):
        # not defined, so lets copy what we know from the current raster
        local_raster_creation_options.extend([
            'BLOCKXSIZE=%d' % block_size[0],
            'BLOCKYSIZE=%d' % block_size[1]])

    # make target directory if it doesn't exist
    try:
        os.makedirs(os.path.dirname(target_path))
    except OSError:
        # directory probably exists already; Create below will surface any
        # real filesystem problem
        pass

    base_band = None
    n_bands = len(band_nodata_list)
    target_raster = driver.Create(
        target_path, n_cols, n_rows, n_bands, datatype,
        options=local_raster_creation_options)
    target_raster.SetProjection(base_raster.GetProjection())
    target_raster.SetGeoTransform(base_raster.GetGeoTransform())
    base_raster = None

    for index, nodata_value in enumerate(band_nodata_list):
        if nodata_value is None:
            continue
        target_band = target_raster.GetRasterBand(index + 1)
        try:
            # numpy scalars carry ``.item()``; convert to a native Python
            # number for the GDAL binding
            target_band.SetNoDataValue(nodata_value.item())
        except AttributeError:
            target_band.SetNoDataValue(nodata_value)

    target_raster.FlushCache()
    last_time = time.time()
    pixels_processed = 0
    n_pixels = n_cols * n_rows
    if fill_value_list is not None:
        for index, fill_value in enumerate(fill_value_list):
            if fill_value is None:
                continue
            target_band = target_raster.GetRasterBand(index + 1)
            # some rasters are very large and a fill can appear to cause
            # computation to hang. This block, though possibly slightly less
            # efficient than ``band.Fill`` will give real-time feedback
            # about how the fill is progressing.
            for offsets in iterblocks((target_path, 1), offset_only=True):
                fill_array = numpy.empty(
                    (offsets['win_ysize'], offsets['win_xsize']))
                pixels_processed += (
                    offsets['win_ysize'] * offsets['win_xsize'])
                fill_array[:] = fill_value
                target_band.WriteArray(
                    fill_array, offsets['xoff'], offsets['yoff'])
                last_time = _invoke_timed_callback(
                    last_time,
                    lambda: LOGGER.info(
                        f'filling new raster {target_path} with {fill_value} '
                        f'-- {float(pixels_processed)/n_pixels*100.0:.2f}% '
                        f'complete'),
                    _LOGGING_PERIOD)
            target_band = None
    target_band = None
    target_raster = None


def create_raster_from_vector_extents(
        base_vector_path, target_raster_path, target_pixel_size,
        target_pixel_type, target_nodata, fill_value=None,
        raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):
    """Create a blank raster based on a vector file extent.

    Args:
        base_vector_path (string): path to vector shapefile to base the
            bounding box for the target raster.
        target_raster_path (string): path to location of generated geotiff;
            the upper left hand corner of this raster will be aligned with
            the bounding box of the source vector and the extent will be
            exactly equal or contained the source vector's bounding box
            depending on whether the pixel size divides evenly into the
            source bounding box; if not coordinates will be rounded up to
            contain the original extent.
        target_pixel_size (list/tuple): the x/y pixel size as a sequence
            Example::

                [30.0, -30.0]

        target_pixel_type (int): gdal GDT pixel type of target raster
        target_nodata (numeric): target nodata value. Can be None if no
            nodata value is needed.
        fill_value (int/float): value to fill in the target raster; no fill
            if value is None
        raster_driver_creation_tuple (tuple): a tuple containing a GDAL
            driver name string as the first element and a GDAL creation
            options tuple/list as the second. Defaults to a GTiff driver
            tuple defined at
            geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.
Return: None """ if target_pixel_type not in _VALID_GDAL_TYPES: raise ValueError( f'Invalid target type, should be a gdal.GDT_* type, received ' f'"{target_pixel_type}"') # Determine the width and height of the tiff in pixels based on the # maximum size of the combined envelope of all the features vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR) shp_extent = None for layer_index in range(vector.GetLayerCount()): layer = vector.GetLayer(layer_index) for feature in layer: try: # envelope is [xmin, xmax, ymin, ymax] feature_extent = feature.GetGeometryRef().GetEnvelope() if shp_extent is None: shp_extent = list(feature_extent) else: # expand bounds of current bounding box to include that # of the newest feature shp_extent = [ f(shp_extent[index], feature_extent[index]) for index, f in enumerate([min, max, min, max])] except AttributeError as error: # For some valid OGR objects the geometry can be undefined # since it's valid to have a NULL entry in the attribute table # this is expressed as a None value in the geometry reference # this feature won't contribute LOGGER.warning(error) layer = None if shp_extent is None: raise ValueError( f'the vector at {base_vector_path} has no geometry, cannot ' f'create a raster from these extents') # round up on the rows and cols so that the target raster encloses the # base vector n_cols = int(numpy.ceil( abs((shp_extent[1] - shp_extent[0]) / target_pixel_size[0]))) n_cols = max(1, n_cols) n_rows = int(numpy.ceil( abs((shp_extent[3] - shp_extent[2]) / target_pixel_size[1]))) n_rows = max(1, n_rows) driver = gdal.GetDriverByName(raster_driver_creation_tuple[0]) n_bands = 1 raster = driver.Create( target_raster_path, n_cols, n_rows, n_bands, target_pixel_type, options=raster_driver_creation_tuple[1]) raster.GetRasterBand(1).SetNoDataValue(target_nodata) # Set the transform based on the upper left corner and given pixel # dimensions if target_pixel_size[0] < 0: x_source = shp_extent[1] else: x_source = shp_extent[0] if 
target_pixel_size[1] < 0: y_source = shp_extent[3] else: y_source = shp_extent[2] raster_transform = [ x_source, target_pixel_size[0], 0.0, y_source, 0.0, target_pixel_size[1]] raster.SetGeoTransform(raster_transform) # Use the same projection on the raster as the shapefile raster.SetProjection(vector.GetLayer(0).GetSpatialRef().ExportToWkt()) # Initialize everything to nodata if fill_value is not None: band = raster.GetRasterBand(1) band.Fill(fill_value) band = None vector = None raster = None def interpolate_points( base_vector_path, vector_attribute_field, target_raster_path_band, interpolation_mode): """Interpolate point values onto an existing raster. Args: base_vector_path (string): path to a shapefile that contains point vector layers. vector_attribute_field (field): a string in the vector referenced at ``base_vector_path`` that refers to a numeric value in the vector's attribute table. This is the value that will be interpolated across the raster. target_raster_path_band (tuple): a path/band number tuple to an existing raster which likely intersects or is nearby the source vector. The band in this raster will take on the interpolated numerical values provided at each point. interpolation_mode (string): the interpolation method to use for scipy.interpolate.griddata, one of 'linear', near', or 'cubic'. 
    Return:
        None

    """
    source_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)
    point_list = []
    value_list = []
    for layer_index in range(source_vector.GetLayerCount()):
        layer = source_vector.GetLayer(layer_index)
        for point_feature in layer:
            value = point_feature.GetField(vector_attribute_field)
            # Add in the numpy notation which is row, col
            # Here the point geometry is in the form x, y (col, row)
            geometry = point_feature.GetGeometryRef()
            point = geometry.GetPoint()
            point_list.append([point[1], point[0]])
            value_list.append(value)

    point_array = numpy.array(point_list)
    value_array = numpy.array(value_list)

    # getting the offsets first before the raster is opened in update mode
    offset_list = list(
        iterblocks(target_raster_path_band, offset_only=True))
    target_raster = gdal.OpenEx(
        target_raster_path_band[0], gdal.OF_RASTER | gdal.GA_Update)
    band = target_raster.GetRasterBand(target_raster_path_band[1])
    nodata = band.GetNoDataValue()
    geotransform = target_raster.GetGeoTransform()
    for offset in offset_list:
        # build projected coordinates for every pixel center in this block
        grid_y, grid_x = numpy.mgrid[
            offset['yoff']:offset['yoff']+offset['win_ysize'],
            offset['xoff']:offset['xoff']+offset['win_xsize']]
        grid_y = grid_y * geotransform[5] + geotransform[3]
        grid_x = grid_x * geotransform[1] + geotransform[0]

        # this is to be consistent with GDAL 2.0's change of 'nearest' to
        # 'near' for an interpolation scheme that SciPy did not change.
        if interpolation_mode == 'near':
            interpolation_mode = 'nearest'
        raster_out_array = scipy.interpolate.griddata(
            point_array, value_array, (grid_y, grid_x), interpolation_mode,
            nodata)
        band.WriteArray(raster_out_array, offset['xoff'], offset['yoff'])


def zonal_statistics(
        base_raster_path_band, aggregate_vector_path,
        aggregate_layer_name=None, ignore_nodata=True,
        polygons_might_overlap=True, working_dir=None):
    """Collect stats on pixel values which lie within polygons.
This function summarizes raster statistics including min, max, mean, and pixel count over the regions on the raster that are overlapped by the polygons in the vector layer. Statistics are calculated in two passes, where first polygons aggregate over pixels in the raster whose centers intersect with the polygon. In the second pass, any polygons that are not aggregated use their bounding box to intersect with the raster for overlap statistics. Note: There may be some degenerate cases where the bounding box vs. actual geometry intersection would be incorrect, but these are so unlikely as to be manually constructed. If you encounter one of these please email the description and dataset to richsharp@stanford.edu. Args: base_raster_path_band (tuple): a str/int tuple indicating the path to the base raster and the band index of that raster to analyze. aggregate_vector_path (string): a path to a polygon vector whose geometric features indicate the areas in ``base_raster_path_band`` to calculate zonal statistics. aggregate_layer_name (string): name of shapefile layer that will be used to aggregate results over. If set to None, the first layer in the DataSource will be used as retrieved by ``.GetLayer()``. Note: it is normal and expected to set this field at None if the aggregating shapefile is a single layer as many shapefiles, including the common 'ESRI Shapefile', are. ignore_nodata: if true, then nodata pixels are not accounted for when calculating min, max, count, or mean. However, the value of ``nodata_count`` will always be the number of nodata pixels aggregated under the polygon. polygons_might_overlap (boolean): if True the function calculates aggregation coverage close to optimally by rasterizing sets of polygons that don't overlap. However, this step can be computationally expensive for cases where there are many polygons. this flag to False directs the function rasterize in one step. 
working_dir (string): If not None, indicates where temporary files should be created during this run. Return: nested dictionary indexed by aggregating feature id, and then by one of 'min' 'max' 'sum' 'count' and 'nodata_count'. Example:: {0: {'min': 0, 'max': 1, 'sum': 1.7, 'count': 3, 'nodata_count': 1 } } Raises: ValueError if ``base_raster_path_band`` is incorrectly formatted. RuntimeError if the aggregate vector or layer cannot open. """ if not _is_raster_path_band_formatted(base_raster_path_band): raise ValueError( "`base_raster_path_band` not formatted as expected. Expects " "(path, band_index), received %s" % repr(base_raster_path_band)) aggregate_vector = gdal.OpenEx(aggregate_vector_path, gdal.OF_VECTOR) if aggregate_vector is None: raise RuntimeError( "Could not open aggregate vector at %s" % aggregate_vector_path) if aggregate_layer_name is not None: aggregate_layer = aggregate_vector.GetLayerByName( aggregate_layer_name) else: aggregate_layer = aggregate_vector.GetLayer() if aggregate_layer is None: raise RuntimeError( "Could not open layer %s on %s" % ( aggregate_layer_name, aggregate_vector_path)) # create a new aggregate ID field to map base vector aggregate fields to # local ones that are guaranteed to be integers. 
local_aggregate_field_name = 'original_fid' rasterize_layer_args = { 'options': [ 'ALL_TOUCHED=FALSE', 'ATTRIBUTE=%s' % local_aggregate_field_name] } # clip base raster to aggregating vector intersection raster_info = get_raster_info(base_raster_path_band[0]) # -1 here because bands are 1 indexed raster_nodata = raster_info['nodata'][base_raster_path_band[1]-1] temp_working_dir = tempfile.mkdtemp(dir=working_dir) clipped_raster_path = os.path.join( temp_working_dir, 'clipped_raster.tif') try: align_and_resize_raster_stack( [base_raster_path_band[0]], [clipped_raster_path], ['near'], raster_info['pixel_size'], 'intersection', base_vector_path_list=[aggregate_vector_path], raster_align_index=0) clipped_raster = gdal.OpenEx(clipped_raster_path, gdal.OF_RASTER) clipped_band = clipped_raster.GetRasterBand(base_raster_path_band[1]) except ValueError as e: if 'Bounding boxes do not intersect' in repr(e): LOGGER.error( "aggregate vector %s does not intersect with the raster %s", aggregate_vector_path, base_raster_path_band) aggregate_stats = collections.defaultdict( lambda: { 'min': None, 'max': None, 'count': 0, 'nodata_count': 0, 'sum': 0.0}) for feature in aggregate_layer: _ = aggregate_stats[feature.GetFID()] return dict(aggregate_stats) else: # this would be very unexpected to get here, but if it happened # and we didn't raise an exception, execution could get weird. 
raise # make a shapefile that non-overlapping layers can be added to driver = ogr.GetDriverByName('MEMORY') disjoint_vector = driver.CreateDataSource('disjoint_vector') spat_ref = aggregate_layer.GetSpatialRef() # Initialize these dictionaries to have the shapefile fields in the # original datasource even if we don't pick up a value later LOGGER.info("build a lookup of aggregate field value to FID") aggregate_layer_fid_set = set( [agg_feat.GetFID() for agg_feat in aggregate_layer]) agg_feat = None # Loop over each polygon and aggregate if polygons_might_overlap: LOGGER.info("creating disjoint polygon set") disjoint_fid_sets = calculate_disjoint_polygon_set( aggregate_vector_path, bounding_box=raster_info['bounding_box']) else: disjoint_fid_sets = [aggregate_layer_fid_set] agg_fid_raster_path = os.path.join( temp_working_dir, 'agg_fid.tif') agg_fid_nodata = -1 new_raster_from_base( clipped_raster_path, agg_fid_raster_path, gdal.GDT_Int32, [agg_fid_nodata]) # fetch the block offsets before the raster is opened for writing agg_fid_offset_list = list( iterblocks((agg_fid_raster_path, 1), offset_only=True)) agg_fid_raster = gdal.OpenEx( agg_fid_raster_path, gdal.GA_Update | gdal.OF_RASTER) aggregate_stats = collections.defaultdict(lambda: { 'min': None, 'max': None, 'count': 0, 'nodata_count': 0, 'sum': 0.0}) last_time = time.time() LOGGER.info("processing %d disjoint polygon sets", len(disjoint_fid_sets)) for set_index, disjoint_fid_set in enumerate(disjoint_fid_sets): last_time = _invoke_timed_callback( last_time, lambda: LOGGER.info( "zonal stats approximately %.1f%% complete on %s", 100.0 * float(set_index+1) / len(disjoint_fid_sets), os.path.basename(aggregate_vector_path)), _LOGGING_PERIOD) disjoint_layer = disjoint_vector.CreateLayer( 'disjoint_vector', spat_ref, ogr.wkbPolygon) disjoint_layer.CreateField( ogr.FieldDefn(local_aggregate_field_name, ogr.OFTInteger)) disjoint_layer_defn = disjoint_layer.GetLayerDefn() # add polygons to subset_layer 
disjoint_layer.StartTransaction() for index, feature_fid in enumerate(disjoint_fid_set): last_time = _invoke_timed_callback( last_time, lambda: LOGGER.info( "polygon set %d of %d approximately %.1f%% processed " "on %s", set_index+1, len(disjoint_fid_sets), 100.0 * float(index+1) / len(disjoint_fid_set), os.path.basename(aggregate_vector_path)), _LOGGING_PERIOD) agg_feat = aggregate_layer.GetFeature(feature_fid) agg_geom_ref = agg_feat.GetGeometryRef() disjoint_feat = ogr.Feature(disjoint_layer_defn) disjoint_feat.SetGeometry(agg_geom_ref.Clone()) agg_geom_ref = None disjoint_feat.SetField( local_aggregate_field_name, feature_fid) disjoint_layer.CreateFeature(disjoint_feat) agg_feat = None disjoint_layer.CommitTransaction() LOGGER.info( "disjoint polygon set %d of %d 100.0%% processed on %s", set_index+1, len(disjoint_fid_sets), os.path.basename( aggregate_vector_path)) # nodata out the mask agg_fid_band = agg_fid_raster.GetRasterBand(1) agg_fid_band.Fill(agg_fid_nodata) LOGGER.info( "rasterizing disjoint polygon set %d of %d %s", set_index+1, len(disjoint_fid_sets), os.path.basename(aggregate_vector_path)) rasterize_callback = _make_logger_callback( "rasterizing polygon " + str(set_index+1) + " of " + str(len(disjoint_fid_set)) + " set %.1f%% complete %s") gdal.RasterizeLayer( agg_fid_raster, [1], disjoint_layer, callback=rasterize_callback, **rasterize_layer_args) agg_fid_raster.FlushCache() # Delete the features we just added to the subset_layer disjoint_layer = None disjoint_vector.DeleteLayer(0) # create a key array # and parallel min, max, count, and nodata count arrays LOGGER.info( "summarizing rasterized disjoint polygon set %d of %d %s", set_index+1, len(disjoint_fid_sets), os.path.basename(aggregate_vector_path)) for agg_fid_offset in agg_fid_offset_list: agg_fid_block = agg_fid_band.ReadAsArray(**agg_fid_offset) clipped_block = clipped_band.ReadAsArray(**agg_fid_offset) valid_mask = (agg_fid_block != agg_fid_nodata) valid_agg_fids = 
agg_fid_block[valid_mask] valid_clipped = clipped_block[valid_mask] for agg_fid in numpy.unique(valid_agg_fids): masked_clipped_block = valid_clipped[ valid_agg_fids == agg_fid] if raster_nodata is not None: clipped_nodata_mask = numpy.isclose( masked_clipped_block, raster_nodata) else: clipped_nodata_mask = numpy.zeros( masked_clipped_block.shape, dtype=bool) aggregate_stats[agg_fid]['nodata_count'] += ( numpy.count_nonzero(clipped_nodata_mask)) if ignore_nodata: masked_clipped_block = ( masked_clipped_block[~clipped_nodata_mask]) if masked_clipped_block.size == 0: continue if aggregate_stats[agg_fid]['min'] is None: aggregate_stats[agg_fid]['min'] = ( masked_clipped_block[0]) aggregate_stats[agg_fid]['max'] = ( masked_clipped_block[0]) aggregate_stats[agg_fid]['min'] = min( numpy.min(masked_clipped_block), aggregate_stats[agg_fid]['min']) aggregate_stats[agg_fid]['max'] = max( numpy.max(masked_clipped_block), aggregate_stats[agg_fid]['max']) aggregate_stats[agg_fid]['count'] += ( masked_clipped_block.size) aggregate_stats[agg_fid]['sum'] += numpy.sum( masked_clipped_block) unset_fids = aggregate_layer_fid_set.difference(aggregate_stats) LOGGER.debug( "unset_fids: %s of %s ", len(unset_fids), len(aggregate_layer_fid_set)) clipped_gt = numpy.array( clipped_raster.GetGeoTransform(), dtype=numpy.float32) LOGGER.debug("gt %s for %s", clipped_gt, base_raster_path_band) for unset_fid in unset_fids: unset_feat = aggregate_layer.GetFeature(unset_fid) unset_geom_ref = unset_feat.GetGeometryRef() if unset_geom_ref is None: LOGGER.warn( f'no geometry in {aggregate_vector_path} FID: {unset_fid}') continue unset_geom_envelope = list(unset_geom_ref.GetEnvelope()) unset_geom_ref = None unset_feat = None if clipped_gt[1] < 0: unset_geom_envelope[0], unset_geom_envelope[1] = ( unset_geom_envelope[1], unset_geom_envelope[0]) if clipped_gt[5] < 0: unset_geom_envelope[2], unset_geom_envelope[3] = ( unset_geom_envelope[3], unset_geom_envelope[2]) xoff = int((unset_geom_envelope[0] - 
clipped_gt[0]) / clipped_gt[1]) yoff = int((unset_geom_envelope[2] - clipped_gt[3]) / clipped_gt[5]) win_xsize = int(numpy.ceil( (unset_geom_envelope[1] - clipped_gt[0]) / clipped_gt[1])) - xoff win_ysize = int(numpy.ceil( (unset_geom_envelope[3] - clipped_gt[3]) / clipped_gt[5])) - yoff # clamp offset to the side of the raster if it's negative if xoff < 0: win_xsize += xoff xoff = 0 if yoff < 0: win_ysize += yoff yoff = 0 # clamp the window to the side of the raster if too big if xoff+win_xsize > clipped_band.XSize: win_xsize = clipped_band.XSize-xoff if yoff+win_ysize > clipped_band.YSize: win_ysize = clipped_band.YSize-yoff if win_xsize <= 0 or win_ysize <= 0: continue # here we consider the pixels that intersect with the geometry's # bounding box as being the proxy for the intersection with the # polygon itself. This is not a bad approximation since the case # that caused the polygon to be skipped in the first phase is that it # is as small as a pixel. There could be some degenerate cases that # make this estimation very wrong, but we do not know of any that # would come from natural data. If you do encounter such a dataset # please email the description and datset to richsharp@stanford.edu. 
        # read just the bounding-box window of the clipped raster; per the
        # comment above, this window is the proxy for the tiny geometry's
        # actual intersection
        unset_fid_block = clipped_band.ReadAsArray(
            xoff=xoff, yoff=yoff, win_xsize=win_xsize, win_ysize=win_ysize)

        if raster_nodata is not None:
            unset_fid_nodata_mask = numpy.isclose(
                unset_fid_block, raster_nodata)
        else:
            unset_fid_nodata_mask = numpy.zeros(
                unset_fid_block.shape, dtype=bool)

        valid_unset_fid_block = unset_fid_block[~unset_fid_nodata_mask]
        if valid_unset_fid_block.size == 0:
            # every pixel under the bounding box is nodata; report zeros
            aggregate_stats[unset_fid]['min'] = 0.0
            aggregate_stats[unset_fid]['max'] = 0.0
            aggregate_stats[unset_fid]['sum'] = 0.0
        else:
            aggregate_stats[unset_fid]['min'] = numpy.min(
                valid_unset_fid_block)
            aggregate_stats[unset_fid]['max'] = numpy.max(
                valid_unset_fid_block)
            aggregate_stats[unset_fid]['sum'] = numpy.sum(
                valid_unset_fid_block)
        aggregate_stats[unset_fid]['count'] = valid_unset_fid_block.size
        aggregate_stats[unset_fid]['nodata_count'] = numpy.count_nonzero(
            unset_fid_nodata_mask)

    unset_fids = aggregate_layer_fid_set.difference(aggregate_stats)
    LOGGER.debug(
        "remaining unset_fids: %s of %s ", len(unset_fids),
        len(aggregate_layer_fid_set))
    # fill in the missing polygon fids in the aggregate stats by invoking the
    # accessor in the defaultdict
    for fid in unset_fids:
        _ = aggregate_stats[fid]

    LOGGER.info(
        "all done processing polygon sets for %s", os.path.basename(
            aggregate_vector_path))

    # clean up temporary files; GDAL/OGR objects are explicitly dereferenced
    # in child-before-parent order so the underlying datasets are closed
    # before the working directory is removed
    spat_ref = None
    clipped_band = None
    clipped_raster = None
    agg_fid_raster = None
    disjoint_layer = None
    disjoint_vector = None
    aggregate_layer = None
    aggregate_vector = None

    shutil.rmtree(temp_working_dir)
    return dict(aggregate_stats)


def get_vector_info(vector_path, layer_id=0):
    """Get information about a GDAL vector.

    Args:
        vector_path (str): a path to a GDAL vector.
        layer_id (str/int): name or index of underlying layer to analyze.
            Defaults to 0.

    Raises:
        ValueError if ``vector_path`` does not exist on disk or cannot be
        opened as a gdal.OF_VECTOR.

    Return:
        vector_properties (dictionary): a dictionary with the following
            key-value pairs:

            * ``'projection_wkt'`` (string): projection of the vector in Well
              Known Text.
            * ``'bounding_box'`` (sequence): sequence of floats representing
              the bounding box in projected coordinates in the order
              [minx, miny, maxx, maxy].
            * ``'file_list'`` (sequence): sequence of string paths to the
              files that make up this vector.

    """
    vector = gdal.OpenEx(vector_path, gdal.OF_VECTOR)
    if not vector:
        raise ValueError(
            "Could not open %s as a gdal.OF_VECTOR" % vector_path)
    vector_properties = {}
    vector_properties['file_list'] = vector.GetFileList()
    layer = vector.GetLayer(iLayer=layer_id)
    # projection is same for all layers, so just use the first one
    spatial_ref = layer.GetSpatialRef()
    if spatial_ref:
        vector_projection_wkt = spatial_ref.ExportToWkt()
    else:
        vector_projection_wkt = None
    vector_properties['projection_wkt'] = vector_projection_wkt
    layer_bb = layer.GetExtent()
    # dereference OGR objects so the dataset is closed promptly
    layer = None
    vector = None
    # convert from [minx,maxx,miny,maxy] to [minx,miny,maxx,maxy]
    vector_properties['bounding_box'] = [layer_bb[i] for i in [0, 2, 1, 3]]
    return vector_properties


def get_raster_info(raster_path):
    """Get information about a GDAL raster (dataset).

    Args:
        raster_path (String): a path to a GDAL raster.

    Raises:
        ValueError if ``raster_path`` is not a file or cannot be opened as a
        ``gdal.OF_RASTER``.

    Return:
        raster_properties (dictionary): a dictionary with the properties
            stored under relevant keys.

            * ``'pixel_size'`` (tuple): (pixel x-size, pixel y-size) from
              geotransform.
            * ``'raster_size'`` (tuple): number of raster pixels in (x, y)
              direction.
            * ``'nodata'`` (sequence): a sequence of the nodata values in the
              bands of the raster in the same order as increasing band index.
            * ``'n_bands'`` (int): number of bands in the raster.
            * ``'geotransform'`` (tuple): a 6-tuple representing the
              geotransform of (x origin, x-increase, xy-increase, y origin,
              yx-increase, y-increase).
            * ``'datatype'`` (int): An instance of an enumerated gdal.GDT_*
              int that represents the datatype of the raster.
            * ``'projection_wkt'`` (string): projection of the raster in Well
              Known Text.
            * ``'bounding_box'`` (sequence): sequence of floats representing
              the bounding box in projected coordinates in the order
              [minx, miny, maxx, maxy]
            * ``'block_size'`` (tuple): underlying x/y raster block size for
              efficient reading.
            * ``'numpy_type'`` (numpy type): this is the equivalent numpy
              datatype for the raster bands including signed bytes.

    """
    raster = gdal.OpenEx(raster_path, gdal.OF_RASTER)
    if not raster:
        raise ValueError(
            "Could not open %s as a gdal.OF_RASTER" % raster_path)
    raster_properties = {}
    raster_properties['file_list'] = raster.GetFileList()
    projection_wkt = raster.GetProjection()
    if not projection_wkt:
        # GetProjection returns '' for an unprojected raster; normalize to
        # None to match get_vector_info's convention
        projection_wkt = None
    raster_properties['projection_wkt'] = projection_wkt
    geo_transform = raster.GetGeoTransform()
    raster_properties['geotransform'] = geo_transform
    raster_properties['pixel_size'] = (geo_transform[1], geo_transform[5])
    raster_properties['raster_size'] = (
        raster.GetRasterBand(1).XSize,
        raster.GetRasterBand(1).YSize)
    raster_properties['n_bands'] = raster.RasterCount
    raster_properties['nodata'] = [
        raster.GetRasterBand(index).GetNoDataValue() for index in range(
            1, raster_properties['n_bands']+1)]
    # blocksize is the same for all bands, so we can just get the first
    raster_properties['block_size'] = raster.GetRasterBand(1).GetBlockSize()

    # we don't really know how the geotransform is laid out, all we can do is
    # calculate the x and y bounds, then take the appropriate min/max
    x_bounds = [
        geo_transform[0],
        geo_transform[0] +
        raster_properties['raster_size'][0] * geo_transform[1] +
        raster_properties['raster_size'][1] * geo_transform[2]]
    y_bounds = [
        geo_transform[3],
        geo_transform[3] +
        raster_properties['raster_size'][0] * geo_transform[4] +
        raster_properties['raster_size'][1] * geo_transform[5]]

    raster_properties['bounding_box'] = [
        numpy.min(x_bounds), numpy.min(y_bounds),
        numpy.max(x_bounds), numpy.max(y_bounds)]

    # datatype is the same for the whole raster, but is associated with band
    band = raster.GetRasterBand(1)
    band_datatype = band.DataType
    raster_properties['datatype'] = band_datatype
    raster_properties['numpy_type'] = (
        _GDAL_TYPE_TO_NUMPY_LOOKUP[band_datatype])
    # this part checks to see if the byte is signed or not
    if band_datatype == gdal.GDT_Byte:
        metadata = band.GetMetadata('IMAGE_STRUCTURE')
        if 'PIXELTYPE' in metadata and metadata['PIXELTYPE'] == 'SIGNEDBYTE':
            raster_properties['numpy_type'] = numpy.int8
    band = None
    raster = None
    return raster_properties


def reproject_vector(
        base_vector_path, target_projection_wkt, target_path, layer_id=0,
        driver_name='ESRI Shapefile', copy_fields=True,
        osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY):
    """Reproject OGR DataSource (vector).

    Transforms the features of the base vector to the desired output
    projection in a new ESRI Shapefile.

    Args:
        base_vector_path (string): Path to the base shapefile to transform.
        target_projection_wkt (string): the desired output projection in Well
            Known Text (by layer.GetSpatialRef().ExportToWkt())
        target_path (string): the filepath to the transformed shapefile
        layer_id (str/int): name or index of layer in ``base_vector_path`` to
            reproject. Defaults to 0.
        driver_name (string): String to pass to ogr.GetDriverByName, defaults
            to 'ESRI Shapefile'.
        copy_fields (bool or iterable): If True, all the fields in
            ``base_vector_path`` will be copied to ``target_path`` during the
            reprojection step. If it is an iterable, it will contain the
            field names to exclusively copy. An unmatched fieldname will be
            ignored. If ``False`` no fields are copied into the new vector.
        osr_axis_mapping_strategy (int): OSR axis mapping strategy for
            ``SpatialReference`` objects. Defaults to
            ``geoprocessing.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. This
            parameter should not be changed unless you know what you are
            doing.
Return: None """ base_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR) # if this file already exists, then remove it if os.path.isfile(target_path): LOGGER.warning( "%s already exists, removing and overwriting", target_path) os.remove(target_path) target_sr = osr.SpatialReference(target_projection_wkt) # create a new shapefile from the orginal_datasource target_driver = ogr.GetDriverByName(driver_name) target_vector = target_driver.CreateDataSource(target_path) layer = base_vector.GetLayer(layer_id) layer_dfn = layer.GetLayerDefn() # Create new layer for target_vector using same name and # geometry type from base vector but new projection target_layer = target_vector.CreateLayer( layer_dfn.GetName(), target_sr, layer_dfn.GetGeomType()) # this will map the target field index to the base index it came from # in case we don't need to copy all the fields target_to_base_field_id_map = {} if copy_fields: # Get the number of fields in original_layer original_field_count = layer_dfn.GetFieldCount() # For every field that's copying, create a duplicate field in the # new layer for fld_index in range(original_field_count): original_field = layer_dfn.GetFieldDefn(fld_index) field_name = original_field.GetName() if copy_fields is True or field_name in copy_fields: target_field = ogr.FieldDefn( field_name, original_field.GetType()) target_layer.CreateField(target_field) target_to_base_field_id_map[fld_index] = len( target_to_base_field_id_map) # Get the SR of the original_layer to use in transforming base_sr = layer.GetSpatialRef() base_sr.SetAxisMappingStrategy(osr_axis_mapping_strategy) target_sr.SetAxisMappingStrategy(osr_axis_mapping_strategy) # Create a coordinate transformation coord_trans = osr.CreateCoordinateTransformation(base_sr, target_sr) # Copy all of the features in layer to the new shapefile target_layer.StartTransaction() error_count = 0 last_time = time.time() LOGGER.info("starting reprojection") for feature_index, base_feature in enumerate(layer): 
last_time = _invoke_timed_callback( last_time, lambda: LOGGER.info( "reprojection approximately %.1f%% complete on %s", 100.0 * float(feature_index+1) / (layer.GetFeatureCount()), os.path.basename(target_path)), _LOGGING_PERIOD) geom = base_feature.GetGeometryRef() if geom is None: # we encountered this error occasionally when transforming clipped # global polygons. Not clear what is happening but perhaps a # feature was retained that otherwise wouldn't have been included # in the clip error_count += 1 continue # Transform geometry into format desired for the new projection error_code = geom.Transform(coord_trans) if error_code != 0: # error # this could be caused by an out of range transformation # whatever the case, don't put the transformed poly into the # output set error_count += 1 continue # Copy original_datasource's feature and set as new shapes feature target_feature = ogr.Feature(target_layer.GetLayerDefn()) target_feature.SetGeometry(geom) # For all the fields in the feature set the field values from the # source field for target_index, base_index in ( target_to_base_field_id_map.items()): target_feature.SetField( target_index, base_feature.GetField(base_index)) target_layer.CreateFeature(target_feature) target_feature = None base_feature = None target_layer.CommitTransaction() LOGGER.info( "reprojection 100.0%% complete on %s", os.path.basename(target_path)) if error_count > 0: LOGGER.warning( '%d features out of %d were unable to be transformed and are' ' not in the output vector at %s', error_count, layer.GetFeatureCount(), target_path) layer = None base_vector = None def reclassify_raster( base_raster_path_band, value_map, target_raster_path, target_datatype, target_nodata, values_required=True, raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS): """Reclassify pixel values in a raster. A function to reclassify values in raster to any output type. By default the values except for nodata must be in ``value_map``. 
Args: base_raster_path_band (tuple): a tuple including file path to a raster and the band index to operate over. ex: (path, band_index) value_map (dictionary): a dictionary of values of {source_value: dest_value, ...} where source_value's type is the same as the values in ``base_raster_path`` at band ``band_index``. Must contain at least one value. target_raster_path (string): target raster output path; overwritten if it exists target_datatype (gdal type): the numerical type for the target raster target_nodata (numerical type): the nodata value for the target raster Must be the same type as target_datatype values_required (bool): If True, raise a ValueError if there is a value in the raster that is not found in ``value_map``. raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver name string as the first element and a GDAL creation options tuple/list as the second. Defaults to a GTiff driver tuple defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS. Return: None Raises: ReclassificationMissingValuesError if ``values_required`` is ``True`` and a pixel value from ``base_raster_path_band`` is not a key in ``value_map``. 
""" if len(value_map) == 0: raise ValueError("value_map must contain at least one value") if not _is_raster_path_band_formatted(base_raster_path_band): raise ValueError( "Expected a (path, band_id) tuple, instead got '%s'" % base_raster_path_band) raster_info = get_raster_info(base_raster_path_band[0]) nodata = raster_info['nodata'][base_raster_path_band[1]-1] value_map_copy = value_map.copy() # possible that nodata value is not defined, so test for None first # otherwise if nodata not predefined, remap it into the dictionary if nodata is not None and nodata not in value_map_copy: value_map_copy[nodata] = target_nodata keys = sorted(numpy.array(list(value_map_copy.keys()))) values = numpy.array([value_map_copy[x] for x in keys]) def _map_dataset_to_value_op(original_values): """Convert a block of original values to the lookup values.""" if values_required: unique = numpy.unique(original_values) has_map = numpy.in1d(unique, keys) if not all(has_map): missing_values = unique[~has_map] raise ReclassificationMissingValuesError( f'The following {missing_values.size} raster values' f' {missing_values} from "{base_raster_path_band[0]}"' ' do not have corresponding entries in the ``value_map``:' f' {value_map}.', missing_values) index = numpy.digitize(original_values.ravel(), keys, right=True) return values[index].reshape(original_values.shape) raster_calculator( [base_raster_path_band], _map_dataset_to_value_op, target_raster_path, target_datatype, target_nodata, raster_driver_creation_tuple=raster_driver_creation_tuple) def warp_raster( base_raster_path, target_pixel_size, target_raster_path, resample_method, target_bb=None, base_projection_wkt=None, target_projection_wkt=None, n_threads=None, vector_mask_options=None, gdal_warp_options=None, working_dir=None, raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS, osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY): """Resize/resample raster to desired pixel size, bbox and projection. 
    Args:
        base_raster_path (string): path to base raster.
        target_pixel_size (list/tuple): a two element sequence indicating
            the x and y pixel size in projected units.
        target_raster_path (string): the location of the resized and
            resampled raster.
        resample_method (string): the resampling technique, one of
            ``near|bilinear|cubic|cubicspline|lanczos|average|mode|max|min|med|q1|q3``
        target_bb (sequence): if None, target bounding box is the same as the
            source bounding box. Otherwise it's a sequence of float
            describing target bounding box in target coordinate system as
            [minx, miny, maxx, maxy].
        base_projection_wkt (string): if not None, interpret the projection
            of ``base_raster_path`` as this.
        target_projection_wkt (string): if not None, desired target
            projection in Well Known Text format.
        n_threads (int): optional, if not None this sets the ``N_THREADS``
            option for ``gdal.Warp``.
        vector_mask_options (dict): optional, if not None, this is a
            dictionary of options to use an existing vector's geometry to
            mask out pixels in the target raster that do not overlap the
            vector's geometry. Keys to this dictionary are:

            * ``'mask_vector_path'``: (str) path to the mask vector file.
              This vector will be automatically projected to the target
              projection if its base coordinate system does not match the
              target.
            * ``'mask_layer_id'``: (int/str) the layer index or name to use
              for masking, if this key is not in the dictionary the default
              is to use the layer at index 0.
            * ``'mask_vector_where_filter'``: (str) an SQL WHERE string that
              can be used to filter the geometry in the mask. Ex:
              'id > 10' would use all features whose field value of 'id' is
              > 10.

        gdal_warp_options (sequence): if present, the contents of this list
            are passed to the ``warpOptions`` parameter of ``gdal.Warp``. See
            the GDAL Warp documentation for valid options.
        working_dir (string): if defined uses this directory to make
            temporary working files for calculation. Otherwise uses system's
            temp directory.
        raster_driver_creation_tuple (tuple): a tuple containing a GDAL
            driver name string as the first element and a GDAL creation
            options tuple/list as the second. Defaults to a GTiff driver
            tuple defined at
            geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.
        osr_axis_mapping_strategy (int): OSR axis mapping strategy for
            ``SpatialReference`` objects. Defaults to
            ``geoprocessing.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. This
            parameter should not be changed unless you know what you are
            doing.

    Return:
        None

    Raises:
        ValueError if ``pixel_size`` is not a 2 element sequence of numbers.
        ValueError if ``vector_mask_options`` is not None but the
            ``mask_vector_path`` is undefined or doesn't point to a valid
            file.

    """
    _assert_is_valid_pixel_size(target_pixel_size)
    base_raster_info = get_raster_info(base_raster_path)
    if target_projection_wkt is None:
        target_projection_wkt = base_raster_info['projection_wkt']

    if target_bb is None:
        # ensure it's a sequence so we can modify it
        working_bb = list(get_raster_info(base_raster_path)['bounding_box'])
        # transform the working_bb if target_projection_wkt is not None
        if target_projection_wkt is not None:
            LOGGER.debug(
                "transforming bounding box from %s ", working_bb)
            working_bb = transform_bounding_box(
                base_raster_info['bounding_box'],
                base_raster_info['projection_wkt'], target_projection_wkt)
            LOGGER.debug(
                "transforming bounding to %s ", working_bb)
    else:
        # ensure it's a sequence so we can modify it
        working_bb = list(target_bb)

    # determine the raster size that bounds the input bounding box and then
    # adjust the bounding box to be that size
    target_x_size = int(abs(
        float(working_bb[2] - working_bb[0]) / target_pixel_size[0]))
    target_y_size = int(abs(
        float(working_bb[3] - working_bb[1]) / target_pixel_size[1]))

    # sometimes bounding boxes are numerically perfect, this checks for that
    x_residual = (
        abs(target_x_size * target_pixel_size[0]) -
        (working_bb[2] - working_bb[0]))
    if not numpy.isclose(x_residual, 0.0):
        # bbox width is not an exact multiple of pixel size; add one pixel
        # so the warp covers the full extent
        target_x_size += 1
    y_residual = (
        abs(target_y_size * target_pixel_size[1]) -
        (working_bb[3] - working_bb[1]))
    if not numpy.isclose(y_residual, 0.0):
        target_y_size += 1

    if target_x_size == 0:
        LOGGER.warning(
            "bounding_box is so small that x dimension rounds to 0; "
            "clamping to 1.")
        target_x_size = 1
    if target_y_size == 0:
        LOGGER.warning(
            "bounding_box is so small that y dimension rounds to 0; "
            "clamping to 1.")
        target_y_size = 1

    # this ensures the bounding boxes perfectly fit a multiple of the target
    # pixel size
    working_bb[2] = working_bb[0] + abs(target_pixel_size[0] * target_x_size)
    working_bb[3] = working_bb[1] + abs(target_pixel_size[1] * target_y_size)

    reproject_callback = _make_logger_callback(
        "Warp %.1f%% complete %s")

    warp_options = []
    if n_threads:
        warp_options.append('NUM_THREADS=%d' % n_threads)
    if gdal_warp_options:
        warp_options.extend(gdal_warp_options)

    mask_vector_path = None
    mask_layer_id = 0
    mask_vector_where_filter = None
    if vector_mask_options:
        # translate pygeoprocessing terminology into GDAL warp options.
        if 'mask_vector_path' not in vector_mask_options:
            raise ValueError(
                'vector_mask_options passed, but no value for '
                '"mask_vector_path": %s', vector_mask_options)
        mask_vector_path = vector_mask_options['mask_vector_path']
        if not os.path.exists(mask_vector_path):
            raise ValueError(
                'The mask vector at %s was not found.', mask_vector_path)
        if 'mask_layer_id' in vector_mask_options:
            mask_layer_id = vector_mask_options['mask_layer_id']
        if 'mask_vector_where_filter' in vector_mask_options:
            mask_vector_where_filter = (
                vector_mask_options['mask_vector_where_filter'])

    if vector_mask_options:
        # warp to a temporary file first; the mask step below produces the
        # final target
        temp_working_dir = tempfile.mkdtemp(dir=working_dir)
        warped_raster_path = os.path.join(
            temp_working_dir, os.path.basename(target_raster_path).replace(
                '.tif', '_nonmasked.tif'))
    else:
        # if there is no vector path the result is the warp
        warped_raster_path = target_raster_path

    base_raster = gdal.OpenEx(base_raster_path, gdal.OF_RASTER)
    raster_creation_options = list(raster_driver_creation_tuple[1])
    if (base_raster_info['numpy_type'] == numpy.int8 and
            'PIXELTYPE' not in ' '.join(raster_creation_options)):
        # signed byte rasters need the explicit PIXELTYPE creation option
        raster_creation_options.append('PIXELTYPE=SIGNEDBYTE')

    # WarpOptions.this is None when an invalid option is passed, and it's a
    # truthy SWIG proxy object when it's given a valid resample arg.
    if not gdal.WarpOptions(resampleAlg=resample_method)[0].this:
        raise ValueError(
            f'Invalid resample method: "{resample_method}"')

    gdal.Warp(
        warped_raster_path, base_raster,
        format=raster_driver_creation_tuple[0],
        outputBounds=working_bb,
        xRes=abs(target_pixel_size[0]),
        yRes=abs(target_pixel_size[1]),
        resampleAlg=resample_method,
        outputBoundsSRS=target_projection_wkt,
        srcSRS=base_projection_wkt,
        dstSRS=target_projection_wkt,
        multithread=True if warp_options else False,
        warpOptions=warp_options,
        creationOptions=raster_creation_options,
        callback=reproject_callback,
        callback_data=[target_raster_path])

    if vector_mask_options:
        # Make sure the raster creation options passed to ``mask_raster``
        # reflect any metadata updates
        updated_raster_driver_creation_tuple = (
            raster_driver_creation_tuple[0], tuple(raster_creation_options))
        # there was a cutline vector, so mask it out now, otherwise target
        # is already the result.
        mask_raster(
            (warped_raster_path, 1), vector_mask_options['mask_vector_path'],
            target_raster_path, mask_layer_id=mask_layer_id,
            where_clause=mask_vector_where_filter,
            target_mask_value=None, working_dir=temp_working_dir,
            all_touched=False,
            raster_driver_creation_tuple=(
                updated_raster_driver_creation_tuple))
        shutil.rmtree(temp_working_dir)


def rasterize(
        vector_path, target_raster_path, burn_values=None, option_list=None,
        layer_id=0, where_clause=None):
    """Project a vector onto an existing raster.

    Burn the layer at ``layer_id`` in ``vector_path`` to an existing
    raster at ``target_raster_path_band``.

    Args:
        vector_path (string): filepath to vector to rasterize.
        target_raster_path (string): path to an existing raster to burn
            vector into. Can have multiple bands.
        burn_values (list/tuple): optional sequence of values to burn into
            each band of the raster. If used, should have the same length as
            number of bands at the ``target_raster_path`` raster. If ``None``
            then ``option_list`` must have a valid value.
        option_list (list/tuple): optional a sequence of burn options, if
            None then a valid value for ``burn_values`` must exist. Otherwise,
            each element is a string of the form:

            * ``"ATTRIBUTE=?"``: Identifies an attribute field on the features
              to be used for a burn in value. The value will be burned into
              all output bands. If specified, ``burn_values`` will not be
              used and can be None.
            * ``"CHUNKYSIZE=?"``: The height in lines of the chunk to operate
              on. The larger the chunk size the less times we need to make a
              pass through all the shapes. If it is not set or set to zero
              the default chunk size will be used. Default size will be
              estimated based on the GDAL cache buffer size using formula:
              ``cache_size_bytes/scanline_size_bytes``, so the chunk will
              not exceed the cache.
            * ``"ALL_TOUCHED=TRUE/FALSE"``: May be set to ``TRUE`` to set all
              pixels touched by the line or polygons, not just those whose
              center is within the polygon or that are selected by
              Brezenhams line algorithm. Defaults to ``FALSE``.
            * ``"BURN_VALUE_FROM"``: May be set to "Z" to use the Z values of
              the geometries. The value from burn_values or the attribute
              field value is added to this before burning. In default case
              dfBurnValue is burned as it is (richpsharp: note, I'm not
              sure what this means, but copied from formal docs). This is
              implemented properly only for points and lines for now.
              Polygons will be burned using the Z value from the first
              point.
            * ``"MERGE_ALG=REPLACE/ADD"``: REPLACE results in overwriting of
              value, while ADD adds the new value to the existing raster,
              suitable for heatmaps for instance.

            Example::

                ["ATTRIBUTE=npv", "ALL_TOUCHED=TRUE"]

        layer_id (str/int): name or index of the layer to rasterize. Defaults
            to 0.
        where_clause (str): If not None, is an SQL query-like string to
            filter which features are used to rasterize, (e.x.
            where="value=1").

    Return:
        None

    """
    # suppress GDAL's own error report; a None result is handled just below
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    raster = gdal.OpenEx(target_raster_path, gdal.GA_Update | gdal.OF_RASTER)
    gdal.PopErrorHandler()
    if raster is None:
        raise ValueError(
            "%s doesn't exist, but needed to rasterize." % target_raster_path)

    rasterize_callback = _make_logger_callback(
        "RasterizeLayer %.1f%% complete %s")

    if burn_values is None:
        burn_values = []
    if option_list is None:
        option_list = []

    if not burn_values and not option_list:
        raise ValueError(
            "Neither `burn_values` nor `option_list` is set. At least "
            "one must have a value.")

    if not isinstance(burn_values, (list, tuple)):
        raise ValueError(
            "`burn_values` is not a list/tuple, the value passed is '%s'",
            repr(burn_values))

    if not isinstance(option_list, (list, tuple)):
        raise ValueError(
            "`option_list` is not a list/tuple, the value passed is '%s'",
            repr(option_list))

    vector = gdal.OpenEx(vector_path, gdal.OF_VECTOR)
    layer = vector.GetLayer(layer_id)
    if where_clause:
        layer.SetAttributeFilter(where_clause)

    try:
        result = gdal.RasterizeLayer(
            raster, [1], layer, burn_values=burn_values,
            options=option_list, callback=rasterize_callback)
    except Exception:
        # something bad happened, but still clean up
        # this case came out of a flaky test condition where the raster
        # would still be in use by the rasterize layer function
        LOGGER.exception('bad error on rasterizelayer')
        result = -1

    layer = None
    vector = None

    if result != 0:
        # need this __swig_destroy__ because we sometimes encounter a flaky
        # test where the path to the raster cannot be cleaned up because
        # it is still in use somewhere, likely a bug in gdal.RasterizeLayer
        # note it is only invoked if there is a serious error
        gdal.Dataset.__swig_destroy__(raster)
        raise RuntimeError('Rasterize returned a nonzero exit code.')

    raster = None


def calculate_disjoint_polygon_set(
        vector_path, layer_id=0, bounding_box=None):
    """Create a sequence of sets of polygons that don't overlap.
Determining the minimal number of those sets is an np-complete problem so this is an approximation that builds up sets of maximal subsets. Args: vector_path (string): a path to an OGR vector. layer_id (str/int): name or index of underlying layer in ``vector_path`` to calculate disjoint set. Defaults to 0. bounding_box (sequence): sequence of floats representing a bounding box to filter any polygons by. If a feature in ``vector_path`` does not intersect this bounding box it will not be considered in the disjoint calculation. Coordinates are in the order [minx, miny, maxx, maxy]. Return: subset_list (sequence): sequence of sets of FIDs from vector_path """ vector = gdal.OpenEx(vector_path, gdal.OF_VECTOR) vector_layer = vector.GetLayer(layer_id) feature_count = vector_layer.GetFeatureCount() if feature_count == 0: raise RuntimeError('Vector must have geometries but does not: %s' % vector_path) last_time = time.time() LOGGER.info("build shapely polygon list") if bounding_box is None: bounding_box = get_vector_info(vector_path)['bounding_box'] bounding_box = shapely.prepared.prep(shapely.geometry.box(*bounding_box)) # As much as I want this to be in a comprehension, a comprehension version # of this loop causes python 3.6 to crash on linux in GDAL 2.1.2 (which is # what's in the debian:stretch repos.) 
shapely_polygon_lookup = {} for poly_feat in vector_layer: poly_geom_ref = poly_feat.GetGeometryRef() if poly_geom_ref is None: LOGGER.warn( f'no geometry in {vector_path} FID: {poly_feat.GetFID()}, ' 'skipping...') continue # with GDAL>=3.3.0 ExportToWkb returns a bytearray instead of bytes shapely_polygon_lookup[poly_feat.GetFID()] = ( shapely.wkb.loads(bytes(poly_geom_ref.ExportToWkb()))) poly_geom_ref = None poly_feat = None LOGGER.info("build shapely rtree index") r_tree_index_stream = [ (poly_fid, poly.bounds, None) for poly_fid, poly in shapely_polygon_lookup.items() if bounding_box.intersects(poly)] if r_tree_index_stream: poly_rtree_index = rtree.index.Index(r_tree_index_stream) else: LOGGER.warning("no polygons intersected the bounding box") return [] vector_layer = None vector = None LOGGER.info( 'poly feature lookup 100.0%% complete on %s', os.path.basename(vector_path)) LOGGER.info('build poly intersection lookup') poly_intersect_lookup = collections.defaultdict(set) for poly_index, (poly_fid, poly_geom) in enumerate( shapely_polygon_lookup.items()): last_time = _invoke_timed_callback( last_time, lambda: LOGGER.info( "poly intersection lookup approximately %.1f%% complete " "on %s", 100.0 * float(poly_index+1) / len( shapely_polygon_lookup), os.path.basename(vector_path)), _LOGGING_PERIOD) possible_intersection_set = list(poly_rtree_index.intersection( poly_geom.bounds)) # no reason to prep the polygon to intersect itself if len(possible_intersection_set) > 1: polygon = shapely.prepared.prep(poly_geom) else: polygon = poly_geom for intersect_poly_fid in possible_intersection_set: if intersect_poly_fid == poly_fid or polygon.intersects( shapely_polygon_lookup[intersect_poly_fid]): poly_intersect_lookup[poly_fid].add(intersect_poly_fid) polygon = None LOGGER.info( 'poly intersection feature lookup 100.0%% complete on %s', os.path.basename(vector_path)) # Build maximal subsets subset_list = [] while len(poly_intersect_lookup) > 0: # sort polygons by 
increasing number of intersections intersections_list = [ (len(poly_intersect_set), poly_fid, poly_intersect_set) for poly_fid, poly_intersect_set in poly_intersect_lookup.items()] intersections_list.sort() # build maximal subset maximal_set = set() for _, poly_fid, poly_intersect_set in intersections_list: last_time = _invoke_timed_callback( last_time, lambda: LOGGER.info( "maximal subset build approximately %.1f%% complete " "on %s", 100.0 * float( feature_count - len(poly_intersect_lookup)) / feature_count, os.path.basename(vector_path)), _LOGGING_PERIOD) if not poly_intersect_set.intersection(maximal_set): # no intersection, add poly_fid to the maximal set and remove # the polygon from the lookup maximal_set.add(poly_fid) del poly_intersect_lookup[poly_fid] # remove all the polygons from intersections once they're computed for poly_fid, poly_intersect_set in poly_intersect_lookup.items(): poly_intersect_lookup[poly_fid] = ( poly_intersect_set.difference(maximal_set)) subset_list.append(maximal_set) LOGGER.info( 'maximal subset build 100.0%% complete on %s', os.path.basename(vector_path)) return subset_list def distance_transform_edt( base_region_raster_path_band, target_distance_raster_path, sampling_distance=(1., 1.), working_dir=None, raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS): """Calculate the euclidean distance transform on base raster. Calculates the euclidean distance transform on the base raster in units of pixels multiplied by an optional scalar constant. The implementation is based off the algorithm described in: Meijster, Arnold, Jos BTM Roerdink, and Wim H. Hesselink. "A general algorithm for computing distance transforms in linear time." Mathematical Morphology and its applications to image and signal processing. Springer, Boston, MA, 2002. 331-340. The base mask raster represents the area to distance transform from as any pixel that is not 0 or nodata. 
def distance_transform_edt(
        base_region_raster_path_band, target_distance_raster_path,
        sampling_distance=(1., 1.), working_dir=None,
        raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):
    """Calculate the euclidean distance transform on base raster.

    Calculates the euclidean distance transform on the base raster in units
    of pixels multiplied by an optional scalar constant. The implementation
    is based off the algorithm described in: Meijster, Arnold, Jos BTM
    Roerdink, and Wim H. Hesselink. "A general algorithm for computing
    distance transforms in linear time." Mathematical Morphology and its
    applications to image and signal processing. Springer, Boston, MA, 2002.
    331-340.

    The base mask raster represents the area to distance transform from as
    any pixel that is not 0 or nodata. It is computationally convenient to
    calculate the distance transform on the entire raster irrespective of
    nodata placement and thus produces a raster that will have distance
    transform values even in pixels that are nodata in the base.

    Args:
        base_region_raster_path_band (tuple): a tuple including file path to
            a raster and the band index to define the base region pixels.
            Any pixel that is not 0 and nodata are considered to be part of
            the region.
        target_distance_raster_path (string): path to the target raster that
            is the exact euclidean distance transform from any pixel in the
            base raster that is not nodata and not 0. The units are in
            ``(pixel distance * sampling_distance)``.
        sampling_distance (tuple/list): an optional parameter used to scale
            the pixel distances when calculating the distance transform.
            Defaults to (1.0, 1.0). First element indicates the distance
            traveled in the x direction when changing a column index, and
            the second element in y when changing a row index. Both values
            must be > 0.
        working_dir (string): If not None, indicates where temporary files
            should be created during this run.
        raster_driver_creation_tuple (tuple): a tuple containing a GDAL
            driver name string as the first element and a GDAL creation
            options tuple/list as the second. Defaults to a GTiff driver
            tuple defined at
            geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.

    Return:
        None

    Raises:
        ValueError: if ``sampling_distance`` is not a tuple/list or either
            of its elements is <= 0.

    """
    # Validate inputs *before* creating any temporary files so a bad
    # ``sampling_distance`` cannot leak temp rasters on disk.
    if not isinstance(sampling_distance, (tuple, list)):
        raise ValueError(
            "`sampling_distance` should be a tuple/list, instead it's %s" % (
                type(sampling_distance)))
    sample_d_x, sample_d_y = sampling_distance
    if sample_d_x <= 0. or sample_d_y <= 0.:
        # note: use % formatting so the offending value actually appears in
        # the exception message (previously it was passed as a second
        # exception argument and never interpolated)
        raise ValueError(
            "Sample distances must be > 0.0, instead got %s" % (
                sampling_distance,))

    # two intermediate rasters: a 0/1 region mask and the "g" scratch raster
    # consumed by the core distance transform routine
    working_raster_paths = {}
    for raster_prefix in ['region_mask_raster', 'g_raster']:
        with tempfile.NamedTemporaryFile(
                prefix=raster_prefix, suffix='.tif', delete=False,
                dir=working_dir) as tmp_file:
            working_raster_paths[raster_prefix] = tmp_file.name
    nodata = (get_raster_info(base_region_raster_path_band[0])['nodata'])[
        base_region_raster_path_band[1]-1]
    nodata_out = 255

    def mask_op(base_array):
        """Convert base_array to 1 if not 0 and nodata, 0 otherwise."""
        if nodata is not None:
            return ~numpy.isclose(base_array, nodata) & (base_array != 0)
        else:
            return base_array != 0

    # rasterize the region mask, then run the linear-time distance transform
    raster_calculator(
        [base_region_raster_path_band], mask_op,
        working_raster_paths['region_mask_raster'], gdal.GDT_Byte,
        nodata_out, calc_raster_stats=False,
        raster_driver_creation_tuple=raster_driver_creation_tuple)
    geoprocessing_core._distance_transform_edt(
        working_raster_paths['region_mask_raster'],
        working_raster_paths['g_raster'], sampling_distance[0],
        sampling_distance[1], target_distance_raster_path,
        raster_driver_creation_tuple)

    # best-effort cleanup of the temporary rasters
    for path in working_raster_paths.values():
        try:
            os.remove(path)
        except OSError:
            LOGGER.warning("couldn't remove file %s", path)
This source was taken directly from scipy.signaltools and saves us from having to access a protected member in a library that could change in future releases: https://github.com/scipy/scipy/blob/v0.17.1/scipy/signal/signaltools.py#L211 Args: base (int): a positive integer to start to find the next Hamming number. Return: The next regular number greater than or equal to ``base``. """ if base <= 6: return base # Quickly check if it's already a power of 2 if not (base & (base-1)): return base match = float('inf') # Anything found will be smaller p5 = 1 while p5 < base: p35 = p5 while p35 < base: # Ceiling integer division, avoiding conversion to float # (quotient = ceil(base / p35)) quotient = -(-base // p35) # Quickly find next power of 2 >= quotient p2 = 2**((quotient - 1).bit_length()) N = p2 * p35 if N == base: return N elif N < match: match = N p35 *= 3 if p35 == base: return p35 if p35 < match: match = p35 p5 *= 5 if p5 == base: return p5 if p5 < match: match = p5 return match def convolve_2d( signal_path_band, kernel_path_band, target_path, ignore_nodata_and_edges=False, mask_nodata=True, normalize_kernel=False, target_datatype=gdal.GDT_Float64, target_nodata=None, working_dir=None, set_tol_to_zero=1e-8, max_timeout=_MAX_TIMEOUT, raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS): """Convolve 2D kernel over 2D signal. Convolves the raster in ``kernel_path_band`` over ``signal_path_band``. Nodata values are treated as 0.0 during the convolution and masked to nodata for the output result where ``signal_path`` has nodata. Note with default values, boundary effects can be seen in the result where the kernel would hang off the edge of the raster or in regions with nodata pixels. The function would treat these areas as values with "0.0" by default thus pulling the total convolution down in these areas. 
def convolve_2d(
        signal_path_band, kernel_path_band, target_path,
        ignore_nodata_and_edges=False, mask_nodata=True,
        normalize_kernel=False, target_datatype=gdal.GDT_Float64,
        target_nodata=None, working_dir=None, set_tol_to_zero=1e-8,
        max_timeout=_MAX_TIMEOUT,
        raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):
    """Convolve 2D kernel over 2D signal.

    Convolves the raster in ``kernel_path_band`` over ``signal_path_band``.
    Nodata values are treated as 0.0 during the convolution and masked to
    nodata for the output result where ``signal_path`` has nodata.

    Note with default values, boundary effects can be seen in the result
    where the kernel would hang off the edge of the raster or in regions
    with nodata pixels. The function would treat these areas as values with
    "0.0" by default thus pulling the total convolution down in these areas.
    This is similar to setting ``mode='same'`` in Numpy's ``convolve``
    function:
    https://numpy.org/doc/stable/reference/generated/numpy.convolve.html

    This boundary effect can be avoided by setting
    ``ignore_nodata_and_edges=True`` which normalizes the target result by
    dynamically accounting for the number of valid signal pixels the kernel
    overlapped during the convolution step.

    Args:
        signal_path_band (tuple): a 2 tuple of the form
            (filepath to signal raster, band index).
        kernel_path_band (tuple): a 2 tuple of the form
            (filepath to kernel raster, band index), all pixel values should
            be valid -- output is not well defined if the kernel raster has
            nodata values.
        target_path (string): filepath to target raster that's the
            convolution of signal with kernel. Output will be a single band
            raster of same size and projection as ``signal_path_band``. Any
            nodata pixels that align with ``signal_path_band`` will be set
            to nodata.
        ignore_nodata_and_edges (boolean): If true, any pixels that are
            equal to ``signal_path_band``'s nodata value or signal pixels
            where the kernel extends beyond the edge of the raster are not
            included when averaging the convolution filter. This has the
            effect of "spreading" the result as though nodata and edges
            beyond the bounds of the raster are 0s. If set to false this
            tends to "pull" the signal away from nodata holes or raster
            edges. Set this value to ``True`` to avoid distortions signal
            values near edges for large integrating kernels.
            It can be useful to set this value to ``True`` to fill nodata
            holes through distance weighted averaging. In this case
            ``mask_nodata`` must be set to ``False`` so the result does not
            mask out these areas which are filled in. When using this
            technique be careful of cases where the kernel does not extend
            over any areas except nodata holes, in this case the resulting
            values in these areas will be nonsensical numbers, perhaps
            numerical infinity or NaNs.
        normalize_kernel (boolean): If true, the result is divided by the
            sum of the kernel.
        mask_nodata (boolean): If true, ``target_path`` raster's output is
            nodata where ``signal_path_band``'s pixels were nodata. Note
            that setting ``ignore_nodata_and_edges`` to ``True`` while
            setting ``mask_nodata`` to ``False`` can allow for a technique
            involving distance weighted averaging to define areas that
            would otherwise be nodata. Be careful in cases where the kernel
            does not extend over any valid non-nodata area since the result
            can be numerical infinity or NaNs.
        target_datatype (GDAL type): a GDAL raster type to set the output
            raster type to, as well as the type to calculate the convolution
            in. Defaults to GDT_Float64. Note signed byte is not supported.
        target_nodata (int/float): nodata value to set on output raster.
            If ``target_datatype`` is not gdal.GDT_Float64, this value must
            be set. Otherwise defaults to the minimum value of a float32.
        working_dir (string): If not None, indicates where temporary files
            should be created during this run.
        set_tol_to_zero (float): any value within +- this from 0.0 will get
            set to 0.0. This is to handle numerical roundoff errors that
            sometimes result in "numerical zero", such as -1.782e-18 that
            cannot be tolerated by users of this function. If `None` no
            adjustment will be done to output values.
        max_timeout (float): maximum amount of time to wait for worker
            thread to terminate.
        raster_driver_creation_tuple (tuple): a tuple containing a GDAL
            driver name string as the first element and a GDAL creation
            options tuple/list as the second. Defaults to a GTiff driver
            tuple defined at
            geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.

    Return:
        ``None``

    Raises:
        ValueError: if ``target_datatype`` is not ``gdal.GDT_Float64`` and
            ``target_nodata`` is not set.
        ValueError: if ``signal_path_band`` or ``kernel_path_band`` is not
            a (path, band) tuple/list.
        ValueError: if ``signal_path_band`` or ``kernel_path_band`` is a row
            based blocksize which would result in slow runtimes due to gdal
            cache thrashing.

    """
    # a non-float64 output type cannot represent the float32-min default
    # nodata, so the caller must pick one explicitly
    if target_datatype is not gdal.GDT_Float64 and target_nodata is None:
        raise ValueError(
            "`target_datatype` is set, but `target_nodata` is None. "
            "`target_nodata` must be set if `target_datatype` is not "
            "`gdal.GDT_Float64`. `target_nodata` is set to None.")

    if target_nodata is None:
        target_nodata = float(numpy.finfo(numpy.float32).min)

    # this combination is allowed (used for nodata hole-filling) but can
    # produce nonsense where only nodata was under the kernel -- warn only
    if ignore_nodata_and_edges and not mask_nodata:
        LOGGER.debug(
            'ignore_nodata_and_edges is True while mask_nodata is False -- '
            'this can yield a nonsensical result in areas where the kernel '
            'touches only nodata values.')

    bad_raster_path_list = []
    for raster_id, raster_path_band in [
            ('signal', signal_path_band), ('kernel', kernel_path_band)]:
        if (not _is_raster_path_band_formatted(raster_path_band)):
            bad_raster_path_list.append((raster_id, raster_path_band))
    if bad_raster_path_list:
        raise ValueError(
            "Expected raster path band sequences for the following arguments "
            f"but instead got: {bad_raster_path_list}")

    signal_raster_info = get_raster_info(signal_path_band[0])
    kernel_raster_info = get_raster_info(kernel_path_band[0])

    # a 1-wide/1-tall block layout causes severe GDAL cache thrashing here
    for info_dict in [signal_raster_info, kernel_raster_info]:
        if 1 in info_dict['block_size']:
            raise ValueError(
                f'{signal_path_band} has a row blocksize which can make this '
                f'function run very slow, create a square blocksize using '
                f'`warp_raster` or `align_and_resize_raster_stack` which '
                f'creates square blocksizes by default')

    # The nodata value is reset to a different value at the end of this
    # function. Here 0 is chosen as a default value since data are
    # incrementally added to the raster
    new_raster_from_base(
        signal_path_band[0], target_path, target_datatype, [0],
        raster_driver_creation_tuple=raster_driver_creation_tuple)

    n_cols_signal, n_rows_signal = signal_raster_info['raster_size']
    n_cols_kernel, n_rows_kernel = kernel_raster_info['raster_size']
    s_path_band = signal_path_band
    k_path_band = kernel_path_band
    s_nodata = signal_raster_info['nodata'][0]

    # we need the original signal raster info because we want the output to
    # be clipped and NODATA masked to it
    signal_raster = gdal.OpenEx(signal_path_band[0], gdal.OF_RASTER)
    signal_band = signal_raster.GetRasterBand(signal_path_band[1])

    # getting the offset list before it's opened for updating
    target_offset_list = list(iterblocks((target_path, 1), offset_only=True))
    target_raster = gdal.OpenEx(target_path, gdal.OF_RASTER | gdal.GA_Update)
    target_band = target_raster.GetRasterBand(1)

    # if we're ignoring nodata, we need to make a parallel convolved signal
    # of the nodata mask (counts valid-pixel coverage for later division)
    if ignore_nodata_and_edges:
        raster_file, mask_raster_path = tempfile.mkstemp(
            suffix='.tif', prefix='convolved_mask',
            dir=os.path.dirname(target_path))
        os.close(raster_file)
        new_raster_from_base(
            signal_path_band[0], mask_raster_path, gdal.GDT_Float64, [0.0],
            raster_driver_creation_tuple=raster_driver_creation_tuple)
        mask_raster = gdal.OpenEx(
            mask_raster_path, gdal.GA_Update | gdal.OF_RASTER)
        mask_band = mask_raster.GetRasterBand(1)

    LOGGER.info('starting convolve')
    last_time = time.time()

    # calculate the kernel sum for normalization
    kernel_nodata = kernel_raster_info['nodata'][0]
    kernel_sum = 0.0
    for _, kernel_block in iterblocks(kernel_path_band):
        if kernel_nodata is not None and ignore_nodata_and_edges:
            kernel_block[numpy.isclose(kernel_block, kernel_nodata)] = 0.0
        kernel_sum += numpy.sum(kernel_block)

    # limit the size of the work queue since a large kernel / signal with
    # small block size can have a large memory impact when queuing offset
    # lists.
    work_queue = queue.Queue(10)
    signal_offset_list = list(iterblocks(s_path_band, offset_only=True))
    kernel_offset_list = list(iterblocks(k_path_band, offset_only=True))
    n_blocks = len(signal_offset_list) * len(kernel_offset_list)

    LOGGER.debug('start fill work queue thread')

    def _fill_work_queue():
        """Asynchronously fill the work queue."""
        LOGGER.debug('fill work queue')
        for signal_offset in signal_offset_list:
            for kernel_offset in kernel_offset_list:
                work_queue.put((signal_offset, kernel_offset))
        # ``None`` is the sentinel that tells the worker to stop
        work_queue.put(None)
        LOGGER.debug('work queue full')

    fill_work_queue_worker = threading.Thread(
        target=_fill_work_queue)
    fill_work_queue_worker.daemon = True
    fill_work_queue_worker.start()

    # limit the size of the write queue so we don't accidentally load a
    # whole array into memory
    LOGGER.debug('start worker thread')
    write_queue = queue.Queue(10)
    worker = threading.Thread(
        target=_convolve_2d_worker,
        args=(
            signal_path_band, kernel_path_band,
            ignore_nodata_and_edges, normalize_kernel, set_tol_to_zero,
            work_queue, write_queue))
    worker.daemon = True
    worker.start()

    n_blocks_processed = 0
    LOGGER.info(f'{n_blocks} sent to workers, wait for worker results')
    while True:
        # the timeout guards against a worst case scenario where the
        # ``_convolve_2d_worker`` has crashed.
        # NOTE(review): this uses the module-level _MAX_TIMEOUT rather than
        # the ``max_timeout`` parameter -- confirm that is intentional
        write_payload = write_queue.get(timeout=_MAX_TIMEOUT)
        if write_payload:
            (index_dict, result, mask_result,
             left_index_raster, right_index_raster,
             top_index_raster, bottom_index_raster,
             left_index_result, right_index_result,
             top_index_result, bottom_index_result) = write_payload
        else:
            # falsy payload is the worker's "done" sentinel
            worker.join(max_timeout)
            break

        output_array = numpy.empty(
            (index_dict['win_ysize'], index_dict['win_xsize']),
            dtype=numpy.float32)

        # the inital data value in target_band is 0 because that is the
        # temporary nodata selected so that manual resetting of initial
        # data values weren't necessary. at the end of this function the
        # target nodata value is set to `target_nodata`.
        current_output = target_band.ReadAsArray(**index_dict)

        # read the signal block so we know where the nodata are
        potential_nodata_signal_array = signal_band.ReadAsArray(**index_dict)
        valid_mask = numpy.ones(
            potential_nodata_signal_array.shape, dtype=bool)

        # guard against a None nodata value
        if s_nodata is not None and mask_nodata:
            valid_mask[:] = (
                ~numpy.isclose(potential_nodata_signal_array, s_nodata))
        output_array[:] = target_nodata
        # accumulate this partial convolution onto what is already written
        output_array[valid_mask] = (
            (result[top_index_result:bottom_index_result,
                    left_index_result:right_index_result])[valid_mask] +
            current_output[valid_mask])

        target_band.WriteArray(
            output_array, xoff=index_dict['xoff'],
            yoff=index_dict['yoff'])

        if ignore_nodata_and_edges:
            # we'll need to save off the mask convolution so we can divide
            # it in total later
            current_mask = mask_band.ReadAsArray(**index_dict)
            output_array[valid_mask] = (
                (mask_result[
                    top_index_result:bottom_index_result,
                    left_index_result:right_index_result])[valid_mask] +
                current_mask[valid_mask])
            mask_band.WriteArray(
                output_array, xoff=index_dict['xoff'],
                yoff=index_dict['yoff'])
        n_blocks_processed += 1
        last_time = _invoke_timed_callback(
            last_time, lambda: LOGGER.info(
                "convolution worker approximately %.1f%% complete on %s",
                100.0 * float(n_blocks_processed) / (n_blocks),
                os.path.basename(target_path)), _LOGGING_PERIOD)

    LOGGER.info(
        f"convolution worker 100.0% complete on "
        f"{os.path.basename(target_path)}")

    target_band.FlushCache()
    if ignore_nodata_and_edges:
        signal_nodata = get_raster_info(signal_path_band[0])['nodata'][
            signal_path_band[1]-1]
        LOGGER.info(
            "need to normalize result so nodata values are not included")
        mask_pixels_processed = 0
        mask_band.FlushCache()
        for target_offset_data in target_offset_list:
            target_block = target_band.ReadAsArray(
                **target_offset_data).astype(numpy.float64)
            signal_block = signal_band.ReadAsArray(**target_offset_data)
            mask_block = mask_band.ReadAsArray(**target_offset_data)
            if mask_nodata and signal_nodata is not None:
                valid_mask = ~numpy.isclose(signal_block, signal_nodata)
            else:
                valid_mask = numpy.ones(target_block.shape, dtype=bool)
            # only normalize pixels that had at least some valid coverage
            valid_mask &= (mask_block > 0)
            # divide the target_band by the mask_band
            target_block[valid_mask] /= mask_block[valid_mask].astype(
                numpy.float64)

            # scale by kernel sum if necessary since mask division will
            # automatically normalize kernel
            if not normalize_kernel:
                target_block[valid_mask] *= kernel_sum

            target_band.WriteArray(
                target_block, xoff=target_offset_data['xoff'],
                yoff=target_offset_data['yoff'])
            mask_pixels_processed += target_block.size
            last_time = _invoke_timed_callback(
                last_time, lambda: LOGGER.info(
                    "convolution nodata normalizer approximately %.1f%% "
                    "complete on %s", 100.0 * float(mask_pixels_processed) / (
                        n_cols_signal * n_rows_signal),
                    os.path.basename(target_path)), _LOGGING_PERIOD)
        mask_raster = None
        mask_band = None
        os.remove(mask_raster_path)
        LOGGER.info(
            f"convolution nodata normalize 100.0% complete on "
            f"{os.path.basename(target_path)}")

    # set the nodata value from 0 to a reasonable value for the result
    target_band.SetNoDataValue(target_nodata)
    target_band = None
    target_raster = None
def iterblocks(
        raster_path_band, largest_block=_LARGEST_ITERBLOCK,
        offset_only=False):
    """Iterate across all the memory blocks in the input raster.

    Result is a generator of block location information and numpy arrays.
    Useful when a single value needs to be derived from the pixel values in
    a raster, such as the sum total of all pixel values, or a sequence of
    unique raster values. In such cases, ``raster_local_op`` is overkill,
    since it writes out a raster.

    As a generator, this can be combined multiple times with
    itertools.izip() to iterate 'simultaneously' over multiple rasters,
    though the user should be careful to do so only with prealigned rasters.

    Args:
        raster_path_band (tuple): a path/band index tuple to indicate which
            raster band iterblocks should iterate over.
        largest_block (int): Attempts to iterate over raster blocks with
            this many elements. Useful in cases where the blocksize is
            relatively small, memory is available, and the function call
            overhead dominates the iteration. Defaults to 2**20. A value of
            anything less than the original blocksize of the raster will
            result in blocksizes equal to the original size.
        offset_only (boolean): defaults to False, if True ``iterblocks``
            only returns offset dictionary and doesn't read any binary data
            from the raster. This can be useful when iterating over writing
            to an output.

    Yields:
        If ``offset_only`` is false, on each iteration a tuple containing a
        dict of block data and a 2-dimensional numpy array. The dict of
        block data has these attributes:

        * ``data['xoff']`` - The X offset of the upper-left-hand corner of
          the block.
        * ``data['yoff']`` - The Y offset of the upper-left-hand corner of
          the block.
        * ``data['win_xsize']`` - The width of the block.
        * ``data['win_ysize']`` - The height of the block.

        If ``offset_only`` is True, yields only the block offset data and
        does not attempt to read binary data from the raster.

    """
    if not _is_raster_path_band_formatted(raster_path_band):
        raise ValueError(
            "`raster_path_band` not formatted as expected. Expects "
            "(path, band_index), received %s" % repr(raster_path_band))
    raster = gdal.OpenEx(raster_path_band[0], gdal.OF_RASTER)
    if raster is None:
        raise ValueError(
            "Raster at %s could not be opened." % raster_path_band[0])
    band = raster.GetRasterBand(raster_path_band[1])
    col_stride, row_stride = band.GetBlockSize()
    total_cols = raster.RasterXSize
    total_rows = raster.RasterYSize

    # grow the natural block first in width, then in height, until it holds
    # close to ``largest_block`` elements (clamped to the raster extents)
    block_area = col_stride * row_stride
    width_factor = int(largest_block / block_area)
    if width_factor > 0:
        col_stride = min(col_stride * width_factor, total_cols)
        block_area = col_stride * row_stride

    height_factor = int(largest_block / block_area)
    if height_factor > 0:
        row_stride = min(row_stride * height_factor, total_rows)

    n_col_blocks = int(math.ceil(total_cols / float(col_stride)))
    n_row_blocks = int(math.ceil(total_rows / float(row_stride)))

    # walk the raster row-major, clipping the final block in each dimension
    for row_block_index in range(n_row_blocks):
        yoff = row_block_index * row_stride
        win_ysize = min(row_stride, total_rows - yoff)
        for col_block_index in range(n_col_blocks):
            xoff = col_block_index * col_stride
            win_xsize = min(col_stride, total_cols - xoff)
            offset_dict = {
                'xoff': xoff,
                'yoff': yoff,
                'win_xsize': win_xsize,
                'win_ysize': win_ysize,
            }
            if offset_only:
                yield offset_dict
            else:
                yield (offset_dict, band.ReadAsArray(**offset_dict))
    band = None
    raster = None
def transform_bounding_box(
        bounding_box, base_projection_wkt, target_projection_wkt,
        edge_samples=11,
        osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY):
    """Transform input bounding box to output projection.

    This transform accounts for the fact that the reprojected square
    bounding box might be warped in the new coordinate system. To account
    for this, the function samples points along the original bounding box
    edges and attempts to make the largest bounding box around any
    transformed point on the edge whether corners or warped edges.

    Args:
        bounding_box (sequence): a sequence of 4 coordinates in
            ``base_epsg`` coordinate system describing the bound in the
            order [xmin, ymin, xmax, ymax].
        base_projection_wkt (string): the spatial reference of the input
            coordinate system in Well Known Text.
        target_projection_wkt (string): the spatial reference of the
            desired output coordinate system in Well Known Text.
        edge_samples (int): the number of interpolated points along each
            bounding box edge to sample along. A value of 2 will sample
            just the corners while a value of 3 will also sample the
            corners and the midpoint.
        osr_axis_mapping_strategy (int): OSR axis mapping strategy for
            ``SpatialReference`` objects. Defaults to
            ``geoprocessing.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. This
            parameter should not be changed unless you know what you are
            doing.

    Return:
        A list of the form [xmin, ymin, xmax, ymax] that describes the
        largest fitting bounding box around the original warped bounding
        box in ``new_epsg`` coordinate system.

    Raises:
        ``ValueError`` if resulting transform yields non-finite
        coordinates. This would indicate an ill posed transform region that
        the user should address.

    """
    base_ref = osr.SpatialReference()
    base_ref.ImportFromWkt(base_projection_wkt)

    target_ref = osr.SpatialReference()
    target_ref.ImportFromWkt(target_projection_wkt)

    base_ref.SetAxisMappingStrategy(osr_axis_mapping_strategy)
    target_ref.SetAxisMappingStrategy(osr_axis_mapping_strategy)

    # Create a coordinate transformation
    transformer = osr.CreateCoordinateTransformation(base_ref, target_ref)

    def _projected(point):
        """Transform an (x, y) point tuple from base_ref to target_ref."""
        new_x, new_y, _ = transformer.TransformPoint(*point)
        return (new_x, new_y)

    # Corners are numbered from 0 starting at the upper LEFT, going
    # counterclockwise:
    #   0--3
    #   |  |
    #   1--2
    corners = [
        numpy.array((bounding_box[0], bounding_box[3])),  # upper left
        numpy.array((bounding_box[0], bounding_box[1])),  # lower left
        numpy.array((bounding_box[2], bounding_box[1])),  # lower right
        numpy.array((bounding_box[2], bounding_box[3]))]  # upper right

    # For each edge, interpolate ``edge_samples`` points between its two
    # corners, transform them all, and keep the extreme coordinate for that
    # edge (axis 0 = x, axis 1 = y).
    edge_specs = [
        (corners[0], corners[1], min, 0),  # left edge -> min x
        (corners[1], corners[2], min, 1),  # bottom edge -> min y
        (corners[2], corners[3], max, 0),  # right edge -> max x
        (corners[3], corners[0], max, 1)]  # top edge -> max y

    raw_bounding_box = []
    for corner_a, corner_b, reducer, axis in edge_specs:
        sampled = [
            _projected(corner_a * frac + corner_b * (1 - frac))
            for frac in numpy.linspace(0, 1, edge_samples)]
        raw_bounding_box.append(reducer(point[axis] for point in sampled))

    # sometimes a transform will be so tight that a sampling around it may
    # flip the coordinate system (seen transforming Gibraltar's UTM bounding
    # box to lat/lng); sorting restores [min, max] order
    minx, maxx = sorted([raw_bounding_box[0], raw_bounding_box[2]])
    miny, maxy = sorted([raw_bounding_box[1], raw_bounding_box[3]])
    transformed_bounding_box = [minx, miny, maxx, maxy]

    if not all(numpy.isfinite(numpy.array(transformed_bounding_box))):
        raise ValueError(
            f'Could not transform bounding box from base to target projection.'
            f'Some transformed coordinates are not finite: '
            f'{transformed_bounding_box}, base bounding box may not fit into '
            f'target coordinate projection system.\n'
            f'Original bounding box: {bounding_box}\n'
            f'Base projection: {base_projection_wkt}\n'
            f'Target projection: {target_projection_wkt}\n')
    return transformed_bounding_box
def mask_raster(
        base_raster_path_band, mask_vector_path, target_mask_raster_path,
        mask_layer_id=0, target_mask_value=None, working_dir=None,
        all_touched=False, where_clause=None,
        raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):
    """Mask a raster band with a given vector.

    Args:
        base_raster_path_band (tuple): a (path, band number) tuple indicating
            the data to mask.
        mask_vector_path (path): path to a vector that will be used to mask
            anything outside of the polygon that overlaps with
            ``base_raster_path_band`` to ``target_mask_value`` if defined or
            else ``base_raster_path_band``'s nodata value.
        target_mask_raster_path (str): path to desired target raster that
            is a copy of ``base_raster_path_band`` except any pixels that do
            not intersect with ``mask_vector_path`` are set to
            ``target_mask_value`` or ``base_raster_path_band``'s nodata value
            if ``target_mask_value`` is None.
        mask_layer_id (str/int): an index or name to identify the mask
            geometry layer in ``mask_vector_path``, default is 0.
        target_mask_value (numeric): If not None, this value is written to
            any pixel in ``base_raster_path_band`` that does not intersect
            with ``mask_vector_path``. Otherwise the nodata value of
            ``base_raster_path_band`` is used.
        working_dir (str): this is a path to a directory that can be used to
            hold temporary files required to complete this operation.
        all_touched (bool): if False, a pixel is only masked if its centroid
            intersects with the mask. If True a pixel is masked if any point
            of the pixel intersects the polygon mask.
        where_clause (str): (optional) if not None, it is an SQL compatible
            where clause that can be used to filter the features that are
            used to mask the base raster.
        raster_driver_creation_tuple (tuple): a tuple containing a GDAL
            driver name string as the first element and a GDAL creation
            options tuple/list as the second. Defaults to a GTiff driver
            tuple defined at
            geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.

    Return:
        None

    """
    # create a temporary 0/1 raster the same shape as the base; the vector
    # is burned into it below (255 is its nodata, 0 the initial fill)
    with tempfile.NamedTemporaryFile(
            prefix='mask_raster', delete=False, suffix='.tif',
            dir=working_dir) as mask_raster_file:
        mask_raster_path = mask_raster_file.name

    new_raster_from_base(
        base_raster_path_band[0], mask_raster_path, gdal.GDT_Byte, [255],
        fill_value_list=[0],
        raster_driver_creation_tuple=raster_driver_creation_tuple)

    base_raster_info = get_raster_info(base_raster_path_band[0])

    # burn 1s wherever the (optionally filtered) vector features cover
    rasterize(
        mask_vector_path, mask_raster_path, burn_values=[1],
        layer_id=mask_layer_id,
        option_list=[('ALL_TOUCHED=%s' % all_touched).upper()],
        where_clause=where_clause)

    base_nodata = base_raster_info['nodata'][base_raster_path_band[1]-1]

    # pick the value written outside the mask: explicit target value, else
    # base nodata, else 0 as a last resort (with a warning)
    if target_mask_value is None:
        mask_value = base_nodata
        if mask_value is None:
            LOGGER.warning(
                "No mask value was passed and target nodata is undefined, "
                "defaulting to 0 as the target mask value.")
            mask_value = 0
    else:
        mask_value = target_mask_value

    def mask_op(base_array, mask_array):
        # copy the base and overwrite everything outside the burned mask
        result = numpy.copy(base_array)
        result[mask_array == 0] = mask_value
        return result

    raster_calculator(
        [base_raster_path_band, (mask_raster_path, 1)], mask_op,
        target_mask_raster_path, base_raster_info['datatype'], base_nodata,
        raster_driver_creation_tuple=raster_driver_creation_tuple)

    os.remove(mask_raster_path)
callback_lambda (lambda): function to invoke if difference between current time and ``reference_time`` has exceeded ``callback_period``. callback_period (float): time in seconds to pass until ``callback_lambda`` is invoked. Return: ``reference_time`` if ``callback_lambda`` not invoked, otherwise the time when ``callback_lambda`` was invoked. """ current_time = time.time() if current_time - reference_time > callback_period: callback_lambda() return current_time return reference_time def _gdal_to_numpy_type(band): """Calculate the equivalent numpy datatype from a GDAL raster band type. This function doesn't handle complex or unknown types. If they are passed in, this function will raise a ValueError. Args: band (gdal.Band): GDAL Band Return: numpy_datatype (numpy.dtype): equivalent of band.DataType """ # doesn't include GDT_Byte because that's a special case base_gdal_type_to_numpy = { gdal.GDT_Int16: numpy.int16, gdal.GDT_Int32: numpy.int32, gdal.GDT_UInt16: numpy.uint16, gdal.GDT_UInt32: numpy.uint32, gdal.GDT_Float32: numpy.float32, gdal.GDT_Float64: numpy.float64, } if band.DataType in base_gdal_type_to_numpy: return base_gdal_type_to_numpy[band.DataType] if band.DataType != gdal.GDT_Byte: raise ValueError("Unsupported DataType: %s" % str(band.DataType)) # band must be GDT_Byte type, check if it is signed/unsigned metadata = band.GetMetadata('IMAGE_STRUCTURE') if 'PIXELTYPE' in metadata and metadata['PIXELTYPE'] == 'SIGNEDBYTE': return numpy.int8 return numpy.uint8 def merge_bounding_box_list(bounding_box_list, bounding_box_mode): """Create a single bounding box by union or intersection of the list. Args: bounding_box_list (sequence): a sequence of bounding box coordinates in the order [minx, miny, maxx, maxy]. mode (string): either ``'union'`` or ``'intersection'`` for the corresponding reduction mode. Return: A four tuple bounding box that is the union or intersection of the input bounding boxes. 
def merge_bounding_box_list(bounding_box_list, bounding_box_mode):
    """Create a single bounding box by union or intersection of the list.

    Args:
        bounding_box_list (sequence): a sequence of bounding box coordinates
            in the order [minx, miny, maxx, maxy].
        bounding_box_mode (string): either ``'union'`` or ``'intersection'``
            for the corresponding reduction mode.

    Return:
        A four tuple bounding box that is the union or intersection of the
        input bounding boxes.

    Raises:
        ValueError if the bounding boxes in ``bounding_box_list`` do not
            intersect if the ``bounding_box_mode`` is 'intersection'.

    """
    def _combine(bb_a, bb_b):
        """Reduce two bounding boxes per ``bounding_box_mode``."""
        # union keeps the outermost extent; intersection the innermost
        if bounding_box_mode == "union":
            reducers = [min, min, max, max]
        if bounding_box_mode == "intersection":
            reducers = [max, max, min, min]
        return [
            reducer(val_a, val_b)
            for reducer, val_a, val_b in zip(reducers, bb_a, bb_b)]

    result_bb = functools.reduce(_combine, bounding_box_list)

    # an "inverted" box means the intersection was empty
    if result_bb[0] > result_bb[2] or result_bb[1] > result_bb[3]:
        raise ValueError(
            "Bounding boxes do not intersect. Base list: %s mode: %s "
            " result: %s" % (bounding_box_list, bounding_box_mode, result_bb))
    return result_bb
""" if not os.path.exists(path): raise ValueError("%s does not exist", path) from pygeoprocessing import UNKNOWN_TYPE gis_type = UNKNOWN_TYPE gis_raster = gdal.OpenEx(path, gdal.OF_RASTER) if gis_raster is not None: from pygeoprocessing import RASTER_TYPE gis_type |= RASTER_TYPE gis_raster = None gis_vector = gdal.OpenEx(path, gdal.OF_VECTOR) if gis_vector is not None: from pygeoprocessing import VECTOR_TYPE gis_type |= VECTOR_TYPE return gis_type def _make_logger_callback(message): """Build a timed logger callback that prints ``message`` replaced. Args: message (string): a string that expects 2 placement %% variables, first for % complete from ``df_complete``, second from ``p_progress_arg[0]``. Return: Function with signature: logger_callback(df_complete, psz_message, p_progress_arg) """ def logger_callback(df_complete, _, p_progress_arg): """Argument names come from the GDAL API for callbacks.""" try: current_time = time.time() if ((current_time - logger_callback.last_time) > 5.0 or (df_complete == 1.0 and logger_callback.total_time >= 5.0)): # In some multiprocess applications I was encountering a # ``p_progress_arg`` of None. This is unexpected and I suspect # was an issue for some kind of GDAL race condition. So I'm # guarding against it here and reporting an appropriate log # if it occurs. if p_progress_arg: LOGGER.info(message, df_complete * 100, p_progress_arg[0]) else: LOGGER.info(message, df_complete * 100, '') logger_callback.last_time = current_time logger_callback.total_time += current_time except AttributeError: logger_callback.last_time = time.time() logger_callback.total_time = 0.0 except Exception: LOGGER.exception("Unhandled error occurred while logging " "progress. 
df_complete: %s, p_progress_arg: %s", df_complete, p_progress_arg) return logger_callback def _is_raster_path_band_formatted(raster_path_band): """Return true if raster path band is a (str, int) tuple/list.""" if not isinstance(raster_path_band, (list, tuple)): return False elif len(raster_path_band) != 2: return False elif not isinstance(raster_path_band[0], str): return False elif not isinstance(raster_path_band[1], int): return False else: return True def _convolve_2d_worker( signal_path_band, kernel_path_band, ignore_nodata, normalize_kernel, set_tol_to_zero, work_queue, write_queue): """Worker function to be used by ``convolve_2d``. Args: signal_path_band (tuple): a 2 tuple of the form (filepath to signal raster, band index). kernel_path_band (tuple): a 2 tuple of the form (filepath to kernel raster, band index). ignore_nodata (boolean): If true, any pixels that are equal to ``signal_path_band``'s nodata value are not included when averaging the convolution filter. normalize_kernel (boolean): If true, the result is divided by the sum of the kernel. set_tol_to_zero (float): Value to test close to to determine if values are zero, and if so, set to zero. work_queue (Queue): will contain (signal_offset, kernel_offset) tuples that can be used to read raster blocks directly using GDAL ReadAsArray(**offset). Indicates the block to operate on. write_queue (Queue): mechanism to pass result back to the writer contains a (index_dict, result, mask_result, left_index_raster, right_index_raster, top_index_raster, bottom_index_raster, left_index_result, right_index_result, top_index_result, bottom_index_result) tuple that's used for writing and masking. 
Return: None """ signal_raster = gdal.OpenEx(signal_path_band[0], gdal.OF_RASTER) kernel_raster = gdal.OpenEx(kernel_path_band[0], gdal.OF_RASTER) signal_band = signal_raster.GetRasterBand(signal_path_band[1]) kernel_band = kernel_raster.GetRasterBand(kernel_path_band[1]) signal_raster_info = get_raster_info(signal_path_band[0]) kernel_raster_info = get_raster_info(kernel_path_band[0]) n_cols_signal, n_rows_signal = signal_raster_info['raster_size'] n_cols_kernel, n_rows_kernel = kernel_raster_info['raster_size'] signal_nodata = signal_raster_info['nodata'][0] kernel_nodata = kernel_raster_info['nodata'][0] mask_result = None # in case no mask is needed, variable is still defined # calculate the kernel sum for normalization kernel_sum = 0.0 for _, kernel_block in iterblocks(kernel_path_band): if kernel_nodata is not None and ignore_nodata: kernel_block[numpy.isclose(kernel_block, kernel_nodata)] = 0.0 kernel_sum += numpy.sum(kernel_block) while True: payload = work_queue.get() if payload is None: break signal_offset, kernel_offset = payload # ensure signal and kernel are internally float64 precision # irrespective of their base type signal_block = signal_band.ReadAsArray(**signal_offset).astype( numpy.float64) kernel_block = kernel_band.ReadAsArray(**kernel_offset).astype( numpy.float64) # don't ever convolve the nodata value if signal_nodata is not None: signal_nodata_mask = numpy.isclose(signal_block, signal_nodata) signal_block[signal_nodata_mask] = 0.0 if not ignore_nodata: signal_nodata_mask[:] = 0 else: signal_nodata_mask = numpy.zeros( signal_block.shape, dtype=bool) left_index_raster = ( signal_offset['xoff'] - n_cols_kernel // 2 + kernel_offset['xoff']) right_index_raster = ( signal_offset['xoff'] - n_cols_kernel // 2 + kernel_offset['xoff'] + signal_offset['win_xsize'] + kernel_offset['win_xsize'] - 1) top_index_raster = ( signal_offset['yoff'] - n_rows_kernel // 2 + kernel_offset['yoff']) bottom_index_raster = ( signal_offset['yoff'] - n_rows_kernel // 2 
+ kernel_offset['yoff'] + signal_offset['win_ysize'] + kernel_offset['win_ysize'] - 1) # it's possible that the piece of the integrating kernel # doesn't affect the final result, if so we should skip if (right_index_raster < 0 or bottom_index_raster < 0 or left_index_raster > n_cols_signal or top_index_raster > n_rows_signal): continue if kernel_nodata is not None: kernel_block[numpy.isclose(kernel_block, kernel_nodata)] = 0.0 if normalize_kernel: kernel_block /= kernel_sum # determine the output convolve shape shape = ( numpy.array(signal_block.shape) + numpy.array(kernel_block.shape) - 1) # add zero padding so FFT is fast fshape = [_next_regular(int(d)) for d in shape] signal_fft = numpy.fft.rfftn(signal_block, fshape) kernel_fft = numpy.fft.rfftn(kernel_block, fshape) # this variable determines the output slice that doesn't include # the padded array region made for fast FFTs. fslice = tuple([slice(0, int(sz)) for sz in shape]) # classic FFT convolution result = numpy.fft.irfftn(signal_fft * kernel_fft, fshape)[fslice] # nix any roundoff error if set_tol_to_zero is not None: result[numpy.isclose(result, set_tol_to_zero)] = 0.0 # if we're ignoring nodata, we need to make a convolution of the # nodata mask too if ignore_nodata: mask_fft = numpy.fft.rfftn( numpy.where(signal_nodata_mask, 0.0, 1.0), fshape) mask_result = numpy.fft.irfftn( mask_fft * kernel_fft, fshape)[fslice] left_index_result = 0 right_index_result = result.shape[1] top_index_result = 0 bottom_index_result = result.shape[0] # we might abut the edge of the raster, clip if so if left_index_raster < 0: left_index_result = -left_index_raster left_index_raster = 0 if top_index_raster < 0: top_index_result = -top_index_raster top_index_raster = 0 if right_index_raster > n_cols_signal: right_index_result -= right_index_raster - n_cols_signal right_index_raster = n_cols_signal if bottom_index_raster > n_rows_signal: bottom_index_result -= ( bottom_index_raster - n_rows_signal) bottom_index_raster = 
n_rows_signal # Add result to current output to account for overlapping edges index_dict = { 'xoff': left_index_raster, 'yoff': top_index_raster, 'win_xsize': right_index_raster-left_index_raster, 'win_ysize': bottom_index_raster-top_index_raster } write_queue.put( (index_dict, result, mask_result, left_index_raster, right_index_raster, top_index_raster, bottom_index_raster, left_index_result, right_index_result, top_index_result, bottom_index_result)) # Indicates worker has terminated write_queue.put(None) def _assert_is_valid_pixel_size(target_pixel_size): """Return true if ``target_pixel_size`` is a valid 2 element sequence. Raises ValueError if not a two element list/tuple and/or the values in the sequence are not numerical. """ def _is_number(x): """Return true if x is a number.""" try: if isinstance(x, str): return False float(x) return True except (ValueError, TypeError): return False if not isinstance(target_pixel_size, (list, tuple)): raise ValueError( "target_pixel_size is not a tuple, its value was '%s'", repr(target_pixel_size)) if (len(target_pixel_size) != 2 or not all([_is_number(x) for x in target_pixel_size])): raise ValueError( "Invalid value for `target_pixel_size`, expected two numerical " "elements, got: %s", repr(target_pixel_size)) return True def shapely_geometry_to_vector( shapely_geometry_list, target_vector_path, projection_wkt, vector_format, fields=None, attribute_list=None, ogr_geom_type=ogr.wkbPolygon): """Convert list of geometry to vector on disk. Args: shapely_geometry_list (list): a list of Shapely objects. target_vector_path (str): path to target vector. projection_wkt (str): WKT for target vector. vector_format (str): GDAL driver name for target vector. 
fields (dict): a python dictionary mapping string fieldname to OGR Fieldtypes, if None no fields are added attribute_list (list of dicts): a list of python dictionary mapping fieldname to field value for each geometry in `shapely_geometry_list`, if None, no attributes are created. ogr_geom_type (ogr geometry enumerated type): sets the target layer geometry type. Defaults to wkbPolygon. Return: None """ if fields is None: fields = {} if attribute_list is None: attribute_list = [{} for _ in range(len(shapely_geometry_list))] num_geoms = len(shapely_geometry_list) num_attrs = len(attribute_list) if num_geoms != num_attrs: raise ValueError( f"Geometry count ({num_geoms}) and attribute count " f"({num_attrs}) do not match.") vector_driver = ogr.GetDriverByName(vector_format) target_vector = vector_driver.CreateDataSource(target_vector_path) layer_name = os.path.basename(os.path.splitext(target_vector_path)[0]) projection = osr.SpatialReference() projection.ImportFromWkt(projection_wkt) target_layer = target_vector.CreateLayer( layer_name, srs=projection, geom_type=ogr_geom_type) for field_name, field_type in fields.items(): target_layer.CreateField(ogr.FieldDefn(field_name, field_type)) layer_defn = target_layer.GetLayerDefn() for shapely_feature, fields in zip(shapely_geometry_list, attribute_list): new_feature = ogr.Feature(layer_defn) new_geometry = ogr.CreateGeometryFromWkb(shapely_feature.wkb) new_feature.SetGeometry(new_geometry) for field_name, field_value in fields.items(): new_feature.SetField(field_name, field_value) target_layer.CreateFeature(new_feature) target_layer = None target_vector = None def numpy_array_to_raster( base_array, target_nodata, pixel_size, origin, projection_wkt, target_path, raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS): """Create a single band raster of size ``base_array.shape``. Args: base_array (numpy.array): a 2d numpy array. target_nodata (numeric): nodata value of target array, can be None. 
pixel_size (tuple): square dimensions (in ``(x, y)``) of pixel. origin (tuple/list): x/y coordinate of the raster origin. projection_wkt (str): target projection in wkt. target_path (str): path to raster to create that will be of the same type of base_array with contents of base_array. raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver name string as the first element and a GDAL creation options tuple/list as the second. Defaults to geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS. Return: None """ numpy_to_gdal_type = { numpy.dtype(bool): gdal.GDT_Byte, numpy.dtype(numpy.int8): gdal.GDT_Byte, numpy.dtype(numpy.uint8): gdal.GDT_Byte, numpy.dtype(numpy.int16): gdal.GDT_Int16, numpy.dtype(numpy.int32): gdal.GDT_Int32, numpy.dtype(numpy.uint16): gdal.GDT_UInt16, numpy.dtype(numpy.uint32): gdal.GDT_UInt32, numpy.dtype(numpy.float32): gdal.GDT_Float32, numpy.dtype(numpy.float64): gdal.GDT_Float64, numpy.dtype(numpy.csingle): gdal.GDT_CFloat32, numpy.dtype(numpy.complex64): gdal.GDT_CFloat64, } raster_driver = gdal.GetDriverByName(raster_driver_creation_tuple[0]) ny, nx = base_array.shape new_raster = raster_driver.Create( target_path, nx, ny, 1, numpy_to_gdal_type[base_array.dtype], options=raster_driver_creation_tuple[1]) if projection_wkt is not None: new_raster.SetProjection(projection_wkt) new_raster.SetGeoTransform( [origin[0], pixel_size[0], 0.0, origin[1], 0.0, pixel_size[1]]) new_band = new_raster.GetRasterBand(1) if target_nodata is not None: new_band.SetNoDataValue(target_nodata) new_band.WriteArray(base_array) new_band = None new_raster = None def raster_to_numpy_array(raster_path, band_id=1): """Read the entire contents of the raster band to a numpy array. Args: raster_path (str): path to raster. band_id (int): band in the raster to read. Return: numpy array contents of `band_id` in raster. 
""" raster = gdal.OpenEx(raster_path, gdal.OF_RASTER) band = raster.GetRasterBand(band_id) array = band.ReadAsArray() band = None raster = None return array def stitch_rasters( base_raster_path_band_list, resample_method_list, target_stitch_raster_path_band, overlap_algorithm='etch', area_weight_m2_to_wgs84=False, osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY): """Stitch the raster in the base list into the existing target. Args: base_raster_path_band_list (sequence): sequence of raster path/band tuples to stitch into target. resample_method_list (sequence): a sequence of resampling methods which one to one map each path in ``base_raster_path_band_list`` during resizing. Each element must be one of "near|bilinear|cubic|cubicspline|lanczos|mode". target_stitch_raster_path_band (tuple): raster path/band tuple to an existing raster, values in ``base_raster_path_band_list`` will be stitched into this raster/band in the order they are in the list. The nodata value for the target band must be defined and will be written over with values from the base raster. Nodata values in the base rasters will not be written into the target. If the pixel size or projection are different between base and target the base is warped to the target's cell size and target with the interpolation method provided. If any part of the base raster lies outside of the target, that part of the base is ignored. A warning is logged if the entire base raster is outside of the target bounds. overlap_algorithm (str): this value indicates which algorithm to use when a raster is stitched on non-nodata values in the target stitch raster. It can be one of the following: 'etch': write a value to the target raster only if the target raster pixel is nodata. If the target pixel is non-nodata ignore any additional values to write on that pixel. 
'replace': write a value to the target raster irrespective of the value of the target raster 'add': add the value to be written to the target raster to any existing value that is there. If the existing value is nodata, treat it as 0.0. area_weight_m2_to_wgs84 (bool): If ``True`` the stitched raster will be converted to a per-area value before reprojection to wgs84, then multiplied by the m^2 area per pixel in the wgs84 coordinate space. This is useful when the quantity being stitched is a total quantity per pixel rather than a per unit area density. Note this assumes input rasters are in a projected space of meters, if they are not the stitched output will be nonsensical. osr_axis_mapping_strategy (int): OSR axis mapping strategy for ``SpatialReference`` objects. Defaults to ``geoprocessing.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. This parameter should not be changed unless you know what you are doing. Return: None. """ valid_overlap_algorithms = ['etch', 'replace', 'add'] if overlap_algorithm not in valid_overlap_algorithms: raise ValueError( f'overlap algorithm {overlap_algorithm} is not one of ' f'{valid_overlap_algorithms}') if not _is_raster_path_band_formatted(target_stitch_raster_path_band): raise ValueError( f'Expected raster path/band tuple for ' f'target_stitch_raster_path_band but got ' f'"{target_stitch_raster_path_band}"') if len(base_raster_path_band_list) != len(resample_method_list): raise ValueError( f'Expected same number of elements in ' f'`base_raster_path_band_list` as `resample_method_list` but ' f'got {len(base_raster_path_band_list)} != ' f'{len(resample_method_list)} respectively') if not os.path.exists(target_stitch_raster_path_band[0]): raise ValueError( f'Target stitch raster does not exist: ' f'"{target_stitch_raster_path_band[0]}"') gis_type = get_gis_type(target_stitch_raster_path_band[0]) from pygeoprocessing import RASTER_TYPE if gis_type != RASTER_TYPE: raise ValueError( f'Target stitch raster is not a raster. 
' f'Location: "{target_stitch_raster_path_band[0]}" ' f'GIS type: {gis_type}') target_raster_info = get_raster_info(target_stitch_raster_path_band[0]) if target_stitch_raster_path_band[1] > len(target_raster_info['nodata']): raise ValueError( f'target_stitch_raster_path_band refers to a band that exceeds ' f'the number of bands in the raster:\n' f'target_stitch_raster_path_band[1]: ' f'{target_stitch_raster_path_band[1]} ' f'n bands: {len(target_raster_info["nodata"])}') target_nodata = target_raster_info['nodata'][ target_stitch_raster_path_band[1]-1] if target_nodata is None: raise ValueError( f'target stitch raster at "{target_stitch_raster_path_band[0]} "' f'nodata value is `None`, expected non-`None` value') target_raster = gdal.OpenEx( target_stitch_raster_path_band[0], gdal.OF_RASTER | gdal.GA_Update) target_band = target_raster.GetRasterBand( target_stitch_raster_path_band[1]) target_inv_gt = gdal.InvGeoTransform(target_raster_info['geotransform']) target_raster_x_size, target_raster_y_size = target_raster_info[ 'raster_size'] for (raster_path, raster_band_id), resample_method in zip( base_raster_path_band_list, resample_method_list): LOGGER.info( f'stitching {(raster_path, raster_band_id)} into ' f'{target_stitch_raster_path_band}') raster_info = get_raster_info(raster_path) projected_raster_bounding_box = transform_bounding_box( raster_info['bounding_box'], raster_info['projection_wkt'], target_raster_info['projection_wkt']) try: # merge the bounding boxes only to see if they don't intersect _ = merge_bounding_box_list( [projected_raster_bounding_box, target_raster_info['bounding_box']], 'intersection') except ValueError: LOGGER.warning( f'the raster at "{raster_path}"" does not intersect the ' f'stitch raster at "{target_stitch_raster_path_band[0]}", ' f'skipping...') continue # use this to determine if we need to warp and delete if we did at # the end if (raster_info['projection_wkt'] == target_raster_info['projection_wkt'] and raster_info['pixel_size'] 
== target_raster_info['pixel_size']): warped_raster = False base_stitch_raster_path = raster_path else: workspace_dir = tempfile.mkdtemp( dir=os.path.dirname(target_stitch_raster_path_band[0]), prefix='stitch_rasters_workspace') base_stitch_raster_path = os.path.join( workspace_dir, os.path.basename(raster_path)) warp_raster( raster_path, target_raster_info['pixel_size'], base_stitch_raster_path, resample_method, target_projection_wkt=target_raster_info['projection_wkt'], working_dir=workspace_dir, osr_axis_mapping_strategy=osr_axis_mapping_strategy) warped_raster = True if warped_raster and area_weight_m2_to_wgs84: # determine base area per pixel currently and area per pixel # once it is projected to wgs84 pixel sizes base_pixel_area_m2 = abs(numpy.prod(raster_info['pixel_size'])) base_stitch_raster_info = get_raster_info( base_stitch_raster_path) _, lat_min, _, lat_max = base_stitch_raster_info['bounding_box'] n_rows = base_stitch_raster_info['raster_size'][1] # this column is a longitude invariant latitude variant pixel # area for scaling area dependent values m2_area_per_lat = _create_latitude_m2_area_column( lat_min, lat_max, n_rows) def _mult_op(base_array, base_nodata, scale, datatype): """Scale non-nodata by scale.""" result = base_array.astype(datatype) if base_nodata is not None: valid_mask = ~numpy.isclose(base_array, base_nodata) else: valid_mask = numpy.ones( base_array.shape, dtype=bool) result[valid_mask] = result[valid_mask] * scale[valid_mask] return result base_stitch_nodata = base_stitch_raster_info['nodata'][0] scaled_raster_path = os.path.join( workspace_dir, f'scaled_{os.path.basename(base_stitch_raster_path)}') # multiply the pixels in the resampled raster by the ratio of # the pixel area in the wgs84 units divided by the area of the # original pixel raster_calculator( [(base_stitch_raster_path, 1), (base_stitch_nodata, 'raw'), m2_area_per_lat/base_pixel_area_m2, (_GDAL_TYPE_TO_NUMPY_LOOKUP[ target_raster_info['datatype']], 'raw')], _mult_op, 
scaled_raster_path, target_raster_info['datatype'], base_stitch_nodata) # swap the result to base stitch so the rest of the function # operates on the area scaled raster os.remove(base_stitch_raster_path) base_stitch_raster_path = scaled_raster_path base_raster = gdal.OpenEx(base_stitch_raster_path, gdal.OF_RASTER) base_gt = base_raster.GetGeoTransform() base_band = base_raster.GetRasterBand(raster_band_id) base_nodata = base_band.GetNoDataValue() # Get the target upper left xoff/yoff w/r/t the stitch raster 0,0 # coordinates target_to_base_xoff, target_to_base_yoff = [ int(_) for _ in gdal.ApplyGeoTransform( target_inv_gt, *gdal.ApplyGeoTransform(base_gt, 0, 0))] for offset_dict in iterblocks( (base_stitch_raster_path, raster_band_id), offset_only=True): _offset_vars = {} overlap = True for (target_to_base_off, off_val, target_off_id, off_clip_id, win_size_id, raster_size) in [ (target_to_base_xoff, offset_dict['xoff'], 'target_xoff', 'xoff_clip', 'win_xsize', target_raster_x_size), (target_to_base_yoff, offset_dict['yoff'], 'target_yoff', 'yoff_clip', 'win_ysize', target_raster_y_size)]: _offset_vars[target_off_id] = (target_to_base_off+off_val) if _offset_vars[target_off_id] >= raster_size: overlap = False break # how far to move right to get in the target raster _offset_vars[off_clip_id] = 0 if _offset_vars[target_off_id] < 0: _offset_vars[off_clip_id] = -_offset_vars[target_off_id] _offset_vars[win_size_id] = offset_dict[win_size_id] if _offset_vars[off_clip_id] >= _offset_vars[win_size_id]: # its too far left for the whole window overlap = False break # make the _offset_vars[win_size_id] smaller if it shifts # off the target window if (_offset_vars[off_clip_id] + _offset_vars[target_off_id] + _offset_vars[win_size_id] >= raster_size): _offset_vars[win_size_id] -= ( _offset_vars[off_clip_id] + _offset_vars[target_off_id] + _offset_vars[win_size_id] - raster_size) if not overlap: continue target_array = target_band.ReadAsArray( 
xoff=_offset_vars['target_xoff']+_offset_vars['xoff_clip'], yoff=_offset_vars['target_yoff']+_offset_vars['yoff_clip'], win_xsize=_offset_vars['win_xsize'], win_ysize=_offset_vars['win_ysize']) target_nodata_mask = numpy.isclose(target_array, target_nodata) base_array = base_band.ReadAsArray( xoff=offset_dict['xoff']+_offset_vars['xoff_clip'], yoff=offset_dict['yoff']+_offset_vars['yoff_clip'], win_xsize=_offset_vars['win_xsize'], win_ysize=_offset_vars['win_ysize']) if base_nodata is not None: base_nodata_mask = numpy.isclose(base_array, base_nodata) else: base_nodata_mask = numpy.zeros( base_array.shape, dtype=bool) if overlap_algorithm == 'etch': # place values only where target is nodata valid_mask = ~base_nodata_mask & target_nodata_mask target_array[valid_mask] = base_array[valid_mask] elif overlap_algorithm == 'replace': # write valid values into the target -- disregard any # existing values in the target valid_mask = ~base_nodata_mask target_array[valid_mask] = base_array[valid_mask] elif overlap_algorithm == 'add': # add values to the target and treat target nodata as 0. valid_mask = ~base_nodata_mask masked_target_array = target_array[valid_mask] target_array_nodata_mask = numpy.isclose( masked_target_array, target_nodata) target_array[valid_mask] = ( base_array[valid_mask] + numpy.where( target_array_nodata_mask, 0, masked_target_array)) else: raise RuntimeError( f'overlap_algorithm {overlap_algorithm} was not defined ' f'but also not detected earlier -- this should never ' f'happen') target_band.WriteArray( target_array, xoff=_offset_vars['target_xoff']+_offset_vars['xoff_clip'], yoff=_offset_vars['target_yoff']+_offset_vars['yoff_clip']) base_raster = None base_band = None if warped_raster: shutil.rmtree(workspace_dir) target_raster = None target_band = None def _m2_area_of_wg84_pixel(pixel_size, center_lat): """Calculate m^2 area of a square wgs84 pixel. 
    Adapted from: https://gis.stackexchange.com/a/127327/2397

    Args:
        pixel_size (float): length of side of a square pixel in degrees.
        center_lat (float): latitude of the center of the pixel. Note this
            value +/- half the `pixel-size` must not exceed 90/-90 degrees
            latitude or an invalid area will be calculated.

    Returns:
        Area of square pixel of side length `pixel_size` centered at
        `center_lat` in m^2.

    """
    # WGS84 ellipsoid semi-major/semi-minor axes and eccentricity.
    a = 6378137  # meters
    b = 6356752.3142  # meters
    e = math.sqrt(1 - (b/a)**2)
    area_list = []
    # Evaluate the ellipsoidal zone area at the pixel's north and south
    # edges; their difference over the pixel's longitudinal fraction of a
    # full revolution gives the pixel area.
    for f in [center_lat+pixel_size/2, center_lat-pixel_size/2]:
        zm = 1 - e*math.sin(math.radians(f))
        zp = 1 + e*math.sin(math.radians(f))
        area_list.append(
            math.pi * b**2 * (
                math.log(zp/zm) / (2*e) +
                math.sin(math.radians(f)) / (zp*zm)))
    return abs(pixel_size / 360. * (area_list[0] - area_list[1]))


def _create_latitude_m2_area_column(lat_min, lat_max, n_pixels):
    """Create a (n, 1) sized numpy array with m^2 areas in each element.

    Creates a per pixel m^2 area array that varies with changes in latitude.
    This array can be used to scale values by area when converting to or
    from a WGS84 projection to a projected one.

    Args:
        lat_max (float): maximum latitude in the bound
        lat_min (float): minimum latitude in the bound
        n_pixels (int): number of pixels to create for the column. The
            size of the target square pixels are (lat_max-lat_min)/n_pixels
            degrees per side.

    Return:
        A (n, 1) sized numpy array whose elements are the m^2 areas in
        each element estimated by the latitude value at the center of
        each pixel.

    """
    pixel_size = (lat_max - lat_min) / n_pixels
    center_lat_array = numpy.linspace(
        lat_min+pixel_size/2, lat_max-pixel_size/2, n_pixels)
    # Reversed so the first element corresponds to the maximum latitude;
    # NOTE(review): presumably this matches top-down raster row order --
    # confirm against the caller.
    area_array = numpy.array([
        _m2_area_of_wg84_pixel(pixel_size, lat)
        for lat in reversed(center_lat_array)]).reshape((n_pixels, 1))
    return area_array
thermometer.py
import os
import time
import threading

# Number of ADC samples to average per reading (used by subclasses).
SAMPLE_SIZE = 10
# Minimum seconds between hardware reads triggered by get_f/get_c.
INTERVAL = 2
# Plausible temperature range; readings outside are considered errors.
LOW_CUTOFF = -30.0
HI_CUTOFF = 600.0
# Sentinel reported for a failed reading.
ERR_VAL = -666
BASE_DIR = os.path.dirname(os.path.abspath(__file__))


class Thermometer:
    """A base class for relevant thermometer functions for smokerpi.

    Subclasses must override ``read_temp`` to sample the hardware and set
    ``self.f`` / ``self.c`` (Fahrenheit / Celsius) and
    ``self.time_last_read``.  ``get_f``/``get_c`` return the cached value
    and kick off a background refresh when it is older than ``interval``
    seconds.
    """

    def __init__(self, interval=INTERVAL):
        # 0 forces a hardware read on first access; -500 marks "never read".
        self.time_last_read = 0
        self.f = -500
        self.c = -500
        self.interval = interval
        self.read_temp()

    def read_temp(self):
        """Sample the sensor; must be overridden by a subclass.

        Raises:
            NotImplementedError: always, in this base class.
        """
        # Fixed: was ``raise NotImplemented()`` -- ``NotImplemented`` is a
        # non-callable singleton, so that line raised TypeError instead of
        # the intended NotImplementedError.
        raise NotImplementedError()

    def get_f(self):
        """Return the last Fahrenheit reading, refreshing stale data.

        The refresh happens on a background thread so the caller is never
        blocked; the value returned is the currently cached one.
        """
        if time.time() - self.time_last_read > self.interval:
            t = threading.Thread(target=self.read_temp)
            t.start()
        return self.f

    def get_c(self):
        """Return the last Celsius reading, refreshing stale data.

        Same background-refresh behavior as ``get_f``.
        """
        if time.time() - self.time_last_read > self.interval:
            t = threading.Thread(target=self.read_temp)
            t.start()
        return self.c

    def read_continuous(self):
        """Poll the sensor once per second forever, printing (c, f)."""
        while True:
            self.read_temp()
            print((self.c, self.f))
            time.sleep(1)


if __name__ == "__main__":
    # NOTE(review): ``Ktype`` is not defined in this module -- presumably a
    # K-type thermocouple subclass that lives elsewhere in the project;
    # running this file directly will raise NameError until it is imported.
    k = Ktype()
    print(k.get_f())
train_mask_rcnn_alt_opt.py
#!/usr/bin/env python # -------------------------------------------------------- # Mask R-CNN # Copyright (c) 2018 VitoChien # Licensed under The MIT License [see LICENSE for details] # Written by Ruihe Qian # -------------------------------------------------------- """Train a Mask R-CNN network using alternating optimization. """ import _init_paths # from fast_rcnn.train import get_training_roidb, train_net from fast_rcnn.train_multi_gpu import get_training_roidb, train_net_multi_gpu from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir from datasets.factory import get_imdb from rpn.generate import imdb_proposals import argparse import pprint import numpy as np import sys, os import multiprocessing as mp import cPickle import shutil def parse_args(): """ Parse input arguments """ parser = argparse.ArgumentParser(description='Train a MaskS R-CNN network') parser.add_argument("--gpu_id", type=str, default='0', help="List of device ids.") parser.add_argument('--net_name', dest='net_name', help='network name (e.g., "ZF")', default=None, type=str) parser.add_argument('--weights', dest='pretrained_model', help='initialize with pretrained model weights', default=None, type=str) parser.add_argument('--cfg', dest='cfg_file', help='optional config file', default=None, type=str) parser.add_argument('--imdb', dest='imdb_name', help='dataset to train on', default='voc_2007_trainval', type=str) parser.add_argument('--set', dest='set_cfgs', help='set config keys', default=None, nargs=argparse.REMAINDER) if len(sys.argv) == 1: parser.print_help() sys.exit(1) args = parser.parse_args() return args def get_roidb(imdb_name, rpn_file=None): imdb = get_imdb(imdb_name) print 'Loaded dataset `{:s}` for training'.format(imdb.name) imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD) print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD) if rpn_file is not None: imdb.config['rpn_file'] = rpn_file roidb = get_training_roidb(imdb) return roidb, imdb def 
get_solvers(net_name):  # NOTE(review): the leading 'def ' lies before this chunk boundary -- header truncated by extraction
    # Mask R-CNN Alternating Optimization: returns (solver prototxt paths,
    # per-stage iteration budgets, RPN test prototxt path) for the 4 stages.
    n = 'mask_rcnn_alt_opt'
    # Solver for each training stage
    solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
               [net_name, n, 'stage1_mask_rcnn_solver30k40k.pt'],
               [net_name, n, 'stage2_rpn_solver60k80k.pt'],
               [net_name, n, 'stage2_mask_rcnn_solver30k40k.pt']]
    solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
    # Iterations for each training stage
    max_iters = [12500, 60000, 30000, 60000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt

# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------

def _init_caffe(cfg):
    """Initialize pycaffe in a training process.
    """
    import caffe
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID[0])  # bind this worker to the first configured GPU

def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
              max_iters=None, cfg=None):
    """Train a Region Proposal Network in a separate training process.

    Puts {'model_path': <final snapshot path>} on `queue` when finished.
    """
    # Not using any proposals, just ground-truth boxes
    cfg.TRAIN.HAS_RPN = True
    cfg.TRAIN.BBOX_REG = False  # applies only to Mask R-CNN bbox regression
    cfg.TRAIN.PROPOSAL_METHOD = 'gt'
    cfg.TRAIN.IMS_PER_BATCH = 1
    print 'Init model: {}'.format(init_model)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    # _init_caffe(cfg)

    roidb, imdb = get_roidb(imdb_name)
    print 'roidb len: {}'.format(len(roidb))
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    model_paths = train_net_multi_gpu(solver, roidb, output_dir,
                                      pretrained_model=init_model,
                                      max_iters=max_iters, gpus=cfg.GPU_ID)
    # Cleanup all but the final model
    # for i in model_paths[:-1]:
    #     os.remove(i)
    rpn_model_path = model_paths[-1]
    # Send final model path through the multiprocessing queue
    queue.put({'model_path': rpn_model_path})

def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.

    Puts {'proposal_path': <pickle of proposals>} on `queue` when finished.
    """
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print 'RPN model: {}'.format(rpn_model_path)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)

    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)
    # Write proposals to disk and send the proposal file path through the
    # multiprocessing queue
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
    queue.put({'proposal_path': rpn_proposals_path})

def train_mask_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
                    max_iters=None, cfg=None, rpn_file=None):
    """Train a Mask R-CNN using proposals generated by an RPN.

    Puts {'model_path': <final snapshot path>} on `queue` when finished.
    """
    cfg.TRAIN.HAS_RPN = False          # not generating proposals on-the-fly
    cfg.TRAIN.PROPOSAL_METHOD = 'rpn'  # use pre-computed RPN proposals instead
    cfg.TRAIN.IMS_PER_BATCH = 1
    print 'Init model: {}'.format(init_model)
    print 'RPN proposals: {}'.format(rpn_file)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    # _init_caffe(cfg)

    roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Train Mask R-CNN
    model_paths = train_net_multi_gpu(solver, roidb, output_dir,
                                      pretrained_model=init_model,
                                      max_iters=max_iters, gpus=cfg.GPU_ID)
    # Cleanup all but the final model
    # for i in model_paths[:-1]:
    #     os.remove(i)
    mask_rcnn_model_path = model_paths[-1]
    # Send Mask R-CNN model path over the multiprocessing queue
    queue.put({'model_path': mask_rcnn_model_path})

if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    # --gpu_id is a comma-separated list of device ids, e.g. "0,1".
    gpu_id = args.gpu_id
    gpu_list = gpu_id.split(',')
    gpus = [int(i) for i in gpu_list]
    cfg.GPU_ID = gpus

    # --------------------------------------------------------------------------
    # Pycaffe doesn't reliably free GPU memory when instantiated nets are
    # discarded (e.g. "del net" in Python code). To work around this issue, each
    # training stage is executed in a separate process using
    # multiprocessing.Process.
    # --------------------------------------------------------------------------

    # queue for communicated results between processes
    mp_queue = mp.Queue()
    # solvers, iters, etc. for each training stage
    solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)

    # Stage 1 RPN training is commented out and replaced with a hardcoded
    # snapshot path below -- presumably resuming from an earlier run; verify
    # the .caffemodel actually exists before relying on it.
    # print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    # print 'Stage 1 RPN, init from ImageNet model'
    # print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    # cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    # mp_kwargs = dict(
    #         queue=mp_queue,
    #         imdb_name=args.imdb_name,
    #         init_model=args.pretrained_model,
    #         solver=solvers[0],
    #         max_iters=max_iters[0],
    #         cfg=cfg)
    # p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    # p.start()
    # rpn_stage1_out = mp_queue.get()
    # p.join()
    rpn_stage1_out = dict()
    rpn_stage1_out["model_path"] = "./output/mask_rcnn_alt_opt/voc_2007_trainval/ResNet50_rpn_stage1_iter_15000.caffemodel"

    # Stage 1 proposal generation is likewise skipped in favor of a
    # pre-computed proposals pickle.
    # print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    # print 'Stage 1 RPN, generate proposals'
    # print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    #
    # mp_kwargs = dict(
    #         queue=mp_queue,
    #         imdb_name=args.imdb_name,
    #         rpn_model_path=str(rpn_stage1_out['model_path']),
    #         cfg=cfg,
    #         rpn_test_prototxt=rpn_test_prototxt)
    # p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    # p.start()
    # rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
    # p.join()
    rpn_stage1_out['proposal_path'] = "./output/mask_rcnn_alt_opt/voc_2007_trainval/ResNet50_rpn_stage1_iter_15000_proposals.pkl"

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 Mask R-CNN using RPN proposals, init from ImageNet model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    mp_kwargs = dict(
            queue=mp_queue,
            imdb_name=args.imdb_name,
            init_model=args.pretrained_model,
            solver=solvers[1],
            max_iters=max_iters[1],
            cfg=cfg,
            rpn_file=rpn_stage1_out['proposal_path'])
            #rpn_file="/home/qrh/py-mask-rcnn/output/mask_rcnn_alt_opt/voc_2007_trainval/ResNet50_rpn_stage1_iter_100_proposals.pkl")
    p = mp.Process(target=train_mask_rcnn, kwargs=mp_kwargs)
    p.start()
    mask_rcnn_stage1_out = mp_queue.get()
    p.join()

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 RPN, init from stage 1 Mask R-CNN model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    mp_kwargs = dict(
            queue=mp_queue,
            imdb_name=args.imdb_name,
            init_model=str(mask_rcnn_stage1_out['model_path']),
            solver=solvers[2],
            max_iters=max_iters[2],
            cfg=cfg)
    p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    p.start()
    rpn_stage2_out = mp_queue.get()
    p.join()

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 RPN, generate proposals'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    mp_kwargs = dict(
            queue=mp_queue,
            imdb_name=args.imdb_name,
            rpn_model_path=str(rpn_stage2_out['model_path']),
            cfg=cfg,
            rpn_test_prototxt=rpn_test_prototxt)
    p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    p.start()
    rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
    p.join()

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 Mask R-CNN, init from stage 2 RPN R-CNN model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    mp_kwargs = dict(
            queue=mp_queue,
            imdb_name=args.imdb_name,
            init_model=str(rpn_stage2_out['model_path']),
            solver=solvers[3],
            max_iters=max_iters[3],
            cfg=cfg,
            rpn_file=rpn_stage2_out['proposal_path'])
    p = mp.Process(target=train_mask_rcnn, kwargs=mp_kwargs)
    p.start()
    mask_rcnn_stage2_out = mp_queue.get()
    p.join()

    # Create final model (just a copy of the last stage)
    final_path = os.path.join(
            os.path.dirname(mask_rcnn_stage2_out['model_path']),
            args.net_name + '_mask_rcnn_final.caffemodel')
    print 'cp {} -> {}'.format(
            mask_rcnn_stage2_out['model_path'], final_path)
    shutil.copy(mask_rcnn_stage2_out['model_path'], final_path)
    print 'Final model: {}'.format(final_path)
main_12.py
import Redes.Red_Ini as Interface
import Auxiliary.preprocessingData as Data
import Auxiliary.GPUtil as GPU
import os
from threading import Thread
import pickle
import tensorflow as tf

###################################
# Expose three GPUs to TensorFlow -- one per worker thread launched below.
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1, 2"

# TensorFlow wizardry
config = tf.ConfigProto()
# Don't pre-allocate memory; allocate as-needed
config.gpu_options.allow_growth = True
# Alternative: cap each process at a fixed fraction of GPU memory instead.
#config.gpu_options.per_process_gpu_memory_fraction = 0.3
###################################


def launch_cnn(index_data, index_gpu, graph, name_path, X_train, y_train, X_test, y_test):
    # Train one cross-validation fold on a dedicated GPU inside its own
    # graph/session.  `index_data` selects the fold, `index_gpu` the device.
    # The training history is pickled to <name_path><index_data>.pkl.
    with graph.as_default():
        sess = tf.Session(config=config, graph=graph)
        with tf.device('/gpu:'+str(index_gpu)):
            with sess.as_default():
                model = Interface.Red.build((1, 51, 3), 2, number_convolutional_layers=4, first_number_filters=256, dropout=0.5)
                history, model = Interface.Red.train(model, index_data, name_path + '_all_'+str(index_data), X_train[index_data], y_train[index_data], X_test[index_data], y_test[index_data], noise=1, l2_noise=1, weight_decay_noise=0.00001, class_weight = {0 : 0.0643, 1 : 0.935}) #, class_weight = {0 : 0.4, 1 : 0.6}
                # Persist only the history dict; the model itself is saved by
                # Interface.Red.train (presumably -- verify against that module).
                with open(name_path + str(index_data) + '.pkl', 'wb') as f:  # Python 3: open(..., 'wb')
                    pickle.dump([history.history], f)
                # Interface.Red.plot_info(history)


X_train, y_train, subjects_train, X_test, y_test, subjects_test = Data.loadData("data_12/normalizado")

name_path = "../Results/result_12/4_capas/normal/con_weight_0.935/"
try:
    # Create target Directory
    os.mkdir(name_path)
    print("Directory " , name_path , " Created ")
except FileExistsError:
    print("Directory " , name_path , " already exists")
name_file = "result"

number_folders = 5
number_gpus = 3
temp_number_folder = 0
# Process folds in batches of `number_gpus`: start one thread per GPU,
# then block until the whole batch has finished before launching the next.
while temp_number_folder < number_folders:
    threads = []
    for i in range(number_gpus):
        if temp_number_folder < number_folders:
            graph = tf.Graph()  # fresh graph per fold so variables don't collide
            t = Thread(target=launch_cnn, args=(temp_number_folder, i ,graph, name_path + name_file, X_train, y_train, X_test, y_test))
            temp_number_folder = temp_number_folder + 1
            threads.append(t)
    # Start all threads
    for x in threads:
        x.start()
    # Wait for all of them to finish
    for x in threads:
        x.join()
quit()
__init__.py
""" objectstore package, abstraction for storing blobs of data for use in Galaxy, all providers ensure that data can be accessed on the filesystem for running tools """ import os import random import shutil import logging import threading from xml.etree import ElementTree from galaxy.util import umask_fix_perms, force_symlink from galaxy.exceptions import ObjectInvalid, ObjectNotFound from galaxy.util.sleeper import Sleeper from galaxy.util.directory_hash import directory_hash_id from galaxy.util.odict import odict try: from sqlalchemy.orm import object_session except ImportError: object_session = None NO_SESSION_ERROR_MESSAGE = "Attempted to 'create' object store entity in configuration with no database session present." log = logging.getLogger( __name__ ) class ObjectStore(object): """ ObjectStore abstract interface """ def __init__(self, config, config_xml=None, **kwargs): self.running = True self.extra_dirs = {} def shutdown(self): self.running = False def exists(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None): """ Returns True if the object identified by `obj` exists in this file store, False otherwise. FIELD DESCRIPTIONS (these apply to all the methods in this class): :type obj: object :param obj: A Galaxy object with an assigned database ID accessible via the .id attribute. :type base_dir: string :param base_dir: A key in self.extra_dirs corresponding to the base directory in which this object should be created, or None to specify the default directory. :type dir_only: bool :param dir_only: If True, check only the path where the file identified by `obj` should be located, not the dataset itself. This option applies to `extra_dir` argument as well. :type extra_dir: string :param extra_dir: Append `extra_dir` to the directory structure where the dataset identified by `obj` should be located. 
(e.g., 000/extra_dir/obj.id) :type extra_dir_at_root: bool :param extra_dir_at_root: Applicable only if `extra_dir` is set. If True, the `extra_dir` argument is placed at root of the created directory structure rather than at the end (e.g., extra_dir/000/obj.id vs. 000/extra_dir/obj.id) :type alt_name: string :param alt_name: Use this name as the alternative name for the created dataset rather than the default. :type obj_dir: bool :param obj_dir: Append a subdirectory named with the object's ID (e.g. 000/obj.id) """ raise NotImplementedError() def file_ready(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False): """ A helper method that checks if a file corresponding to a dataset is ready and available to be used. Return True if so, False otherwise.""" return True def create(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False): """ Mark the object identified by `obj` as existing in the store, but with no content. This method will create a proper directory structure for the file if the directory does not already exist. See `exists` method for the description of other fields. """ raise NotImplementedError() def empty(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False): """ Test if the object identified by `obj` has content. If the object does not exist raises `ObjectNotFound`. See `exists` method for the description of the fields. """ raise NotImplementedError() def size(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False): """ Return size of the object identified by `obj`. If the object does not exist, return 0. See `exists` method for the description of the fields. """ raise NotImplementedError() def delete(self, obj, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False): """ Deletes the object identified by `obj`. 
See `exists` method for the description of other fields. :type entire_dir: bool :param entire_dir: If True, delete the entire directory pointed to by extra_dir. For safety reasons, this option applies only for and in conjunction with the extra_dir or obj_dir options. """ raise NotImplementedError() def get_data(self, obj, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False): """ Fetch `count` bytes of data starting at offset `start` from the object identified uniquely by `obj`. If the object does not exist raises `ObjectNotFound`. See `exists` method for the description of other fields. :type start: int :param start: Set the position to start reading the dataset file :type count: int :param count: Read at most `count` bytes from the dataset """ raise NotImplementedError() def get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False): """ Get the expected filename (including the absolute path) which can be used to access the contents of the object uniquely identified by `obj`. See `exists` method for the description of the fields. """ raise NotImplementedError() def update_from_file(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, file_name=None, create=False): """ Inform the store that the file associated with the object has been updated. If `file_name` is provided, update from that file instead of the default. If the object does not exist raises `ObjectNotFound`. See `exists` method for the description of other fields. :type file_name: string :param file_name: Use file pointed to by `file_name` as the source for updating the dataset identified by `obj` :type create: bool :param create: If True and the default dataset does not exist, create it first. 
""" raise NotImplementedError() def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False): """ If the store supports direct URL access, return a URL. Otherwise return None. Note: need to be careful to to bypass dataset security with this. See `exists` method for the description of the fields. """ raise NotImplementedError() def get_store_usage_percent(self): """ Return the percentage indicating how full the store is """ raise NotImplementedError() class DiskObjectStore(ObjectStore): """ Standard Galaxy object store, stores objects in files under a specific directory on disk. >>> from galaxy.util.bunch import Bunch >>> import tempfile >>> file_path=tempfile.mkdtemp() >>> obj = Bunch(id=1) >>> s = DiskObjectStore(Bunch(umask=077, job_working_directory=file_path, new_file_path=file_path, object_store_check_old_style=False), file_path=file_path) >>> s.create(obj) >>> s.exists(obj) True >>> assert s.get_filename(obj) == file_path + '/000/dataset_1.dat' """ def __init__(self, config, config_xml=None, file_path=None, extra_dirs=None): super(DiskObjectStore, self).__init__(config, config_xml=None, file_path=file_path, extra_dirs=extra_dirs) self.file_path = file_path or config.file_path self.config = config self.check_old_style = config.object_store_check_old_style self.extra_dirs['job_work'] = config.job_working_directory self.extra_dirs['temp'] = config.new_file_path # The new config_xml overrides universe settings. if config_xml is not None: for e in config_xml: if e.tag == 'files_dir': self.file_path = e.get('path') else: self.extra_dirs[e.get('type')] = e.get('path') if extra_dirs is not None: self.extra_dirs.update( extra_dirs ) def _get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False): """Class method that returns the absolute path for the file corresponding to the `obj`.id regardless of whether the file exists. 
""" path = self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name, obj_dir=False, old_style=True) # For backward compatibility, check the old style root path first; otherwise, # construct hashed path if not os.path.exists(path): return self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name) # TODO: rename to _disk_path or something like that to avoid conflicts with children that'll use the local_extra_dirs decorator, e.g. S3 def _construct_path(self, obj, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs): """ Construct the expected absolute path for accessing the object identified by `obj`.id. :type base_dir: string :param base_dir: A key in self.extra_dirs corresponding to the base directory in which this object should be created, or None to specify the default directory. :type dir_only: bool :param dir_only: If True, check only the path where the file identified by `obj` should be located, not the dataset itself. This option applies to `extra_dir` argument as well. :type extra_dir: string :param extra_dir: Append the value of this parameter to the expected path used to access the object identified by `obj` (e.g., /files/000/<extra_dir>/dataset_10.dat). :type alt_name: string :param alt_name: Use this name as the alternative name for the returned dataset rather than the default. :type old_style: bool param old_style: This option is used for backward compatibility. If True the composed directory structure does not include a hash id (e.g., /files/dataset_10.dat (old) vs. 
/files/000/dataset_10.dat (new)) """ base = self.extra_dirs.get(base_dir, self.file_path) if old_style: if extra_dir is not None: path = os.path.join(base, extra_dir) else: path = base else: # Construct hashed path rel_path = os.path.join(*directory_hash_id(obj.id)) # Create a subdirectory for the object ID if obj_dir: rel_path = os.path.join(rel_path, str(obj.id)) # Optionally append extra_dir if extra_dir is not None: if extra_dir_at_root: rel_path = os.path.join(extra_dir, rel_path) else: rel_path = os.path.join(rel_path, extra_dir) path = os.path.join(base, rel_path) if not dir_only: path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % obj.id) return os.path.abspath(path) def exists(self, obj, **kwargs): if self.check_old_style: path = self._construct_path(obj, old_style=True, **kwargs) # For backward compatibility, check root path first; otherwise, construct # and check hashed path if os.path.exists(path): return True return os.path.exists(self._construct_path(obj, **kwargs)) def create(self, obj, **kwargs): if not self.exists(obj, **kwargs): path = self._construct_path(obj, **kwargs) dir_only = kwargs.get('dir_only', False) # Create directory if it does not exist dir = path if dir_only else os.path.dirname(path) if not os.path.exists(dir): os.makedirs(dir) # Create the file if it does not exist if not dir_only: open(path, 'w').close() # Should be rb? 
umask_fix_perms(path, self.config.umask, 0666) def empty(self, obj, **kwargs): return os.path.getsize(self.get_filename(obj, **kwargs)) == 0 def size(self, obj, **kwargs): if self.exists(obj, **kwargs): try: return os.path.getsize(self.get_filename(obj, **kwargs)) except OSError: return 0 else: return 0 def delete(self, obj, entire_dir=False, **kwargs): path = self.get_filename(obj, **kwargs) extra_dir = kwargs.get('extra_dir', None) obj_dir = kwargs.get('obj_dir', False) try: if entire_dir and (extra_dir or obj_dir): shutil.rmtree(path) return True if self.exists(obj, **kwargs): os.remove(path) return True except OSError, ex: log.critical('%s delete error %s' % (self._get_filename(obj, **kwargs), ex)) return False def get_data(self, obj, start=0, count=-1, **kwargs): data_file = open(self.get_filename(obj, **kwargs), 'r') # Should be rb? data_file.seek(start) content = data_file.read(count) data_file.close() return content def get_filename(self, obj, **kwargs): if self.check_old_style: path = self._construct_path(obj, old_style=True, **kwargs) # For backward compatibility, check root path first; otherwise, construct # and return hashed path if os.path.exists(path): return path return self._construct_path(obj, **kwargs) def update_from_file(self, obj, file_name=None, create=False, **kwargs): """ `create` parameter is not used in this implementation """ preserve_symlinks = kwargs.pop( 'preserve_symlinks', False ) # FIXME: symlinks and the object store model may not play well together # these should be handled better, e.g. 
registering the symlink'd file as an object if create: self.create(obj, **kwargs) if file_name and self.exists(obj, **kwargs): try: if preserve_symlinks and os.path.islink( file_name ): force_symlink( os.readlink( file_name ), self.get_filename( obj, **kwargs ) ) else: shutil.copy( file_name, self.get_filename( obj, **kwargs ) ) except IOError, ex: log.critical('Error copying %s to %s: %s' % (file_name, self._get_filename(obj, **kwargs), ex)) raise ex def get_object_url(self, obj, **kwargs): return None def get_store_usage_percent(self): st = os.statvfs(self.file_path) return ( float( st.f_blocks - st.f_bavail ) / st.f_blocks ) * 100 class CachingObjectStore(ObjectStore): """ Object store that uses a directory for caching files, but defers and writes back to another object store. """ def __init__(self, path, backend): super(CachingObjectStore, self).__init__(self, path, backend) class NestedObjectStore(ObjectStore): """ Base for ObjectStores that use other ObjectStores (DistributedObjectStore, HierarchicalObjectStore) """ def __init__(self, config, config_xml=None): super(NestedObjectStore, self).__init__(config, config_xml=config_xml) self.backends = {} def shutdown(self): for store in self.backends.values(): store.shutdown() super(NestedObjectStore, self).shutdown() def exists(self, obj, **kwargs): return self._call_method('exists', obj, False, False, **kwargs) def file_ready(self, obj, **kwargs): return self._call_method('file_ready', obj, False, False, **kwargs) def create(self, obj, **kwargs): random.choice(self.backends.values()).create(obj, **kwargs) def empty(self, obj, **kwargs): return self._call_method('empty', obj, True, False, **kwargs) def size(self, obj, **kwargs): return self._call_method('size', obj, 0, False, **kwargs) def delete(self, obj, **kwargs): return self._call_method('delete', obj, False, False, **kwargs) def get_data(self, obj, **kwargs): return self._call_method('get_data', obj, ObjectNotFound, True, **kwargs) def get_filename(self, 
obj, **kwargs): return self._call_method('get_filename', obj, ObjectNotFound, True, **kwargs) def update_from_file(self, obj, **kwargs): if kwargs.get('create', False): self.create(obj, **kwargs) kwargs['create'] = False return self._call_method('update_from_file', obj, ObjectNotFound, True, **kwargs) def get_object_url(self, obj, **kwargs): return self._call_method('get_object_url', obj, None, False, **kwargs) def _call_method(self, method, obj, default, default_is_exception, **kwargs): """ Check all children object stores for the first one with the dataset """ for key, store in self.backends.items(): if store.exists(obj, **kwargs): return store.__getattribute__(method)(obj, **kwargs) if default_is_exception: raise default( 'objectstore, _call_method failed: %s on %s, kwargs: %s' % ( method, str( obj ), str( kwargs ) ) ) else: return default class DistributedObjectStore(NestedObjectStore): """ ObjectStore that defers to a list of backends, for getting objects the first store where the object exists is used, objects are created in a store selected randomly, but with weighting. 
""" def __init__(self, config, config_xml=None, fsmon=False): super(DistributedObjectStore, self).__init__(config, config_xml=config_xml) if config_xml is None: self.distributed_config = config.distributed_object_store_config_file assert self.distributed_config is not None, "distributed object store ('object_store = distributed') " \ "requires a config file, please set one in " \ "'distributed_object_store_config_file')" self.backends = {} self.weighted_backend_ids = [] self.original_weighted_backend_ids = [] self.max_percent_full = {} self.global_max_percent_full = 0.0 random.seed() self.__parse_distributed_config(config, config_xml) self.sleeper = None if fsmon and ( self.global_max_percent_full or filter( lambda x: x != 0.0, self.max_percent_full.values() ) ): self.sleeper = Sleeper() self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor) self.filesystem_monitor_thread.setDaemon( True ) self.filesystem_monitor_thread.start() log.info("Filesystem space monitor started") def __parse_distributed_config(self, config, config_xml=None): if config_xml is None: root = ElementTree.parse(self.distributed_config).getroot() log.debug('Loading backends for distributed object store from %s' % self.distributed_config) else: root = config_xml.find('backends') log.debug('Loading backends for distributed object store from %s' % config_xml.get('id')) self.global_max_percent_full = float(root.get('maxpctfull', 0)) for elem in [ e for e in root if e.tag == 'backend' ]: id = elem.get('id') weight = int(elem.get('weight', 1)) maxpctfull = float(elem.get('maxpctfull', 0)) if elem.get('type', 'disk'): path = None extra_dirs = {} for sub in elem: if sub.tag == 'files_dir': path = sub.get('path') elif sub.tag == 'extra_dir': type = sub.get('type') extra_dirs[type] = sub.get('path') self.backends[id] = DiskObjectStore(config, file_path=path, extra_dirs=extra_dirs) self.max_percent_full[id] = maxpctfull log.debug("Loaded disk backend '%s' with weight %s and 
file_path: %s" % (id, weight, path)) if extra_dirs: log.debug(" Extra directories:") for type, dir in extra_dirs.items(): log.debug(" %s: %s" % (type, dir)) for i in range(0, weight): # The simplest way to do weighting: add backend ids to a # sequence the number of times equalling weight, then randomly # choose a backend from that sequence at creation self.weighted_backend_ids.append(id) self.original_weighted_backend_ids = self.weighted_backend_ids def shutdown(self): super(DistributedObjectStore, self).shutdown() if self.sleeper is not None: self.sleeper.wake() def __filesystem_monitor(self): while self.running: new_weighted_backend_ids = self.original_weighted_backend_ids for id, backend in self.backends.items(): maxpct = self.max_percent_full[id] or self.global_max_percent_full pct = backend.get_store_usage_percent() if pct > maxpct: new_weighted_backend_ids = filter(lambda x: x != id, new_weighted_backend_ids) self.weighted_backend_ids = new_weighted_backend_ids self.sleeper.sleep(120) # Test free space every 2 minutes def create(self, obj, **kwargs): """ create() is the only method in which obj.object_store_id may be None """ if obj.object_store_id is None or not self.exists(obj, **kwargs): if obj.object_store_id is None or obj.object_store_id not in self.weighted_backend_ids: try: obj.object_store_id = random.choice(self.weighted_backend_ids) except IndexError: raise ObjectInvalid( 'objectstore.create, could not generate obj.object_store_id: %s, kwargs: %s' % ( str( obj ), str( kwargs ) ) ) create_object_in_session( obj ) log.debug("Selected backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id)) else: log.debug("Using preferred backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id)) self.backends[obj.object_store_id].create(obj, **kwargs) def _call_method(self, method, obj, default, default_is_exception, **kwargs): object_store_id = self.__get_store_id_for(obj, **kwargs) if 
object_store_id is not None: return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs) if default_is_exception: raise default( 'objectstore, _call_method failed: %s on %s, kwargs: %s' % ( method, str( obj ), str( kwargs ) ) ) else: return default def __get_store_id_for(self, obj, **kwargs): if obj.object_store_id is not None and obj.object_store_id in self.backends: return obj.object_store_id else: # if this instance has been switched from a non-distributed to a # distributed object store, or if the object's store id is invalid, # try to locate the object log.warning('The backend object store ID (%s) for %s object with ID %s is invalid' % (obj.object_store_id, obj.__class__.__name__, obj.id)) for id, store in self.backends.items(): if store.exists(obj, **kwargs): log.warning('%s object with ID %s found in backend object store with ID %s' % (obj.__class__.__name__, obj.id, id)) obj.object_store_id = id create_object_in_session( obj ) return id return None class HierarchicalObjectStore(NestedObjectStore): """ ObjectStore that defers to a list of backends, for getting objects the first store where the object exists is used, objects are always created in the first store. 
""" def __init__(self, config, config_xml=None, fsmon=False): super(HierarchicalObjectStore, self).__init__(config, config_xml=config_xml) self.backends = odict() for b in sorted(config_xml.find('backends'), key=lambda b: int(b.get('order'))): self.backends[int(b.get('order'))] = build_object_store_from_config(config, fsmon=fsmon, config_xml=b) def exists(self, obj, **kwargs): """ Exists must check all child object stores """ for store in self.backends.values(): if store.exists(obj, **kwargs): return True return False def create(self, obj, **kwargs): """ Create will always be called by the primary object_store """ self.backends[0].create(obj, **kwargs) def build_object_store_from_config(config, fsmon=False, config_xml=None): """ Depending on the configuration setting, invoke the appropriate object store """ if config_xml is None and os.path.exists( config.object_store_config_file ): # This is a top level invocation of build_object_store_from_config, and # we have an object_store_conf.xml -- read the .xml and build # accordingly root = ElementTree.parse(config.object_store_config_file).getroot() store = root.get('type') config_xml = root elif config_xml is not None: store = config_xml.get('type') else: store = config.object_store if store == 'disk': return DiskObjectStore(config=config, config_xml=config_xml) elif store == 's3': from .s3 import S3ObjectStore return S3ObjectStore(config=config, config_xml=config_xml) elif store == 'swift': from .s3 import SwiftObjectStore return SwiftObjectStore(config=config, config_xml=config_xml) elif store == 'distributed': return DistributedObjectStore(config=config, fsmon=fsmon, config_xml=config_xml) elif store == 'hierarchical': return HierarchicalObjectStore(config=config, config_xml=config_xml) elif store == 'irods': from .rods import IRODSObjectStore return IRODSObjectStore(config=config, config_xml=config_xml) elif store == 'pulsar': from .pulsar import PulsarObjectStore return PulsarObjectStore(config=config, 
config_xml=config_xml) else: log.error("Unrecognized object store definition: {0}".format(store)) def local_extra_dirs( func ): """ A decorator for non-local plugins to utilize local directories for their extra_dirs (job_working_directory and temp). """ def wraps( self, *args, **kwargs ): if kwargs.get( 'base_dir', None ) is None: return func( self, *args, **kwargs ) else: for c in self.__class__.__mro__: if c.__name__ == 'DiskObjectStore': return getattr( c, func.__name__ )( self, *args, **kwargs ) raise Exception( "Could not call DiskObjectStore's %s method, does your Object Store plugin inherit from DiskObjectStore?" % func.__name__ ) return wraps def convert_bytes(bytes): """ A helper function used for pretty printing disk usage """ if bytes is None: bytes = 0 bytes = float(bytes) if bytes >= 1099511627776: terabytes = bytes / 1099511627776 size = '%.2fTB' % terabytes elif bytes >= 1073741824: gigabytes = bytes / 1073741824 size = '%.2fGB' % gigabytes elif bytes >= 1048576: megabytes = bytes / 1048576 size = '%.2fMB' % megabytes elif bytes >= 1024: kilobytes = bytes / 1024 size = '%.2fKB' % kilobytes else: size = '%.2fb' % bytes return size def create_object_in_session( obj ): session = object_session( obj ) if object_session is not None else None if session is not None: object_session( obj ).add( obj ) object_session( obj ).flush() else: raise Exception( NO_SESSION_ERROR_MESSAGE )
YT_Download.py
import os
import re
import eel
import sys
import time
import ffmpeg
import requests
import threading
import urllib.parse
from pytube import YouTube
# BUGFIX: RegexMatchError was caught in download() but never imported,
# which turned the intended retry path into a NameError.
from pytube.exceptions import RegexMatchError
from modules import meta_song
from modules import lyrics


class Downloader():
    """Download YouTube audio/video, convert with ffmpeg, and push progress/metadata to the eel UI."""

    def __init__(self, urls, mode):
        # Normalize each URL to a canonical watch URL, dropping blank entries.
        fix_urls = []
        for url in urls:
            if url != '':
                qs = urllib.parse.urlparse(url).query
                videoID = urllib.parse.parse_qs(qs)['v'][0]
                url = f'https://www.youtube.com/watch?v={videoID}'
                fix_urls += [url]
        print(f'直したurls:{fix_urls}')
        self.urls = fix_urls
        # BUGFIX: count the normalized list, not the raw input -- blank
        # entries inflated the progress-percentage denominator.
        self.count = len(fix_urls)
        self.percent = {}
        self.songDatas = {}
        self.lock = threading.Lock()
        self.mode = mode
        # mode '1' downloads one stream per URL (audio only); others download two.
        if mode == '1':
            self.number = 1
        else:
            self.number = 2
        self.desktop_path = os.path.normpath(os.path.expanduser("~/Desktop"))

    def makelist(self, alltag):
        """Flatten pytube stream objects into dicts, split into audio and video lists."""
        tags = {}
        tags_list = []
        audios = []
        videos = []
        for tag in alltag:
            tags['itag'] = tag.itag
            tags['mime_type'] = tag.mime_type
            tags['res'] = tag.resolution
            tags['abr'] = tag.abr
            tags['fps'] = tag.fps
            tags['vcodec'] = tag.video_codec
            tags['acodec'] = tag.audio_codec
            tags['file_type'] = tag.type
            tags_list.append(tags.copy())
        for tags in tags_list:
            if tags['file_type'] == 'audio':
                audios.append(tags.copy())
            if tags['file_type'] == 'video':
                videos.append(tags.copy())
        return audios, videos

    def select_mobile_v(self, videos):
        # Must be mp4 and, among those, the highest resolution.
        hq_mobile_v = 0
        for video in videos:
            if video['mime_type'].split('/')[1] == 'mp4':
                if video['res']:
                    if hq_mobile_v < int(video['res'][:-1]):
                        hq_mobile_v = int(video['res'][:-1])
                        video_mobile = video
        # NOTE(review): video_mobile is unbound if no mp4 stream exists --
        # presumably every YouTube video exposes one; confirm before relying on it.
        print(video_mobile)
        return video_mobile

    def select_hq_v(self, videos):
        """Pick the best video stream, preferring 60 fps streams over 30 fps ones."""
        hq_v60 = 0
        hq_v = 0
        video_hq = {}
        video_hq60 = None
        for video in videos:
            # Streams without a resolution would collide below; filter them first.
            if video['res'] is not None:
                if int(video['fps']) == 60:
                    if hq_v60 < int(video['res'][:-1]):
                        hq_v60 = int(video['res'][:-1])
                        video_hq60 = video
                elif hq_v < int(video['res'][:-1]):
                    hq_v = int(video['res'][:-1])
                    video_hq = video
        if video_hq60:
            return video_hq60
        else:
            return video_hq

    def select_hq_a(self, audios):
        """Pick the audio stream with the highest average bitrate (abr)."""
        hq_a = 0
        audio_hq = {}
        for audio in audios:
            if audio['abr'] is not None:
                if hq_a < int(audio['abr'][:-4]):
                    hq_a = int(audio['abr'][:-4])
                    audio_hq = audio
        return audio_hq

    def show_progress_bar(self, stream, chunk, bytes_remaining):
        """pytube on_progress callback; records this thread's completion percentage."""
        current = ((stream.filesize - bytes_remaining) / stream.filesize)
        with self.lock:
            # threading.currentThread()/getName() are deprecated aliases.
            self.percent[threading.current_thread().name] = current * 100

    def remove_symbol(self, title):
        # Strip symbols that are unsafe in file names; collapse whitespace to '_'.
        code_regex = re.compile('[!"#$%&\'\\\\()*+,-./:;<=>?@[\\]^_`{|}~「」〔〕“”〈〉『』【】&*・()$#@。、?!`+¥%]')
        clean_title = code_regex.sub('', title)
        title = re.sub(r'\s+', "_", clean_title)
        return title

    def download_audio(self, yt, audio_hq, title):
        """Download the selected audio stream into <Desktop>/audio/webm."""
        path = self.desktop_path
        itag_a = audio_hq['itag']
        if not os.path.exists(path + '/audio/webm'):
            os.makedirs(path + '/audio/webm')
        yt.streams.get_by_itag(itag_a).download(path + '/audio/webm', filename=title)

    def download_video(self, yt, video_hq, title):
        """Download the selected video stream into <Desktop>/video/original."""
        path = self.desktop_path
        itag_v = video_hq['itag']
        if not os.path.exists(path + '/video/original'):
            os.makedirs(path + '/video/original')
        yt.streams.get_by_itag(itag_v).download(path + '/video/original', filename=title)

    def opus_to_aac(self, path, title, audiopath):
        """Convert the downloaded (opus/webm) audio to AAC; return the .m4a path."""
        if not os.path.exists(path + '/audio/aac'):
            os.mkdir(path + '/audio/aac')
        title_aac = path + '/audio/aac/' + title + '.' + 'm4a'
        instream_a = ffmpeg.input(audiopath)
        stream = ffmpeg.output(instream_a, title_aac, audio_bitrate=160000, acodec="aac")
        ffmpeg.run(stream, overwrite_output=True)
        return title_aac

    def join_audio_video(self, video_hq, path, title, videopath, audiopath):
        """Mux the separate video and AAC audio files into one mp4."""
        # Streams that already carry audio (acodec set) are skipped here;
        # muxing them again would double the sound.
        if video_hq['acodec'] is None:
            if not os.path.exists(path + '/video/joined'):
                os.mkdir(path + '/video/joined')
            title_join = path + '/video/joined/' + title + '.' + 'mp4'
            instream_v = ffmpeg.input(videopath)
            instream_a = ffmpeg.input(audiopath)
            # vcodec='copy' remuxes without re-encoding (vcodec='h264' would re-encode).
            stream = ffmpeg.output(instream_v, instream_a, title_join, vcodec='copy', acodec='copy')
            ffmpeg.run(stream, overwrite_output=True)

    def download(self, url):
        """Worker: fetch metadata/lyrics/artwork for one URL, then download and convert its streams."""
        path = self.desktop_path
        for i in range(10):
            try:
                yt = YouTube(url)
            except RegexMatchError:
                print('動画情報を取得できなかった。')
            else:
                print('上手く行きました。')
                break
        else:
            # BUGFIX: previously fell through with ``yt`` unbound after 10
            # failed attempts, crashing with NameError on the next line.
            raise RuntimeError(f'could not fetch video info after 10 attempts: {url}')
        yt.register_on_progress_callback(self.show_progress_bar)
        alltag = yt.streams
        title = yt.title
        print(title)
        try:
            artist = yt.metadata[0]['Artist']
            song = yt.metadata[0]['Song']
            # Strip non-space separators from the artist/song names.
            artist = artist.replace(',', '')
            song = song.replace(',', '')
            print(f'情報:{artist}, {song}')
            songData = meta_song.addMusicData(song, artist)
            with self.lock:
                # self.songDatas layout:
                #   'URL': {'JPN': {'name': ..., 'artist': ...},
                #           'USA': {'name': ..., 'artist': ...}}
                self.songDatas[url] = songData.make_songData()  # {} when nothing was found
                self.songDatas[url]['title'] = title
                artwork_url = ''
                file_name = ''
                LyricInstance = ''
                # Fetch lyrics, preferring the Japanese metadata when present.
                if 'artist' in self.songDatas[url]['JPN']:
                    artist_jp = self.songDatas[url]['JPN']['artist']
                    songName_jp = self.songDatas[url]['JPN']['name']
                    LyricInstance = lyrics.Lyric(artist_jp, songName_jp)
                elif 'artist' in self.songDatas[url]['USA']:
                    artist_en = self.songDatas[url]['USA']['artist']
                    songName_en = self.songDatas[url]['USA']['name']
                    LyricInstance = lyrics.Lyric(artist_en, songName_en)
                if LyricInstance:
                    if LyricInstance.lyric:
                        self.songDatas[url]['lyric'] = LyricInstance.lyric
                    else:
                        self.songDatas[url]['lyric'] = ''
                # Download the artwork and record where it was saved.
                if 'artworkUrl' in self.songDatas[url]['JPN']:
                    artwork_url = self.songDatas[url]['JPN']['artworkUrl']
                    name = self.songDatas[url]['JPN']['name']
                    artist = self.songDatas[url]['JPN']['artist']
                    file_name = f'{name}_{artist}'
                if 'artworkUrl' in self.songDatas[url]['USA']:
                    artwork_url = self.songDatas[url]['USA']['artworkUrl']
                    name = self.songDatas[url]['USA']['name']
                    artist = self.songDatas[url]['USA']['artist']
                    file_name = f'{name}_{artist}'
                print(artwork_url)
                print(file_name)
                if artwork_url:
                    artwork_path = f'web/images/artwork/{file_name}.jpg'
                    res = requests.get(artwork_url, stream=True)
                    if res.status_code == 200:
                        with open(artwork_path, 'wb') as f:
                            f.write(res.content)
                        self.songDatas[url]['artwork_path'] = artwork_path.replace('web/', '')
        except KeyError as e:
            # Videos without Artist/Song metadata land here; downloading continues.
            print(e)
        title = self.remove_symbol(title)
        audios, videos = self.makelist(alltag)
        audio_hq = self.select_hq_a(audios)
        ta = threading.Thread(target=self.download_audio, args=[yt, audio_hq, title, ])
        ta.start()
        if self.mode == '2':
            video_hq = self.select_mobile_v(videos)
            print(f'ここあるよね:{video_hq}')
            tv = threading.Thread(target=self.download_video, args=[yt, video_hq, title, ])
            tv.start()
            tv.join()
        if self.mode == '3':
            video_hq = self.select_hq_v(videos)
            tv = threading.Thread(target=self.download_video, args=[yt, video_hq, title, ])
            tv.start()
            tv.join()
        ta.join()
        audiopath = path + '/audio/webm/' + title + '.' + audio_hq['mime_type'].split('/')[1]
        # opus_to_aac returns the path of the converted AAC file.
        audiopath = self.opus_to_aac(path, title, audiopath)
        if not self.mode == '1':
            videopath = path + '/video/original/' + title + '.' + video_hq['mime_type'].split('/')[1]
            self.join_audio_video(video_hq, path, title, videopath, audiopath)
        with self.lock:
            # BUGFIX: when the metadata lookup above raised KeyError, the url
            # entry was never created and plain indexing crashed here.
            self.songDatas.setdefault(url, {})['audiopath'] = audiopath

    def get_progress(self, threads):
        """Monitor thread: poll per-thread percentages and push the average to the UI."""
        while any(t.is_alive() for t in threads):
            with self.lock:
                percent = round(sum(self.percent.values()) / (self.count * self.number))
            # Report and sleep outside the lock so workers can keep updating.
            eel.putProgress(percent)
            time.sleep(1.0)
        eel.putProgress(100)

    def multi_download(self):
        """Download every URL in parallel, then hand results to the eel frontend."""
        global start
        start = time.time()
        downloads = []
        for url in self.urls:
            t = threading.Thread(target=self.download, args=[url, ])
            t.start()
            downloads.append(t)
        monitor_progress = threading.Thread(target=self.get_progress, args=(downloads,))
        monitor_progress.start()
        for t in downloads:
            t.join()
        monitor_progress.join()
        print('done')
        eel.addSongData(self.urls, self.songDatas)
        eel.doneProgress()
        time_of_script = time.time() - start
        print('実行時間:{}'.format(time_of_script))


# Manual usage examples:
# Downloader = Downloader(['https://www.youtube.com/watch?v=cw4-bqSpVdo','https://www.youtube.com/watch?v=CGXhyRiXR2M'])
# Downloader.multi_download()
# Downloader.async_dl(['https://www.youtube.com/watch?v=cw4-bqSpVdo','https://www.youtube.com/watch?v=CGXhyRiXR2M'])
# eel.start('index.html', close_callback=print(Downloader.title))
# Test URLs:
# https://www.youtube.com/watch?v=vUQfJIsTbJI  (Scar)
# https://www.youtube.com/watch?v=rjyi3K8LeQ0  (2-second 2K clip)
# https://www.youtube.com/watch?v=zCLOJ9j1k2Y&t=2s  (4K video)
# Development notes (translated from the original Japanese; "done" = resolved):
# - cipher value changed: https://github.com/nficano/pytube/issues/642  done
# - dict-shape solution found here: https://gist.github.com/dogrunjp/9748789  done
# - title comes back as "YouTube": https://github.com/nficano/pytube/issues/632  done
# - to choose a destination, call download('./video') with a path  done
# - titles ending in '.' caused errors (could be '...', so extract via regex)  done
# - streams that already contain audio doubled the sound; skip muxing when the
#   video's acodec is not None, but still convert the audio to AAC  done
# - want separate folders for webm video/audio, MP4, and the muxed output
# - convert audio to AAC before muxing  done
# - what about 60 fps streams?  done
# - append a '.' only when the last character is not already one  done
# - vcodec copy alone puts vp9 inside mp4, which iPads cannot play
#   http://tech.ckme.co.jp/ffmpeg_vcodec.shtml  (re-encoding works but is far
#   too slow to be practical)
# - is the audio really 160kbps?  done
# - use relative paths so this runs on other machines too
# - ffmpeg's y/n overwrite prompt used to stall the program  done
# - 8K shows up as 2160p; the best video seems fixed at itag 272 and the best
#   audio is probably itag 251: https://github.com/nficano/pytube/issues/304
#   (duplicated somewhere)
# - for video use, download the high-quality MP4  done
# lany https://www.youtube.com/watch?v=iPMp-TP3ODg
# lany cover https://www.youtube.com/watch?v=K3xbUe_spgA
cell.py
import os import ssl import time import shutil import socket import asyncio import logging import argparse import datetime import functools import contextlib import multiprocessing import tornado.web as t_web import synapse.exc as s_exc import synapse.common as s_common import synapse.daemon as s_daemon import synapse.telepath as s_telepath import synapse.lib.base as s_base import synapse.lib.boss as s_boss import synapse.lib.coro as s_coro import synapse.lib.hive as s_hive import synapse.lib.const as s_const import synapse.lib.nexus as s_nexus import synapse.lib.config as s_config import synapse.lib.health as s_health import synapse.lib.output as s_output import synapse.lib.certdir as s_certdir import synapse.lib.dyndeps as s_dyndeps import synapse.lib.httpapi as s_httpapi import synapse.lib.version as s_version import synapse.lib.hiveauth as s_hiveauth import synapse.lib.lmdbslab as s_lmdbslab import synapse.tools.backup as s_t_backup logger = logging.getLogger(__name__) SLAB_MAP_SIZE = 128 * s_const.mebibyte ''' Base classes for the synapse "cell" microservice architecture. ''' def adminapi(log=False): ''' Decorator for CellApi (and subclasses) for requiring a method to be called only by an admin user. Args: log (bool): If set to True, log the user, function and arguments. 
''' def decrfunc(func): @functools.wraps(func) def wrapped(*args, **kwargs): if args[0].user is not None and not args[0].user.isAdmin(): raise s_exc.AuthDeny(mesg='User is not an admin.', user=args[0].user.name) if log: logger.info('Executing [%s] as [%s] with args [%s][%s]', func.__qualname__, args[0].user.name, args[1:], kwargs) return func(*args, **kwargs) wrapped.__syn_wrapped__ = 'adminapi' return wrapped return decrfunc class CellApi(s_base.Base): async def __anit__(self, cell, link, user): await s_base.Base.__anit__(self) self.cell = cell self.link = link assert user self.user = user self.sess = self.link.get('sess') # type: s_daemon.Sess self.sess.user = user await self.initCellApi() async def initCellApi(self): pass async def allowed(self, perm, default=None): ''' Check if the user has the requested permission. Args: perm: permission path components to check default: Value returned if no value stored Examples: Form a path and check the permission from a remote proxy:: perm = ('node', 'add', 'inet:ipv4') allowed = await prox.allowed(perm) if allowed: dostuff() Returns: Optional[bool]: True if the user has permission, False if explicitly denied, None if no entry ''' return self.user.allowed(perm, default=default) async def _reqUserAllowed(self, perm): ''' Helper method that subclasses can use for user permission checking. Args: perm: permission path components to check Notes: This can be used to require a permission; and will throw an exception if the permission is not allowed. Examples: Implement an API that requires a user to have a specific permission in order to execute it:: async def makeWidget(self, wvalu, wtype): # This will throw if the user doesn't have the appropriate widget permission await self._reqUserAllowed(('widget', wtype)) return await self.cell.makeWidget((wvalu, wtype)) Returns: None: This API does not return anything. It only throws an exception on failure. Raises: s_exc.AuthDeny: If the permission is not allowed. 
''' if not await self.allowed(perm): perm = '.'.join(perm) mesg = f'User must have permission {perm}' raise s_exc.AuthDeny(mesg=mesg, perm=perm, user=self.user.name) def getCellType(self): return self.cell.getCellType() def getCellIden(self): return self.cell.getCellIden() async def isCellActive(self): ''' Returns True if the cell is an active/leader cell. ''' return await self.cell.isCellActive() @adminapi() def getNexsIndx(self): return self.cell.getNexsIndx() @adminapi() async def promote(self): return await self.cell.promote() def getCellUser(self): return self.user.pack() def setCellUser(self, iden): ''' Switch to another user (admin only). This API allows remote admin/service accounts to impersonate a user. Used mostly by services that manage their own authentication/sessions. ''' if not self.user.isAdmin(): mesg = 'setCellUser() caller must be admin.' raise s_exc.AuthDeny(mesg=mesg) user = self.cell.auth.user(iden) if user is None: raise s_exc.NoSuchUser(iden=iden) self.user = user self.link.get('sess').user = user return True async def ps(self): return await self.cell.ps(self.user) async def kill(self, iden): return await self.cell.kill(self.user, iden) @adminapi(log=True) async def addUser(self, name, passwd=None, email=None, iden=None): return await self.cell.addUser(name, passwd=passwd, email=email, iden=iden) @adminapi(log=True) async def delUser(self, iden): return await self.cell.delUser(iden) @adminapi(log=True) async def addRole(self, name): return await self.cell.addRole(name) @adminapi(log=True) async def delRole(self, iden): return await self.cell.delRole(iden) @adminapi() async def dyncall(self, iden, todo, gatekeys=()): return await self.cell.dyncall(iden, todo, gatekeys=gatekeys) @adminapi() async def dyniter(self, iden, todo, gatekeys=()): async for item in self.cell.dyniter(iden, todo, gatekeys=gatekeys): yield item @adminapi() async def issue(self, nexsiden: str, event: str, args, kwargs, meta=None): ''' Note: this swallows exceptions and 
return values. It is expected that the nexus _followerLoop would be the return path ''' try: await self.cell.nexsroot.issue(nexsiden, event, args, kwargs, meta) except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only raise except Exception: pass @adminapi(log=True) async def delAuthUser(self, name): await self.cell.auth.delUser(name) await self.cell.fire('user:mod', act='deluser', name=name) @adminapi(log=True) async def addAuthRole(self, name): role = await self.cell.auth.addRole(name) await self.cell.fire('user:mod', act='addrole', name=name) return role.pack() @adminapi(log=True) async def delAuthRole(self, name): await self.cell.auth.delRole(name) await self.cell.fire('user:mod', act='delrole', name=name) @adminapi() async def getAuthUsers(self, archived=False): ''' Args: archived (bool): If true, list all users, else list non-archived users ''' return await self.cell.getAuthUsers(archived=archived) @adminapi() async def getAuthRoles(self): return await self.cell.getAuthRoles() @adminapi(log=True) async def addUserRule(self, iden, rule, indx=None, gateiden=None): return await self.cell.addUserRule(iden, rule, indx=indx, gateiden=gateiden) @adminapi(log=True) async def setUserRules(self, iden, rules, gateiden=None): return await self.cell.setUserRules(iden, rules, gateiden=gateiden) @adminapi(log=True) async def setRoleRules(self, iden, rules, gateiden=None): return await self.cell.setRoleRules(iden, rules, gateiden=gateiden) @adminapi(log=True) async def addRoleRule(self, iden, rule, indx=None, gateiden=None): return await self.cell.addRoleRule(iden, rule, indx=indx, gateiden=gateiden) @adminapi(log=True) async def delUserRule(self, iden, rule, gateiden=None): return await self.cell.delUserRule(iden, rule, gateiden=gateiden) @adminapi(log=True) async def delRoleRule(self, iden, rule, gateiden=None): return await self.cell.delRoleRule(iden, rule, gateiden=gateiden) @adminapi(log=True) async def setUserAdmin(self, iden, admin, 
gateiden=None): return await self.cell.setUserAdmin(iden, admin, gateiden=gateiden) @adminapi() async def getAuthInfo(self, name): s_common.deprecated('getAuthInfo') user = await self.cell.auth.getUserByName(name) if user is not None: info = user.pack() info['roles'] = [self.cell.auth.role(r).name for r in info['roles']] return info role = await self.cell.auth.getRoleByName(name) if role is not None: return role.pack() raise s_exc.NoSuchName(name=name) @adminapi(log=True) async def addAuthRule(self, name, rule, indx=None, gateiden=None): s_common.deprecated('addAuthRule') item = await self.cell.auth.getUserByName(name) if item is None: item = await self.cell.auth.getRoleByName(name) await item.addRule(rule, indx=indx, gateiden=gateiden) @adminapi(log=True) async def delAuthRule(self, name, rule, gateiden=None): s_common.deprecated('delAuthRule') item = await self.cell.auth.getUserByName(name) if item is None: item = await self.cell.auth.getRoleByName(name) await item.delRule(rule, gateiden=gateiden) @adminapi(log=True) async def setAuthAdmin(self, name, isadmin): s_common.deprecated('setAuthAdmin') item = await self.cell.auth.getUserByName(name) if item is None: item = await self.cell.auth.getRoleByName(name) await item.setAdmin(isadmin) async def setUserPasswd(self, iden, passwd): await self.cell.auth.reqUser(iden) if self.user.iden == iden: return await self.cell.setUserPasswd(iden, passwd) self.user.confirm(('auth', 'user', 'set', 'passwd')) return await self.cell.setUserPasswd(iden, passwd) @adminapi(log=True) async def setUserLocked(self, useriden, locked): return await self.cell.setUserLocked(useriden, locked) @adminapi(log=True) async def setUserArchived(self, useriden, archived): return await self.cell.setUserArchived(useriden, archived) @adminapi(log=True) async def setUserEmail(self, useriden, email): return await self.cell.setUserEmail(useriden, email) @adminapi(log=True) async def addUserRole(self, useriden, roleiden): return await 
self.cell.addUserRole(useriden, roleiden) @adminapi(log=True) async def delUserRole(self, useriden, roleiden): return await self.cell.delUserRole(useriden, roleiden) async def getUserInfo(self, name): user = await self.cell.auth.reqUserByName(name) if self.user.isAdmin() or self.user.iden == user.iden: info = user.pack() info['roles'] = [self.cell.auth.role(r).name for r in info['roles']] return info mesg = 'getUserInfo denied for non-admin and non-self' raise s_exc.AuthDeny(mesg=mesg) async def getRoleInfo(self, name): role = await self.cell.auth.reqRoleByName(name) if self.user.isAdmin() or role.iden in self.user.info.get('roles', ()): return role.pack() mesg = 'getRoleInfo denied for non-admin and non-member' raise s_exc.AuthDeny(mesg=mesg) @adminapi() async def getUserDef(self, iden): return await self.cell.getUserDef(iden) @adminapi() async def getAuthGate(self, iden): return await self.cell.getAuthGate(iden) @adminapi() async def getAuthGates(self): return await self.cell.getAuthGates() @adminapi() async def getRoleDef(self, iden): return await self.cell.getRoleDef(iden) @adminapi() async def getUserDefByName(self, name): return await self.cell.getUserDefByName(name) @adminapi() async def getRoleDefByName(self, name): return await self.cell.getRoleDefByName(name) @adminapi() async def getUserDefs(self): return await self.cell.getUserDefs() @adminapi() async def getRoleDefs(self): return await self.cell.getRoleDefs() @adminapi() async def isUserAllowed(self, iden, perm, gateiden=None): return await self.cell.isUserAllowed(iden, perm, gateiden=gateiden) @adminapi() async def tryUserPasswd(self, name, passwd): return await self.cell.tryUserPasswd(name, passwd) @adminapi() async def getUserProfile(self, iden): return await self.cell.getUserProfile(iden) @adminapi() async def getUserProfInfo(self, iden, name): return await self.cell.getUserProfInfo(iden, name) @adminapi() async def setUserProfInfo(self, iden, name, valu): return await 
self.cell.setUserProfInfo(iden, name, valu) async def getHealthCheck(self): await self._reqUserAllowed(('health',)) return await self.cell.getHealthCheck() @adminapi() async def getDmonSessions(self): return await self.cell.getDmonSessions() @adminapi() async def listHiveKey(self, path=None): return await self.cell.listHiveKey(path=path) @adminapi() async def getHiveKeys(self, path): return await self.cell.getHiveKeys(path) @adminapi() async def getHiveKey(self, path): return await self.cell.getHiveKey(path) @adminapi(log=True) async def setHiveKey(self, path, valu): return await self.cell.setHiveKey(path, valu) @adminapi(log=True) async def popHiveKey(self, path): return await self.cell.popHiveKey(path) @adminapi(log=True) async def saveHiveTree(self, path=()): return await self.cell.saveHiveTree(path=path) @adminapi() async def getNexusChanges(self, offs): async for item in self.cell.getNexusChanges(offs): yield item @adminapi() async def runBackup(self, name=None, wait=True): ''' Run a new backup. Args: name (str): The optional name of the backup. wait (bool): On True, wait for backup to complete before returning. Returns: str: The name of the newly created backup. ''' return await self.cell.runBackup(name=name, wait=wait) @adminapi() async def getBackups(self): ''' Retrieve a list of backups. Returns: list[str]: A list of backup names. ''' return await self.cell.getBackups() @adminapi() async def delBackup(self, name): ''' Delete a backup by name. Args: name (str): The name of the backup to delete. ''' return await self.cell.delBackup(name) @adminapi() async def getDiagInfo(self): return { 'slabs': await s_lmdbslab.Slab.getSlabStats(), } class Cell(s_nexus.Pusher, s_telepath.Aware): ''' A Cell() implements a synapse micro-service. A Cell has 5 phases of startup: 1. Universal cell data structures 2. Service specific storage/data (pre-nexs) 3. Nexus subsystem initialization 4. Service specific startup (with nexus) 5. 
Networking and mirror services ''' cellapi = CellApi confdefs = {} # type: ignore # This should be a JSONSchema properties list for an object. confbase = { 'cell:guid': { 'description': 'An optional hard-coded GUID to store as the permanent GUID for the cell.', 'type': 'string', }, 'mirror': { 'description': 'A telepath URL for our upstream mirror (we must be a backup!).', 'type': 'string', }, 'auth:passwd': { 'description': 'Set to <passwd> (local only) to bootstrap the root user password.', 'type': 'string', }, 'auth:anon': { 'description': 'Allow anonymous telepath access by mapping to the given user name.', 'type': 'string', }, 'auth:ctor': { 'description': 'Allow the construction of the cell auth object to be hooked at runtime.', 'type': 'string', 'hideconf': True, }, 'auth:conf': { 'description': 'Extended configuration to be used by an alternate auth constructor.', 'type': 'object', 'hideconf': True, }, 'nexslog:en': { 'default': False, 'description': 'Record all changes to the cell. Required for mirroring (on both sides).', 'type': 'boolean', }, 'nexslog:async': { 'default': False, 'description': '(Experimental) Map the nexus log LMDB instance with map_async=True.', 'type': 'boolean', }, 'dmon:listen': { 'description': 'A config-driven way to specify the telepath bind URL.', 'type': ['string', 'null'], }, 'https:port': { 'description': 'A config-driven way to specify the HTTPS port.', 'type': ['integer', 'null'], }, 'backup:dir': { 'description': 'A directory outside the service directory where backups will be saved.', 'type': 'string', }, 'aha:name': { 'description': 'The name of the cell service in the aha service registry.', 'type': 'string', }, 'aha:leader': { 'description': 'The AHA service name to claim as the active instance of a storm service.', 'type': 'string', }, 'aha:network': { 'description': 'The AHA service network. 
This makes aha:name/aha:leader relative names.', 'type': 'string', }, 'aha:registry': { 'description': 'The telepath URL of the aha service registry.', 'type': ['string', 'array'], 'items': {'type': 'string'}, }, 'aha:admin': { 'description': 'An AHA client certificate CN to register as a local admin user.', 'type': 'string', }, } BACKUP_SPAWN_TIMEOUT = 4.0 BACKUP_ACQUIRE_TIMEOUT = 0.5 async def __anit__(self, dirn, conf=None, readonly=False): # phase 1 if conf is None: conf = {} s_telepath.Aware.__init__(self) self.dirn = s_common.gendir(dirn) self.auth = None self.sessions = {} self.isactive = False self.inaugural = False self.activecoros = {} self.conf = self._initCellConf(conf) # each cell has a guid path = s_common.genpath(self.dirn, 'cell.guid') # generate a guid file if needed if not os.path.isfile(path): self.inaugural = True guid = conf.get('cell:guid') if guid is None: guid = s_common.guid() with open(path, 'w') as fd: fd.write(guid) # read our guid file with open(path, 'r') as fd: self.iden = fd.read().strip() self.donexslog = self.conf.get('nexslog:en') backdirn = self.conf.get('backup:dir') if backdirn is not None: backdirn = s_common.genpath(backdirn) if backdirn.startswith(self.dirn): mesg = 'backup:dir must not be within the service directory' raise s_exc.BadConfValu(mesg=mesg) backdirn = s_common.gendir(backdirn) self.backdirn = backdirn self.backuprunning = False if self.conf.get('mirror') and not self.conf.get('nexslog:en'): mesg = 'Mirror mode requires nexslog:en=True' raise s_exc.BadConfValu(mesg=mesg) # construct our nexsroot instance ( but do not start it ) await s_nexus.Pusher.__anit__(self, self.iden) root = await self._ctorNexsRoot() # mutually assured destruction with our nexs root self.onfini(root.fini) root.onfini(self.fini) self.setNexsRoot(root) await self._initCellSlab(readonly=readonly) self.hive = await self._initCellHive() # self.cellinfo, a HiveDict for general purpose persistent storage node = await self.hive.open(('cellinfo',)) 
self.cellinfo = await node.dict() self.onfini(node) node = await self.hive.open(('cellvers',)) self.cellvers = await node.dict(nexs=True) if self.inaugural: await self.cellinfo.set('synapse:version', s_version.version) synvers = self.cellinfo.get('synapse:version') if synvers is None or synvers < s_version.version: await self.cellinfo.set('synapse:version', s_version.version) self.auth = await self._initCellAuth() auth_passwd = self.conf.get('auth:passwd') if auth_passwd is not None: user = await self.auth.getUserByName('root') if not await user.tryPasswd(auth_passwd): await user.setPasswd(auth_passwd, nexs=False) self.boss = await s_boss.Boss.anit() self.onfini(self.boss) self.dynitems = { 'auth': self.auth, 'cell': self } # a tuple of (vers, func) tuples # it is expected that this is set by # initServiceStorage self.cellupdaters = () # initialize web app and callback data structures self._health_funcs = [] self.addHealthFunc(self._cellHealth) # initialize network backend infrastructure await self._initCertDir() await self._initAhaRegistry() # initialize network daemons (but do not listen yet) # to allow registration of callbacks and shared objects # within phase 2/4. 
await self._initCellHttp() await self._initCellDmon() # phase 2 - service storage await self.initServiceStorage() # phase 3 - nexus subsystem await self.initNexusSubsystem() # phase 4 - service logic await self.initServiceRuntime() # phase 5 - service networking await self.initServiceNetwork() async def _execCellUpdates(self): # implement to apply updates to a fully initialized active cell # ( and do so using _bumpCellVers ) pass async def _bumpCellVers(self, name, updates): if self.inaugural: await self.cellvers.set(name, updates[-1][0]) return curv = self.cellvers.get(name, 0) for vers, callback in updates: if vers <= curv: continue await callback() await self.cellvers.set(name, vers) curv = vers async def _initAhaRegistry(self): self.ahainfo = None self.ahaclient = None ahaurl = self.conf.get('aha:registry') if ahaurl is not None: info = await s_telepath.addAhaUrl(ahaurl) self.ahaclient = info.get('client') if self.ahaclient is None: self.ahaclient = await s_telepath.Client.anit(info.get('url')) self.ahaclient._fini_atexit = True info['client'] = self.ahaclient async def finiaha(): await s_telepath.delAhaUrl(ahaurl) self.onfini(finiaha) ahaadmin = self.conf.get('aha:admin') if ahaadmin is not None: # add the user in a pre-nexus compatible way user = await self.auth.getUserByName(ahaadmin) if user is None: iden = s_common.guid(ahaadmin) await self.auth._addUser(iden, ahaadmin) user = await self.auth.getUserByName(ahaadmin) if not user.isAdmin(): await user.setAdmin(True, logged=False) if user.isLocked(): await user.setLocked(False, logged=False) async def initServiceStorage(self): pass async def initNexusSubsystem(self): mirror = self.conf.get('mirror') await self.nexsroot.startup(mirror, celliden=self.iden) await self.setCellActive(mirror is None) async def initServiceNetwork(self): # start a unix local socket daemon listener sockpath = os.path.join(self.dirn, 'sock') sockurl = f'unix://{sockpath}' try: await self.dmon.listen(sockurl) except 
asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only raise except OSError as e: logger.error(f'Failed to listen on unix socket at: [{sockpath}][{e}]') logger.error('LOCAL UNIX SOCKET WILL BE UNAVAILABLE') except Exception: # pragma: no cover logging.exception('Unknown dmon listen error.') raise self.sockaddr = None turl = self.conf.get('dmon:listen') if turl is not None: self.sockaddr = await self.dmon.listen(turl) logger.info(f'dmon listening: {turl}') await self._initAhaService() port = self.conf.get('https:port') if port is not None: await self.addHttpsPort(port) logger.info(f'https listening: {port}') async def _initAhaService(self): if self.ahaclient is None: return turl = self.conf.get('dmon:listen') ahaname = self.conf.get('aha:name') if ahaname is None: return ahalead = self.conf.get('aha:leader') ahanetw = self.conf.get('aha:network') ahainfo = self.conf.get('aha:svcinfo') if ahainfo is None and turl is not None: urlinfo = s_telepath.chopurl(turl) urlinfo.pop('host', None) urlinfo['port'] = self.sockaddr[1] ahainfo = { 'urlinfo': urlinfo, } if ahainfo is None: return self.ahainfo = ahainfo async def onlink(proxy): await proxy.addAhaSvc(ahaname, self.ahainfo, network=ahanetw) if self.isactive and ahalead is not None: await proxy.addAhaSvc(ahalead, self.ahainfo, network=ahanetw) async def fini(): await self.ahaclient.offlink(onlink) await self.ahaclient.onlink(onlink) self.onfini(fini) async def initServiceRuntime(self): pass async def _ctorNexsRoot(self): ''' Initialize a NexsRoot to use for the cell. 
''' map_async = self.conf.get('nexslog:async') return await s_nexus.NexsRoot.anit(self.dirn, donexslog=self.donexslog, map_async=map_async) async def getNexsIndx(self): return await self.nexsroot.index() @s_nexus.Pusher.onPushAuto('nexslog:setindex') async def setNexsIndx(self, indx): return await self.nexsroot.setindex(indx) async def promote(self): ''' Transform this cell from a passive follower to an active cell that writes changes locally. ''' if self.conf.get('mirror') is None: mesg = 'promote() called on non-mirror' raise s_exc.BadConfValu(mesg=mesg) await self.nexsroot.promote() await self.setCellActive(True) async def _setAhaActive(self): if self.ahaclient is None: return if self.ahainfo is None: return ahalead = self.conf.get('aha:leader') if ahalead is None: return try: proxy = await self.ahaclient.proxy(timeout=2) except TimeoutError: # pragma: no cover return None # if we went inactive, bump the aha proxy if not self.isactive: await proxy.fini() return ahanetw = self.conf.get('aha:network') try: await proxy.addAhaSvc(ahalead, self.ahainfo, network=ahanetw) except asyncio.CancelledError: # pragma: no cover raise except Exception as e: # pragma: no cover logger.warning(f'_setAhaActive failed: {e}') def addActiveCoro(self, func, iden=None, base=None): ''' Add a function callback to be run as a coroutine when the Cell is active. Args: func (coroutine function): The function run as a coroutine. iden (str): The iden to use for the coroutine. base (Optional[Base]): if present, this active coro will be fini'd when the base is fini'd Returns: str: A GUID string that identifies the coroutine for delActiveCoro() NOTE: This will re-fire the coroutine if it exits and the Cell is still active. 
''' if base and base.isfini: raise s_exc.IsFini() if iden is None: iden = s_common.guid() cdef = {'func': func, 'base': base} self.activecoros[iden] = cdef if base: async def fini(): await self.delActiveCoro(iden) base.onfini(fini) if self.isactive: self._fireActiveCoro(iden, cdef) return iden async def delActiveCoro(self, iden): ''' Remove an Active coroutine previously added with addActiveCoro(). Args: iden (str): The iden returned by addActiveCoro() ''' cdef = self.activecoros.pop(iden, None) if cdef is None: return await self._killActiveCoro(cdef) def _fireActiveCoros(self): for iden, cdef in self.activecoros.items(): self._fireActiveCoro(iden, cdef) def _fireActiveCoro(self, iden, cdef): func = cdef.get('func') async def wrap(): while not self.isfini: try: await func() except asyncio.CancelledError: raise except Exception: # pragma no cover logger.exception(f'activeCoro Error: {func}') await asyncio.sleep(1) cdef['task'] = self.schedCoro(wrap()) async def _killActiveCoros(self): for cdef in self.activecoros.values(): await self._killActiveCoro(cdef) async def _killActiveCoro(self, cdef): task = cdef.pop('task', None) if task is not None: task.cancel() await asyncio.sleep(0) async def isCellActive(self): return self.isactive async def setCellActive(self, active): self.isactive = active if self.isactive: self._fireActiveCoros() await self._execCellUpdates() await self.initServiceActive() else: await self._killActiveCoros() await self.initServicePassive() await self._setAhaActive() async def initServiceActive(self): # pragma: no cover pass async def initServicePassive(self): # pragma: no cover pass async def getNexusChanges(self, offs): async for item in self.nexsroot.iter(offs): yield item def _reqBackDirn(self, name): self._reqBackConf() path = s_common.genpath(self.backdirn, name) if not path.startswith(self.backdirn): mesg = 'Directory traversal detected' raise s_exc.BadArg(mesg=mesg) return path async def runBackup(self, name=None, wait=True): if 
self.backuprunning: raise s_exc.BackupAlreadyRunning(mesg='Another backup is already running') try: task = None self.backuprunning = True if name is None: name = time.strftime('%Y%m%d%H%M%S', datetime.datetime.now().timetuple()) path = self._reqBackDirn(name) if os.path.isdir(path): mesg = 'Backup with name already exists' raise s_exc.BadArg(mesg=mesg) task = self.schedCoro(self._execBackupTask(path)) def done(self, task): self.backuprunning = False task.add_done_callback(functools.partial(done, self)) if wait: logger.info(f'Waiting for backup to complete [{name}]') await task return name except (asyncio.CancelledError, Exception): if task is not None: task.cancel() self.backuprunning = False raise async def _execBackupTask(self, dirn): ''' A task that backs up the cell to the target directory ''' logger.info(f'Starting backup to [{dirn}]') await self.boss.promote('backup', self.auth.rootuser) slabs = s_lmdbslab.Slab.getSlabsInDir(self.dirn) assert slabs ctx = multiprocessing.get_context('spawn') mypipe, child_pipe = ctx.Pipe() paths = [str(slab.path) for slab in slabs] def spawnproc(): proc = ctx.Process(target=self._backupProc, args=(child_pipe, self.dirn, dirn, paths)) proc.start() hasdata = mypipe.poll(timeout=self.BACKUP_SPAWN_TIMEOUT) if not hasdata: raise s_exc.SynErr(mesg='backup subprocess stuck') data = mypipe.recv() assert data == 'ready' return proc proc = await s_coro.executor(spawnproc) while True: await s_lmdbslab.Slab.syncLoopOnce() if not any(slab.dirty for slab in slabs): break try: mypipe.send('proceed') # This is technically pending the ioloop waiting for the backup process to acquire a bunch of # transactions. We're effectively locking out new write requests the brute force way. 
hasdata = mypipe.poll(timeout=self.BACKUP_ACQUIRE_TIMEOUT) if not hasdata: raise s_exc.SynErr(mesg='backup subprocess stuck') data = mypipe.recv() assert data == 'captured' def waitforproc(): proc.join() if proc.exitcode: raise s_exc.SpawnExit(code=proc.exitcode) retn = await s_coro.executor(waitforproc) except (asyncio.CancelledError, Exception): logger.exception('Error performing backup to [{dirn}]') proc.terminate() raise else: logger.info(f'Backup completed to [{dirn}]') return retn @staticmethod def _backupProc(pipe, srcdir, dstdir, lmdbpaths): ''' (In a separate process) Actually do the backup ''' pipe.send('ready') data = pipe.recv() assert data == 'proceed' with s_t_backup.capturelmdbs(srcdir, onlydirs=lmdbpaths) as lmdbinfo: # Let parent know we have the transactions so he can resume the ioloop pipe.send('captured') s_t_backup.txnbackup(lmdbinfo, srcdir, dstdir) def _reqBackConf(self): if self.backdirn is None: mesg = 'Backup APIs require the backup:dir config option is set' raise s_exc.NeedConfValu(mesg=mesg) async def delBackup(self, name): self._reqBackConf() path = self._reqBackDirn(name) cellguid = os.path.join(path, 'cell.guid') if not os.path.isfile(cellguid): mesg = 'Specified backup path has no cell.guid file.' 
raise s_exc.BadArg(mesg=mesg) logger.info(f'Removing backup for [{path}]') await s_coro.executor(shutil.rmtree, path, ignore_errors=True) logger.info(f'Backup removed from [{path}]') async def getBackups(self): self._reqBackConf() backups = [] def walkpath(path): for name in os.listdir(path): full = os.path.join(path, name) cellguid = os.path.join(full, 'cell.guid') if os.path.isfile(cellguid): backups.append(os.path.relpath(full, self.backdirn)) continue if os.path.isdir(full): walkpath(full) walkpath(self.backdirn) return backups async def isUserAllowed(self, iden, perm, gateiden=None): user = self.auth.user(iden) if user is None: return False return user.allowed(perm, gateiden=gateiden) async def tryUserPasswd(self, name, passwd): user = await self.auth.getUserByName(name) if user is None: return None if not await user.tryPasswd(passwd): return None return user.pack() async def getUserProfile(self, iden): user = await self.auth.reqUser(iden) return user.profile.pack() async def getUserProfInfo(self, iden, name): user = await self.auth.reqUser(iden) return user.profile.get(name) async def setUserProfInfo(self, iden, name, valu): user = await self.auth.reqUser(iden) return await user.profile.set(name, valu) async def addUserRule(self, iden, rule, indx=None, gateiden=None): user = await self.auth.reqUser(iden) retn = await user.addRule(rule, indx=indx, gateiden=gateiden) return retn async def addRoleRule(self, iden, rule, indx=None, gateiden=None): role = await self.auth.reqRole(iden) retn = await role.addRule(rule, indx=indx, gateiden=gateiden) return retn async def delUserRule(self, iden, rule, gateiden=None): user = await self.auth.reqUser(iden) return await user.delRule(rule, gateiden=gateiden) async def delRoleRule(self, iden, rule, gateiden=None): role = await self.auth.reqRole(iden) return await role.delRule(rule, gateiden=gateiden) async def setUserRules(self, iden, rules, gateiden=None): user = await self.auth.reqUser(iden) await user.setRules(rules, 
gateiden=gateiden) async def setRoleRules(self, iden, rules, gateiden=None): role = await self.auth.reqRole(iden) await role.setRules(rules, gateiden=gateiden) async def setUserAdmin(self, iden, admin, gateiden=None): user = await self.auth.reqUser(iden) await user.setAdmin(admin, gateiden=gateiden) async def addUserRole(self, useriden, roleiden): user = await self.auth.reqUser(useriden) await user.grant(roleiden) await self.fire('user:mod', act='grant', user=useriden, role=roleiden) async def delUserRole(self, useriden, roleiden): user = await self.auth.reqUser(useriden) await user.revoke(roleiden) await self.fire('user:mod', act='revoke', user=useriden, role=roleiden) async def addUser(self, name, passwd=None, email=None, iden=None): user = await self.auth.addUser(name, passwd=passwd, email=email, iden=iden) await self.fire('user:mod', act='adduser', name=name) return user.pack(packroles=True) async def delUser(self, iden): await self.auth.delUser(iden) await self.fire('user:mod', act='deluser', user=iden) async def addRole(self, name): role = await self.auth.addRole(name) return role.pack() async def delRole(self, iden): await self.auth.delRole(iden) async def setUserEmail(self, useriden, email): await self.auth.setUserInfo(useriden, 'email', email) async def setUserPasswd(self, iden, passwd): user = await self.auth.reqUser(iden) await user.setPasswd(passwd) await self.fire('user:mod', act='setpasswd', user=iden) async def setUserLocked(self, iden, locked): user = await self.auth.reqUser(iden) await user.setLocked(locked) await self.fire('user:mod', act='locked', user=iden, locked=locked) async def setUserArchived(self, iden, archived): user = await self.auth.reqUser(iden) await user.setArchived(archived) await self.fire('user:mod', act='archived', user=iden, archived=archived) async def getUserDef(self, iden): user = self.auth.user(iden) if user is not None: return user.pack(packroles=True) async def getAuthGate(self, iden): gate = self.auth.getAuthGate(iden) 
if gate is None: return None return gate.pack() async def getAuthGates(self): return [g.pack() for g in self.auth.getAuthGates()] async def getRoleDef(self, iden): role = self.auth.role(iden) if role is not None: return role.pack() async def getUserDefByName(self, name): user = await self.auth.getUserByName(name) if user is not None: return user.pack(packroles=True) async def getRoleDefByName(self, name): role = await self.auth.getRoleByName(name) if role is not None: return role.pack() async def getUserDefs(self): return [u.pack(packroles=True) for u in self.auth.users()] async def getRoleDefs(self): return [r.pack() for r in self.auth.roles()] async def getAuthUsers(self, archived=False): return [u.pack() for u in self.auth.users() if archived or not u.info.get('archived')] async def getAuthRoles(self): return [r.pack() for r in self.auth.roles()] async def dyniter(self, iden, todo, gatekeys=()): for useriden, perm, gateiden in gatekeys: (await self.auth.reqUser(useriden)).confirm(perm, gateiden=gateiden) item = self.dynitems.get(iden) name, args, kwargs = todo meth = getattr(item, name) async for item in meth(*args, **kwargs): yield item async def dyncall(self, iden, todo, gatekeys=()): for useriden, perm, gateiden in gatekeys: (await self.auth.reqUser(useriden)).confirm(perm, gateiden=gateiden) item = self.dynitems.get(iden) if item is None: raise s_exc.NoSuchIden(mesg=iden) name, args, kwargs = todo meth = getattr(item, name) return await s_coro.ornot(meth, *args, **kwargs) async def getConfOpt(self, name): return self.conf.get(name) def _getSessInfo(self, iden): return self.sessstor.gen(iden) def getUserName(self, iden, defv='<unknown>'): ''' Translate the user iden to a user name. ''' # since this pattern is so common, utilitizing... 
user = self.auth.user(iden) if user is None: return defv return user.name async def genHttpSess(self, iden): # TODO age out http sessions sess = self.sessions.get(iden) if sess is not None: return sess sess = await s_httpapi.Sess.anit(self, iden) self.sessions[iden] = sess return sess async def addHttpsPort(self, port, host='0.0.0.0', sslctx=None): addr = socket.gethostbyname(host) if sslctx is None: pkeypath = os.path.join(self.dirn, 'sslkey.pem') certpath = os.path.join(self.dirn, 'sslcert.pem') if not os.path.isfile(certpath): logger.warning('NO CERTIFICATE FOUND! generating self-signed certificate.') with s_common.getTempDir() as dirn: cdir = s_certdir.CertDir(path=(dirn,)) pkey, cert = cdir.genHostCert(self.getCellType()) cdir.savePkeyPem(pkey, pkeypath) cdir.saveCertPem(cert, certpath) sslctx = self.initSslCtx(certpath, pkeypath) serv = self.wapp.listen(port, address=addr, ssl_options=sslctx) self.httpds.append(serv) return list(serv._sockets.values())[0].getsockname() def initSslCtx(self, certpath, keypath): sslctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) if not os.path.isfile(keypath): raise s_exc.NoSuchFile(name=keypath) if not os.path.isfile(certpath): raise s_exc.NoSuchFile(name=certpath) sslctx.load_cert_chain(certpath, keypath) return sslctx async def _initCellHttp(self): self.httpds = [] self.sessstor = s_lmdbslab.GuidStor(self.slab, 'http:sess') async def fini(): [await s.fini() for s in self.sessions.values()] for http in self.httpds: http.stop() self.onfini(fini) # Generate/Load a Cookie Secret secpath = os.path.join(self.dirn, 'cookie.secret') if not os.path.isfile(secpath): with s_common.genfile(secpath) as fd: fd.write(s_common.guid().encode('utf8')) with s_common.getfile(secpath) as fd: secret = fd.read().decode('utf8') opts = { 'cookie_secret': secret, 'websocket_ping_interval': 10 } self.wapp = t_web.Application(**opts) self._initCellHttpApis() def _initCellHttpApis(self): self.addHttpApi('/api/v1/login', s_httpapi.LoginV1, 
{'cell': self}) self.addHttpApi('/api/v1/active', s_httpapi.ActiveV1, {'cell': self}) self.addHttpApi('/api/v1/healthcheck', s_httpapi.HealthCheckV1, {'cell': self}) self.addHttpApi('/api/v1/auth/users', s_httpapi.AuthUsersV1, {'cell': self}) self.addHttpApi('/api/v1/auth/roles', s_httpapi.AuthRolesV1, {'cell': self}) self.addHttpApi('/api/v1/auth/adduser', s_httpapi.AuthAddUserV1, {'cell': self}) self.addHttpApi('/api/v1/auth/addrole', s_httpapi.AuthAddRoleV1, {'cell': self}) self.addHttpApi('/api/v1/auth/delrole', s_httpapi.AuthDelRoleV1, {'cell': self}) self.addHttpApi('/api/v1/auth/user/(.*)', s_httpapi.AuthUserV1, {'cell': self}) self.addHttpApi('/api/v1/auth/role/(.*)', s_httpapi.AuthRoleV1, {'cell': self}) self.addHttpApi('/api/v1/auth/password/(.*)', s_httpapi.AuthUserPasswdV1, {'cell': self}) self.addHttpApi('/api/v1/auth/grant', s_httpapi.AuthGrantV1, {'cell': self}) self.addHttpApi('/api/v1/auth/revoke', s_httpapi.AuthRevokeV1, {'cell': self}) self.addHttpApi('/api/v1/auth/onepass/issue', s_httpapi.OnePassIssueV1, {'cell': self}) def addHttpApi(self, path, ctor, info): self.wapp.add_handlers('.*', ( (path, ctor, info), )) async def _initCertDir(self): certpath = s_common.gendir(self.dirn, 'certs') # add our cert path to the global resolver s_certdir.addCertPath(certpath) async def fini(): s_certdir.delCertPath(certpath) self.onfini(fini) # our certdir is *only* the cell certs dir self.certdir = s_certdir.CertDir(path=(certpath,)) async def _initCellDmon(self): self.dmon = await s_daemon.Daemon.anit() self.dmon.share('*', self) self.onfini(self.dmon.fini) async def _initCellHive(self): isnew = not self.slab.dbexists('hive') db = self.slab.initdb('hive') hive = await s_hive.SlabHive.anit(self.slab, db=db, nexsroot=self.nexsroot) self.onfini(hive) if isnew: path = os.path.join(self.dirn, 'hiveboot.yaml') if os.path.isfile(path): logger.debug(f'Loading cell hive from {path}') tree = s_common.yamlload(path) if tree is not None: # Pack and unpack the tree to 
avoid tuple/list issues # for in-memory structures. tree = s_common.tuplify(tree) await hive.loadHiveTree(tree) return hive async def _initCellSlab(self, readonly=False): s_common.gendir(self.dirn, 'slabs') path = os.path.join(self.dirn, 'slabs', 'cell.lmdb') if not os.path.exists(path) and readonly: logger.warning('Creating a slab for a readonly cell.') _slab = await s_lmdbslab.Slab.anit(path, map_size=SLAB_MAP_SIZE) _slab.initdb('hive') await _slab.fini() self.slab = await s_lmdbslab.Slab.anit(path, map_size=SLAB_MAP_SIZE, readonly=readonly) self.onfini(self.slab.fini) async def _initCellAuth(self): authctor = self.conf.get('auth:ctor') if authctor is not None: ctor = s_dyndeps.getDynLocal(authctor) return await ctor(self) return await self._initCellHiveAuth() async def _initCellHiveAuth(self): node = await self.hive.open(('auth',)) auth = await s_hiveauth.Auth.anit(node, nexsroot=self.nexsroot) self.onfini(auth.fini) return auth @contextlib.asynccontextmanager async def getLocalProxy(self, share='*', user='root'): url = self.getLocalUrl(share=share, user=user) prox = await s_telepath.openurl(url) yield prox def getLocalUrl(self, share='*', user='root'): return f'cell://{user}@{self.dirn}:{share}' def _initCellConf(self, conf): if isinstance(conf, dict): conf = s_config.Config.getConfFromCell(self, conf=conf) for k, v in self._loadCellYaml('cell.yaml').items(): conf.setdefault(k, v) conf.reqConfValid() return conf def _loadCellYaml(self, *path): path = os.path.join(self.dirn, *path) if os.path.isfile(path): logger.debug('Loading file from [%s]', path) return s_common.yamlload(path) return {} async def getTeleApi(self, link, mesg, path): # if auth is disabled or it's a unix socket, they're root. 
if link.get('unix'): name = 'root' auth = mesg[1].get('auth') if auth is not None: name, info = auth user = await self.auth.getUserByName(name) if user is None: raise s_exc.NoSuchUser(name=name) else: user = await self._getCellUser(link, mesg) return await self.getCellApi(link, user, path) async def getCellApi(self, link, user, path): return await self.cellapi.anit(self, link, user) @classmethod def getCellType(cls): return cls.__name__.lower() @classmethod def getEnvPrefix(cls): return f'SYN_{cls.__name__.upper()}' def getCellIden(self): return self.iden @classmethod def initCellConf(cls): ''' Create a Config object for the Cell. Notes: The Config object has a ``envar_prefix`` set according to the results of ``cls.getEnvPrefix()``. Returns: s_config.Config: A Config helper object. ''' prefix = cls.getEnvPrefix() schema = s_config.getJsSchema(cls.confbase, cls.confdefs) return s_config.Config(schema, envar_prefix=prefix) @classmethod def getArgParser(cls, conf=None): ''' Get an ``argparse.ArgumentParser`` for the Cell. Args: conf (s_config.Config): Optional, a Config object which Notes: Boot time configuration data is placed in the argument group called ``config``. This adds default ``dirn``, ``--telepath``, ``--https`` and ``--name`` arguements to the argparser instance. Configuration values which have the ``hideconf`` or ``hidecmdl`` value set to True are not added to the argparser instance. Returns: argparse.ArgumentParser: A ArgumentParser for the Cell. 
''' name = cls.getCellType() prefix = cls.getEnvPrefix() pars = argparse.ArgumentParser(prog=name) pars.add_argument('dirn', help=f'The storage directory for the {name} service.') pars.add_argument('--log-level', default='INFO', choices=s_const.LOG_LEVEL_CHOICES, help='Specify the Python logging log level.', type=str.upper) telendef = None telepdef = 'tcp://0.0.0.0:27492' httpsdef = 4443 telenvar = '_'.join((prefix, 'NAME')) telepvar = '_'.join((prefix, 'TELEPATH')) httpsvar = '_'.join((prefix, 'HTTPS')) telen = os.getenv(telenvar, telendef) telep = os.getenv(telepvar, telepdef) https = os.getenv(httpsvar, httpsdef) pars.add_argument('--telepath', default=telep, type=str, help=f'The telepath URL to listen on. This defaults to {telepdef}, and may be ' f'also be overridden by the {telepvar} environment variable.') pars.add_argument('--https', default=https, type=int, help=f'The port to bind for the HTTPS/REST API. This defaults to {httpsdef}, ' f'and may be also be overridden by the {httpsvar} environment variable.') pars.add_argument('--name', type=str, default=telen, help=f'The (optional) additional name to share the {name} as. This defaults to ' f'{telendef}, and may be also be overridden by the {telenvar} environment' f' variable.') if conf is not None: args = conf.getArgParseArgs() if args: pgrp = pars.add_argument_group('config', 'Configuration arguments.') for (argname, arginfo) in args: pgrp.add_argument(argname, **arginfo) return pars @classmethod async def initFromArgv(cls, argv, outp=None): ''' Cell launcher which does automatic argument parsing, environment variable resolution and Cell creation. Args: argv (list): A list of command line arguments to launch the Cell with. outp (s_ouput.OutPut): Optional, an output object. Notes: This does the following items: - Create a Config object from the Cell class. - Creates an Argument Parser from the Cell class and Config object. - Parses the provided arguments. 
- Loads configuration data from the parsed options and environment variables. - Sets logging for the process. - Creates the Cell from the Cell Ctor. - Adds a Telepath listener, HTTPs port listeners and Telepath share names. - Returns the Cell. Returns: Cell: This returns an instance of the Cell. ''' conf = cls.initCellConf() pars = cls.getArgParser(conf=conf) opts = pars.parse_args(argv) conf.setConfFromOpts(opts) conf.setConfFromEnvs() s_common.setlogging(logger, defval=opts.log_level) cell = await cls.anit(opts.dirn, conf=conf) try: if 'dmon:listen' not in cell.conf: await cell.dmon.listen(opts.telepath) if outp is not None: outp.printf(f'...{cell.getCellType()} API (telepath): %s' % (opts.telepath,)) else: if outp is not None: lisn = cell.conf.get('dmon:listen') if lisn is None: lisn = cell.getLocalUrl() outp.printf(f'...{cell.getCellType()} API (telepath): %s' % (lisn,)) if 'https:port' not in cell.conf: await cell.addHttpsPort(opts.https) if outp is not None: outp.printf(f'...{cell.getCellType()} API (https): %s' % (opts.https,)) else: if outp is not None: port = cell.conf.get('https:port') if port is None: outp.printf(f'...{cell.getCellType()} API (https): disabled') else: outp.printf(f'...{cell.getCellType()} API (https): %s' % (port,)) if opts.name is not None: cell.dmon.share(opts.name, cell) if outp is not None: outp.printf(f'...{cell.getCellType()} API (telepath name): %s' % (opts.name,)) except (asyncio.CancelledError, Exception): await cell.fini() raise return cell @classmethod async def execmain(cls, argv, outp=None): ''' The main entry point for running the Cell as an application. Args: argv (list): A list of command line arguments to launch the Cell with. outp (s_ouput.OutPut): Optional, an output object. Notes: This coroutine waits until the Cell is fini'd or a SIGINT/SIGTERM signal is sent to the process. Returns: None. 
''' if outp is None: outp = s_output.stdout cell = await cls.initFromArgv(argv, outp=outp) await cell.main() async def _getCellUser(self, link, mesg): # check for a TLS client cert username = link.getTlsPeerCn() if username is not None: user = await self.auth.getUserByName(username) if user is not None: return user logger.warning(f'TLS Client Cert User NOT FOUND: {username}') auth = mesg[1].get('auth') if auth is None: anonuser = self.conf.get('auth:anon') if anonuser is None: raise s_exc.AuthDeny(mesg='Unable to find cell user') user = await self.auth.getUserByName(anonuser) if user is None: raise s_exc.AuthDeny(mesg=f'Anon user ({anonuser}) is not found.') if user.isLocked(): raise s_exc.AuthDeny(mesg=f'Anon user ({anonuser}) is locked.') return user name, info = auth user = await self.auth.getUserByName(name) if user is None: raise s_exc.NoSuchUser(name=name, mesg=f'No such user: {name}.') # passwd None always fails... passwd = info.get('passwd') if not await user.tryPasswd(passwd): raise s_exc.AuthDeny(mesg='Invalid password', user=user.name) return user async def getHealthCheck(self): health = s_health.HealthCheck(self.getCellIden()) for func in self._health_funcs: await func(health) return health.pack() def addHealthFunc(self, func): '''Register a callback function to get a HealthCheck object.''' self._health_funcs.append(func) async def _cellHealth(self, health): pass async def getDmonSessions(self): return await self.dmon.getSessInfo() # ----- Change distributed Auth methods ---- async def listHiveKey(self, path=None): if path is None: path = () items = self.hive.dir(path) if items is None: return None return [item[0] for item in items] async def getHiveKeys(self, path): ''' Return a list of (name, value) tuples for nodes under the path. 
''' items = self.hive.dir(path) if items is None: return () return [(i[0], i[1]) for i in items] async def getHiveKey(self, path): ''' Get the value of a key in the cell default hive ''' return await self.hive.get(path) async def setHiveKey(self, path, valu): ''' Set or change the value of a key in the cell default hive ''' return await self.hive.set(path, valu, nexs=True) async def popHiveKey(self, path): ''' Remove and return the value of a key in the cell default hive. Note: this is for expert emergency use only. ''' return await self.hive.pop(path, nexs=True) async def saveHiveTree(self, path=()): return await self.hive.saveHiveTree(path=path) async def loadHiveTree(self, tree, path=(), trim=False): ''' Note: this is for expert emergency use only. ''' return await self._push('hive:loadtree', tree, path, trim) @s_nexus.Pusher.onPush('hive:loadtree') async def _onLoadHiveTree(self, tree, path, trim): return await self.hive.loadHiveTree(tree, path=path, trim=trim) @s_nexus.Pusher.onPushAuto('sync') async def sync(self): ''' no-op mutable for testing purposes. If I am follower, when this returns, I have received and applied all the writes that occurred on the leader before this call. ''' return async def ps(self, user): isallowed = await self.isUserAllowed(user.iden, ('task', 'get')) retn = [] for task in self.boss.ps(): if (task.user.iden == user.iden) or isallowed: retn.append(task.pack()) return retn async def kill(self, user, iden): perm = ('task', 'del') isallowed = await self.isUserAllowed(user.iden, perm) logger.info(f'User [{user.name}] Requesting task kill: {iden}') task = self.boss.get(iden) if task is None: logger.info(f'Task does not exist: {iden}') return False if (task.user.iden == user.iden) or isallowed: logger.info(f'Killing task: {iden}') await task.kill() logger.info(f'Task killed: {iden}') return True perm = '.'.join(perm) raise s_exc.AuthDeny(mesg=f'User must have permission {perm} or own the task', task=iden, user=str(user), perm=perm)
run.py
import time
import threading

from crawler import run as crawler_run
from webapi import run as webapi_run
from webapi import APIMiddleware
from config import configger


def watch_thread():
    """Poll the two storage pools forever; trigger a crawl when either runs low.

    Each cycle reads the 'ggl' (Google) and 'non' (non-Google) pool sizes via
    the shared ``api_mdw`` middleware, runs one crawler pass (and waits for it)
    if either pool is below ``configger.STORAGE_LOWER_BOUND``, then sleeps for
    ``configger.STORAGE_WATCH_SLEEP_TIME`` seconds.

    NOTE(review): KeyboardInterrupt is only ever delivered to the main thread,
    so this ``except`` can never fire inside this worker thread — confirm the
    intended shutdown path.
    """
    while True:
        try:
            ggl_len = api_mdw.get_len('ggl')
            non_len = api_mdw.get_len('non')
            # Refill whenever either storage pool is running low.
            if min(ggl_len, non_len) < configger.STORAGE_LOWER_BOUND:
                crawler = threading.Thread(target=crawler_run)
                crawler.start()
                # Run one refill at a time: wait for the crawler to finish
                # before the next poll cycle.
                crawler.join()
            time.sleep(configger.STORAGE_WATCH_SLEEP_TIME)
        except KeyboardInterrupt:
            break


if __name__ == "__main__":
    # Shared middleware instance; read by watch_thread() as a module global.
    api_mdw = APIMiddleware()
    api_handler = threading.Thread(target=webapi_run)
    watch_handler = threading.Thread(target=watch_thread)
    api_handler.start()
    # Give the web API a head start before the watcher begins polling it.
    time.sleep(2)
    watch_handler.start()
test_sys.py
import builtins import codecs import gc import locale import operator import os import struct import subprocess import sys import sysconfig import test.support from test import support from test.support import os_helper from test.support.script_helper import assert_python_ok, assert_python_failure from test.support import threading_helper from test.support import import_helper import textwrap import unittest import warnings # count the number of test runs, used to create unique # strings to intern in test_intern() INTERN_NUMRUNS = 0 DICT_KEY_STRUCT_FORMAT = 'n2BI2n' class DisplayHookTest(unittest.TestCase): def test_original_displayhook(self): dh = sys.__displayhook__ with support.captured_stdout() as out: dh(42) self.assertEqual(out.getvalue(), "42\n") self.assertEqual(builtins._, 42) del builtins._ with support.captured_stdout() as out: dh(None) self.assertEqual(out.getvalue(), "") self.assertTrue(not hasattr(builtins, "_")) # sys.displayhook() requires arguments self.assertRaises(TypeError, dh) stdout = sys.stdout try: del sys.stdout self.assertRaises(RuntimeError, dh, 42) finally: sys.stdout = stdout def test_lost_displayhook(self): displayhook = sys.displayhook try: del sys.displayhook code = compile("42", "<string>", "single") self.assertRaises(RuntimeError, eval, code) finally: sys.displayhook = displayhook def test_custom_displayhook(self): def baddisplayhook(obj): raise ValueError with support.swap_attr(sys, 'displayhook', baddisplayhook): code = compile("42", "<string>", "single") self.assertRaises(ValueError, eval, code) class ExceptHookTest(unittest.TestCase): def test_original_excepthook(self): try: raise ValueError(42) except ValueError as exc: with support.captured_stderr() as err: sys.__excepthook__(*sys.exc_info()) self.assertTrue(err.getvalue().endswith("ValueError: 42\n")) self.assertRaises(TypeError, sys.__excepthook__) def test_excepthook_bytes_filename(self): # bpo-37467: sys.excepthook() must not crash if a filename # is a bytes string with 
warnings.catch_warnings(): warnings.simplefilter('ignore', BytesWarning) try: raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text")) except SyntaxError as exc: with support.captured_stderr() as err: sys.__excepthook__(*sys.exc_info()) err = err.getvalue() self.assertIn(""" File "b'bytes_filename'", line 123\n""", err) self.assertIn(""" text\n""", err) self.assertTrue(err.endswith("SyntaxError: msg\n")) def test_excepthook(self): with test.support.captured_output("stderr") as stderr: sys.excepthook(1, '1', 1) self.assertTrue("TypeError: print_exception(): Exception expected for " \ "value, str found" in stderr.getvalue()) # FIXME: testing the code for a lost or replaced excepthook in # Python/pythonrun.c::PyErr_PrintEx() is tricky. class SysModuleTest(unittest.TestCase): def tearDown(self): test.support.reap_children() def test_exit(self): # call with two arguments self.assertRaises(TypeError, sys.exit, 42, 42) # call without argument with self.assertRaises(SystemExit) as cm: sys.exit() self.assertIsNone(cm.exception.code) rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()') self.assertEqual(rc, 0) self.assertEqual(out, b'') self.assertEqual(err, b'') # call with integer argument with self.assertRaises(SystemExit) as cm: sys.exit(42) self.assertEqual(cm.exception.code, 42) # call with tuple argument with one entry # entry will be unpacked with self.assertRaises(SystemExit) as cm: sys.exit((42,)) self.assertEqual(cm.exception.code, 42) # call with string argument with self.assertRaises(SystemExit) as cm: sys.exit("exit") self.assertEqual(cm.exception.code, "exit") # call with tuple argument with two entries with self.assertRaises(SystemExit) as cm: sys.exit((17, 23)) self.assertEqual(cm.exception.code, (17, 23)) # test that the exit machinery handles SystemExits properly rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)') self.assertEqual(rc, 47) self.assertEqual(out, b'') self.assertEqual(err, b'') def check_exit_message(code, 
expected, **env_vars): rc, out, err = assert_python_failure('-c', code, **env_vars) self.assertEqual(rc, 1) self.assertEqual(out, b'') self.assertTrue(err.startswith(expected), "%s doesn't start with %s" % (ascii(err), ascii(expected))) # test that stderr buffer is flushed before the exit message is written # into stderr check_exit_message( r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")', b"unflushed,message") # test that the exit message is written with backslashreplace error # handler to stderr check_exit_message( r'import sys; sys.exit("surrogates:\uDCFF")', b"surrogates:\\udcff") # test that the unicode message is encoded to the stderr encoding # instead of the default encoding (utf8) check_exit_message( r'import sys; sys.exit("h\xe9")', b"h\xe9", PYTHONIOENCODING='latin-1') def test_getdefaultencoding(self): self.assertRaises(TypeError, sys.getdefaultencoding, 42) # can't check more than the type, as the user might have changed it self.assertIsInstance(sys.getdefaultencoding(), str) # testing sys.settrace() is done in test_sys_settrace.py # testing sys.setprofile() is done in test_sys_setprofile.py def test_switchinterval(self): self.assertRaises(TypeError, sys.setswitchinterval) self.assertRaises(TypeError, sys.setswitchinterval, "a") self.assertRaises(ValueError, sys.setswitchinterval, -1.0) self.assertRaises(ValueError, sys.setswitchinterval, 0.0) orig = sys.getswitchinterval() # sanity check self.assertTrue(orig < 0.5, orig) try: for n in 0.00001, 0.05, 3.0, orig: sys.setswitchinterval(n) self.assertAlmostEqual(sys.getswitchinterval(), n) finally: sys.setswitchinterval(orig) def test_recursionlimit(self): self.assertRaises(TypeError, sys.getrecursionlimit, 42) oldlimit = sys.getrecursionlimit() self.assertRaises(TypeError, sys.setrecursionlimit) self.assertRaises(ValueError, sys.setrecursionlimit, -42) sys.setrecursionlimit(10000) self.assertEqual(sys.getrecursionlimit(), 10000) sys.setrecursionlimit(oldlimit) def 
test_recursionlimit_recovery(self): if hasattr(sys, 'gettrace') and sys.gettrace(): self.skipTest('fatal error if run with a trace function') oldlimit = sys.getrecursionlimit() def f(): f() try: for depth in (50, 75, 100, 250, 1000): try: sys.setrecursionlimit(depth) except RecursionError: # Issue #25274: The recursion limit is too low at the # current recursion depth continue # Issue #5392: test stack overflow after hitting recursion # limit twice with self.assertRaises(RecursionError): f() with self.assertRaises(RecursionError): f() finally: sys.setrecursionlimit(oldlimit) @test.support.cpython_only def test_setrecursionlimit_recursion_depth(self): # Issue #25274: Setting a low recursion limit must be blocked if the # current recursion depth is already higher than limit. from _testinternalcapi import get_recursion_depth def set_recursion_limit_at_depth(depth, limit): recursion_depth = get_recursion_depth() if recursion_depth >= depth: with self.assertRaises(RecursionError) as cm: sys.setrecursionlimit(limit) self.assertRegex(str(cm.exception), "cannot set the recursion limit to [0-9]+ " "at the recursion depth [0-9]+: " "the limit is too low") else: set_recursion_limit_at_depth(depth, limit) oldlimit = sys.getrecursionlimit() try: sys.setrecursionlimit(1000) for limit in (10, 25, 50, 75, 100, 150, 200): set_recursion_limit_at_depth(limit, limit) finally: sys.setrecursionlimit(oldlimit) def test_getwindowsversion(self): # Raise SkipTest if sys doesn't have getwindowsversion attribute test.support.get_attribute(sys, "getwindowsversion") v = sys.getwindowsversion() self.assertEqual(len(v), 5) self.assertIsInstance(v[0], int) self.assertIsInstance(v[1], int) self.assertIsInstance(v[2], int) self.assertIsInstance(v[3], int) self.assertIsInstance(v[4], str) self.assertRaises(IndexError, operator.getitem, v, 5) self.assertIsInstance(v.major, int) self.assertIsInstance(v.minor, int) self.assertIsInstance(v.build, int) self.assertIsInstance(v.platform, int) 
self.assertIsInstance(v.service_pack, str) self.assertIsInstance(v.service_pack_minor, int) self.assertIsInstance(v.service_pack_major, int) self.assertIsInstance(v.suite_mask, int) self.assertIsInstance(v.product_type, int) self.assertEqual(v[0], v.major) self.assertEqual(v[1], v.minor) self.assertEqual(v[2], v.build) self.assertEqual(v[3], v.platform) self.assertEqual(v[4], v.service_pack) # This is how platform.py calls it. Make sure tuple # still has 5 elements maj, min, buildno, plat, csd = sys.getwindowsversion() def test_call_tracing(self): self.assertRaises(TypeError, sys.call_tracing, type, 2) @unittest.skipUnless(hasattr(sys, "setdlopenflags"), 'test needs sys.setdlopenflags()') def test_dlopenflags(self): self.assertTrue(hasattr(sys, "getdlopenflags")) self.assertRaises(TypeError, sys.getdlopenflags, 42) oldflags = sys.getdlopenflags() self.assertRaises(TypeError, sys.setdlopenflags) sys.setdlopenflags(oldflags+1) self.assertEqual(sys.getdlopenflags(), oldflags+1) sys.setdlopenflags(oldflags) @test.support.refcount_test def test_refcount(self): # n here must be a global in order for this test to pass while # tracing with a python function. Tracing calls PyFrame_FastToLocals # which will add a copy of any locals to the frame object, causing # the reference count to increase by 2 instead of 1. global n self.assertRaises(TypeError, sys.getrefcount) c = sys.getrefcount(None) n = None self.assertEqual(sys.getrefcount(None), c+1) del n self.assertEqual(sys.getrefcount(None), c) if hasattr(sys, "gettotalrefcount"): self.assertIsInstance(sys.gettotalrefcount(), int) def test_getframe(self): self.assertRaises(TypeError, sys._getframe, 42, 42) self.assertRaises(ValueError, sys._getframe, 2000000000) self.assertTrue( SysModuleTest.test_getframe.__code__ \ is sys._getframe().f_code ) # sys._current_frames() is a CPython-only gimmick. 
@threading_helper.reap_threads def test_current_frames(self): import threading import traceback # Spawn a thread that blocks at a known place. Then the main # thread does sys._current_frames(), and verifies that the frames # returned make sense. entered_g = threading.Event() leave_g = threading.Event() thread_info = [] # the thread's id def f123(): g456() def g456(): thread_info.append(threading.get_ident()) entered_g.set() leave_g.wait() t = threading.Thread(target=f123) t.start() entered_g.wait() # At this point, t has finished its entered_g.set(), although it's # impossible to guess whether it's still on that line or has moved on # to its leave_g.wait(). self.assertEqual(len(thread_info), 1) thread_id = thread_info[0] d = sys._current_frames() for tid in d: self.assertIsInstance(tid, int) self.assertGreater(tid, 0) main_id = threading.get_ident() self.assertIn(main_id, d) self.assertIn(thread_id, d) # Verify that the captured main-thread frame is _this_ frame. frame = d.pop(main_id) self.assertTrue(frame is sys._getframe()) # Verify that the captured thread frame is blocked in g456, called # from f123. This is a little tricky, since various bits of # threading.py are also in the thread's call stack. frame = d.pop(thread_id) stack = traceback.extract_stack(frame) for i, (filename, lineno, funcname, sourceline) in enumerate(stack): if funcname == "f123": break else: self.fail("didn't find f123() on thread's call stack") self.assertEqual(sourceline, "g456()") # And the next record must be for g456(). filename, lineno, funcname, sourceline = stack[i+1] self.assertEqual(funcname, "g456") self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"]) # Reap the spawned thread. leave_g.set() t.join() @threading_helper.reap_threads def test_current_exceptions(self): import threading import traceback # Spawn a thread that blocks at a known place. Then the main # thread does sys._current_frames(), and verifies that the frames # returned make sense. 
entered_g = threading.Event() leave_g = threading.Event() thread_info = [] # the thread's id def f123(): g456() def g456(): thread_info.append(threading.get_ident()) entered_g.set() while True: try: raise ValueError("oops") except ValueError: if leave_g.wait(timeout=support.LONG_TIMEOUT): break t = threading.Thread(target=f123) t.start() entered_g.wait() # At this point, t has finished its entered_g.set(), although it's # impossible to guess whether it's still on that line or has moved on # to its leave_g.wait(). self.assertEqual(len(thread_info), 1) thread_id = thread_info[0] d = sys._current_exceptions() for tid in d: self.assertIsInstance(tid, int) self.assertGreater(tid, 0) main_id = threading.get_ident() self.assertIn(main_id, d) self.assertIn(thread_id, d) self.assertEqual((None, None, None), d.pop(main_id)) # Verify that the captured thread frame is blocked in g456, called # from f123. This is a little tricky, since various bits of # threading.py are also in the thread's call stack. exc_type, exc_value, exc_tb = d.pop(thread_id) stack = traceback.extract_stack(exc_tb.tb_frame) for i, (filename, lineno, funcname, sourceline) in enumerate(stack): if funcname == "f123": break else: self.fail("didn't find f123() on thread's call stack") self.assertEqual(sourceline, "g456()") # And the next record must be for g456(). filename, lineno, funcname, sourceline = stack[i+1] self.assertEqual(funcname, "g456") self.assertTrue(sourceline.startswith("if leave_g.wait(")) # Reap the spawned thread. 
leave_g.set() t.join() def test_attributes(self): self.assertIsInstance(sys.api_version, int) self.assertIsInstance(sys.argv, list) for arg in sys.argv: self.assertIsInstance(arg, str) self.assertIsInstance(sys.orig_argv, list) for arg in sys.orig_argv: self.assertIsInstance(arg, str) self.assertIn(sys.byteorder, ("little", "big")) self.assertIsInstance(sys.builtin_module_names, tuple) self.assertIsInstance(sys.copyright, str) self.assertIsInstance(sys.exec_prefix, str) self.assertIsInstance(sys.base_exec_prefix, str) self.assertIsInstance(sys.executable, str) self.assertEqual(len(sys.float_info), 11) self.assertEqual(sys.float_info.radix, 2) self.assertEqual(len(sys.int_info), 2) self.assertTrue(sys.int_info.bits_per_digit % 5 == 0) self.assertTrue(sys.int_info.sizeof_digit >= 1) self.assertEqual(type(sys.int_info.bits_per_digit), int) self.assertEqual(type(sys.int_info.sizeof_digit), int) self.assertIsInstance(sys.hexversion, int) self.assertEqual(len(sys.hash_info), 9) self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width) # sys.hash_info.modulus should be a prime; we do a quick # probable primality test (doesn't exclude the possibility of # a Carmichael number) for x in range(1, 100): self.assertEqual( pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus), 1, "sys.hash_info.modulus {} is a non-prime".format( sys.hash_info.modulus) ) self.assertIsInstance(sys.hash_info.inf, int) self.assertIsInstance(sys.hash_info.nan, int) self.assertIsInstance(sys.hash_info.imag, int) algo = sysconfig.get_config_var("Py_HASH_ALGORITHM") if sys.hash_info.algorithm in {"fnv", "siphash13", "siphash24"}: self.assertIn(sys.hash_info.hash_bits, {32, 64}) self.assertIn(sys.hash_info.seed_bits, {32, 64, 128}) if algo == 1: self.assertEqual(sys.hash_info.algorithm, "siphash24") elif algo == 2: self.assertEqual(sys.hash_info.algorithm, "fnv") elif algo == 3: self.assertEqual(sys.hash_info.algorithm, "siphash13") else: self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash13", 
"siphash24"}) else: # PY_HASH_EXTERNAL self.assertEqual(algo, 0) self.assertGreaterEqual(sys.hash_info.cutoff, 0) self.assertLess(sys.hash_info.cutoff, 8) self.assertIsInstance(sys.maxsize, int) self.assertIsInstance(sys.maxunicode, int) self.assertEqual(sys.maxunicode, 0x10FFFF) self.assertIsInstance(sys.platform, str) self.assertIsInstance(sys.prefix, str) self.assertIsInstance(sys.base_prefix, str) self.assertIsInstance(sys.platlibdir, str) self.assertIsInstance(sys.version, str) vi = sys.version_info self.assertIsInstance(vi[:], tuple) self.assertEqual(len(vi), 5) self.assertIsInstance(vi[0], int) self.assertIsInstance(vi[1], int) self.assertIsInstance(vi[2], int) self.assertIn(vi[3], ("alpha", "beta", "candidate", "final")) self.assertIsInstance(vi[4], int) self.assertIsInstance(vi.major, int) self.assertIsInstance(vi.minor, int) self.assertIsInstance(vi.micro, int) self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final")) self.assertIsInstance(vi.serial, int) self.assertEqual(vi[0], vi.major) self.assertEqual(vi[1], vi.minor) self.assertEqual(vi[2], vi.micro) self.assertEqual(vi[3], vi.releaselevel) self.assertEqual(vi[4], vi.serial) self.assertTrue(vi > (1,0,0)) self.assertIsInstance(sys.float_repr_style, str) self.assertIn(sys.float_repr_style, ('short', 'legacy')) if not sys.platform.startswith('win'): self.assertIsInstance(sys.abiflags, str) def test_thread_info(self): info = sys.thread_info self.assertEqual(len(info), 3) self.assertIn(info.name, ('nt', 'pthread', 'solaris', None)) self.assertIn(info.lock, ('semaphore', 'mutex+cond', None)) def test_43581(self): # Can't use sys.stdout, as this is a StringIO object when # the test runs under regrtest. 
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding) def test_intern(self): global INTERN_NUMRUNS INTERN_NUMRUNS += 1 self.assertRaises(TypeError, sys.intern) s = "never interned before" + str(INTERN_NUMRUNS) self.assertTrue(sys.intern(s) is s) s2 = s.swapcase().swapcase() self.assertTrue(sys.intern(s2) is s) # Subclasses of string can't be interned, because they # provide too much opportunity for insane things to happen. # We don't want them in the interned dict and if they aren't # actually interned, we don't want to create the appearance # that they are by allowing intern() to succeed. class S(str): def __hash__(self): return 123 self.assertRaises(TypeError, sys.intern, S("abc")) def test_sys_flags(self): self.assertTrue(sys.flags) attrs = ("debug", "inspect", "interactive", "optimize", "dont_write_bytecode", "no_user_site", "no_site", "ignore_environment", "verbose", "bytes_warning", "quiet", "hash_randomization", "isolated", "dev_mode", "utf8_mode", "warn_default_encoding") for attr in attrs: self.assertTrue(hasattr(sys.flags, attr), attr) attr_type = bool if attr == "dev_mode" else int self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr) self.assertTrue(repr(sys.flags)) self.assertEqual(len(sys.flags), len(attrs)) self.assertIn(sys.flags.utf8_mode, {0, 1, 2}) def assert_raise_on_new_sys_type(self, sys_attr): # Users are intentionally prevented from creating new instances of # sys.flags, sys.version_info, and sys.getwindowsversion. arg = sys_attr attr_type = type(sys_attr) with self.assertRaises(TypeError): attr_type(arg) with self.assertRaises(TypeError): attr_type.__new__(attr_type, arg) def test_sys_flags_no_instantiation(self): self.assert_raise_on_new_sys_type(sys.flags) def test_sys_version_info_no_instantiation(self): self.assert_raise_on_new_sys_type(sys.version_info) def test_sys_getwindowsversion_no_instantiation(self): # Skip if not being run on Windows. 
test.support.get_attribute(sys, "getwindowsversion") self.assert_raise_on_new_sys_type(sys.getwindowsversion()) @test.support.cpython_only def test_clear_type_cache(self): sys._clear_type_cache() def test_ioencoding(self): env = dict(os.environ) # Test character: cent sign, encoded as 0x4A (ASCII J) in CP424, # not representable in ASCII. env["PYTHONIOENCODING"] = "cp424" p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'], stdout = subprocess.PIPE, env=env) out = p.communicate()[0].strip() expected = ("\xa2" + os.linesep).encode("cp424") self.assertEqual(out, expected) env["PYTHONIOENCODING"] = "ascii:replace" p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'], stdout = subprocess.PIPE, env=env) out = p.communicate()[0].strip() self.assertEqual(out, b'?') env["PYTHONIOENCODING"] = "ascii" p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out, err = p.communicate() self.assertEqual(out, b'') self.assertIn(b'UnicodeEncodeError:', err) self.assertIn(rb"'\xa2'", err) env["PYTHONIOENCODING"] = "ascii:" p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out, err = p.communicate() self.assertEqual(out, b'') self.assertIn(b'UnicodeEncodeError:', err) self.assertIn(rb"'\xa2'", err) env["PYTHONIOENCODING"] = ":surrogateescape" p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'], stdout=subprocess.PIPE, env=env) out = p.communicate()[0].strip() self.assertEqual(out, b'\xbd') @unittest.skipUnless(os_helper.FS_NONASCII, 'requires OS support of non-ASCII encodings') @unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False), 'requires FS encoding to match locale') def test_ioencoding_nonascii(self): env = dict(os.environ) env["PYTHONIOENCODING"] = "" p = subprocess.Popen([sys.executable, "-c", 'print(%a)' % os_helper.FS_NONASCII], stdout=subprocess.PIPE, env=env) out = 
p.communicate()[0].strip() self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII)) @unittest.skipIf(sys.base_prefix != sys.prefix, 'Test is not venv-compatible') def test_executable(self): # sys.executable should be absolute self.assertEqual(os.path.abspath(sys.executable), sys.executable) # Issue #7774: Ensure that sys.executable is an empty string if argv[0] # has been set to a non existent program name and Python is unable to # retrieve the real program name # For a normal installation, it should work without 'cwd' # argument. For test runs in the build directory, see #7774. python_dir = os.path.dirname(os.path.realpath(sys.executable)) p = subprocess.Popen( ["nonexistent", "-c", 'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'], executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir) stdout = p.communicate()[0] executable = stdout.strip().decode("ASCII") p.wait() self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))]) def check_fsencoding(self, fs_encoding, expected=None): self.assertIsNotNone(fs_encoding) codecs.lookup(fs_encoding) if expected: self.assertEqual(fs_encoding, expected) def test_getfilesystemencoding(self): fs_encoding = sys.getfilesystemencoding() if sys.platform == 'darwin': expected = 'utf-8' else: expected = None self.check_fsencoding(fs_encoding, expected) def c_locale_get_error_handler(self, locale, isolated=False, encoding=None): # Force the POSIX locale env = os.environ.copy() env["LC_ALL"] = locale env["PYTHONCOERCECLOCALE"] = "0" code = '\n'.join(( 'import sys', 'def dump(name):', ' std = getattr(sys, name)', ' print("%s: %s" % (name, std.errors))', 'dump("stdin")', 'dump("stdout")', 'dump("stderr")', )) args = [sys.executable, "-X", "utf8=0", "-c", code] if isolated: args.append("-I") if encoding is not None: env['PYTHONIOENCODING'] = encoding else: env.pop('PYTHONIOENCODING', None) p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, 
env=env, universal_newlines=True) stdout, stderr = p.communicate() return stdout def check_locale_surrogateescape(self, locale): out = self.c_locale_get_error_handler(locale, isolated=True) self.assertEqual(out, 'stdin: surrogateescape\n' 'stdout: surrogateescape\n' 'stderr: backslashreplace\n') # replace the default error handler out = self.c_locale_get_error_handler(locale, encoding=':ignore') self.assertEqual(out, 'stdin: ignore\n' 'stdout: ignore\n' 'stderr: backslashreplace\n') # force the encoding out = self.c_locale_get_error_handler(locale, encoding='iso8859-1') self.assertEqual(out, 'stdin: strict\n' 'stdout: strict\n' 'stderr: backslashreplace\n') out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:') self.assertEqual(out, 'stdin: strict\n' 'stdout: strict\n' 'stderr: backslashreplace\n') # have no any effect out = self.c_locale_get_error_handler(locale, encoding=':') self.assertEqual(out, 'stdin: surrogateescape\n' 'stdout: surrogateescape\n' 'stderr: backslashreplace\n') out = self.c_locale_get_error_handler(locale, encoding='') self.assertEqual(out, 'stdin: surrogateescape\n' 'stdout: surrogateescape\n' 'stderr: backslashreplace\n') def test_c_locale_surrogateescape(self): self.check_locale_surrogateescape('C') def test_posix_locale_surrogateescape(self): self.check_locale_surrogateescape('POSIX') def test_implementation(self): # This test applies to all implementations equally. 
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF} self.assertTrue(hasattr(sys.implementation, 'name')) self.assertTrue(hasattr(sys.implementation, 'version')) self.assertTrue(hasattr(sys.implementation, 'hexversion')) self.assertTrue(hasattr(sys.implementation, 'cache_tag')) version = sys.implementation.version self.assertEqual(version[:2], (version.major, version.minor)) hexversion = (version.major << 24 | version.minor << 16 | version.micro << 8 | levels[version.releaselevel] << 4 | version.serial << 0) self.assertEqual(sys.implementation.hexversion, hexversion) # PEP 421 requires that .name be lower case. self.assertEqual(sys.implementation.name, sys.implementation.name.lower()) @test.support.cpython_only def test_debugmallocstats(self): # Test sys._debugmallocstats() from test.support.script_helper import assert_python_ok args = ['-c', 'import sys; sys._debugmallocstats()'] ret, out, err = assert_python_ok(*args) self.assertIn(b"free PyDictObjects", err) # The function has no parameter self.assertRaises(TypeError, sys._debugmallocstats, True) @unittest.skipUnless(hasattr(sys, "getallocatedblocks"), "sys.getallocatedblocks unavailable on this build") def test_getallocatedblocks(self): try: import _testcapi except ImportError: with_pymalloc = support.with_pymalloc() else: try: alloc_name = _testcapi.pymem_getallocatorsname() except RuntimeError as exc: # "cannot get allocators name" (ex: tracemalloc is used) with_pymalloc = True else: with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug')) # Some sanity checks a = sys.getallocatedblocks() self.assertIs(type(a), int) if with_pymalloc: self.assertGreater(a, 0) else: # When WITH_PYMALLOC isn't available, we don't know anything # about the underlying implementation: the function might # return 0 or something greater. 
self.assertGreaterEqual(a, 0) try: # While we could imagine a Python session where the number of # multiple buffer objects would exceed the sharing of references, # it is unlikely to happen in a normal test run. self.assertLess(a, sys.gettotalrefcount()) except AttributeError: # gettotalrefcount() not available pass gc.collect() b = sys.getallocatedblocks() self.assertLessEqual(b, a) gc.collect() c = sys.getallocatedblocks() self.assertIn(c, range(b - 50, b + 50)) def test_is_finalizing(self): self.assertIs(sys.is_finalizing(), False) # Don't use the atexit module because _Py_Finalizing is only set # after calling atexit callbacks code = """if 1: import sys class AtExit: is_finalizing = sys.is_finalizing print = print def __del__(self): self.print(self.is_finalizing(), flush=True) # Keep a reference in the __main__ module namespace, so the # AtExit destructor will be called at Python exit ref = AtExit() """ rc, stdout, stderr = assert_python_ok('-c', code) self.assertEqual(stdout.rstrip(), b'True') def test_issue20602(self): # sys.flags and sys.float_info were wiped during shutdown. 
code = """if 1: import sys class A: def __del__(self, sys=sys): print(sys.flags) print(sys.float_info) a = A() """ rc, out, err = assert_python_ok('-c', code) out = out.splitlines() self.assertIn(b'sys.flags', out[0]) self.assertIn(b'sys.float_info', out[1]) def test_sys_ignores_cleaning_up_user_data(self): code = """if 1: import struct, sys class C: def __init__(self): self.pack = struct.pack def __del__(self): self.pack('I', -42) sys.x = C() """ rc, stdout, stderr = assert_python_ok('-c', code) self.assertEqual(rc, 0) self.assertEqual(stdout.rstrip(), b"") self.assertEqual(stderr.rstrip(), b"") @unittest.skipUnless(hasattr(sys, 'getandroidapilevel'), 'need sys.getandroidapilevel()') def test_getandroidapilevel(self): level = sys.getandroidapilevel() self.assertIsInstance(level, int) self.assertGreater(level, 0) def test_sys_tracebacklimit(self): code = """if 1: import sys def f1(): 1 / 0 def f2(): f1() sys.tracebacklimit = %r f2() """ def check(tracebacklimit, expected): p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit], stderr=subprocess.PIPE) out = p.communicate()[1] self.assertEqual(out.splitlines(), expected) traceback = [ b'Traceback (most recent call last):', b' File "<string>", line 8, in <module>', b' File "<string>", line 6, in f2', b' File "<string>", line 4, in f1', b'ZeroDivisionError: division by zero' ] check(10, traceback) check(3, traceback) check(2, traceback[:1] + traceback[2:]) check(1, traceback[:1] + traceback[3:]) check(0, [traceback[-1]]) check(-1, [traceback[-1]]) check(1<<1000, traceback) check(-1<<1000, [traceback[-1]]) check(None, traceback) def test_no_duplicates_in_meta_path(self): self.assertEqual(len(sys.meta_path), len(set(sys.meta_path))) @unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"), 'needs sys._enablelegacywindowsfsencoding()') def test__enablelegacywindowsfsencoding(self): code = ('import sys', 'sys._enablelegacywindowsfsencoding()', 'print(sys.getfilesystemencoding(), 
sys.getfilesystemencodeerrors())') rc, out, err = assert_python_ok('-c', '; '.join(code)) out = out.decode('ascii', 'replace').rstrip() self.assertEqual(out, 'mbcs replace') def test_orig_argv(self): code = textwrap.dedent(''' import sys print(sys.argv) print(sys.orig_argv) ''') args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg'] proc = subprocess.run(args, check=True, capture_output=True, text=True) expected = [ repr(['-c', 'arg']), # sys.argv repr(args), # sys.orig_argv ] self.assertEqual(proc.stdout.rstrip().splitlines(), expected, proc) def test_module_names(self): self.assertIsInstance(sys.stdlib_module_names, frozenset) for name in sys.stdlib_module_names: self.assertIsInstance(name, str) def test_stdlib_dir(self): os = import_helper.import_fresh_module('os') marker = getattr(os, '__file__', None) if marker and not os.path.exists(marker): marker = None expected = os.path.dirname(marker) if marker else None self.assertEqual(os.path.normpath(sys._stdlib_dir), os.path.normpath(expected)) @test.support.cpython_only class UnraisableHookTest(unittest.TestCase): def write_unraisable_exc(self, exc, err_msg, obj): import _testcapi import types err_msg2 = f"Exception ignored {err_msg}" try: _testcapi.write_unraisable_exc(exc, err_msg, obj) return types.SimpleNamespace(exc_type=type(exc), exc_value=exc, exc_traceback=exc.__traceback__, err_msg=err_msg2, object=obj) finally: # Explicitly break any reference cycle exc = None def test_original_unraisablehook(self): for err_msg in (None, "original hook"): with self.subTest(err_msg=err_msg): obj = "an object" with test.support.captured_output("stderr") as stderr: with test.support.swap_attr(sys, 'unraisablehook', sys.__unraisablehook__): self.write_unraisable_exc(ValueError(42), err_msg, obj) err = stderr.getvalue() if err_msg is not None: self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err) else: self.assertIn(f'Exception ignored in: {obj!r}\n', err) self.assertIn('Traceback (most recent call last):\n', 
err) self.assertIn('ValueError: 42\n', err) def test_original_unraisablehook_err(self): # bpo-22836: PyErr_WriteUnraisable() should give sensible reports class BrokenDel: def __del__(self): exc = ValueError("del is broken") # The following line is included in the traceback report: raise exc class BrokenStrException(Exception): def __str__(self): raise Exception("str() is broken") class BrokenExceptionDel: def __del__(self): exc = BrokenStrException() # The following line is included in the traceback report: raise exc for test_class in (BrokenDel, BrokenExceptionDel): with self.subTest(test_class): obj = test_class() with test.support.captured_stderr() as stderr, \ test.support.swap_attr(sys, 'unraisablehook', sys.__unraisablehook__): # Trigger obj.__del__() del obj report = stderr.getvalue() self.assertIn("Exception ignored", report) self.assertIn(test_class.__del__.__qualname__, report) self.assertIn("test_sys.py", report) self.assertIn("raise exc", report) if test_class is BrokenExceptionDel: self.assertIn("BrokenStrException", report) self.assertIn("<exception str() failed>", report) else: self.assertIn("ValueError", report) self.assertIn("del is broken", report) self.assertTrue(report.endswith("\n")) def test_original_unraisablehook_exception_qualname(self): # See bpo-41031, bpo-45083. # Check that the exception is printed with its qualified name # rather than just classname, and the module names appears # unless it is one of the hard-coded exclusions. 
class A: class B: class X(Exception): pass for moduleName in 'builtins', '__main__', 'some_module': with self.subTest(moduleName=moduleName): A.B.X.__module__ = moduleName with test.support.captured_stderr() as stderr, \ test.support.swap_attr(sys, 'unraisablehook', sys.__unraisablehook__): expected = self.write_unraisable_exc( A.B.X(), "msg", "obj"); report = stderr.getvalue() self.assertIn(A.B.X.__qualname__, report) if moduleName in ['builtins', '__main__']: self.assertNotIn(moduleName + '.', report) else: self.assertIn(moduleName + '.', report) def test_original_unraisablehook_wrong_type(self): exc = ValueError(42) with test.support.swap_attr(sys, 'unraisablehook', sys.__unraisablehook__): with self.assertRaises(TypeError): sys.unraisablehook(exc) def test_custom_unraisablehook(self): hook_args = None def hook_func(args): nonlocal hook_args hook_args = args obj = object() try: with test.support.swap_attr(sys, 'unraisablehook', hook_func): expected = self.write_unraisable_exc(ValueError(42), "custom hook", obj) for attr in "exc_type exc_value exc_traceback err_msg object".split(): self.assertEqual(getattr(hook_args, attr), getattr(expected, attr), (hook_args, expected)) finally: # expected and hook_args contain an exception: break reference cycle expected = None hook_args = None def test_custom_unraisablehook_fail(self): def hook_func(*args): raise Exception("hook_func failed") with test.support.captured_output("stderr") as stderr: with test.support.swap_attr(sys, 'unraisablehook', hook_func): self.write_unraisable_exc(ValueError(42), "custom hook fail", None) err = stderr.getvalue() self.assertIn(f'Exception ignored in sys.unraisablehook: ' f'{hook_func!r}\n', err) self.assertIn('Traceback (most recent call last):\n', err) self.assertIn('Exception: hook_func failed\n', err) @test.support.cpython_only class SizeofTest(unittest.TestCase): def setUp(self): self.P = struct.calcsize('P') self.longdigit = sys.int_info.sizeof_digit import _testinternalcapi 
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD check_sizeof = test.support.check_sizeof def test_gc_head_size(self): # Check that the gc header size is added to objects tracked by the gc. vsize = test.support.calcvobjsize gc_header_size = self.gc_headsize # bool objects are not gc tracked self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit) # but lists are self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size) def test_errors(self): class BadSizeof: def __sizeof__(self): raise ValueError self.assertRaises(ValueError, sys.getsizeof, BadSizeof()) class InvalidSizeof: def __sizeof__(self): return None self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof()) sentinel = ["sentinel"] self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel) class FloatSizeof: def __sizeof__(self): return 4.5 self.assertRaises(TypeError, sys.getsizeof, FloatSizeof()) self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel) class OverflowSizeof(int): def __sizeof__(self): return int(self) self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)), sys.maxsize + self.gc_headsize) with self.assertRaises(OverflowError): sys.getsizeof(OverflowSizeof(sys.maxsize + 1)) with self.assertRaises(ValueError): sys.getsizeof(OverflowSizeof(-1)) with self.assertRaises((ValueError, OverflowError)): sys.getsizeof(OverflowSizeof(-sys.maxsize - 1)) def test_default(self): size = test.support.calcvobjsize self.assertEqual(sys.getsizeof(True), size('') + self.longdigit) self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit) def test_objecttypes(self): # check all types defined in Objects/ calcsize = struct.calcsize size = test.support.calcobjsize vsize = test.support.calcvobjsize check = self.check_sizeof # bool check(True, vsize('') + self.longdigit) # buffer # XXX # builtin_function_or_method check(len, size('5P')) # bytearray samples = [b'', b'u'*100000] for sample in samples: x = bytearray(sample) check(x, vsize('n2Pi') + x.__alloc__()) # 
bytearray_iterator check(iter(bytearray()), size('nP')) # bytes check(b'', vsize('n') + 1) check(b'x' * 10, vsize('n') + 11) # cell def get_cell(): x = 42 def inner(): return x return inner check(get_cell().__closure__[0], size('P')) # code def check_code_size(a, expected_size): self.assertGreaterEqual(sys.getsizeof(a), expected_size) check_code_size(get_cell().__code__, size('6i13P')) check_code_size(get_cell.__code__, size('6i13P')) def get_cell2(x): def inner(): return x return inner check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n')) # complex check(complex(0,1), size('2d')) # method_descriptor (descriptor object) check(str.lower, size('3PPP')) # classmethod_descriptor (descriptor object) # XXX # member_descriptor (descriptor object) import datetime check(datetime.timedelta.days, size('3PP')) # getset_descriptor (descriptor object) import collections check(collections.defaultdict.default_factory, size('3PP')) # wrapper_descriptor (descriptor object) check(int.__add__, size('3P2P')) # method-wrapper (descriptor object) check({}.__iter__, size('2P')) # empty dict check({}, size('nQ2P')) # dict check({"a": 1}, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + (8*2//3)*calcsize('n2P')) longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8} check(longdict, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + (16*2//3)*calcsize('n2P')) # dictionary-keyview check({}.keys(), size('P')) # dictionary-valueview check({}.values(), size('P')) # dictionary-itemview check({}.items(), size('P')) # dictionary iterator check(iter({}), size('P2nPn')) # dictionary-keyiterator check(iter({}.keys()), size('P2nPn')) # dictionary-valueiterator check(iter({}.values()), size('P2nPn')) # dictionary-itemiterator check(iter({}.items()), size('P2nPn')) # dictproxy class C(object): pass check(C.__dict__, size('P')) # BaseException check(BaseException(), size('5Pb')) # UnicodeEncodeError check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP')) # UnicodeDecodeError 
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP')) # UnicodeTranslateError check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP')) # ellipses check(Ellipsis, size('')) # EncodingMap import codecs, encodings.iso8859_3 x = codecs.charmap_build(encodings.iso8859_3.decoding_table) check(x, size('32B2iB')) # enumerate check(enumerate([]), size('n3P')) # reverse check(reversed(''), size('nP')) # float check(float(0), size('d')) # sys.floatinfo check(sys.float_info, vsize('') + self.P * len(sys.float_info)) # frame import inspect x = inspect.currentframe() check(x, size('3Pi3c')) # function def func(): pass check(func, size('14Pi')) class c(): @staticmethod def foo(): pass @classmethod def bar(cls): pass # staticmethod check(foo, size('PP')) # classmethod check(bar, size('PP')) # generator def get_gen(): yield 1 check(get_gen(), size('P2PPP4P')) # iterator check(iter('abc'), size('lP')) # callable-iterator import re check(re.finditer('',''), size('2P')) # list samples = [[], [1,2,3], ['1', '2', '3']] for sample in samples: check(list(sample), vsize('Pn') + len(sample)*self.P) # sortwrapper (list) # XXX # cmpwrapper (list) # XXX # listiterator (list) check(iter([]), size('lP')) # listreverseiterator (list) check(reversed([]), size('nP')) # int check(0, vsize('')) check(1, vsize('') + self.longdigit) check(-1, vsize('') + self.longdigit) PyLong_BASE = 2**sys.int_info.bits_per_digit check(int(PyLong_BASE), vsize('') + 2*self.longdigit) check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit) check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit) # module check(unittest, size('PnPPP')) # None check(None, size('')) # NotImplementedType check(NotImplemented, size('')) # object check(object(), size('')) # property (descriptor object) class C(object): def getx(self): return self.__x def setx(self, value): self.__x = value def delx(self): del self.__x x = property(getx, setx, delx, "") check(x, size('5Pi')) # PyCapsule # XXX # rangeiterator 
check(iter(range(1)), size('4l')) # reverse check(reversed(''), size('nP')) # range check(range(1), size('4P')) check(range(66000), size('4P')) # set # frozenset PySet_MINSIZE = 8 samples = [[], range(10), range(50)] s = size('3nP' + PySet_MINSIZE*'nP' + '2nP') for sample in samples: minused = len(sample) if minused == 0: tmp = 1 # the computation of minused is actually a bit more complicated # but this suffices for the sizeof test minused = minused*2 newsize = PySet_MINSIZE while newsize <= minused: newsize = newsize << 1 if newsize <= 8: check(set(sample), s) check(frozenset(sample), s) else: check(set(sample), s + newsize*calcsize('nP')) check(frozenset(sample), s + newsize*calcsize('nP')) # setiterator check(iter(set()), size('P3n')) # slice check(slice(0), size('3P')) # super check(super(int), size('3P')) # tuple check((), vsize('')) check((1,2,3), vsize('') + 3*self.P) # type # static type: PyTypeObject fmt = 'P2nPI13Pl4Pn9Pn12PIPP' s = vsize(fmt) check(int, s) # class s = vsize(fmt + # PyTypeObject '4P' # PyAsyncMethods '36P' # PyNumberMethods '3P' # PyMappingMethods '10P' # PySequenceMethods '2P' # PyBufferProcs '5P') class newstyleclass(object): pass # Separate block for PyDictKeysObject with 8 keys and 5 entries check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 32 + 21*calcsize("n2P")) # dict with shared keys check(newstyleclass().__dict__, size('nQ2P') + 15*self.P) o = newstyleclass() o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1 # Separate block for PyDictKeysObject with 16 keys and 10 entries check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 32 + 21*calcsize("n2P")) # dict with shared keys check(newstyleclass().__dict__, size('nQ2P') + 13*self.P) # unicode # each tuple contains a string and its expected character size # don't put any static strings here, as they may contain # wchar_t or UTF-8 representations samples = ['1'*100, '\xff'*50, '\u0100'*40, '\uffff'*100, '\U00010000'*30, '\U0010ffff'*100] asciifields = "nnbP" 
compactfields = asciifields + "nPn" unicodefields = compactfields + "P" for s in samples: maxchar = ord(max(s)) if maxchar < 128: L = size(asciifields) + len(s) + 1 elif maxchar < 256: L = size(compactfields) + len(s) + 1 elif maxchar < 65536: L = size(compactfields) + 2*(len(s) + 1) else: L = size(compactfields) + 4*(len(s) + 1) check(s, L) # verify that the UTF-8 size is accounted for s = chr(0x4000) # 4 bytes canonical representation check(s, size(compactfields) + 4) # compile() will trigger the generation of the UTF-8 # representation as a side effect compile(s, "<stdin>", "eval") check(s, size(compactfields) + 4 + 4) # TODO: add check that forces the presence of wchar_t representation # TODO: add check that forces layout of unicodefields # weakref import weakref check(weakref.ref(int), size('2Pn2P')) # weakproxy # XXX # weakcallableproxy check(weakref.proxy(int), size('2Pn2P')) def check_slots(self, obj, base, extra): expected = sys.getsizeof(base) + struct.calcsize(extra) if gc.is_tracked(obj) and not gc.is_tracked(base): expected += self.gc_headsize self.assertEqual(sys.getsizeof(obj), expected) def test_slots(self): # check all subclassable types defined in Objects/ that allow # non-empty __slots__ check = self.check_slots class BA(bytearray): __slots__ = 'a', 'b', 'c' check(BA(), bytearray(), '3P') class D(dict): __slots__ = 'a', 'b', 'c' check(D(x=[]), {'x': []}, '3P') class L(list): __slots__ = 'a', 'b', 'c' check(L(), [], '3P') class S(set): __slots__ = 'a', 'b', 'c' check(S(), set(), '3P') class FS(frozenset): __slots__ = 'a', 'b', 'c' check(FS(), frozenset(), '3P') from collections import OrderedDict class OD(OrderedDict): __slots__ = 'a', 'b', 'c' check(OD(x=[]), OrderedDict(x=[]), '3P') def test_pythontypes(self): # check all types defined in Python/ size = test.support.calcobjsize vsize = test.support.calcvobjsize check = self.check_sizeof # _ast.AST import _ast check(_ast.AST(), size('P')) try: raise TypeError except TypeError: tb = 
sys.exc_info()[2] # traceback if tb is not None: check(tb, size('2P2i')) # symtable entry # XXX # sys.flags check(sys.flags, vsize('') + self.P * len(sys.flags)) def test_asyncgen_hooks(self): old = sys.get_asyncgen_hooks() self.assertIsNone(old.firstiter) self.assertIsNone(old.finalizer) firstiter = lambda *a: None sys.set_asyncgen_hooks(firstiter=firstiter) hooks = sys.get_asyncgen_hooks() self.assertIs(hooks.firstiter, firstiter) self.assertIs(hooks[0], firstiter) self.assertIs(hooks.finalizer, None) self.assertIs(hooks[1], None) finalizer = lambda *a: None sys.set_asyncgen_hooks(finalizer=finalizer) hooks = sys.get_asyncgen_hooks() self.assertIs(hooks.firstiter, firstiter) self.assertIs(hooks[0], firstiter) self.assertIs(hooks.finalizer, finalizer) self.assertIs(hooks[1], finalizer) sys.set_asyncgen_hooks(*old) cur = sys.get_asyncgen_hooks() self.assertIsNone(cur.firstiter) self.assertIsNone(cur.finalizer) def test_changing_sys_stderr_and_removing_reference(self): # If the default displayhook doesn't take a strong reference # to sys.stderr the following code can crash. See bpo-43660 # for more details. code = textwrap.dedent(''' import sys class MyStderr: def write(self, s): sys.stderr = None sys.stderr = MyStderr() 1/0 ''') rc, out, err = assert_python_failure('-c', code) self.assertEqual(out, b"") self.assertEqual(err, b"") if __name__ == "__main__": unittest.main()
sender.py
"""Module for sending data and hosting sending servers.""" from aiohttp import web import asyncio import humanize from multiprocessing import Process import os import pkgutil import requests from requests_toolbelt import MultipartEncoder import socket from .exception import CodeExistsError, CodeNotFoundError, IsNotReceiverError from .utils import get_local_ip_address, get_service_info, get_zip_file, \ qr_code, register_service __all__ = ["send", "send_server", "send_server_proc"] # Request handlers async def _text_page(request): """Renders a text viewing page, GET handler for route '/'.""" text = pkgutil.get_data(__name__, "static/text.html").decode() return web.Response(text=text, content_type="text/html") async def _text_sender(request): """Returns the text being shared, GET handler for route '/text'.""" address = "" peername = request.transport.get_extra_info("peername") if peername is not None: host, _ = peername address = " (by " + str(host) + ")" print("Content viewed" + address + "!") return web.Response(text=request.app["text"]) async def _download_page(request): """Renders a download page, GET handler for route '/'.""" download = pkgutil.get_data(__name__, "static/download.html").decode() return web.Response(text=download, content_type="text/html") async def _file_stream_sender(request): """Streams a file from the server, GET handler for route '/download'.""" address = "" peername = request.transport.get_extra_info("peername") if peername is not None: host, _ = peername address = " (by " + str(host) + ")" if request.method == "GET": print("Content requested" + address + ", transferring!") elif request.method == "HEAD": print("Content examined" + address + "!") response = web.StreamResponse() file_path = request.app["file_path"] file_name = request.app["file_name"] file_size = str(request.app["file_size"]) header = "attachment; filename='{}'; size={}".format(file_name, file_size) response.headers["content-type"] = "application/octet-stream" 
response.headers["content-length"] = str(request.app["file_size"]) response.headers["content-disposition"] = header response.headers["airshare-compress"] = request.app["compress"] await response.prepare(request) with open(file_path, "rb") as f: chunk = f.read(8192) while chunk: await response.write(chunk) chunk = f.read(8192) return response async def _is_airshare_text_sender(request): """Returns 'Text Sender', GET handler for route '/airshare'.""" return web.Response(text="Text Sender") async def _is_airshare_file_sender(request): """Returns 'File Sender', GET handler for route '/airshare'.""" return web.Response(text="File Sender") # Sender functions def send(*, code, file, compress=False): r"""Send file(s) or directories to a receiving server. Parameters ---------- code : str Identifying code for the Airshare receiving server. file : str or list or None Relative path or list of paths of the files or directories to serve. For multiple files or directories, contents are automatically zipped. compress : boolean, default=False Flag to enable or disable compression (Zip). Effective when only one file is given. Returns ------- status_code : int Status code of upload POST request. 
""" info = get_service_info(code) if info is None: raise CodeNotFoundError(code) if type(file) is str: if file == "": file = None else: file = [file] elif len(file) == 0: file = None if file is None: raise ValueError("The parameter `file` must be non-empty!") if compress or len(file) > 1 or os.path.isdir(file[0]): compress = "true" print("Compressing...") file, name = get_zip_file(file) print("Compressed to `" + name + "`!") else: compress = "false" file, name = file[0], file[0].split(os.path.sep)[-1] ip = socket.inet_ntoa(info.addresses[0]) url = "http://" + ip + ":" + str(info.port) airshare_type = requests.get(url + "/airshare") if airshare_type.text != "Upload Receiver": raise IsNotReceiverError(code) m = MultipartEncoder(fields={"field0": (name, open(file, "rb"))}) headers = {"content-type": m.content_type, "airshare-compress": compress} r = requests.post(url + "/upload", data=m, headers=headers) print("Uploaded `" + name + "` to Airshare `" + code + ".local`!") return r.status_code def send_server(*, code, text=None, file=None, compress=False, port=80): r"""Serves a file or text and registers it as a Multicast-DNS service. Parameters ---------- code : str Identifying code for the Airshare service and server. text : str or None String value to be shared. If both `text` and `files` are given, `text` will be shared. Must be given if `files` is not given. file : str or list or None Relative path or list of paths of the files or directories to serve. If multiple files or directories are given, the contents are automatically zipped. If not given or both `files` and `text` are given, `text` will be shared. Must be given if `text` is not given. compress : boolean, default=False Flag to enable or disable compression (Zip). Effective when only one file is given. port : int, default=80 Port number at which the server is hosted on the device. 
""" info = get_service_info(code) if info is not None: raise CodeExistsError(code) if file is not None: if type(file) is str: if file == "": file = None else: file = [file] elif len(file) == 0: file = None content = text or file name = None if content is None: raise ValueError("Either `file` or `text` (keyword arguments) must be" + " given and non-empty!") elif text is None and file is not None: if compress or len(file) > 1 or os.path.isdir(file[0]): compress = "true" print("Compressing...") content, name = get_zip_file(file) print("Compressed to `" + name + "`!") else: compress = "false" content = file[0] addresses = [get_local_ip_address()] register_service(code, addresses, port) loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) app = web.Application() file_size = "" if text is not None: app["text"] = content app.router.add_get(path="/", handler=_text_page) app.router.add_get(path="/text", handler=_text_sender) app.router.add_get(path="/airshare", handler=_is_airshare_text_sender) elif file: app["file_path"] = os.path.realpath(content) app["file_name"] = name or app["file_path"].split(os.path.sep)[-1] app["file_size"] = os.stat(app["file_path"]).st_size app["compress"] = compress file_size = " (" + humanize.naturalsize(app["file_size"]) + ")" content = app["file_name"] app.router.add_get(path="/", handler=_download_page) app.router.add_get(path="/airshare", handler=_is_airshare_file_sender) app.router.add_get(path="/download", handler=_file_stream_sender) runner = web.AppRunner(app) loop.run_until_complete(runner.setup()) site = web.TCPSite(runner, "0.0.0.0", str(port)) loop.run_until_complete(site.start()) url_port = "" if port != 80: url_port = ":" + str(port) ip = socket.inet_ntoa(addresses[0]) + url_port print("`" + content + "`" + file_size + " available at " + ip + " and `http://" + code + ".local" + url_port + "`, press CtrlC" + " to stop sharing...") qr_code("http://" + ip) loop.run_forever() def send_server_proc(*, code, text=None, file=None, 
compress=False, port=80): r"""Creates a process with 'send_server' as the target. Parameters ---------- code : str Identifying code for the Airshare service and server. text : str or None String value to be shared. If both `text` and `files` are given, `text` will be shared. Must be given if `files` is not given. file : str or list or None Relative path or list of paths of the files or directories to serve. If multiple files or directories are given, the contents are automatically zipped. If not given or both `files` and `text` are given, `text` will be shared. Must be given if `text` is not given. compress : boolean, default=False Flag to enable or disable compression (Zip). Effective when only one file is given. port : int, default=80 Port number at which the server is hosted on the device. Returns ------- process: multiprocessing.Process A multiprocessing.Process object with 'send_server' as target. """ kwargs = {"code": code, "file": file, "text": text, "compress": compress, "port": port} process = Process(target=send_server, kwargs=kwargs) return process
input.py
from riem.debug import Debug, DebugChannel
from riem.library import ArrayList
from threading import Event, Lock, Thread
import enum, pygame


class Action(enum.Enum):
    """Abstract input actions shared by joystick and keyboard handlers."""
    ACTION = 0
    UP = 1
    DOWN = 2
    LEFT = 3
    RIGHT = 4


class Controller:
    """Polls a pygame joystick on a background thread and queues Actions."""

    def __init__(self, app) -> None:
        # NOTE: app is Application type but partially initialised here
        self.app = app
        self.joystick_active = False
        # Action Queue. Guarded by queue_lock: the listener thread appends
        # while the main thread drains via get_actions().
        self.action_queue = ArrayList()
        self.queue_lock = Lock()
        # Detect Controller
        pygame.init()
        pygame.joystick.init()
        if pygame.joystick.get_count() == 1:
            # Create Listener
            pygame.joystick.Joystick(0).init()
            self.joystick_active = True
            self.listener_halt = Event()
            self.listener_thread = Thread(
                target = self.listener,
                args = (self.listener_halt, self.action_queue),
                daemon = False)
            self.listener_thread.start()

    def add_action(self, action: Action) -> None:
        """Appends an action to the queue (safe to call from any thread)."""
        # Bug fix: rebinding self.action_queue raced with get_actions();
        # an action added between its copy() and reset was silently lost.
        with self.queue_lock:
            self.action_queue = self.action_queue.add(action)

    def get_actions(self) -> ArrayList:
        """Returns all queued actions and atomically empties the queue."""
        with self.queue_lock:
            # Create Result
            result = self.action_queue.copy()
            # Empty Queue
            self.action_queue = ArrayList()
        # Return Actions
        return result

    def listener(self, halt: Event, queue: ArrayList) -> None:
        """Thread body: translates pygame joystick events into Actions."""
        while True:
            # Terminate
            if halt.is_set():
                break
            # Handle Events
            for event in pygame.event.get():
                # Button Events (buttons 0/1 both map to ACTION)
                if event.type == pygame.JOYBUTTONDOWN and (event.button == 0 or event.button == 1):
                    self.add_action(Action.ACTION)
                # Stick Events: axis 0 is horizontal, axis 1 vertical;
                # |value| >= 0.8 is treated as a deliberate push.
                elif event.type == pygame.JOYAXISMOTION and (event.axis == 0 or event.axis == 1):
                    if event.axis == 0:
                        if event.value >= 0.8:
                            self.add_action(Action.RIGHT)
                        elif event.value <= -0.8:
                            self.add_action(Action.LEFT)
                    elif event.axis == 1:
                        if event.value >= 0.8:
                            self.add_action(Action.DOWN)
                        elif event.value <= -0.8:
                            self.add_action(Action.UP)

    def terminate(self) -> None:
        """Stops the listener thread (if any) and shuts pygame down."""
        if self.joystick_active is True:
            self.listener_halt.set()
            self.listener_thread.join()
        pygame.quit()


class Keyboard:
    """Maps raw key codes to Actions (arrows, numpad and Enter variants)."""
    action = {
        13: Action.ACTION,
        36: Action.ACTION,
        37: Action.LEFT,
        38: Action.UP,
        39: Action.RIGHT,
        40: Action.DOWN,
        111: Action.UP,
        113: Action.LEFT,
        114: Action.RIGHT,
        116: Action.DOWN
    }
scan_main.py
# -*- coding: utf-8 -*- # # Enteletaor - https://github.com/cr0hn/enteletaor # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the # following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the # following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the # following disclaimer in the documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#

import six
import zmq
import json
import redis
import socket
import logging
import eventlet
import ipaddress
import amqp.connection

from functools import partial
from collections import defaultdict
from threading import Thread, BoundedSemaphore

from .patch import patch_transport

from enteletaor_lib.libs.contrib.inetnum import get_inet_num

# Monkey patch for AMQP lib
patch_transport()

# Path thread library
eventlet.monkey_patch(socket=True, select=True, thread=True)

# Reconfigure AMQP LOGGER
logging.getLogger('amqp').setLevel(100)

log = logging.getLogger()

# host -> {server_type: {"state": ..., "port": ...}}, filled by _do_scan.
OPEN_SERVICES = defaultdict(dict)


# ----------------------------------------------------------------------
def _do_scan(config, sem, host):
	"""
	This function try to find brokers services open in remote servers.

	Runs in a worker thread; releases `sem` when done so the dispatcher in
	action_scan_main() can start the next worker.
	"""
	handlers = {
		'Redis': handle_redis,
		'RabbitMQ': handle_amqp,
		'ZeroMQ': handle_zmq
	}

	# Bug fix: release the semaphore in a finally block. The original only
	# released it on the success path, so any unexpected exception leaked a
	# slot and eventually deadlocked action_scan_main() on sem.acquire().
	try:
		log.warning(" > Analyzing host '%s' " % host)

		for port in config.ports.split(","):

			# Check each server
			for server_type, handle in six.iteritems(handlers):

				log.info(" >> Trying to find %s service in '%s' port '%s'." % (server_type, host, port))

				# Bug fix: create the socket before the try block so the
				# `finally: s.close()` cannot hit an unbound name.
				s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
				try:
					# Try to check if port is open
					s.settimeout(config.timeout)
					result = s.connect_ex((host, int(port)))
				except socket.gaierror as e:
					log.debug("%s : %s error: %s" % (server_type, port, e))
					continue
				finally:
					s.close()

				# Is port open?
				if result == 0:
					log.info(" <i> Port '%s' is open in '%s'" % (port, host))

					# Service-specific probe confirms which broker listens here.
					if handle(host, port, config) is True:
						log.error(" <!!> Open '%s' server found in port '%s' at '%s'" % (server_type, port, host))

						OPEN_SERVICES[host][server_type] = dict(
							state="open",
							port=port
						)
				else:
					log.debug(" <i> Port %s is closed" % port)
	finally:
		sem.release()


# ----------------------------------------------------------------------
def action_scan_main(config):
	"""
	Main entry point of the scan action: resolves targets, scans them
	concurrently, then prints and optionally exports the results.
	"""

	# --------------------------------------------------------------------------
	# Resolve target
	# --------------------------------------------------------------------------
	all_ips = build_targets(config)

	# --------------------------------------------------------------------------
	# Preparing scan
	# --------------------------------------------------------------------------
	target_number = len(all_ips)

	log.warning(" - Number of targets to analyze: %s" % target_number)

	# Semaphore caps the number of concurrent scanning threads.
	sem = BoundedSemaphore(config.concurrency)
	threads = []

	# Map parameters
	_fn = partial(_do_scan, config, sem)

	log.error(" - Starting scan")

	# --------------------------------------------------------------------------
	# Do scan
	# --------------------------------------------------------------------------
	for x in all_ips:
		# Blocks until a worker slot is free; _do_scan releases it.
		sem.acquire()

		t = Thread(target=_fn, args=(x,))
		threads.append(t)

		t.start()

	for t in threads:
		t.join()

	# --------------------------------------------------------------------------
	# Display results
	# --------------------------------------------------------------------------
	if OPEN_SERVICES:
		log.error(" - Open services found:")
		for host, content in six.iteritems(OPEN_SERVICES):
			log.error(" -> Host - %s" % host)

			for server_type, server_info in six.iteritems(content):
				log.error("    * %s/TCP [%s]" % (server_info['port'], server_type))
	else:
		log.error(" - No open services found")

	# --------------------------------------------------------------------------
	# Export results
	# --------------------------------------------------------------------------
	if config.output is not None:
		# Ensure the output file always carries a .json extension.
		_output_path = "%s.json" % config.output if ".json" not in config.output else config.output

		with open(_output_path, "w") as f:
			json.dump(OPEN_SERVICES, f)

		log.error(" - Output results saved into: %s" % _output_path)


# --------------------------------------------------------------------------
def build_targets(config):
	"""
	Expands config.target (comma-separated IPs, CIDR networks or domains)
	into the set of individual IP addresses to scan.
	"""
	results = set()

	# Split targets
	for t in config.target.split(","):

		try:
			results.update(str(x) for x in ipaddress.ip_network(t, strict=False))
		except ValueError:
			# --------------------------------------------------------------------------
			# If reach this, is not a IPs, is a domain
			# --------------------------------------------------------------------------

			# Try to get all assigned IP of domain
			if config.own_ips is True:
				# Extract domain
				try:
					val = get_inet_num(t.split(".")[-2])

					if val is not None:
						for v in val:
							log.debug("    -> Detected registered network '%s'. Added for scan." % v)

							results.update(str(x) for x in ipaddress.ip_network(six.u(v), strict=False))
				except KeyError:
					# Invalid domain
					log.debug("  <ii> Error while try to extract domain: '%s'" % t)

			# --------------------------------------------------------------------------
			# Get all IPs for domain
			# --------------------------------------------------------------------------

			# If target is a domain, remove CDIR
			_target_cdir = t.split("/")
			_cleaned_target = _target_cdir[0]

			try:
				# Resolve
				host_ip = socket.gethostbyname(_cleaned_target)
			except socket.gaierror:
				# Try with the hostname with "www." again
				try:
					host_ip = socket.gethostbyname("www.%s" % _cleaned_target)
				except socket.gaierror:
					log.error("  <ii> Unable to resolve '%s'" % _cleaned_target)
					continue

			# Add CDIR to result
			scan_target = "%s%s" % (host_ip, "/%s" % _target_cdir[1] if len(_target_cdir) > 1 else "")

			results.update(str(x) for x in ipaddress.ip_network(six.u(scan_target), strict=False))

	return results


# --------------------------------------------------------------------------
# These 3 functions determinate if server has listen one of these services:
#   - Redis server
#   - RabbitMQ server
#   - ZeroMQ PUB/SUB pattern
#
# Each function try to connect or do some action and determinate if service
# is on or not.
# --------------------------------------------------------------------------
def handle_redis(host, port=6379, extra_config=None):
	"""Returns True if a Redis server answers CONFIG GET on host:port."""
	# log.debug("      * Connection to Redis: %s : %s" % (host, port))

	try:
		redis.StrictRedis(host=host, port=port, socket_connect_timeout=1, socket_timeout=1).config_get()
		return True
	except Exception:
		return False


# ----------------------------------------------------------------------
def handle_amqp(host, port=5672, extra_config=None):
	"""Returns True if an AMQP (RabbitMQ) handshake succeeds on host:port."""
	host_and_port = "%s:%s" % (host, port)

	# log.debug("      * Connection to RabbitMQ: %s : %s" % (host, port))

	try:
		amqp.connection.Connection(host=host_and_port, connect_timeout=1, read_timeout=1, socket_timeout=1)
		return True
	except Exception:
		return False


# ----------------------------------------------------------------------
def handle_zmq(host, port=5555, extra_config=None):
	"""Returns True if a ZeroMQ PUB socket publishes anything on host:port."""
	# log.debug("      * Connection to ZeroMQ: %s : %s" % (host, port))

	context = zmq.Context()

	# Configure
	socket = context.socket(zmq.SUB)
	socket.setsockopt(zmq.SUBSCRIBE, b"")  # All topics
	socket.setsockopt(zmq.LINGER, 0)  # Don't block on close
	socket.RCVTIMEO = 1000  # timeout: 1 sec

	# Connect
	socket.connect("tcp://%s:%s" % (host, port))

	# Try to receive
	try:
		socket.recv()
		return True
	except Exception:
		return False
	finally:
		socket.close()
generate.py
# Copyright (c) 2018, Kevin Spiteri # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import os import subprocess import json import sys import threading # TODO: Clean up to give an easier-to-understand example how to use Sabre! 
def load_json(path):
    """Load and return the JSON object stored at *path*."""
    with open(path) as file:
        obj = json.load(file)
    return obj

def cdf(l, margin = 0.025):
    """Return the empirical CDF of *l* as a list of [x, y] step points.

    When all samples are equal (zero range) a small horizontal margin is
    added on both sides so the step is still visible when plotted.
    """
    l = sorted(l)
    # NOTE: `range` shadows the builtin for the rest of this function.
    range = l[-1] - l[0]
    if range > 0:
        margin *= range
    inc = 1 / len(l)
    c = []
    y = 0
    if range == 0:
        c += [[l[0] - margin, y]]
    for x in l:
        # Two points per sample produce the vertical step of the CDF.
        c += [[x, y]]
        y += inc
        c += [[x, y]]
    if range == 0:
        c += [[l[-1] + margin, y]]
    return c

def mean_stddev(l):
    """Return (mean, population standard deviation) of the samples in *l*."""
    mean = sum(l) / len(l)
    var = sum([(x - mean) * (x - mean) for x in l]) / len(l)
    stddev = math.sqrt(var)
    return (mean, stddev)

def thread_run_sabre(results, command):
    """Run one sabre simulation and collect its "key: value" output lines.

    Only keys already present in *results* are kept; values are parsed as
    floats and appended (thread-safe because list.append is atomic).
    """
    completed = subprocess.run(command, stdout = subprocess.PIPE)
    for line in completed.stdout.decode('ascii').split('\n'):
        l = line.split(':')
        if len(l) != 2:
            continue
        if l[0] in results:
            results[l[0]].append(float(l[1]))

def thread_run_gnuplot(plotting):
    """Feed a gnuplot script (string) to a gnuplot subprocess."""
    subprocess.run('gnuplot', input = plotting.encode('ascii'))

def do_figure(prefix, subfigs, algorithms, metrics, term = None):
    """Run sabre over every trace/algorithm combination and plot CDFs.

    prefix     - file-name prefix for tmp/, stats/ and figures/ outputs.
    subfigs    - list of (title, trace_dir, extra_args[, xranges]) tuples.
    algorithms - list of (name, sabre_args, gnuplot_pars) tuples.
    metrics    - list of (label, sabre_output_key[, config_dict]) tuples.
    term       - gnuplot terminal string, or a per-metric list of them.

    Progress is animated in-place on the terminal with backspace characters.
    """
    print(prefix + ' ', end = '')
    plotting_threads = []
    for subfig in subfigs:
        title = subfig[0]
        dir = subfig[1]
        args1 = subfig[2]
        print(title + ' ', end = '')
        # info['metric_name']['algorithm_name'] = (mean, stddev, 'name of .dat file')
        info = {m[0]: {} for m in metrics}
        plot_mark_offset = 0
        for algorithm in algorithms:
            plot_mark_offset += 1
            name = algorithm[0]
            args = args1 + algorithm[1]
            print(name + ' ', end = '')
            results = {m[1]: [] for m in metrics}
            cnt = 0
            max_threads = 5
            threads = []
            for trace in os.listdir(dir)[:]: # use this line to limit directory size
                cnt += 1
                print('%d' % cnt, end = '')
                sys.stdout.flush()
                # Reap one finished worker if the pool is full.
                if len(threads) >= max_threads:
                    for t in threads:
                        if not t.is_alive():
                            t.join()
                            threads.remove(t)
                            break
                # Still full: block on the oldest worker.
                if len(threads) >= max_threads:
                    threads[0].join()
                    threads.pop(0)
                command = ['python3', './sabre-mmsys18.py', '-n', dir + '/' + trace] + args
                t = threading.Thread(target = thread_run_sabre, args = (results, command))
                threads.append(t)
                t.start()
                # Erase the printed counter (backspace, overwrite, backspace).
                print('\b' * len(str(cnt)), end = '')
                print(' ' * len(str(cnt)), end = '')
                print('\b' * len(str(cnt)), end = '')
            for t in threads:
                t.join()
            print('\b' * (len(name) + 1), end = '')
            print(' ' * (len(name) + 1), end = '')
            print('\b' * (len(name) + 1), end = '')
            for metric in metrics:
                config = metric[2] if len(metric) > 2 else {}
                samples = results[metric[1]]
                points = cdf(samples)
                # Median read off the CDF points (average of the two middles).
                median = (points[(len(points) - 1) // 2][0] + points[len(points) // 2][0]) / 2
                stats = (median, ) + mean_stddev(samples)
                datname = ('tmp/' + prefix + '-' + title.replace(' ', '-') + '-' +
                           metric[0].replace(' ', '-') + '-' +
                           algorithm[0].replace(' ', '-') + '.dat')
                info[metric[0]][algorithm[0]] = stats + (datname, )
                # Full CDF curve for the line plot.
                with open(datname, 'w') as f:
                    for l in points:
                        xoffset = config['xoffset'] if 'xoffset' in config else 0
                        f.write('%f %f\n' % (xoffset + l[0], l[1]))
                # Sparse ".dot" file: a few staggered markers per algorithm so
                # the point markers of different curves do not overlap.
                dot_count = 4
                step = math.floor(len(points) / dot_count)
                # plot_mark_offset in [1, len(algorithms)]
                first = math.ceil(plot_mark_offset / (len(algorithms) + 1) * step)
                with open(datname + '.dot', 'w') as f:
                    for l in points[first::step]:
                        xoffset = config['xoffset'] if 'xoffset' in config else 0
                        f.write('%f %f\n\n' % (xoffset + l[0], l[1]))
        # Write a per-subfigure text summary: median, mean, stddev.
        statname = ('stats/' + prefix + '-' + title.replace(' ', '-') + '.txt')
        delim = ''
        with open(statname, 'w') as f:
            for metric in metrics:
                f.write(delim)
                delim = '\n'
                f.write('%s:\n' % metric[0])
                for algorithm in algorithms:
                    i = info[metric[0]][algorithm[0]]
                    f.write('%s: %f %f %f\n' % (algorithm[0], i[0], i[1], i[2]))
        # Optional per-metric x-axis upper bounds (consumed with pop(0)).
        xranges = subfig[3][:] if len(subfig) > 3 else None
        mi = -1
        for metric in metrics:
            mi += 1
            config = metric[2] if len(metric) > 2 else {}
            pdfname = ('figures/' + prefix + '-' + title.replace(' ', '-') + '-' +
                       metric[0].replace(' ', '-') + '.pdf')
            key = config['key'] if 'key' in config else 'bottom right'
            xtics = str(config['xtics']) if 'xtics' in config else 'autofreq'
            #xlabel = title + ' ' + metric[0]
            xlabel = metric[0]
            if 'time' in xlabel:
                xlabel += ' (s)'
            elif 'bitrate' in xlabel:
                xlabel += ' (kbps)'
            if xranges:
                xrange = '[0:%f]' % xranges.pop(0)
            else:
                xrange = '[0:*]'
            plot_list = []
            point_types = [1, 2, 4, 6, 8, 10]
            pti = 0
            for algorithm in algorithms:
                pt = point_types[pti]
                pti += 1
                alg_pars = algorithm[2]
                # A "notitle" prefix in the pars string suppresses the legend
                # entry (only when a per-metric term list is supplied).
                if alg_pars.startswith('notitle'):
                    alg_pars = alg_pars[len('notitle'):] # HACK
                    if isinstance(term, list) and term[mi] != None:
                        do_title = ' notitle '
                    else:
                        do_title = ' title "' + algorithm[0] + '" '
                else:
                    do_title = ' title "' + algorithm[0] + '" '
                datname = info[metric[0]][algorithm[0]][-1]
                # Curve without markers plus the sparse marker overlay.
                plot_list += ['"' + datname + '" notitle ' + alg_pars + ' lw 2']
                plot_list += ['"' + datname + '.dot" ' + do_title +
                              ' with linespoints pt ' + str(pt) + ' ' + alg_pars + ' lw 2']
            trm = term[mi] if isinstance(term, list) else term
            if trm == None:
                trm = 'pdf size 2.3, 1.75 font ",16"'
            # Assemble the gnuplot script for this metric's CDF figure.
            plotting = '''set term ''' + trm + '''
set bmargin 3.5
set style data lines
set key ''' + key + '''
set xlabel "''' + xlabel + '''"
set xtics ''' + xtics + '''
set xrange ''' + xrange + '''
set output "''' + pdfname + '''"
plot ''' + ', '.join(plot_list) + '''
set output
'''
            #subprocess.run('gnuplot', input = plotting.encode('ascii'))
            t = threading.Thread(target = thread_run_gnuplot, args = (plotting, ))
            plotting_threads.append(t)
            t.start()
        print('\b' * (len(title) + 1), end = '')
        print(' ' * (len(title) + 1), end = '')
        print('\b' * (len(title) + 1), end = '')
    for t in plotting_threads:
        t.join()
    print('\b' * (len(prefix) + 1), end = '')
    print(' ' * (len(prefix) + 1), end = '')
    print('\b' * (len(prefix) + 1), end = '')

def figure12_write_network():
    """Write a constant 8 Mbps, zero-latency network trace to tmp/."""
    with open('tmp/network.json', 'w') as f:
        f.write('[ {"duration_ms": 60000, "bandwidth_kbps": 8000, "latency_ms": 0} ]')

def figure6a():
    """Figure 6a: BOLA vs BOLA-PL bitrate over the first 10 segments."""
    figure12_write_network()
    completed = subprocess.run(['python3', './sabre-mmsys18.py', '-v', '-m', 'bbb.json',
                                '-n', 'tmp/network.json', '-a', 'bola', '-ab'],
                               stdout = subprocess.PIPE)
    basic = completed.stdout.decode('ascii')
    completed = subprocess.run(['python3', './sabre-mmsys18.py', '-v', '-m', 'bbb.json',
                                '-n', 'tmp/network.json', '-a', 'bolae'],
                               stdout = subprocess.PIPE)
    bolapl = completed.stdout.decode('ascii')
    fig1 = []
    for out in [basic, bolapl]:
        fig = []
        # Parse verbose sabre output lines of the form "[t] idx: quality=q ..."
        for line in out.split('\n'):
            if not '[' in line or 'Network' in line:
                continue
            l = line.split()
            index = int(l[1].split(':')[0])
            quality = int(l[2].split('=')[1])
            #print('%d %d' % (index, quality))
            # Two points per 3 s segment draw a horizontal bitrate step.
            fig += [(index * 3, bbb['bitrates_kbps'][quality])]
            fig += [((index + 1) * 3, bbb['bitrates_kbps'][quality])]
            if index == 9:
                break
        fig1 += [fig]
    for i in [0, 1]:
        name = 'fig1%s.dat' % ['a', 'b'][i]
        with open('tmp/%s' % name, 'w') as f:
            for l in fig1[i]:
                f.write('%f %f\n' % (l[0], l[1]))
    plotting = '''set term pdf size 1.9, 1.75 font ",16"
set bmargin 3.5
set style data lines
set yrange[0:6500]
set xlabel 'play time (s)'
set ylabel 'bitrate (kbps)'
set xtics 10
#set key bottom right
set key out top center
set output "figures/fig6a.pdf"
#plot "tmp/fig1a.dat" title "BOLA" lc 7 dt 4 lw 2, "tmp/fig1b.dat" title "BOLA-PL" lc 6 lw 2
plot "tmp/fig1a.dat" title "BOLA" lc 7 dt 4 lw 2, "tmp/fig1b.dat" notitle lc 6 lw 2
set output
'''
    subprocess.run('gnuplot', input = plotting.encode('ascii'))

def figure6b():
    """Figure 6b: same comparison around a seek (-s 120 180), segments 35-69."""
    figure12_write_network()
    completed = subprocess.run(['python3', './sabre-mmsys18.py', '-v', '-m', 'bbb.json',
                                '-n', 'tmp/network.json', '-s', '120', '180',
                                '-a', 'bola', '-ab'],
                               stdout = subprocess.PIPE)
    basic = completed.stdout.decode('ascii')
    completed = subprocess.run(['python3', './sabre-mmsys18.py', '-v', '-m', 'bbb.json',
                                '-n', 'tmp/network.json', '-s', '120', '180',
                                '-a', 'bolae'],
                               stdout = subprocess.PIPE)
    bolapl = completed.stdout.decode('ascii')
    fig2 = []
    for out in [basic, bolapl]:
        fig = []
        for line in out.split('\n'):
            if not '[' in line or 'Network' in line:
                continue
            l = line.split()
            index = int(l[1].split(':')[0])
            quality = int(l[2].split('=')[1])
            if index < 35:
                continue
            if index == 60:
                # None marks the seek discontinuity (blank line in the .dat).
                fig += [None]
            #print('%d %d' % (index, quality))
            fig += [(index * 3, bbb['bitrates_kbps'][quality])]
            fig += [((index + 1) * 3, bbb['bitrates_kbps'][quality])]
            if index == 69:
                break
        fig2 += [fig]
    for i in [0, 1]:
        name = 'fig2%s.dat' % ['a', 'b'][i]
        with open('tmp/%s' % name, 'w') as f:
            for l in fig2[i]:
                if l == None:
                    f.write('\n')
                else:
                    f.write('%f %f\n' % (l[0], l[1]))
    plotting = '''set term pdf size 1.47, 1.75 font ",16"
set bmargin 3.5
set style data lines
set xrange[180:]
set yrange[0:6500]
set xlabel 'play time (s)'
#set ylabel 'bitrate (kbps)'
set ytics format ""
set xtics 10
#set key bottom right
set key out top center
set output "figures/fig6b.pdf"
#plot "tmp/fig2a.dat" title "BOLA" lc 7 dt 4 lw 2, "tmp/fig2b.dat" title "BOLA-PL" lc 6 lw 2
plot "tmp/fig2a.dat" notitle lc 7 dt 4 lw 2, "tmp/fig2b.dat" title "BOLA-PL" lc 6 lw 2
set output
'''
    subprocess.run('gnuplot', input = plotting.encode('ascii'))

def figure_1_4():
    """Figures 1, 4a, 4b: hand-drawn buffer-to-bitrate illustration plots."""
    # Each .dat file is a fixed "x y" point list drawn as a step function.
    with open('tmp/egbuf.dat', 'w') as f:
        f.write('''0 1000
5 1000
5 2500
10 2500
10 5000
15 5000
15 0
18 0
''')
    with open('tmp/lowbufa.dat', 'w') as f:
        f.write('''0 0
0 230
3.534 230
3.534 331
3.843 331
3.843 477
4.153 477
4.153 688
4.462 688
4.462 991
4.771 991
4.771 1427
5.081 1427
5.081 2056
5.390 2056
5.390 2962
5.759 2962
5.759 5027
6.075 5027
6.075 6000
7.000 6000
7.000 0
10 0
''')
    with open('tmp/lowbufb.dat', 'w') as f:
        f.write('''0 0
0 230
11.048 230
11.048 331
13.284 331
13.284 477
15.527 477
15.527 688
17.770 688
17.770 991
20.007 991
20.007 1427
22.244 1427
22.244 2056
24.483 2056
24.483 2962
27.150 2962
27.150 5027
29.441 5027
29.441 6000
36.132 6000
36.132 0
40 0
''')
    plotting1 = '''set term pdf size 3.35, 1.5 font ",16"
set bmargin 3.5
set style data lines
set xlabel 'buffer level (s)'
set ylabel 'bitrate (kbps)'
set output "figures/fig-1.pdf"
set xrange[0:18]
set yrange[0:6000]
#set arrow from 12.5,3500 to 5,0
#set arrow from 12.5,3500 to 10,0
#set arrow from 12.5,3500 to 15,0
#
#set arrow from 2,4000 to 0,5000
#set arrow from 2,4000 to 0,2500
#set arrow from 2,4000 to 0,1000
set arrow from 5,0 to 5,1000 nohead dt 2
set arrow from 10,0 to 10,2500 nohead dt 2
set arrow from 0,2500 to 5,2500 nohead dt 2
set arrow from 0,5000 to 10,5000 nohead dt 2
plot "tmp/egbuf.dat" lc 7 lw 2 notitle
set output
'''
    plotting2 = '''set term pdf size 3.35, 1.5 font ",16"
set bmargin 3.5
set style data lines
set xlabel 'buffer level (s)'
set ylabel 'bitrate (kbps)'
set output "figures/fig-4a.pdf"
set xrange[0:10]
set yrange[0:6500]
plot "tmp/lowbufa.dat" lc 7 lw 2 notitle
set output
set output "figures/fig-4b.pdf"
set xrange[0:40]
set yrange[0:6500]
set grid noxtics noytics noztics front
set style rect fc lt -1 fs solid 0.25 noborder
set obj rect from 16, 0 to 26, 6500
set arrow from 0.1,3500 to 15.9,3500 heads
set arrow from 16.1,3500 to 25.9,3500 heads
set label "virtual\\nplaceholder\\nsegments" at 8,5600 center
set label "actual\\nvideo\\nsegments" at 21,5600 center
plot "tmp/lowbufb.dat" lc 6 lw 2 notitle
set output
'''
    subprocess.run('gnuplot', input = plotting1.encode('ascii'))
    subprocess.run('gnuplot', input = plotting2.encode('ascii'))

def figure_7_10():
    """Figures 7 and 10: rampup-time CDFs for 4G VOD, with and without seek."""
    subfigs = [
        #('12dash vod' , '12dash', ['-m', 'bbb.json' , '-b', '25']),
        #('3Glogs vod' , '3Glogs', ['-m', 'bbb.json' , '-b', '25']),
        ('4G VOD' , '4Glogs', ['-m', 'bbb4k.json', '-b', '25']),
        #('12dash live10', '12dash', ['-m', 'bbb.json' , '-b', '10']),
        #('3G LIVE 10s', '3Glogs', ['-m', 'bbb.json' , '-b', '10']),
        #('4Glogs live10', '4Glogs', ['-m', 'bbb4k.json', '-b', '10']),
        #('12dash live5' , '12dash', ['-m', 'bbb.json' , '-b', '5' ]),
        #('3Glogs live5' , '3Glogs', ['-m', 'bbb.json' , '-b', '5' ]),
        #('4Glogs live5' , '4Glogs', ['-m', 'bbb4k.json', '-b', '5' ]),
    ]
    metrics = [
        #('rebuffer' , 'rebuffer ratio'),
        #('oscillation', 'time average bitrate change'),
        #('bitrate' , 'time average played bitrate'),
        ('reaction time' , 'rampup time', {'key': 'out top center horizontal', 'xtics': 10}),
    ]
    prefix = 'fig7a'
    algorithms = [
        ('BOLA' , ['-ao', '-a', 'bola', '-ab'] , 'lc 7'),
        ('BOLA-PL', ['-ao', '-a', 'bolae', '-noibr'], 'notitle lc 6'),
    ]
    term = 'pdf size 1.8, 1.75 font ",16"'
    do_figure(prefix, subfigs, algorithms, metrics, term = term)
    prefix = 'fig7b'
    algorithms = [
        ('BOLA' , ['-ao', '-a', 'bola', '-ab' , '-s', '120', '180'], 'notitle lc 7'),
        ('BOLA-PL', ['-ao', '-a', 'bolae', '-noibr', '-s', '120', '180'], 'lc 6'),
    ]
    term = 'pdf size 1.5, 1.75 font ",16"\nset ytics format ""'
    do_figure(prefix, subfigs, algorithms, metrics, term = term)
    prefix = 'fig10a'
    algorithms = [
        ('BOLA' , ['-ao', '-a', 'bola', '-ab'], 'lc 4'),
        ('TPUT' , ['-ao', '-a', 'throughput'], 'lc 2'),
        ('DYNAMIC' , ['-ao', '-a', 'dynamic', '-ab'], 'notitle lc 1'),
    ]
    term = 'pdf size 1.8, 1.75 font ",16"'
    do_figure(prefix, subfigs, algorithms, metrics, term = term)
    prefix = 'fig10b'
    algorithms = [
        ('BOLA' , ['-ao', '-a', 'bola', '-ab', '-s', '120', '180'], 'notitle lc 4'),
        ('TPUT' , ['-ao', '-a', 'throughput', '-s', '120', '180'], 'notitle lc 2'),
        ('DYNAMIC' , ['-ao', '-a', 'dynamic', '-ab', '-s', '120', '180'], 'lc 1'),
    ]
    term = 'pdf size 1.5, 1.75 font ",16"\nset ytics format ""'
    do_figure(prefix, subfigs, algorithms, metrics, term = term)

def figure8():
    """Figure 8: BOLA variants on 3G live traces, three quality metrics."""
    prefix = 'fig8'
    algorithms = [
        ('BOLA', ['-a', 'bola', '-ao', '-ab'], ' lc 4'),
        ('BOLA-PL', ['-a', 'bolae', '-ao', '-noibr'], ' lc 7'),
        ('BOLA-E' , ['-a', 'bolae', '-ao' ], ' lc 6'),
    ]
    metrics = [
        ('rebuffer ratio' , 'rebuffer ratio', {'xtics' : 0.1}),
        ('average bitrate oscillation', 'time average bitrate change', {'xtics': 150}),
        ('average bitrate' , 'time average played bitrate', {'xtics': 500}),
    ]
    subfigs = [
        ('3G Live 10s' , '3Glogs', ['-m', 'bbb.json' , '-b', '10'], [0.6, 600, 2000]),
    ]
    do_figure(prefix, subfigs, algorithms, metrics)
    # metrics = [
    #     ('rebuffer ratio' , 'rebuffer ratio'),
    #     ('average bitrate oscillation', 'time average bitrate change', {'xtics': 500}),
    #     ('average bitrate' , 'time average played bitrate', {'xtics': 10000}),
    # ]
    #
    # subfigs = [
    #     ('4G Live 10s', '4Glogs', ['-m', 'bbb4k.json', '-b', '10']),
    #     ('4G VOD', '4Glogs', ['-m', 'bbb4k.json', '-b', '25']),
    # ]
    #
    # do_figure(prefix, subfigs, algorithms, metrics)

def figure11():
    """Figure 11: BOLA / THROUGHPUT / DYNAMIC on 4G VOD and 4G live traces."""
    prefix = 'fig11'
    subfigs = [
        #('12dash vod' , '12dash', ['-m', 'bbb.json' , '-b', '25']),
        #('3Glogs vod' , '3Glogs', ['-m', 'bbb.json' , '-b', '25']),
        ('4G VOD' , '4Glogs', ['-m', 'bbb4k.json', '-b', '25'], [0.1, 2200, 34000]),
        #('12dash live10', '12dash', ['-m', 'bbb.json' , '-b', '10']),
        #('3Glogs live10', '3Glogs', ['-m', 'bbb.json' , '-b', '10']),
        ('4G Live 10s', '4Glogs', ['-m', 'bbb4k.json', '-b', '10'], [0.1, 4600, 31500]),
        #('12dash live5' , '12dash', ['-m', 'bbb.json' , '-b', '5' ]),
        #('3Glogs live5' , '3Glogs', ['-m', 'bbb.json' , '-b', '5' ]),
        #('4Glogs live5' , '4Glogs', ['-m', 'bbb4k.json', '-b', '5' ], [0.1, 4000, 35000]),
    ]
    algorithms = [
        #('BOLA-E' , ['-ao', '-a', 'bolae' ], 'lc 7'),
        #('DYNAMIC-DASH' , ['-ao', '-a', 'dynamicdash'], 'lc 1'),
        ('BOLA' , ['-ao', '-a', 'bola' ], 'lc 4'),
        ('THROUGHPUT' , ['-ao', '-a', 'throughput'], 'lc 2'),
        ('DYNAMIC' , ['-ao', '-a', 'dynamic' ], 'lc 1'),
    ]
    metrics = [
        ('rebuffer ratio' , 'rebuffer ratio'),
        ('average bitrate oscillation', 'time average bitrate change',
         {'xtics': 1000, 'key': 'bottom right font ",12"'}),
        ('average bitrate' , 'time average played bitrate',
         {'xtics': 10000, 'key': 'bottom right font ",12"'}),
    ]
    do_figure(prefix, subfigs, algorithms, metrics)

def figure_12_13():
    """Figures 12/13: fast-switching (-FS) variants on FCC SD and HD traces."""
    prefix = '12_13'
    subfigs = [
        ('FCC SD', 'sd_fs', ['-m', 'bbb.json' , '-b', '25'], [0.01, 450, 4500, 120]),
    ]
    algorithms = [
        ('BOLA-E' , ['-ao', '-r', 'none', '-a', 'bolae' , '-rmp', '9', '-ml', '180'], 'lc 7'),
        ('BOLA-E-FS' , ['-ao', '-r', 'left', '-a', 'bolae' , '-rmp', '9', '-ml', '180'], 'lc 1'),
        ('DYNAMIC' , ['-ao', '-r', 'none', '-a', 'dynamic', '-rmp', '9', '-ml', '180'], 'notitle lc 3'),
        ('DYNAMIC-FS', ['-ao', '-r', 'left', '-a', 'dynamic', '-rmp', '9', '-ml', '180'], 'notitle lc 4'),
    ]
    metrics = [
        ('rebuffer ratio' , 'rebuffer ratio'),
        ('average bitrate oscillation' , 'time average bitrate change', {'xtics': 150}),
        ('average bitrate' , 'time average played bitrate',
         {'key': 'top left reverse Left font ",12"', 'xtics': 1500}),
        ('reaction time', 'rampup time',
         {'xoffset': -60, 'key': 'out top center vertical', 'xtics': 40}),
    ]
    term = 'pdf size 1.8, 1.75 font ",16"\n'
    do_figure(prefix, subfigs, algorithms, metrics, term = [None, None, None, term])
    prefix = '12_13'
    subfigs = [
        ('FCC HD', 'hd_fs', ['-m', 'bbb4k.json', '-b', '25'], [0.01, 1200, 12000, 120]),
    ]
    algorithms = [
        ('BOLA-E' , ['-ao', '-r', 'none', '-a', 'bolae' , '-rmp', '4', '-ml', '180'], 'notitle lc 7'),
        ('BOLA-E-FS' , ['-ao', '-r', 'left', '-a', 'bolae' , '-rmp', '4', '-ml', '180'], 'notitle lc 1'),
        ('DYNAMIC' , ['-ao', '-r', 'none', '-a', 'dynamic', '-rmp', '4', '-ml', '180'], 'lc 3'),
        ('DYNAMIC-FS', ['-ao', '-r', 'left', '-a', 'dynamic', '-rmp', '4', '-ml', '180'], 'lc 4'),
    ]
    metrics = [
        ('rebuffer ratio' , 'rebuffer ratio'),
        ('average bitrate oscillation' , 'time average bitrate change', {'xtics': 400}),
        ('average bitrate' , 'time average played bitrate',
         {'key': 'top left reverse Left font ",12"', 'xtics': 4000}),
        ('reaction time', 'rampup time',
         {'xoffset': -60, 'key': 'out top center vertical', 'xtics': 40}),
    ]
    term = 'pdf size 1.5, 1.75 font ",16"\nset ytics format ""'
    do_figure(prefix, subfigs, algorithms, metrics, term = [None, None, None, term])

if __name__ == '__main__':
    # Manifests are module-level globals read by the figure functions.
    bbb = load_json('bbb.json')
    bbb4k = load_json('bbb4k.json')
    os.makedirs('tmp', exist_ok = True)
    os.makedirs('figures', exist_ok = True)
    os.makedirs('stats', exist_ok = True)
    figure6a()
    figure6b()
    figure_1_4()
    figure_7_10()
    figure8()
    figure11()
    figure_12_13()
ros2_facelook_node.py
# Copyright 2018 Robert Adams
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import queue
import threading
import time
import sys

import rclpy
from rclpy.node import Node
from rclpy.parameter import Parameter

from std_msgs.msg import Int32MultiArray
from std_msgs.msg import MultiArrayDimension

from ros2_adafruit_pwmhat_msgs.msg import PWMPinAngle, PWMAngle


class ROS2_facelook_node(Node):
    """Subscribe to face bounding boxes and steer a pan/tilt PWM camera mount
    so the camera centers on the average face position."""

    def __init__(self):
        super().__init__('ros2_facelook_node', namespace='raspicam')

        self.set_parameter_defaults( [
            ('bounding_box_topic', Parameter.Type.STRING, 'found_faces'),
            ('pwm_topic', Parameter.Type.STRING, '/pwmhatter/angle'),
            ('angle_step', Parameter.Type.DOUBLE, 1.0),
            ('delta_magnification', Parameter.Type.DOUBLE, 10.0),
            ('max_angle', Parameter.Type.DOUBLE, 80.0),
            ] )

        self.initialize_pwm_publisher()
        self.initialize_processing_queue()
        self.initialize_bounding_box_subscriber()

    def destroy_node(self):
        # overlay Node function called when class is being stopped and camera needs closing
        super().destroy_node()

    def initialize_bounding_box_subscriber(self):
        # Setup subscription for incoming bounding box info.
        # NOTE(review): newer rclpy releases require a qos_profile argument
        # here — confirm against the target ROS2 distribution.
        self.receiver = self.create_subscription(Int32MultiArray,
                        self.get_parameter_value('bounding_box_topic'),
                        self.receive_bounding_box)

    def initialize_processing_queue(self):
        # Create a queue and a thread that processes messages in the queue
        self.queue_lock = threading.Lock()
        self.bbox_queue = queue.Queue()
        # self.bbox_queue = queue.SimpleQueue() # introduced in Python 3.7

        # thread to read images placed in the queue and process them
        self.processor_event = threading.Event()
        self.processor = threading.Thread(target=self.process_bounding_boxes,
                                          name='bounding box')
        self.processor.start()

    def initialize_pwm_publisher(self):
        # initialize 'known' angle so first request will be sure to go out
        self.pan_angle = 10000
        self.tilt_angle = 10000
        self.pwmmer = PWMmer(self, self.get_parameter_value('pwm_topic'),
                    -self.get_parameter_value('max_angle'),
                    self.get_parameter_value('max_angle'),
                    self.get_logger())
        self.send_pwm_commands(self.pan_angle, self.tilt_angle)

    def stop_workers(self):
        # if workers are initialized and running, tell them to stop and wait until stopped
        if hasattr(self, 'processor_event') and self.processor_event is not None:
            self.processor_event.set()
        if hasattr(self, 'processor') and self.processor.is_alive():
            self.processor.join()

    def receive_bounding_box(self, msg):
        # Subscription callback: queue the message for the worker thread.
        if msg is not None and hasattr(msg, 'data'):
            self.get_logger().debug('FLooker: receive_bbox. dataLen=%s' % (len(msg.data)))
            self.bbox_queue.put(msg)
        else:
            self.get_logger().error('FLooker: receive_bbox. no data attribute')

    def process_bounding_boxes(self):
        # Take bounding boxes from the queue and send angle commands to the camera
        # Initialize camera position
        self.get_logger().debug('FLooker: Initializing camera to 0,0')
        self.send_pwm_commands(0, 0)

        # Loop for each bounding box info and update the camera movement
        while True:
            if self.processor_event.is_set():
                break
            try:
                # Short timeout so the stop event is rechecked periodically.
                msg = self.bbox_queue.get(block=True, timeout=2)
            except queue.Empty:
                msg = None
            if self.processor_event.is_set():
                break
            if msg is not None:
                # Bounding boxes come in a two dimensional array:
                #   Row 0 => ( 0, 0, imageAreaWidth, imageAreaHeight)
                #   Row n => ( bb_right, bb_top, bb_width, bb_height )
                bboxes = AccessInt32MultiArray(msg)
                width = bboxes.get(0, 2)
                widthhalf = width / 2
                height = bboxes.get(0, 3)
                heighthalf = height / 2
                self.get_logger().debug('FLooker: process_bounding_boxes. image=%s/%s'
                                        % (width, height) )
                # loop over all bounding boxes and compute the average center
                wcenter = 0
                wheight = 0
                hcenter = 0
                hwidth = 0
                for ii in range(1, bboxes.rows):
                    wcenter = wcenter + ((bboxes.get(ii, 2) - bboxes.get(ii, 0)) / 2) + bboxes.get(ii,0)
                    wheight = wheight + bboxes.get(ii,3)
                    hcenter = hcenter + ((bboxes.get(ii, 3) - bboxes.get(ii, 1)) / 2) + bboxes.get(ii,1)
                    hwidth = hwidth + bboxes.get(ii,2)
                waverage = wcenter / ( bboxes.rows - 1)  # average horizontal center of all boxes
                wheight = wheight / ( bboxes.rows - 1)   # average height of all boxes
                haverage = hcenter / ( bboxes.rows - 1)  # average vertical center of all boxes
                hwidth = hwidth / ( bboxes.rows - 1)     # average width of all boxes
                self.get_logger().debug(
                        'FLooker: process_bounding_boxes. averageCenter=%s/%s, averageSize=%s/%s'
                                % (waverage, haverage, hwidth, wheight) )

                # positive deltas mean above the middle and negative deltas mean below the middle
                wdelta = (width / 2) - waverage
                hdelta = (height / 2) - haverage
                self.get_logger().debug('FLooker: process_bounding_boxes. deltas=%s/%s'
                                % (wdelta, hdelta) )

                if (wdelta <= -widthhalf or wdelta >= widthhalf
                            or hdelta <= -heighthalf or hdelta >= heighthalf):
                    self.get_logger().error(
                        'FLooker: deltas wrong! dim=%s/%s, avg=%s/%s, delta=%s/%s'
                        % ( width, height, waverage, haverage, wdelta, hdelta) )
                else:
                    # Step toward the target; the step is scaled down by
                    # delta_magnification so motion is proportional but damped.
                    target_pan_angle = (self.pan_angle +
                            (self.get_parameter_value('angle_step')
                                * self.sign(wdelta)
                                * abs(wdelta) / self.get_parameter_value('delta_magnification') ) )
                    target_tilt_angle = (self.tilt_angle -
                            (self.get_parameter_value('angle_step')
                                * self.sign(hdelta)
                                * abs(hdelta) / self.get_parameter_value('delta_magnification') ) )
                    self.send_pwm_commands(target_pan_angle, target_tilt_angle)

    def send_pwm_commands(self, target_pan_angle, target_tilt_angle):
        # Send command to PWM channels if the desired angle has changed.
        # Note: uses and updates self.pan_angle and self.tilt_angle.
        if target_pan_angle != self.pan_angle:
            if self.pwmmer.setPWM('pan', target_pan_angle):
                self.get_logger().debug('FLooker: sending chan=%s, angle=%s'
                                        % ('pan', target_pan_angle))
                self.pan_angle = target_pan_angle
            else:
                self.get_logger().error('FLooker: target pan angle failed! targets=%s/%s'
                        % (target_pan_angle, target_tilt_angle) )
        if target_tilt_angle != self.tilt_angle:
            if self.pwmmer.setPWM('tilt', target_tilt_angle):
                self.get_logger().debug('FLooker: sending chan=%s, angle=%s'
                                        % ('tilt', target_tilt_angle))
                self.tilt_angle = target_tilt_angle
            else:
                self.get_logger().error('FLooker: target tilt angle failed! targets=%s/%s'
                        % (target_pan_angle, target_tilt_angle) )

    def get_parameter_or(self, param, default):
        # Helper function to return value of a parameter or a default if not set
        ret = None
        param_desc = self.get_parameter(param)
        if param_desc.type_== Parameter.Type.NOT_SET:
            ret = default
        else:
            ret = param_desc.value
        return ret

    def get_parameter_value(self, param):
        # Helper function to return value of a parameter; raises if unset
        ret = None
        param_desc = self.get_parameter(param)
        if param_desc.type_== Parameter.Type.NOT_SET:
            raise Exception('Fetch of parameter that does not exist: ' + param)
        else:
            ret = param_desc.value
        return ret

    def set_parameter_defaults(self, params):
        # If a parameter has not been set externally, set the value to a default.
        # Passed a list of "(parameterName, parameterType, defaultValue)" tuples.
        parameters_to_set = []
        for (pparam, ptype, pdefault) in params:
            if not self.has_parameter(pparam):
                parameters_to_set.append( Parameter(pparam, ptype, pdefault) )
        if len(parameters_to_set) > 0:
            self.set_parameters(parameters_to_set)

    def has_parameter(self, param):
        # Return 'True' if a parameter by that name is specified
        param_desc = self.get_parameter(param)
        if param_desc.type_== Parameter.Type.NOT_SET:
            return False
        return True

    def sign(self, val):
        # Helper function that returns the sign of the passed value (1 or -1).
        # Defined here so we don't have to require numpy.
        return 1 if val >= 0 else -1


class PWMmer:
    # Small class to hold current state of PWM channel
    def __init__(self, node, topic, minVal, maxVal, logger=None):
        self.node = node
        self.topic = topic
        self.minVal = minVal
        self.maxVal = maxVal
        self.logger = logger
        self.channels = {}  # per-channel last angle sent
        self.logger.debug('PWMmer: init: topic=%s, min=%s, max=%s'
                          % (topic, str(minVal), str(maxVal)))
        self.publisher = self.node.create_publisher(PWMAngle, topic)

    def setPWM(self, channel, angle):
        # Send the message to set the given PWM channel.
        # Returns False (and sends nothing) when the angle is out of range.
        ret = True
        if not channel in self.channels:
            # Sentinel below minVal forces the first real angle to be sent.
            self.channels[channel] = self.minVal - 1000
        if angle != self.channels[channel]:
            if angle >= self.maxVal or angle <= self.minVal:
                self.logger.error('PWMmer: angle out of range. channel=%s, angle=%s'
                                  % (channel, angle) )
                ret = False
            else:
                msg = PWMAngle()
                msg.chan = str(channel)
                msg.angle = float(angle)
                msg.angle_units = PWMAngle.DEGREES
                self.publisher.publish(msg)
                self.channels[channel] = angle
                ret = True
        return ret


class AccessInt32MultiArray:
    # Wrap a multi-access array with functions for 2D access.
    # `rows` and `columns` are plain int attributes.
    def __init__(self, arr):
        self.arr = arr
        self.columns = self.ma_get_size_from_label('width')
        self.rows = self.ma_get_size_from_label('height')
        # BUG FIX: the original also defined a `rows()` method, which this
        # attribute shadowed on every instance — calling it raised
        # "TypeError: 'int' object is not callable". The dead method has
        # been removed; use the `rows` attribute directly.

    def get(self, row, col):
        # return the entry at row 'row' and column 'col' (row-major layout)
        return self.arr.data[col + ( row * self.columns)]

    def ma_get_size_from_label(self, label):
        # Return dimension size for passed label (usually 'width' or 'height')
        for mad in self.arr.layout.dim:
            if mad.label == label:
                return int(mad.size)
        return 0


class CodeTimer:
    # A little helper class for timing blocks of code
    def __init__(self, logger, name=None):
        self.logger = logger
        self.name = " '" + name + "'" if name else ''

    def __enter__(self):
        # BUG FIX: time.clock() was removed in Python 3.8; perf_counter()
        # is the documented replacement for wall-clock interval timing.
        self.start = time.perf_counter()

    def __exit__(self, exc_type, exc_value, traceback):
        self.took = (time.perf_counter() - self.start) * 1000.0
        self.logger('Code block' + self.name + ' took: ' + str(self.took) + ' ms')


def main(args=None):
    rclpy.init(args=args)

    ffNode = ROS2_facelook_node()

    try:
        rclpy.spin(ffNode)
    except KeyboardInterrupt:
        ffNode.get_logger().info('FLooker: Keyboard interrupt')

    ffNode.stop_workers()
    ffNode.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
manager.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import, division, print_function, \
    with_statement

import errno
import traceback
import socket
import logging
import json
import collections

from shadowsocks.core import common, eventloop, tcprelay, udprelay, asyncdns
from shadowsocks.lib import shell

# Max bytes read per control datagram; also the batch size cap below.
BUF_SIZE = 1506
# Max number of per-port stat entries packed into a single UDP报 packet.
STAT_SEND_LIMIT = 50


class Manager(object):
    """Multi-port shadowsocks server controlled over a UDP (or Unix) socket.

    Accepts plain-text commands of the form "add: {json}", "remove: {json}"
    and "ping", and periodically pushes per-port transfer statistics back to
    the last控制 client that talked to it.
    """

    def __init__(self, config):
        self._config = config
        self._relays = {}  # (tcprelay, udprelay)
        self._loop = eventloop.EventLoop()
        self._dns_resolver = asyncdns.DNSResolver()
        self._dns_resolver.add_to_loop(self._loop)

        # port -> bytes transferred since the last periodic flush
        self._statistics = collections.defaultdict(int)
        self._control_client_addr = None
        try:
            manager_address = common.to_str(config['manager_address'])
            if ':' in manager_address:
                # "host:port" form -> UDP socket; family taken from resolution.
                addr = manager_address.rsplit(':', 1)
                addr = addr[0], int(addr[1])
                addrs = socket.getaddrinfo(addr[0], addr[1])
                if addrs:
                    family = addrs[0][0]
                else:
                    logging.error('invalid address: %s', manager_address)
                    exit(1)
            else:
                # Plain path -> Unix domain datagram socket.
                addr = manager_address
                family = socket.AF_UNIX
            self._control_socket = socket.socket(family,
                                                 socket.SOCK_DGRAM)
            self._control_socket.bind(addr)
            self._control_socket.setblocking(False)
        except (OSError, IOError) as e:
            logging.error(e)
            logging.error('can not bind to manager address')
            exit(1)
        self._loop.add(self._control_socket,
                       eventloop.POLL_IN, self)
        self._loop.add_periodic(self.handle_periodic)

        # Spin up one TCP+UDP relay pair per configured port.
        port_password = config['port_password']
        del config['port_password']
        for port, password in port_password.items():
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            self.add_port(a_config)

    def add_port(self, config):
        """Start a TCP+UDP relay pair for config['server_port'] (idempotent)."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.error("server already exists at %s:%d" % (config['server'],
                                                              port))
            return
        logging.info("adding server at %s:%d" % (config['server'], port))
        t = tcprelay.TCPRelay(config, self._dns_resolver, False,
                              stat_callback=self.stat_callback)
        u = udprelay.UDPRelay(config, self._dns_resolver, False,
                              stat_callback=self.stat_callback)
        t.add_to_loop(self._loop)
        u.add_to_loop(self._loop)
        self._relays[port] = (t, u)

    def remove_port(self, config):
        """Stop and forget the relay pair listening on config['server_port']."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.info("removing server at %s:%d" % (config['server'], port))
            t, u = servers
            t.close(next_tick=False)
            u.close(next_tick=False)
            del self._relays[port]
        else:
            logging.error("server not exist at %s:%d" % (config['server'],
                                                         port))

    def handle_event(self, sock, fd, event):
        # Event-loop callback: dispatch a control datagram to add/remove/ping.
        if sock == self._control_socket and event == eventloop.POLL_IN:
            data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
            parsed = self._parse_command(data)
            if parsed:
                command, config = parsed
                a_config = self._config.copy()
                if config:
                    # let the command override the configuration file
                    a_config.update(config)
                if 'server_port' not in a_config:
                    logging.error('can not find server_port in config')
                else:
                    if command == 'add':
                        self.add_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'remove':
                        self.remove_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'ping':
                        self._send_control_data(b'pong')
                    else:
                        logging.error('unknown command %s', command)

    def _parse_command(self, data):
        # commands:
        # add: {"server_port": 8000, "password": "foobar"}
        # remove: {"server_port": 8000"}
        # Returns (command, config_dict), (data, None) for bare commands,
        # or None when the JSON payload fails to parse.
        data = common.to_str(data)
        parts = data.split(':', 1)
        if len(parts) < 2:
            return data, None
        command, config_json = parts
        try:
            config = shell.parse_json_in_str(config_json)
            return command, config
        except Exception as e:
            logging.error(e)
            return None

    def stat_callback(self, port, data_len):
        # Called by the relays: accumulate bytes transferred per port.
        self._statistics[port] += data_len

    def handle_periodic(self):
        # Periodic loop hook: flush accumulated statistics to the client.
        r = {}
        i = 0

        def send_data(data_dict):
            if data_dict:
                # use compact JSON format (without space)
                data = common.to_bytes(json.dumps(data_dict,
                                                  separators=(',', ':')))
                self._send_control_data(b'stat: ' + data)

        for k, v in self._statistics.items():
            r[k] = v
            i += 1
            # split the data into segments that fit in UDP packets
            if i >= STAT_SEND_LIMIT:
                send_data(r)
                r.clear()
                i = 0
        if len(r) > 0:
            send_data(r)
        self._statistics.clear()

    def _send_control_data(self, data):
        # Best-effort reply to the most recent control client; EAGAIN-style
        # errors are silently dropped, others are logged.
        if self._control_client_addr:
            try:
                self._control_socket.sendto(data, self._control_client_addr)
            except (socket.error, OSError, IOError) as e:
                error_no = eventloop.errno_from_exception(e)
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK):
                    return
                else:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()

    def run(self):
        """Block forever running the event loop."""
        self._loop.run()


def run(config):
    """Module entry point: build a Manager from *config* and run it."""
    Manager(config).run()


def test():
    """Integration self-test: start a manager thread, exercise add/remove and
    the TCP/UDP statistics paths over the control socket."""
    import time
    import threading
    import struct
    from shadowsocks.core import encrypt

    logging.basicConfig(level=5,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    enc = []
    eventloop.TIMEOUT_PRECISION = 1

    def run_server():
        shell.parse_args()
        config = shell.parse_config(True)
        config = config.copy()
        a_config = {
            'server': '127.0.0.1',
            'local_port': 1081,
            'port_password': {
                '8381': 'foobar1',
                '8382': 'foobar2'
            },
            'method': 'aes-256-cfb',
            'manager_address': '127.0.0.1:6001',
            'timeout': 60,
            'fast_open': False,
            'verbose': 2
        }
        config.update(a_config)
        manager = Manager(config)
        enc.append(manager)
        manager.run()

    t = threading.Thread(target=run_server)
    t.start()
    time.sleep(1)
    manager = enc[0]

    cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    cli.connect(('127.0.0.1', 6001))

    # test add and remove
    time.sleep(1)
    cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
    time.sleep(1)
    assert 7001 in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data

    cli.send(b'remove: {"server_port":8381}')
    time.sleep(1)
    assert 8381 not in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data
    logging.info('add and remove test passed')

    # test statistics for TCP
    header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
                               header + b'GET /\r\n\r\n')
    tcp_cli = socket.socket()
    tcp_cli.connect(('127.0.0.1', 7001))
    tcp_cli.send(data)
    tcp_cli.recv(4096)
    tcp_cli.close()
    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = shell.parse_json_in_str(data)
    assert '7001' in stats
    logging.info('TCP statistics test passed')

    # test statistics for UDP
    header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1, header + b'test')
    udp_cli = socket.socket(type=socket.SOCK_DGRAM)
    udp_cli.sendto(data, ('127.0.0.1', 8382))
    tcp_cli.close()
    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = json.loads(data)
    assert '8382' in stats
    logging.info('UDP statistics test passed')

    manager._loop.stop()
    t.join()


if __name__ == '__main__':
    test()
config.py
# ================================= # # --------------------------------- # # -- Dunkmania101's Qtile Config -- # # --------------------------------- # # ================================= # import os, subprocess, imaplib, re #, gi #gi.require_version("Gdk", "3.0") #from gi.repository import Gdk from json import loads as jloads #, dumps as jdumps from time import sleep from shutil import which from threading import Thread from random import choice from libqtile import qtile, layout, hook, bar, widget, extension from libqtile.backend.x11.core import get_keys # from libqtile.backend.wayland.core import keyboard as wl_kbd from libqtile.config import Key, KeyChord, Drag, Screen, Match, Group, ScratchPad, DropDown #, Click from libqtile.widget import base as widget_base from libqtile.lazy import lazy # --------------------- # # -- Basic Functions -- # # --------------------- # def shcmd_exists(cmd): return which(cmd) is not None def sub_run_cmd(cmd, cwd): try: subprocess.run(cmd, cwd=cwd, shell=True) except Exception as e: print(str(e)) pass def run_cmd(cmd, cwd=None, thread=True): if thread: Thread(target=sub_run_cmd, args=(cmd, cwd,)).start() else: sub_run_cmd(cmd, cwd) def get_cmd_output(cmd, cwd=None): output = "" try: output = str(subprocess.run(cmd, cwd=cwd, shell=True, stdout=subprocess.PIPE).stdout.decode('utf-8')) except Exception as e: print(str(e)) pass return output def exec_func_no_qtile(_, func, args): if callable(func): func(*args) def is_wayland(): return qtile.core.name == "wayland" def gen_jgmenu_cmd(fmt="uxterm"): return f"echo \'{fmt}\' | jgmenu --simple" # -------------------- # # -- Base Variables -- # # -------------------- # cfg_dir = os.path.expanduser("~/.config/qtile") #run_cmd(cfg_dir + "/autostart.sh") rofi_dir = cfg_dir + "/rofi" env_file = os.path.expanduser("~/.private/data/env.json") env_data = {} if os.path.isfile(env_file): with open(env_file, "r") as f: try: env_data = jloads(f.read()) except: pass # Themes my_font = "Iosevka" 
my_term_font = "Iosevka Term"

# One Dark
#bg_color = "#282c34"
#fg_color = "#5c6370"
#dark_bg_color = "#222222"
#bg_line_color = "#5c6370"
#fg_line_color = "#61afef"
#bg_line_color_alt = "#504d4d"
#fg_line_color_alt = "#4b5263"
#bg_txt_color = "#abb2bf"
#fg_txt_color = "#61afef"
#green_color = "#504945"

# Gruvbox
bg_color = "#1B2229"
fg_color = "#4f443a"
dark_bg_color = "#222222"
bg_line_color = "#3c3836"
fg_line_color = "#4f4347"
bg_line_color_alt = "#aa5000"
fg_line_color_alt = "#ff8000"
bg_txt_color = "#3c3836"
fg_txt_color = "#ebdbb2"
green_color = "#687b01"
red_color = "#cc241d"

# Base Groups (copied to each monitor)
my_base_groups = "~ 1 2 3 4 5 6 7 8 9 0 - =".split(" ")

# Screen to put the systray widget on
my_systray_screen = 0

# Gap and border sizes
my_border_width = 4
my_margin = 1

# Directories
my_wallpapers = os.path.expanduser("~/Wallpapers")  # Can point to a directory or a single image file.
my_screenshots_dir = os.path.expanduser("~/Screenshots")

# Details
my_distro = "Guix"
# BUG FIX: the commands were accumulated as `cmd += "; ..."`, leaving a leading
# "; " that is a shell syntax error when CheckUpdates runs the command.
_update_cmds = []
#if shcmd_exists("pip"):
#    _update_cmds.append("pip list --outdated --format=freeze")
if shcmd_exists("paru"):
    _update_cmds.append("paru --query --upgrades")
if shcmd_exists("guix"):
    _update_cmds.append("guix refresh")
my_check_updates_cmd = "; ".join(_update_cmds)

if is_wayland():
    # NOTE(review): raw `wlr-randr` output includes more than monitor names —
    # confirm the wayland branch actually yields one name per line.
    my_get_monitors_cmd = "wlr-randr"
else:
    my_get_monitors_cmd = "xrandr --query | grep \" connected\" | cut -d\" \" -f1"

my_mouse_move_cmd = "xdotool mousemove_relative -- "
my_mouse_move_dist = "10"
my_mouse_click_cmd = "xdotool click "

my_gmail_username = env_data.get("gmail.username", "")
my_gmail_pass = env_data.get("gmail.pass", "")

# Applications
#my_terminal = "kitty -e tmux"
#my_terminal_tmux = f"kitty -e \'{cfg_dir}/scripts/run/run-tmux-session.sh\'"
my_terminal = "kitty"
#my_terminal = f"uxterm -si -fs 10 -fa \"{my_term_font}\" -bg \'#212121\' -bd \'#212111\'"
#my_terminal_alt = "kitty"
my_terminal_alt = f"uxterm -si -fs 10 -fa \"{my_term_font}\" -bg \'#212121\' -bd \'#212111\'"
#my_terminal_alt = "st"
#my_terminal_alt = "cool-retro-term"
#my_terminal_alt = "darktile"
#my_terminal_alt = "extraterm"
#my_terminal_alt1 = "kitty -e tmux"
#my_terminal_alt1 = "kitty"
my_terminal_alt1 = "cool-retro-term"
my_terminal_alt2 = "extraterm"
my_terminal_alt3 = "urxvt"
my_terminal_alt4 = f"uxterm -si -fa \"{my_font}\""

#my_editor = cfg_dir + "/scripts/run/run-emacs.sh"
my_editor = "emacsclient -a '' -c"
my_editor_alt = "neovide --multigrid"
#my_editor_alt = "vscodium"
# my_editor_alt = "notepadqq"
my_editor_alt1 = "emacs"
my_editor_alt2 = "kitty zsh -c \'source ~/.zshrc; nvim\'"

my_browser = "nyxt -S"
#my_browser_alt = os.path.expanduser("~/.nix-profile/bin/vivaldi")
my_browser_alt = "firefox-developer-edition"
#my_browser_alt = "vivaldi"
#my_browser_alt = "vivaldi-stable"
my_browser_alt1 = "firedragon"
my_browser_alt2 = "qutebrowser"
# my_browser_alt2 = "min"
# my_browser_alt2 = "luakit"
my_browser_alt3 = "brave-beta"
my_private_browser = "nyxt --data-profile nosave"
#my_private_browser_alt = "vivaldi --incognito"
my_private_browser_alt = "vivaldi-stable --incognito"
my_browser_profile_menu = rofi_dir + "/nyxt_profile_menu/nyxt_profile_menu.sh"

my_file_manager = "pcmanfm"
my_file_manager_alt = "filezilla"
my_file_manager_alt1 = "thunar"

#my_mp = "deadbeef"
#my_mp = "kawaii-player"
#my_mp = "lollypop"
#my_mp = "celluloid"
my_mp = rofi_dir + "/mpvselect/mpvselect.sh"
my_mp_alt = rofi_dir + "/ytfzf/ytfzf.sh --savefile"
my_mp_alt1 = rofi_dir + "/ytfzf/ytfzf.sh"
#my_mp_alt1 = rofi_dir + "/notflix/notflix.sh"
my_mp_alt2 = "freetube"
#my_mp_alt = "motionbox"
#my_mp_alt = "freetube"
#my_mp_alt = "vlc"
#my_mp_private = rofi_dir + "/mpvselect/mpvselect.sh"
my_mp_private = rofi_dir + "/mpvselect/mpvselect.sh --nosave"

my_package_manager = "pamac-manager"
my_package_manager_alt = "pamac-manager"
my_calculator = "qalculate-gtk"
my_calculator_alt = "qalculate-gtk"
my_control_panel = my_terminal + " -e btop"
#my_control_panel = "kitty -e btop"
my_control_panel_alt = "stacer"
my_audio_mixer = my_terminal + " -e pulsemixer"
my_audio_mixer_alt = "easyeffects"
my_audio_mixer_alt1 = my_terminal + " -e alsamixer"

# Menus (Rofi Scripts, etc...)
my_launcher = rofi_dir + "/launcher/launcher.sh"
my_launcher_alt = "jgmenu"
my_clipmenu = rofi_dir + "/clipmenu/clipmenu.sh"
my_clipmenu_alt = "copyq toggle"
my_powermenu = rofi_dir + "/powermenu/powermenu.sh"
my_handy = rofi_dir + "/handy/handy.sh"
my_window_pager = rofi_dir + "/window/window.sh"
my_player_ctrl = rofi_dir + "/player/player.sh"
my_workspaces = rofi_dir + "/workspaces/workspaces.sh"
my_emoji = rofi_dir + "/emoji/emoji.sh"
my_window_killer = f"{my_terminal} -e xkill"


# ---------- #
# -- Keys -- #
# ---------- #
left = "Left"
right = "Right"
up = "Up"
down = "Down"
sup = "mod4"
alt = "mod1"
ctrl = "control"
shift = "shift"
space = "space"
ret = "Return"
tab = "Tab"
grave = "grave"
semicolon = "semicolon"
apostrophe = "apostrophe"
period = "period"
minus = "minus"
equal = "equal"
# quote = "quoteright"

monitors = get_cmd_output(my_get_monitors_cmd).split("\n")
#gdkdsp = Gdk.Screen.get_default()
#monitors = [gdkdsp.get_monitor_plug_name(i) for i in range(gdkdsp.get_n_monitors())]


def take_screenshot(cmd="scrot", cwd=my_screenshots_dir):
    """Run the screenshot tool inside *cwd*, creating the directory if needed."""
    if not os.path.isdir(cwd):
        os.makedirs(cwd)
    run_cmd(cmd, cwd)


def run_keysboard(start=True):
    """Start (or tear down) the keysboard tmux status pipe."""
    if start:
        run_cmd(cfg_dir + "/scripts/run/run-keysboard.sh")
    else:
        run_cmd('tmux kill-session -t keysboard-bar; rm -f /tmp/tmux-bar-keysboard-pipe')


def run_kmonad(start=True):
    """Start (or tear down) the kmonad tmux status pipe."""
    if start:
        run_cmd(cfg_dir + "/scripts/run/run-kmonad.sh")
    else:
        run_cmd('tmux kill-session -t kmonad-bar; rm -f /tmp/tmux-bar-kmonad-pipe')


#def run_plank(start=True):
#    if start:
#        for _ in monitors:
#            run_cmd(f"plank")
#    else:
#        run_cmd("killall -q plank")


# ------------------------------ #
# -- Binds & Functions Galore -- #
# ------------------------------ #
# Group names are per-screen: "<screen index>:<base name>".
def get_full_group_name(screen_name, base_name):
    return f"{screen_name}:{base_name}"

def get_full_group_names_for_screen(i):
    return [get_full_group_name(i, g) for g in my_base_groups]

def get_current_screen_index(qtile):
    return qtile.screens.index(qtile.current_screen)

def get_screen_by_offset(qtile, offset=1):
    return (get_current_screen_index(qtile) + offset) % len(qtile.screens)

def get_current_group_index(qtile):
    return qtile.groups.index(qtile.current_group)

def get_group_obj_by_name(qtile, g):
    return qtile.groups_map.get(g)

def get_current_group_index_on_current_screen(qtile):
    # NOTE(review): this offset assumes a fixed layout of qtile.groups
    # (scratchpads before per-screen groups) — verify if groups are reordered.
    return get_current_group_index(qtile) - ((len(qtile.screens) * len(my_base_groups)))

def get_group_on_current_screen(qtile, g):
    return get_full_group_name(get_current_screen_index(qtile), g)

def get_group_index_on_current_screen_by_offset(qtile, offset=1):
    return ((get_current_group_index_on_current_screen(qtile) + offset) % len(my_base_groups)) \
        + (len(my_base_groups) * get_current_screen_index(qtile))

def get_group_on_current_screen_by_offset(qtile, offset=1):
    return qtile.groups[get_group_index_on_current_screen_by_offset(qtile, offset)]

def set_screen(qtile, screen, move_focus=True, move_window=True):
    """Focus and/or move the current window to *screen* (an index)."""
    if move_window:
        qtile.current_window.cmd_toscreen(screen)
    if move_focus:
        qtile.cmd_to_screen(screen)

def cycle_screen(qtile, offset=1, move_focus=True, move_window=True):
    set_screen(qtile, get_screen_by_offset(qtile, offset), move_focus, move_window)

def set_current_screen_group(qtile, g, toggle=True):
    if toggle:
        qtile.current_screen.cmd_toggle_group(g)
    else:
        qtile.current_screen.set_group(get_group_obj_by_name(qtile, g))

def set_current_screen_group_on_current_screen(qtile, g, toggle=True):
    set_current_screen_group(qtile, get_group_on_current_screen(qtile, g), toggle)

def set_current_screen_group_on_current_screen_no_toggle(qtile, g):
    set_current_screen_group_on_current_screen(qtile, g, toggle=False)

def set_current_screen_group_by_offset(qtile, offset=1):
    set_current_screen_group(qtile, get_group_on_current_screen_by_offset(qtile, offset).name)

def send_current_win_to_group(qtile, g, switch_group=True):
    qtile.current_window.togroup(g, switch_group=switch_group)

def send_current_win_to_group_on_current_screen_switch(qtile, g):
    send_current_win_to_group(qtile, get_group_on_current_screen(qtile, g))

def send_current_win_to_group_on_current_screen_noswitch(qtile, g):
    send_current_win_to_group(qtile, get_group_on_current_screen(qtile, g), False)

def win_cycle_group_next_switch(qtile):
    send_current_win_to_group(qtile, get_group_on_current_screen_by_offset(qtile).name, switch_group=True)

def win_cycle_group_prev_switch(qtile):
    send_current_win_to_group(qtile, get_group_on_current_screen_by_offset(qtile, -1).name, switch_group=True)

def win_cycle_group_next_noswitch(qtile):
    send_current_win_to_group(qtile, get_group_on_current_screen_by_offset(qtile).name, switch_group=False)

def win_cycle_group_prev_noswitch(qtile):
    send_current_win_to_group(qtile, get_group_on_current_screen_by_offset(qtile, -1).name, switch_group=False)

def cycle_screen_next(qtile):
    cycle_screen(qtile, 1, True, False)

def cycle_screen_prev(qtile):
    cycle_screen(qtile, -1, True, False)

def cycle_group_next(qtile):
    set_current_screen_group_by_offset(qtile)

def cycle_group_prev(qtile):
    set_current_screen_group_by_offset(qtile, -1)

def win_cycle_screen_next_switch(qtile):
    cycle_screen(qtile, 1, True, True)

def win_cycle_screen_prev_switch(qtile):
    cycle_screen(qtile, -1, True, True)

def win_cycle_screen_next_noswitch(qtile):
    cycle_screen(qtile, 1, False, True)

def win_cycle_screen_prev_noswitch(qtile):
    cycle_screen(qtile, -1, False, True)


# ----------
keys = [
    # Menus
    Key([sup], space, lazy.spawn(my_launcher)),
    Key([sup, shift], space, lazy.spawn(my_launcher_alt)),
    Key([sup], tab, lazy.spawn(my_window_pager)),
    Key([sup, shift], tab, lazy.run_extension(
        extension.WindowList(
            foreground=fg_color,
            background=bg_color,
            selected_foreground=fg_txt_color,
            selected_background=bg_txt_color
        )
    )),
    Key([sup], "v", lazy.spawn(my_clipmenu)),
    Key([sup, shift], "v", lazy.spawn(my_clipmenu_alt)),
    Key([sup], "q", lazy.spawn(my_powermenu)),
    Key([sup], "p", lazy.spawn(my_player_ctrl)),
    Key([sup], "y", lazy.spawn(my_workspaces)),
    Key([sup], "r", lazy.spawn(my_handy)),
    Key([sup], "i", lazy.spawn(my_emoji)),

    # Window / Layout Management
    Key([sup], "f", lazy.window.toggle_fullscreen()),
    Key([sup], "t", lazy.window.toggle_floating()),
    Key([sup], "F4", lazy.window.kill()),
    Key([sup, shift], "F4", lazy.spawn(my_window_killer)),
    Key([sup, shift], "q", lazy.window.kill()),
    Key([sup], "j", lazy.layout.down()),
    Key([sup], "k", lazy.layout.up()),
    Key([sup], "h", lazy.layout.left()),
    Key([sup], "l", lazy.layout.right()),
    Key([sup, alt], "j", lazy.layout.shuffle_down()),
    Key([sup, alt], "k", lazy.layout.shuffle_up()),
    Key([sup, alt], "h", lazy.layout.shuffle_left()),
    Key([sup, alt], "l", lazy.layout.shuffle_right()),
    Key([sup, shift], "j", lazy.layout.grow_down()),
    Key([sup, shift], "k", lazy.layout.grow_up()),
    Key([sup, shift], "h", lazy.layout.grow_left()),
    Key([sup, shift], "l", lazy.layout.grow_right()),
    Key([sup, alt, shift], "h", lazy.layout.swap_column_left()),
    Key([sup, alt, shift], "l", lazy.layout.swap_column_right()),
    Key([sup], "g", lazy.layout.toggle_split()),
    Key([sup, shift], "g", lazy.layout.normalize()),
    # (commented-out MonadTall-style alternatives removed for clarity)

    # Groups
    # Key([sup], "n", lazy.screen.prev_group()),
    # Key([sup], "m", lazy.screen.next_group()),
    Key([sup], "n", lazy.function(cycle_group_prev)),
    Key([sup], "m", lazy.function(cycle_group_next)),
    Key([sup, alt], "n", lazy.function(win_cycle_group_prev_switch)),
    Key([sup, alt], "m", lazy.function(win_cycle_group_next_switch)),
    Key([sup, shift, alt], "n", lazy.function(win_cycle_group_prev_noswitch)),
    Key([sup, shift, alt], "m", lazy.function(win_cycle_group_next_noswitch)),
    Key([sup, ctrl], "n", lazy.function(cycle_screen_prev)),
    Key([sup, ctrl], "m", lazy.function(cycle_screen_next)),
    Key([sup, ctrl, alt], "n", lazy.function(win_cycle_screen_prev_switch)),
    Key([sup, ctrl, alt], "m", lazy.function(win_cycle_screen_next_switch)),
    Key([sup, shift, ctrl, alt], "n", lazy.function(win_cycle_screen_prev_noswitch)),
    Key([sup, shift, ctrl, alt], "m", lazy.function(win_cycle_screen_next_noswitch)),

    # WM Cmds
    Key([sup, shift], "r", lazy.restart()),
    Key([sup, shift, ctrl, alt], "q", lazy.shutdown()),

    # Mouse Emulation
    Key([sup, ctrl], "h", lazy.spawn(my_mouse_move_cmd + f"-{my_mouse_move_dist} 0")),
    Key([sup, ctrl], "j", lazy.spawn(my_mouse_move_cmd + f"0 {my_mouse_move_dist}")),
    Key([sup, ctrl], "k", lazy.spawn(my_mouse_move_cmd + f"0 -{my_mouse_move_dist}")),
    Key([sup, ctrl], "l", lazy.spawn(my_mouse_move_cmd + f"{my_mouse_move_dist} 0")),
    Key([sup, ctrl], "a", lazy.spawn(my_mouse_click_cmd + "1")),  # LC
    Key([sup, ctrl], "d", lazy.spawn(my_mouse_click_cmd + "3")),  # RC
    Key([sup, ctrl], "x", lazy.spawn(my_mouse_click_cmd + "2")),  # MC
    Key([sup, ctrl], "s", lazy.spawn(my_mouse_click_cmd + "5")),  # WU
    Key([sup, ctrl], "w", lazy.spawn(my_mouse_click_cmd + "4")),  # WD

    # Apps
    Key([sup], period, lazy.spawn(my_audio_mixer)),
    Key([sup, shift], period, lazy.spawn(my_audio_mixer_alt)),
    KeyChord([sup, ctrl, shift], period, [
        Key([], period, lazy.spawn(my_audio_mixer)),
        Key([shift], period, lazy.spawn(my_audio_mixer_alt)),
        Key([], "1", lazy.spawn(my_audio_mixer_alt1)),
    ]),
    #Key([sup], apostrophe, lazy.function(exec_func_no_qtile, run_keysboard, [True])),
    Key([sup], apostrophe, lazy.function(exec_func_no_qtile, run_kmonad, [True])),
    #Key([sup, shift], apostrophe, lazy.function(exec_func_no_qtile, run_keysboard, [False])),
    Key([sup, shift], apostrophe, lazy.function(exec_func_no_qtile, run_kmonad, [False])),
    Key([sup], ret, lazy.spawn(my_terminal)),
    Key([sup, shift], ret, lazy.spawn(my_terminal_alt)),
    KeyChord([sup, ctrl, shift], ret, [
        Key([], ret, lazy.spawn(my_terminal)),
        Key([shift], ret, lazy.spawn(my_terminal_alt)),
        Key([], "1", lazy.spawn(my_terminal_alt1)),
        Key([], "2", lazy.spawn(my_terminal_alt2)),
        Key([], "3", lazy.spawn(my_terminal_alt3)),
        Key([], "4", lazy.spawn(my_terminal_alt4)),
    ]),
    Key([sup], "w", lazy.spawn(my_editor)),
    Key([sup, shift], "w", lazy.spawn(my_editor_alt)),
    KeyChord([sup, ctrl, shift], "w", [
        Key([], "w", lazy.spawn(my_editor)),
        Key([shift], "w", lazy.spawn(my_editor_alt)),
        Key([], "1", lazy.spawn(my_editor_alt1)),
        Key([], "2", lazy.spawn(my_editor_alt2)),
    ]),
    Key([sup], "b", lazy.spawn(my_browser)),
    Key([sup, alt], "b", lazy.spawn(my_browser_profile_menu)),
    Key([sup, shift], "b", lazy.spawn(my_browser_alt)),
    KeyChord([sup, ctrl, shift], "b", [
        Key([], "b", lazy.spawn(my_browser)),
        Key([shift], "b", lazy.spawn(my_browser_alt)),
        Key([], "1", lazy.spawn(my_browser_alt1)),
        Key([], "2", lazy.spawn(my_browser_alt2)),
        Key([], "3", lazy.spawn(my_browser_alt3)),
    ]),
    Key([sup, ctrl], "b", lazy.spawn(my_private_browser)),
    Key([sup, ctrl, alt], "b", lazy.spawn(my_private_browser_alt)),
    Key([sup], "e", lazy.spawn(my_file_manager)),
    Key([sup, shift], "e", lazy.spawn(my_file_manager_alt)),
    KeyChord([sup, ctrl, shift], "e", [
        Key([], "e", lazy.spawn(my_file_manager)),
        Key([shift], "e", lazy.spawn(my_file_manager_alt)),
        Key([], "1", lazy.spawn(my_file_manager_alt1)),
    ]),
    Key([sup], "x", lazy.spawn(my_mp)),
    Key([sup, alt], "x", lazy.spawn(my_mp_private)),
    Key([sup, shift], "x", lazy.spawn(my_mp_alt)),
    KeyChord([sup, ctrl, shift], "x", [
        Key([], "x", lazy.spawn(my_mp)),
        Key([shift], "x", lazy.spawn(my_mp_alt)),
        Key([], "1", lazy.spawn(my_mp_alt1)),
        Key([], "2", lazy.spawn(my_mp_alt2)),
    ]),
    Key([sup], "s", lazy.spawn(my_package_manager)),
    Key([sup, shift], "s", lazy.spawn(my_package_manager_alt)),
    Key([sup], "c", lazy.spawn(my_calculator)),
    Key([sup, shift], "c", lazy.spawn(my_calculator_alt)),
    Key([sup], semicolon, lazy.spawn(my_control_panel)),
    Key([sup, shift], semicolon, lazy.spawn(my_control_panel_alt)),

    # DropDown
    KeyChord([sup], "d", [
        Key([], ret, lazy.group['main-scratchpad'].dropdown_toggle('term')),
        Key([], 'x', lazy.group['main-scratchpad'].dropdown_toggle('media')),
    ]),

    # System
    # Key([sup, shift, ctrl], "F11", lazy.spawn("sudo hibernate-reboot")),
    # Key([sup, shift, ctrl], "F12", lazy.spawn("systemctl hibernate")),
    # BUG FIX: exec_func_no_qtile requires an args sequence; it was missing here
    # and every press of Print raised a TypeError.
    Key([], "Print", lazy.function(exec_func_no_qtile, take_screenshot, [])),

    # Special Keys
    Key([], 'XF86AudioRaiseVolume', lazy.spawn('amixer sset Master 1%+')),
    Key([], 'XF86AudioLowerVolume', lazy.spawn('amixer sset Master 1%-')),
    Key([shift], 'XF86AudioRaiseVolume', lazy.spawn('amixer sset Master 1%+')),
    Key([shift], 'XF86AudioLowerVolume', lazy.spawn('amixer sset Master 1%-')),
    Key([], 'XF86AudioMute', lazy.spawn('amixer sset Master toggle')),
    Key([], 'XF86AudioPause', lazy.spawn('playerctl play-pause')),
    Key([], 'XF86AudioPlay', lazy.spawn('playerctl play-pause')),
    Key([ctrl], 'XF86AudioPause', lazy.spawn('playerctl -a play-pause')),
    Key([ctrl], 'XF86AudioPlay', lazy.spawn('playerctl -a play-pause')),
    Key([], 'XF86AudioNext', lazy.spawn('playerctl position 1+')),
    Key([], 'XF86AudioPrev', lazy.spawn('playerctl position 1-')),
    Key([shift], 'XF86AudioNext', lazy.spawn('playerctl position 1+')),
    Key([shift], 'XF86AudioPrev', lazy.spawn('playerctl position 1-')),
    Key([], 'XF86MonBrightnessUp', lazy.spawn('brightnessctl set 1%+')),
    Key([], 'XF86MonBrightnessDown', lazy.spawn('brightnessctl set 1%-')),
]

# Bind each base group to a number-row key; "~", "-" and "=" need remapping
# to their X keysym names before they can be bound.
for i, g in enumerate(my_base_groups):
    g_key = g
    # NOTE(review): the `!= "1"` guard mirrors the original logic — confirm intent.
    if g_key != "1" and g_key not in get_keys():
        if i == 0:
            g_key = grave
        elif i == 11:
            g_key = minus
        elif i == 12:
            g_key = equal
    if g_key in get_keys():
        keys.extend(
            [
                Key([sup], g_key,
                    lazy.function(set_current_screen_group_on_current_screen, g)),
                Key([sup, shift], g_key,
                    lazy.function(set_current_screen_group_on_current_screen_no_toggle, g)),
                Key([sup, alt], g_key,
                    lazy.function(send_current_win_to_group_on_current_screen_switch, g)),
                Key([sup, shift, alt], g_key,
                    lazy.function(send_current_win_to_group_on_current_screen_noswitch, g)),
            ]
        )

mouse = [
    # Drag([sup], "Button1", lazy.window.set_position(),
    #     start=lazy.window.get_position()),
    # Drag([sup], "Button3", lazy.window.set_size(),
    #     start=lazy.window.get_size()),
    Drag([sup], "Button1", lazy.window.set_position_floating(),
         start=lazy.window.get_position()),
    Drag([sup], "Button3", lazy.window.set_size_floating(),
         start=lazy.window.get_size()),
]


# -------------------------------- #
# -- Widgets & Screens & Groups -- #
# -------------------------------- #
widget_defaults = dict(
    font=my_font,
    fontsize=14,
    padding=2,
    margin=my_margin,
    background=[dark_bg_color, dark_bg_color],
    foreground=[fg_txt_color, fg_txt_color],
    graph_color=[fg_txt_color, fg_txt_color],
    fill_color=[bg_txt_color, bg_txt_color],
)


class DividerWidget(widget.TextBox):
    """A TextBox rendering a padded separator glyph between bar widgets."""
    def __init__(self, div_mid="|", div_padding_left=1, div_padding_right=1, **config):
        super().__init__(f"{' ' * div_padding_left}{div_mid}{' ' * div_padding_right}", **config)


class FileReaderWidget(widget_base.ThreadPoolText):
    """Bar text widget that periodically shows the last line of a file."""
    def __init__(self, msg_base="", empty_msg="No Data", read_file="", **config):
        self.msg_base = msg_base      # prefix prepended to the file line
        self.empty_msg = empty_msg    # shown when the file is missing/empty
        self.read_file = read_file    # path of the file to tail
        widget_base.ThreadPoolText.__init__(self, "", **config)

    def poll(self):
        msg = ""
        try:
            if os.path.isfile(self.read_file):
                # the with-block closes the file; the old explicit f.close() was redundant
                with open(self.read_file, 'r') as f:
                    lines = f.readlines()
                if len(lines) > 0:
                    msg = str(lines[-1])
        except Exception as e:
            msg = f"Error: {e}"
        if msg == "":
            msg = self.empty_msg
        return self.msg_base + msg


class OpenWidgetBox(widget.WidgetBox):
    """WidgetBox that opens itself automatically once it is configured."""
    def __init__(self, _widgets: list[widget_base._Widget] | None = None, **config):
        super().__init__(_widgets=_widgets, **config)
        Thread(target=self.wait_open, daemon=True).start()

    def wait_open(self):
        if not self.box_is_open:
            # busy-wait until qtile has finished configuring the widget
            while not self.configured:
                sleep(0.1)
            self.cmd_toggle()


class ColorGmailChecker(widget.GmailChecker):
    """GmailChecker that recolors its text depending on the unseen count."""
    def __init__(self, clear_foreground=green_color, unseen_foreground=red_color, **config):
        super().__init__(**config)
        self.clear_foreground = clear_foreground
        self.unseen_foreground = unseen_foreground

    def poll(self):
        self.gmail = imaplib.IMAP4_SSL("imap.gmail.com")
        self.gmail.login(self.username, self.password)
        answer, raw_data = self.gmail.status(self.email_path, "(MESSAGES UNSEEN)")
        if answer == "OK":
            dec = raw_data[0].decode()
            messages = int(re.search(r"MESSAGES\s+(\d+)", dec).group(1))
            unseen = int(re.search(r"UNSEEN\s+(\d+)", dec).group(1))
            self.foreground = self.clear_foreground if unseen == 0 else self.unseen_foreground
            if self.status_only_unseen:
                return self.display_fmt.format(unseen)
            return self.display_fmt.format(messages, unseen)
        self.foreground = self.unseen_foreground
        qtile.logger.exception(
            "GmailChecker UNKNOWN error, answer: %s, raw_data: %s",
            answer, raw_data
        )
        return "UNKNOWN ERROR"


def get_sys_stat_widgets():
    """Widgets for the collapsible CPU/memory/network stats box."""
    return [
        widget.Spacer(length=5),
        widget.TextBox("cpu:"),
        widget.CPUGraph(
            width=30,
            border_width=1,
            border_color=dark_bg_color,
            frequency=5,
            line_width=1,
            samples=50,
        ),
        widget.TextBox("mem:"),
        widget.MemoryGraph(
            width=30,
            border_width=1,
            border_color=dark_bg_color,
            line_width=1,
            frequency=5,
        ),
        widget.Memory(
            measure_mem="G",
            measure_swap="G",
        ),
        widget.Spacer(length=15),
        widget.TextBox("net:"),
        widget.Net(
            format='{down} ↓↑ {up}',
            padding=0
        ),
    ]


def get_widgets_1(i):
    """Top-bar widgets for screen *i*; the systray only appears on my_systray_screen."""
    widgets = [
        widget.Spacer(length=15),
        widget.TextBox(
            fontsize=16,
            fmt='﩯',
            mouse_callbacks={'Button1': lambda: qtile.cmd_spawn(my_launcher)},
        ),
        DividerWidget(),
        OpenWidgetBox(
            widgets=[
                widget.GroupBox(
                    border_width=my_border_width,
                    disable_drag=True,
                    rounded=True,
                    active=[fg_txt_color, fg_txt_color],
                    inactive=[bg_txt_color, bg_txt_color],
                    highlight_method="line",
                    this_current_screen_border=fg_line_color_alt,
                    this_screen_border=bg_line_color_alt,
                    highlight_color=[fg_color, fg_color],
                    visible_groups=get_full_group_names_for_screen(i),
                    spacing=0,
                ),
            ],
        ),
        DividerWidget(),
        widget.Spacer(),
        widget.Systray(icon_size=24),
        widget.Spacer(),
        DividerWidget(),
        widget.Clock(
            format='%a %b %d %Y, %I:%M:%S',
        ),
        DividerWidget(),
        OpenWidgetBox(
            widgets=[
                widget.CheckUpdates(
                    distro=my_distro,
                    custom_command=my_check_updates_cmd,
                    no_update_string="",
                    colour_no_updates=green_color,
                    colour_have_updates=red_color,
                ),
                widget.Spacer(length=5),
                widget.Canto(),
                widget.Spacer(length=5),
                ColorGmailChecker(
                    username=my_gmail_username,
                    password=my_gmail_pass,
                ),
            ]
        ),
        DividerWidget(),
        widget.CapsNumLockIndicator(
            frequency=0.1,
        ),
        DividerWidget(),
        widget.WidgetBox(widgets=get_sys_stat_widgets()),
        DividerWidget(),
        widget.TextBox(
            fmt='',
            mouse_callbacks={'Button1': lambda: qtile.cmd_spawn('playerctl position 2-')},
        ),
        widget.Spacer(length=7),
        widget.TextBox(
            fmt='',
            mouse_callbacks={'Button1': lambda: qtile.cmd_spawn('playerctl position 2+')},
        ),
        widget.Spacer(length=7),
        widget.TextBox(
            fmt='',
            mouse_callbacks={
                'Button1': lambda: qtile.cmd_spawn('playerctl -a pause'),
                'Button3': lambda: qtile.cmd_spawn('playerctl play'),
            },
        ),
        widget.Spacer(length=7),
        widget.TextBox("vol:"),
        widget.Volume(update_interval=0.1, step=1),
        # widget.CurrentLayoutIcon(scale=0.70),
        DividerWidget(),
        widget.TextBox(
            fontsize=16,
            fmt='',
            mouse_callbacks={'Button1': lambda: qtile.cmd_spawn(my_powermenu)},
        ),
        widget.Spacer(length=15),
    ]
    if i != my_systray_screen:
        # BUG FIX: the original removed items while iterating the same list;
        # build a filtered copy instead.
        widgets = [w for w in widgets if not isinstance(w, widget.Systray)]
    return widgets


def get_widgets_2(i):
    """Bottom-bar widgets for screen *i*: task list plus the kmonad status pipe."""
    widgets = [
        DividerWidget(),
        widget.TaskList(
            border=fg_line_color,
            unfocused_border=bg_line_color,
            rounded=True,
        ),
        DividerWidget(),
        FileReaderWidget(
            #read_file="/tmp/tmux-bar-keysboard-pipe",
            #msg_base="Keysboard: ",
            # BUG FIX: the keyword was `file=`, but FileReaderWidget's
            # parameter is `read_file=`, so the pipe was never read.
            read_file="/tmp/tmux-bar-kmonad-pipe",
            msg_base="Kmonad: ",
            margin_y=4,
            padding_y=4,
            update_interval=0.3,
            mouse_callbacks={
                #'Button1': lambda: run_keysboard(True),
                #'Button3': lambda: run_keysboard(False),
                'Button1': lambda: run_kmonad(True),
                'Button3': lambda: run_kmonad(False),
            },
        ),
        DividerWidget(),
    ]
    return widgets


groups = [
    ScratchPad(
        "main-scratchpad",
        [
            DropDown("term", my_terminal, opacity=0.8),
            DropDown("media", my_mp, opacity=1.0),
        ]
    )
]

screens = []

# Collect candidate wallpaper images.
img_fmts = (".png", ".jpeg", ".jpg")
if os.path.isfile(my_wallpapers) and my_wallpapers.endswith(img_fmts):
    wallpapers = [my_wallpapers]
elif os.path.isdir(my_wallpapers):
    wallpapers = []
    for f in os.listdir(my_wallpapers):
        img = my_wallpapers + f"/{f}"
        # BUG FIX: the hidden-file check tested the full path (img) instead of
        # the filename (f), so dotfiles were never filtered out.
        if not f.startswith(".") and f.endswith(img_fmts) and os.path.isfile(img):
            wallpapers.append(img)
else:
    wallpapers = []

# One Screen (with top and bottom bars) plus a full set of groups per monitor.
i = 0
for monitor in monitors:
    if len(monitor) > 0 and monitor != "\n":
        if len(wallpapers) > 0:
            wallpaper = choice(wallpapers)
        else:
            wallpaper = None
        screens.append(
            Screen(
                top=bar.Bar(get_widgets_1(i), 30, background=bg_color,
                            border_color=bg_line_color, border_width=my_border_width),
                bottom=bar.Bar(get_widgets_2(i), 30, background=bg_color,
                               border_color=bg_line_color, border_width=my_border_width),
                wallpaper=wallpaper,
                wallpaper_mode="stretch",
            )
        )
        for g in get_full_group_names_for_screen(i):
            groups.append(Group(g))
        m_key = str(i)
        if m_key in get_keys():
            keys.extend(
                [
                    Key([sup, ctrl], m_key, lazy.function(set_screen, i, True, False)),
                    Key([sup, ctrl, alt], m_key, lazy.function(set_screen, i, True, True)),
                ]
            )
        i += 1


# ---------- #
# -- Vars -- #
# ---------- #
#dgroups_key_binder = None
#dgroups_app_rules = []
#extentions = []
reconfigure_screens = True
follow_mouse_focus = False
bring_front_click = True
cursor_warp = False
auto_fullscreen = True
focus_on_window_activation = "smart" wmname = "LG3D" # --------------------- # # -- Layouts & Hooks -- # # --------------------- # layouts = [ layout.Columns( border_normal=bg_line_color, border_focus=fg_line_color, border_normal_stack=bg_line_color, border_focus_stack=fg_line_color, border_on_single=True, border_width=my_border_width, margin=my_margin, num_columns=2, ratio=0.70, ) ] floating_layout = layout.Floating( float_rules=[ *layout.Floating.default_float_rules, Match(wm_class='confirmreset'), # gitk Match(wm_class='makebranch'), # gitk Match(wm_class='maketag'), # gitk Match(wm_class='ssh-askpass'), # ssh-askpass Match(title='branchdialog'), # gitk Match(title='pinentry'), # GPG key password entry ], border_normal=bg_line_color_alt, border_focus=fg_line_color_alt, border_width=my_border_width, ) dont_auto_float_rules = [] @hook.subscribe.client_new def floating_dialogs_hook(window): dialog = window.window.get_wm_type() == 'dialog' transient = window.window.get_wm_transient_for() allowed = all(c not in dont_auto_float_rules for c in window.window.get_wm_class()) if allowed and (dialog or transient): window.floating = True @hook.subscribe.screen_change def screen_change_hook(qtile): run_cmd(cfg_dir + "scripts/run/run-monitors.sh") @hook.subscribe.startup_complete def autostart_hook(): run_cmd(cfg_dir + "/autostart.sh")
chat.py
# Usage: python chat.py 8000 http://192.168.0.2:8001
# Grid color: same as the Atom editor background (40, 44, 52).
from bottle import run, get, post, view, redirect, request
import requests, bottle, json, threading, time, sys
import pygame

# Globals shared across threads (pygame window, HTTP handlers, gossip loops).
myGlobalList = []          # movement commands queued by the keyboard handler
flagClosedWindos = False   # set True once the pygame window is closed


def initGame():
    """Run the pygame event loop, queueing movement commands until quit."""
    myScreen = pygame.display.set_mode((128 * 6, 72 * 6))
    pygame.display.set_caption("PytronTF")

    # Each accepted key maps to the single-character command it enqueues.
    key_commands = {
        pygame.K_DOWN: 'd', pygame.K_UP: 'u',
        pygame.K_LEFT: 'l', pygame.K_RIGHT: 'r',
        pygame.K_w: 'u', pygame.K_s: 'd',
        pygame.K_a: 'l', pygame.K_d: 'r',
    }

    runGameFlag = True
    while runGameFlag:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                runGameFlag = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    myGlobalList.append('Escape')
                    runGameFlag = False
                elif event.key in key_commands:
                    myGlobalList.append(key_commands[event.key])
        time.sleep(0.2)

    pygame.display.quit()
    pygame.quit()
    global flagClosedWindos
    flagClosedWindos = True


def showList():
    """Debug helper: periodically print the queued commands."""
    while not flagClosedWindos:
        print(myGlobalList, flagClosedWindos)
        time.sleep(1)


# --- Command-line arguments ---
try:
    port_ = int(sys.argv[1])
except:
    print("Please inform a valid port.\n")
    exit(0)

try:
    peers = sys.argv[2:]
    print(peers)
except:
    print("Please inform a valid list of peers.\n")
    exit(0)

history = []  # chat messages as [name, msg] pairs


@get('/')
@view('chat.html')
def chat():
    return {'messages': history}


@get('/peers')
def peers_endpoint():
    # BUG FIX: this handler and the /history one were both named `index`,
    # so the second definition shadowed the first at module level.
    return json.dumps(peers)


@get('/history')
def history_endpoint():
    return json.dumps(history)


@get('/send_message')
@view('send_message.html')
def send_message():
    return


@post('/write_message')
def add_message():
    """Append a posted message to the local history and go back to the chat."""
    name = request.forms.get('name')
    msg = request.forms.get('msg')
    history.append([name, msg])
    # BUG FIX: removed a leftover debug `input()` call that blocked this
    # HTTP handler waiting for console input on every posted message.
    redirect('/')


def get_peers():
    """Gossip loop: learn new peers from every known peer."""
    while not flagClosedWindos:
        for peer in peers:
            try:
                new_peers = requests.get(peer + "/peers")
                new_peers = json.loads(new_peers.text)
                for np in new_peers:
                    if np not in peers:
                        peers.append(np)
            except:
                pass  # best-effort: unreachable peers are retried next round
        time.sleep(1)


def receive_msg():
    """Gossip loop: merge message history from every known peer."""
    while not flagClosedWindos:
        for peer in peers:
            try:
                new_h = requests.get(peer + "/history")
                new_h = json.loads(new_h.text)
                for m in new_h:
                    if m not in history:
                        history.append(m)
            except:
                pass  # best-effort: unreachable peers are retried next round
        time.sleep(1)


t_peers = threading.Thread(target=get_peers)
t_chat = threading.Thread(target=receive_msg)
t_gameWind = threading.Thread(target=initGame)
t_show = threading.Thread(target=showList)
t_peers.start()
t_chat.start()
t_gameWind.start()
t_show.start()

# TODO(review): host is hard-coded to one machine's LAN address; consider
# '0.0.0.0' to listen on all interfaces.
run(host='172.20.84.113', port=port_)
dbench.py
#!/usr/bin/env python import os import sys import pdb ROOT = os.path.dirname( os.path.dirname( os.path.abspath(__file__))) sys.path.append(ROOT) from dynomite import Client from optparse import OptionParser from threading import Thread from Queue import Queue from time import time from random import choice ports = [11222] def main(): rq = Queue() results = {'requests': 0, 'get': [], 'put': []} options, junk = opts() workers = [] clients = int(options.clients) if clients > 1: for i in range(0, clients): t = Thread(target=run, args=(int(options.number), rq, int(options.keysize), int(options.valuesize))) workers.append(t) for w in workers: w.start() for w in workers: w.join() consolidate(rq.get(), results) print ".", else: try: consolidate(run(int(options.number), None, int(options.keysize), int(options.valuesize)), results) except: pdb.post_mortem(sys.exc_info()[2]) # return total_time = 0.0 for i in results['get']: total_time += i for i in results['put']: total_time += i print print "%s client(s) %s request(s) %f0.3s" % (options.clients, options.number, total_time) g = results['get'] g.sort() p = results['put'] p.sort() print "get avg: %f0.3ms mean: %f0.3ms 99.9: %f0.3ms" % ( (sum(g) / float(len(g))) * 1000, (g[len(g)/2]) * 1000, (g[int(len(g) * .999) -1]) * 1000) print "put avg: %f0.3ms mean: %f0.3ms 99.9: %f0.3ms" % ( (sum(p) / float(len(p))) * 1000, (p[len(p)/2]) * 1000, (p[int(len(p) * .999) -1]) * 1000) def run(num, rq, ks, vs): res = {'requests': 0, 'get': [], 'put': []} keys = "abcdefghijklmnop" client = Client('localhost', choice(ports)) client.connect() for i in range(0, num): tk = 0.0 key = ''.join([choice(keys) for i in range(0, ks)]) st = time() cur = client.get(key) if cur is None: context = '' else: context = cur[0] tk += time() - st res['get'].append(tk) newval = rval(vs) st = time() client.put(key, newval, context) tk += time() - st res['requests'] += 1 res['put'].append(tk) if rq is not None: rq.put(res) else: return res def consolidate(res, 
results): results['requests'] += res['requests'] results['get'].extend(res['get']) results['put'].extend(res['put']) def opts(): parser = OptionParser() parser.add_option('-n', '--number', dest='number', default='10', action='store', help='Number of requests per client') parser.add_option('-c', '--concurrency', '--clients', default='1', dest='clients', action='store', help='Number of concurrent clients') parser.add_option('-k', '--keysize', default='1', dest='keysize', action='store', help='Length of each key') parser.add_option('-v', '--valuesize', default='1024', dest='valuesize', action='store', help='Length of each value') return parser.parse_args() def rval(bsize=1024): b = [] for i in range(0, bsize): b.append(choice("abcdefghijklmnopqrstuvwxyz0123456789")) return ''.join(b) if __name__ == '__main__': main()
bridgeClient.py
#! /usr/bin/python import socket import threading from time import sleep import sys, os import inputbox import pygame from bridgeSprites import Button class userInterfaceWindow(): def __init__(self, screen): self.screen = screen self.clients = [] self.userName = inputbox.ask(screen, "Type your name ") self.buttonColor = (200,20,20) self.buttonSize = (200,50) self.buttonPos = (50,50) self.myButton = Button(self.buttonPos, self.buttonSize, self.buttonColor, self.userName) self.brokenError = False def lobby(self, clients): self.screen.fill(-1) self.buttonList = [ self.myButton ] i = 1 for client in clients: newButtonPos = (self.buttonPos[0], self.buttonPos[1] + 70*i) newUserName = client.split(";")[0] if self.userName == newUserName: continue self.buttonList.append( Button(newButtonPos, self.buttonSize, self.buttonColor, newUserName) ) i += 1 for button in self.buttonList: button.draw(self.screen) for event in pygame.event.get(): if event.type == pygame.QUIT: self.brokenError = True pygame.display.update() #pygame.time.Clock().tick(30) def askToPlay(self): mouseDownPos, mouseUpPos = None, None buttonDowned = None self.waitingForAns = False self.switch = True while True: pygame.event.clear() ev = pygame.event.wait() #print pygame.event.event_name(ev.type) mouseDownPos = None mouseUpPos = None if ev.type == pygame.KEYDOWN and ev.key == pygame.K_ESCAPE or ev.type == pygame.QUIT: break elif ev.type == pygame.MOUSEBUTTONDOWN: mouseDownPos = pygame.mouse.get_pos() elif ev.type == pygame.MOUSEBUTTONUP: mouseUpPos = pygame.mouse.get_pos() if not self.waitingForAns: isMousePressed = pygame.mouse.get_pressed()[0] for button in self.buttonList: xBdry = (button.pos[0], button.pos[0] + button.rect[2]) yBdry = (button.pos[1], button.pos[1] + button.rect[3]) if mouseDownPos: isInBdry = (xBdry[0] <= mouseDownPos[0] < xBdry[1]) and (yBdry[0] <= mouseDownPos[1] < yBdry[1]) if isMousePressed: if not buttonDowned and isInBdry: buttonDowned = button elif buttonDowned == button and not 
isInBdry: buttonDowned = None else: buttonDowned = None if mouseUpPos: isInBdry = (xBdry[0] <= mouseUpPos[0] < xBdry[1]) and (yBdry[0] <= mouseUpPos[1] < yBdry[1]) if buttonDowned == button and isInBdry: print "Clicked button : " + button.text display_pos = ( button.pos[0]+button.rect[2]+20, button.pos[1] ) inputbox.display_msg_custum(self.screen, display_pos, "Asked '%s' to play. Hang on a sec..." %button.text) buttonDowned = None self.waitingForAns = button.text #else: class bridgeConnection(userInterfaceWindow): def __init__(self, screen): #self.HOST = raw_input("HOST IP : ") self.HOST = "143.248.12.11" self.PORT = 50000 self.DATA_SIZE = 256 # maximum data length which can be sent in once self.myIP = myIPaddress() self.endThread = False self.startGame = False userInterfaceWindow.__init__(self, screen) self.makeConnection() self.sendData("info:connMade:%s;%s"%(self.userName, self.myIP)) self.dataGrave = [] # processed data will be saved here self.dataList = {'cmd':[],'grid':[], 'ask':[], 'pick':[], 'turn':[], 'info':[]} #Sort the type of the data if not self.soc: print "Server is not opened" print "waiting an event..." self.lobby(self.clients) self.askToPlay() def makeConnection(self): # make socket and connect to the server self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.soc.settimeout(5.0) # maximum wating time (seconds) connected = False while not connected: try: print "trying to connect " + self.HOST self.soc.connect( (self.HOST, self.PORT) ) connected = True print "Connected!" 
#soc.settimeout(None) break except socket.timeout: print "Exceeded time limit" connectAgain = raw_input("try again?(y/n)") if connectAgain == "y" or connectAgain == "Y": continue else: return except socket.error: print "Access denied" sleep(1) # [ NOT YET ] if QUIT command is received, call 'sys.exit' self.soc = False return # Threading allows to get data whenever it's delievered self.T = threading.Thread(target = self.receiveData) self.T.start() self.T2 = threading.Thread(target = self.selfConnectedSend) self.T2.start() def sendData(self, data): """ Send data (string type) to the server """ if len(data) <= self.DATA_SIZE: self.soc.send(data.encode('UTF-8')) #print "Data '%s' is sent successfully" %data else: print "Data packet size exceeded!" def receiveData(self): """ Receive data (string type) from the server """ while not self.endThread: try: data = self.soc.recv(self.DATA_SIZE)# receive data whose length <= DATA_SIZE print "raw data is : %s" %data for realData in data.split("^")[:-1]: if "info" in realData: self.dataList['info'].append(realData) elif "ask" in realData: self.dataList['ask'].append(readData[4:]) elif "pick" in realData: self.dataList['pick'].append(readData[5:]) elif "cmd" in realData: self.dataList['cmd'].append(realData[4:]) elif "grid" in realData: self.dataList['grid'].append(data[5:]) except socket.timeout: #print "socket timed out" continue except: print "Connection is lost" break self.dataProcessing() self.soc.close() # disconnect the connection def disconnect(self): self.endThread = True print "joining the thread..." self.T.join() self.T2.join() print "thread is joined" pygame.quit() sys.exit() def dataProcessing(self): # for reading # dataList['info'] part for data in self.dataList['info'][:]: if "info:connList" in data: self.clients = eval(data.split(":")[-1]) self.lobby(self.clients) elif "info:askPlay" in data: self.opponent = data.split(":")[-1].split(";")[0] answer = inputbox.ask(self.screen, "'%s' has asked you to play. 
Accept?(y/n) " %self.opponent) if answer in ["Y", "Yes", "y", "yes"]: self.sendData("info:gameAccept:%s;%s" %(self.opponent, self.userName)) self.sendData("1") self.sendData("0") else: self.opponent = None self.waitingForAns = False self.switch = True self.dataList['info'].remove(data) self.dataGrave.append(data) def selfConnectedSend(self): # for sending # if self.# is changed, send data. while not self.endThread: try: if self.waitingForAns and self.switch: self.sendData("info:askPlay:%s;%s" %(self.userName, self.waitingForAns)) self.switch = False except: pass self.soc.close() def myIPaddress(): try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("gmail.com",80)) myip = s.getsockname()[0] s.close() return myip except: print "Internet disconnected?" return 0 if __name__ == "__main__": client = bridgeConnection(pygame.display.set_mode((600,600))) #client = bridgeConnection(pygame.Surface((600,600))) print "now main" sleep(3) print "end session" client.disconnect()
multi.py
import multiprocessing
import logging
import os
import shutil
import time
import typing

from nbcollection.ci.scanner.utils import find_build_jobs, generate_job_context, run_job_context
from nbcollection.ci.constants import SCANNER_BUILD_LOG_DIR

logger = logging.getLogger(__name__)


def build_artifacts_concurrently(options, jobs, artifact_paths) -> typing.Dict[str, str]:
    """Build every (collection, category) job in parallel worker processes.

    Args:
        options: parsed CLI options; `project_path` and `max_workers` are used.
        jobs: iterable of build jobs exposing `.collection.name` and
            `.category.name`.
        artifact_paths: unused here; kept for interface compatibility.

    NOTE(review): despite the annotation this function always returns None —
    confirm whether a mapping was intended to be returned.
    """
    # Start from a clean build-log directory.
    if os.path.exists(SCANNER_BUILD_LOG_DIR):
        shutil.rmtree(SCANNER_BUILD_LOG_DIR)
    os.makedirs(SCANNER_BUILD_LOG_DIR)

    def _build_category(project_path: str, collection_name: str, category_name: str) -> None:
        # Runs in a child process: build one category with CHANNEL_BUILD set.
        os.environ['CHANNEL_BUILD'] = 'true'
        for job in find_build_jobs(project_path, [collection_name], [category_name]):
            print(job.collection.name, job.category.name)
            print('Creating Job Context: ', job.collection.name, job.category.name)
            job_context = generate_job_context(job)
            print('Running Job Context: ', job.collection.name, job.category.name)
            run_job_context(job_context, False)
        del os.environ['CHANNEL_BUILD']

    job_list = [[job.collection.name, job.category.name] for job in jobs]
    processes = []
    max_workers = options.max_workers
    logger.info(f'Job List: {len(job_list)} - Max Workers: {max_workers}')
    while job_list or processes:
        # Reap workers that have finished.
        for proc in [proc for proc in processes if not proc.is_alive()]:
            processes.remove(proc)

        if len(processes) >= max_workers:
            time.sleep(1)
            continue

        try:
            collection_name, category_name = job_list.pop(0)
        except IndexError:
            # BUG FIX: when all jobs are dispatched but workers are still
            # running, the loop previously `continue`d without sleeping,
            # busy-spinning at 100% CPU until the last worker exited.
            time.sleep(1)
            continue

        logger.info(f'Starting new Build[{collection_name}, {category_name}]')
        proc = multiprocessing.Process(target=_build_category,
                                       args=(options.project_path, collection_name, category_name))
        proc.daemon = True
        proc.start()
        processes.append(proc)
common.py
# Copyright 2021 The Emscripten Authors. All rights reserved. # Emscripten is available under two separate licenses, the MIT license and the # University of Illinois/NCSA Open Source License. Both these licenses can be # found in the LICENSE file. from enum import Enum from functools import wraps from pathlib import Path from subprocess import PIPE, STDOUT from urllib.parse import unquote, unquote_plus from http.server import HTTPServer, SimpleHTTPRequestHandler import contextlib import difflib import hashlib import logging import multiprocessing import os import shlex import shutil import stat import string import subprocess import sys import tempfile import time import webbrowser import unittest import clang_native import jsrun from jsrun import NON_ZERO from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG, EMCONFIGURE, EMCMAKE from tools.shared import EMSCRIPTEN_TEMP_DIR from tools.shared import EM_BUILD_VERBOSE from tools.shared import get_canonical_temp_dir, try_delete, path_from_root from tools.utils import MACOS, WINDOWS from tools import shared, line_endings, building, config logger = logging.getLogger('common') # User can specify an environment variable EMTEST_BROWSER to force the browser # test suite to run using another browser command line than the default system # browser. Setting '0' as the browser disables running a browser (but we still # see tests compile) EMTEST_BROWSER = None EMTEST_DETECT_TEMPFILE_LEAKS = None EMTEST_SAVE_DIR = None # generally js engines are equivalent, testing 1 is enough. 
set this # to force testing on all js engines, good to find js engine bugs EMTEST_ALL_ENGINES = None EMTEST_SKIP_SLOW = None EMTEST_LACKS_NATIVE_CLANG = None EMTEST_VERBOSE = None EMTEST_REBASELINE = None TEST_ROOT = path_from_root('tests') WEBIDL_BINDER = shared.bat_suffix(path_from_root('tools/webidl_binder')) EMBUILDER = shared.bat_suffix(path_from_root('embuilder')) EMMAKE = shared.bat_suffix(path_from_root('emmake')) def delete_contents(pathname): for entry in os.listdir(pathname): try_delete(os.path.join(pathname, entry)) def test_file(*path_components): """Construct a path relative to the emscripten "tests" directory.""" return str(Path(TEST_ROOT, *path_components)) def read_file(*path_components): return Path(*path_components).read_text() def read_binary(*path_components): return Path(*path_components).read_bytes() # checks if browser testing is enabled def has_browser(): return EMTEST_BROWSER != '0' def compiler_for(filename, force_c=False): if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c: return EMXX else: return EMCC # Generic decorator that calls a function named 'condition' on the test class and # skips the test if that function returns true def skip_if(func, condition, explanation='', negate=False): assert callable(func) explanation_str = ' : %s' % explanation if explanation else '' @wraps(func) def decorated(self, *args, **kwargs): choice = self.__getattribute__(condition)() if negate: choice = not choice if choice: self.skipTest(condition + explanation_str) func(self, *args, **kwargs) return decorated def needs_dylink(func): assert callable(func) @wraps(func) def decorated(self): self.check_dylink() return func(self) return decorated def is_slow_test(func): assert callable(func) @wraps(func) def decorated(self, *args, **kwargs): if EMTEST_SKIP_SLOW: return self.skipTest('skipping slow tests') return func(self, *args, **kwargs) return decorated def disabled(note=''): assert not callable(note) return unittest.skip(note) def 
no_mac(note=''): assert not callable(note) if MACOS: return unittest.skip(note) return lambda f: f def no_windows(note=''): assert not callable(note) if WINDOWS: return unittest.skip(note) return lambda f: f def requires_native_clang(func): assert callable(func) def decorated(self, *args, **kwargs): if EMTEST_LACKS_NATIVE_CLANG: return self.skipTest('native clang tests are disabled') return func(self, *args, **kwargs) return decorated def require_node(func): assert callable(func) def decorated(self, *args, **kwargs): self.require_node() return func(self, *args, **kwargs) return decorated def require_v8(func): assert callable(func) def decorated(self, *args, **kwargs): self.require_v8() return func(self, *args, **kwargs) return decorated def node_pthreads(f): def decorated(self): self.set_setting('USE_PTHREADS') self.emcc_args += ['-Wno-pthreads-mem-growth'] if self.get_setting('MINIMAL_RUNTIME'): self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME') self.js_engines = [config.NODE_JS] self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory'] f(self) return decorated @contextlib.contextmanager def env_modify(updates): """A context manager that updates os.environ.""" # This could also be done with mock.patch.dict() but taking a dependency # on the mock library is probably not worth the benefit. 
old_env = os.environ.copy() print("env_modify: " + str(updates)) # Seting a value to None means clear the environment variable clears = [key for key, value in updates.items() if value is None] updates = {key: value for key, value in updates.items() if value is not None} os.environ.update(updates) for key in clears: if key in os.environ: del os.environ[key] try: yield finally: os.environ.clear() os.environ.update(old_env) # Decorator version of env_modify def with_env_modify(updates): def decorated(f): def modified(self): with env_modify(updates): return f(self) return modified return decorated def ensure_dir(dirname): dirname = Path(dirname) dirname.mkdir(parents=True, exist_ok=True) def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000): lines = string.splitlines() for i, line in enumerate(lines): if len(line) > max_line: lines[i] = line[:max_line] + '[..]' if len(lines) > maxlines: lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:] string = '\n'.join(lines) + '\n' if len(string) > maxbytes: string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:] return string def create_file(name, contents, binary=False): name = Path(name) assert not name.is_absolute() if binary: name.write_bytes(contents) else: name.write_text(contents) def make_executable(name): Path(name).chmod(stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC) def parameterized(parameters): """ Mark a test as parameterized. Usage: @parameterized({ 'subtest1': (1, 2, 3), 'subtest2': (4, 5, 6), }) def test_something(self, a, b, c): ... 
# actual test body This is equivalent to defining two tests: def test_something_subtest1(self): # runs test_something(1, 2, 3) def test_something_subtest2(self): # runs test_something(4, 5, 6) """ def decorator(func): func._parameterize = parameters return func return decorator class RunnerMeta(type): @classmethod def make_test(mcs, name, func, suffix, args): """ This is a helper function to create new test functions for each parameterized form. :param name: the original name of the function :param func: the original function that we are parameterizing :param suffix: the suffix to append to the name of the function for this parameterization :param args: the positional arguments to pass to the original function for this parameterization :returns: a tuple of (new_function_name, new_function_object) """ # Create the new test function. It calls the original function with the specified args. # We use @functools.wraps to copy over all the function attributes. @wraps(func) def resulting_test(self): return func(self, *args) # Add suffix to the function name so that it displays correctly. if suffix: resulting_test.__name__ = f'{name}_{suffix}' else: resulting_test.__name__ = name # On python 3, functions have __qualname__ as well. This is a full dot-separated path to the # function. We add the suffix to it as well. resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}' return resulting_test.__name__, resulting_test def __new__(mcs, name, bases, attrs): # This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`. new_attrs = {} for attr_name, value in attrs.items(): # Check if a member of the new class has _parameterize, the tag inserted by @parameterized. if hasattr(value, '_parameterize'): # If it does, we extract the parameterization information, build new test functions. 
for suffix, args in value._parameterize.items(): new_name, func = mcs.make_test(attr_name, value, suffix, args) assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name new_attrs[new_name] = func else: # If not, we just copy it over to new_attrs verbatim. assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name new_attrs[attr_name] = value # We invoke type, the default metaclass, to actually create the new class, with new_attrs. return type.__new__(mcs, name, bases, new_attrs) class RunnerCore(unittest.TestCase, metaclass=RunnerMeta): # default temporary directory settings. set_temp_dir may be called later to # override these temp_dir = TEMP_DIR canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR) # This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc. # Change this to None to get stderr reporting, for debugging purposes stderr_redirect = STDOUT def is_wasm(self): return self.get_setting('WASM') != 0 def check_dylink(self): if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm(): self.skipTest('no dynamic linking with memory growth (without wasm)') if not self.is_wasm(): self.skipTest('no dynamic linking support in wasm2js yet') if '-fsanitize=address' in self.emcc_args: self.skipTest('no dynamic linking support in ASan yet') if '-fsanitize=leak' in self.emcc_args: self.skipTest('no dynamic linking support in LSan yet') def require_v8(self): if not config.V8_ENGINE or config.V8_ENGINE not in config.JS_ENGINES: if 'EMTEST_SKIP_V8' in os.environ: self.skipTest('test requires v8 and EMTEST_SKIP_V8 is set') else: self.fail('d8 required to run this test. 
Use EMTEST_SKIP_V8 to skip') self.js_engines = [config.V8_ENGINE] self.emcc_args.append('-sENVIRONMENT=shell') def require_node(self): if not config.NODE_JS or config.NODE_JS not in config.JS_ENGINES: if 'EMTEST_SKIP_NODE' in os.environ: self.skipTest('test requires node and EMTEST_SKIP_NODE is set') else: self.fail('node required to run this test. Use EMTEST_SKIP_NODE to skip') self.js_engines = [config.NODE_JS] def uses_memory_init_file(self): if self.get_setting('SIDE_MODULE') or (self.is_wasm() and not self.get_setting('WASM2JS')): return False elif '--memory-init-file' in self.emcc_args: return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1]) else: # side modules handle memory differently; binaryen puts the memory in the wasm module opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz')) return opt_supports def set_temp_dir(self, temp_dir): self.temp_dir = temp_dir self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir) # Explicitly set dedicated temporary directory for parallel tests os.environ['EMCC_TEMP_DIR'] = self.temp_dir @classmethod def setUpClass(cls): super().setUpClass() print('(checking sanity from test runner)') # do this after we set env stuff shared.check_sanity(force=True) def setUp(self): super().setUp() self.settings_mods = {} self.emcc_args = ['-Werror'] self.node_args = [] self.v8_args = [] self.env = {} self.temp_files_before_run = [] self.uses_es6 = False self.js_engines = config.JS_ENGINES.copy() self.wasm_engines = config.WASM_ENGINES.copy() self.banned_js_engines = [] self.use_all_engines = EMTEST_ALL_ENGINES if EMTEST_DETECT_TEMPFILE_LEAKS: for root, dirnames, filenames in os.walk(self.temp_dir): for dirname in dirnames: self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname))) for filename in filenames: self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename))) if EMTEST_SAVE_DIR: self.working_dir = os.path.join(self.temp_dir, 
'emscripten_test') if os.path.exists(self.working_dir): if EMTEST_SAVE_DIR == 2: print('Not clearing existing test directory') else: print('Clearing existing test directory') # Even when EMTEST_SAVE_DIR we still try to start with an empty directoy as many tests # expect this. EMTEST_SAVE_DIR=2 can be used to keep the old contents for the new test # run. This can be useful when iterating on a given test with extra files you want to keep # around in the output directory. delete_contents(self.working_dir) else: print('Creating new test output directory') ensure_dir(self.working_dir) else: self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir) os.chdir(self.working_dir) if not EMTEST_SAVE_DIR: self.has_prev_ll = False for temp_file in os.listdir(TEMP_DIR): if temp_file.endswith('.ll'): self.has_prev_ll = True def tearDown(self): if not EMTEST_SAVE_DIR: # rmtree() fails on Windows if the current working directory is inside the tree. os.chdir(os.path.dirname(self.get_dir())) try_delete(self.get_dir()) if EMTEST_DETECT_TEMPFILE_LEAKS and not DEBUG: temp_files_after_run = [] for root, dirnames, filenames in os.walk(self.temp_dir): for dirname in dirnames: temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname))) for filename in filenames: temp_files_after_run.append(os.path.normpath(os.path.join(root, filename))) # Our leak detection will pick up *any* new temp files in the temp dir. # They may not be due to us, but e.g. the browser when running browser # tests. Until we figure out a proper solution, ignore some temp file # names that we see on our CI infrastructure. 
ignorable_file_prefixes = [ '/tmp/tmpaddon', '/tmp/circleci-no-output-timeout', '/tmp/wasmer' ] left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run) left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])] if len(left_over_files): print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr) for f in left_over_files: print('leaked file: ' + f, file=sys.stderr) self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!') def get_setting(self, key, default=None): return self.settings_mods.get(key, default) def set_setting(self, key, value=1): if value is None: self.clear_setting(key) self.settings_mods[key] = value def has_changed_setting(self, key): return key in self.settings_mods def clear_setting(self, key): self.settings_mods.pop(key, None) def serialize_settings(self): ret = [] for key, value in self.settings_mods.items(): if value == 1: ret.append(f'-s{key}') elif type(value) == list: ret.append(f'-s{key}={",".join(value)}') else: ret.append(f'-s{key}={value}') return ret def get_dir(self): return self.working_dir def in_dir(self, *pathelems): return os.path.join(self.get_dir(), *pathelems) def add_pre_run(self, code): create_file('prerun.js', 'Module.preRun = function() { %s }' % code) self.emcc_args += ['--pre-js', 'prerun.js'] def add_post_run(self, code): create_file('postrun.js', 'Module.postRun = function() { %s }' % code) self.emcc_args += ['--pre-js', 'postrun.js'] def add_on_exit(self, code): create_file('onexit.js', 'Module.onExit = function() { %s }' % code) self.emcc_args += ['--pre-js', 'onexit.js'] # returns the full list of arguments to pass to emcc # param @main_file whether this is the main file of the test. 
  # some arguments # (like --pre-js) do not need to be passed when building
  # libraries, for example
  def get_emcc_args(self, main_file=False):
    """Return the emcc flags for this test: serialized -s settings + self.emcc_args.

    When main_file is False (building a support library) the --pre-js/--post-js
    pairs are stripped, since those only apply to the main program link.
    """
    args = self.serialize_settings() + self.emcc_args
    if not main_file:
      for i, arg in enumerate(args):
        if arg in ('--pre-js', '--post-js'):
          # Null out both the flag and its filename argument, then compact.
          args[i] = None
          args[i + 1] = None
      args = [arg for arg in args if arg is not None]
    return args

  def verify_es5(self, filename):
    """Fail the test unless `filename` is valid ES5, as judged by the es-check npm tool."""
    es_check = shared.get_npm_cmd('es-check')
    # use --quiet once its available
    # See: https://github.com/dollarshaveclub/es-check/pull/126/
    es_check_env = os.environ.copy()
    # es-check needs to find node itself, so prepend node's directory to PATH.
    es_check_env['PATH'] = os.path.dirname(config.NODE_JS[0]) + os.pathsep + es_check_env['PATH']
    try:
      shared.run_process(es_check + ['es5', os.path.abspath(filename), '--quiet'], stderr=PIPE, env=es_check_env)
    except subprocess.CalledProcessError as e:
      print(e.stderr)
      self.fail('es-check failed to verify ES5 output compliance')

  # Build JavaScript code from source code
  def build(self, filename, libraries=[], includes=[], force_c=False, js_outfile=True, emcc_args=[]):
    """Compile `filename` with emcc/em++ and return the output filename.

    js_outfile selects a .js vs .wasm output suffix; force_c compiles the
    input as C regardless of extension.  Asserts the output exists and,
    for JS output, optionally verifies ES5 compliance.
    """
    suffix = '.js' if js_outfile else '.wasm'
    compiler = [compiler_for(filename, force_c)]
    if compiler[0] == EMCC:
      # TODO(https://github.com/emscripten-core/emscripten/issues/11121)
      # We link with C++ stdlibs, even when linking with emcc for historical reasons.  We can remove
      # this if this issues is fixed.
      compiler.append('-nostdlib++')
    if force_c:
      compiler.append('-xc')
    dirname, basename = os.path.split(filename)
    output = shared.unsuffixed(basename) + suffix
    cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + emcc_args + libraries
    if shared.suffix(filename) not in ('.i', '.ii'):
      # Add the location of the test file to include path.
      cmd += ['-I.']
      cmd += ['-I' + str(include) for include in includes]
    self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
    self.assertExists(output)
    if js_outfile and not self.uses_es6:
      self.verify_es5(output)
    if js_outfile and self.uses_memory_init_file():
      src = read_file(output)
      # side memory init file, or an empty one in the js
      assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
    return output

  def get_func(self, src, name):
    """Extract the full text of JS function `name` from source string `src`.

    Scans forward from the `function name(` marker, tracking brace depth
    until the opening brace is balanced.  Note: raw '{'/'}' characters in
    string literals inside the function would confuse this simple scan.
    """
    start = src.index('function ' + name + '(')
    t = start
    n = 0
    while True:
      if src[t] == '{':
        n += 1
      elif src[t] == '}':
        n -= 1
        if n == 0:
          return src[start:t + 1]
      t += 1
      assert t < len(src)

  def count_funcs(self, javascript_file):
    """Count `function ` occurrences between the EMSCRIPTEN_START_FUNCS and
    EMSCRIPTEN_END_FUNCS markers of an asm.js-style output file."""
    num_funcs = 0
    start_tok = "// EMSCRIPTEN_START_FUNCS"
    end_tok = "// EMSCRIPTEN_END_FUNCS"
    start_off = 0
    end_off = 0
    js = read_file(javascript_file)
    blob = "".join(js.splitlines())
    start_off = blob.find(start_tok) + len(start_tok)
    end_off = blob.find(end_tok)
    asm_chunk = blob[start_off:end_off]
    num_funcs = asm_chunk.count('function ')
    return num_funcs

  def count_wasm_contents(self, wasm_binary, what):
    """Return the `what` metric (e.g. 'funcs') reported by `wasm-opt --metrics`."""
    out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
    # output is something like
    # [?] : 125
    for line in out.splitlines():
      if '[' + what + ']' in line:
        ret = line.split(':')[1].strip()
        return int(ret)
    self.fail('Failed to find [%s] in wasm-opt output' % what)

  def get_wasm_text(self, wasm_binary):
    """Disassemble a wasm binary to wat text via binaryen's wasm-dis."""
    return self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout

  def is_exported_in_wasm(self, name, wasm):
    """True if symbol `name` appears as an export in the wasm disassembly."""
    wat = self.get_wasm_text(wasm)
    return ('(export "%s"' % name) in wat

  def run_js(self, filename, engine=None, args=[], output_nicerizer=None, assert_returncode=0):
    """Run a compiled JS/wasm file under a JS engine and return its combined output.

    On failure (or in verbose mode) the captured output is printed; a non-zero
    exit fails the test unless assert_returncode is NON_ZERO (expected failure).
    """
    # use files, as PIPE can get too full and hang us
    stdout = self.in_dir('stdout')
    stderr = self.in_dir('stderr')
    error = None
    if not engine:
      engine = self.js_engines[0]
    if engine == config.NODE_JS:
      engine = engine + self.node_args
    if engine == config.V8_ENGINE:
      engine = engine + self.v8_args
    if EMTEST_VERBOSE:
      # NOTE(review): the literal '(unknown)' below looks like it should
      # interpolate {filename} — confirm against upstream before relying on it.
      print(f"Running '(unknown)' under '{shared.shlex_join(engine)}'")
    try:
      # NOTE(review): the file objects passed here are never explicitly closed;
      # presumably acceptable in a short-lived test process — confirm.
      jsrun.run_js(filename, engine, args, stdout=open(stdout, 'w'), stderr=open(stderr, 'w'), assert_returncode=assert_returncode)
    except subprocess.CalledProcessError as e:
      error = e
    # Make sure that we produced proper line endings to the .js file we are about to run.
    if not filename.endswith('.wasm'):
      self.assertEqual(line_endings.check_line_endings(filename), 0)
    out = read_file(stdout)
    err = read_file(stderr)
    if output_nicerizer:
      ret = output_nicerizer(out, err)
    else:
      ret = out + err
    if error or EMTEST_VERBOSE:
      ret = limit_size(ret)
      print('-- begin program output --')
      print(ret, end='')
      print('-- end program output --')
    if error:
      if assert_returncode == NON_ZERO:
        self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
      else:
        self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
    # We should pass all strict mode checks
    self.assertNotContained('strict warning:', ret)
    return ret

  def assertExists(self, filename, msg=None):
    """Assert that a file exists on disk."""
    if not msg:
      msg = 'Expected file not found: ' + filename
    self.assertTrue(os.path.exists(filename), msg)

  def assertNotExists(self, filename, msg=None):
    """Assert that a file does not exist on disk."""
    if not msg:
      msg = 'Unexpected file exists: ' + filename
    self.assertFalse(os.path.exists(filename), msg)

  # Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
  def assertPathsIdentical(self, path1, path2):
    path1 = path1.replace('\\', '/')
    path2 = path2.replace('\\', '/')
    return self.assertIdentical(path1, path2)

  # Tests that the given two multiline text content are identical, modulo line
  # ending differences (\r\n on Windows, \n on Unix).
  def assertTextDataIdentical(self, text1, text2, msg=None, fromfile='expected', tofile='actual'):
    text1 = text1.replace('\r\n', '\n')
    text2 = text2.replace('\r\n', '\n')
    return self.assertIdentical(text1, text2, msg, fromfile, tofile)

  def assertIdentical(self, values, y, msg=None, fromfile='expected', tofile='actual'):
    """Assert that `y` is equal to (one of) `values`; on failure show a unified diff."""
    if type(values) not in (list, tuple):
      values = [values]
    for x in values:
      if x == y:
        return  # success
    # No candidate matched: diff against the last candidate tried (x).
    diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(), fromfile=fromfile, tofile=tofile)
    diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
    if EMTEST_VERBOSE:
      print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
    fail_message = 'Unexpected difference:\n' + limit_size(diff)
    if not EMTEST_VERBOSE:
      fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
    if msg:
      fail_message += '\n' + msg
    self.fail(fail_message)

  def assertTextDataContained(self, text1, text2):
    """Like assertContained but normalizes \\r\\n line endings first."""
    text1 = text1.replace('\r\n', '\n')
    text2 = text2.replace('\r\n', '\n')
    return self.assertContained(text1, text2)

  def assertContained(self, values, string, additional_info=''):
    """Assert that at least one of `values` occurs as a substring of `string`.

    `string` may be a callable for lazy evaluation.  On failure a unified
    diff of the first candidate vs the actual text is shown.
    """
    if type(values) not in [list, tuple]:
      values = [values]
    if callable(string):
      string = string()
    if not any(v in string for v in values):
      diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
      diff = ''.join(a.rstrip() + '\n' for a in diff)
      self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
        limit_size(values[0]), limit_size(string), limit_size(diff),
        additional_info
      ))

  def assertNotContained(self, value, string):
    """Assert that `value` does NOT occur as a substring of `string`."""
    if callable(value):
      value = value()  # lazy loading
    if callable(string):
      string = string()
    if value in string:
      self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
        limit_size(value), limit_size(string),
        limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
      ))

  def assertContainedIf(self, value, string, condition):
    """assertContained when `condition` is truthy, assertNotContained otherwise."""
    if condition:
      self.assertContained(value, string)
    else:
      self.assertNotContained(value, string)

  def assertBinaryEqual(self, file1, file2):
    """Assert two files have identical size and identical bytes."""
    self.assertEqual(os.path.getsize(file1), os.path.getsize(file2))
    self.assertEqual(read_binary(file1), read_binary(file2))

  # In-memory cache of built support libraries, keyed by a name derived from
  # the build flags.  Class-level: shared by all test instances in one run.
  library_cache = {}

  def get_build_dir(self):
    """Return (creating if needed) the 'building' subdirectory of the test dir."""
    ret = os.path.join(self.get_dir(), 'building')
    ensure_dir(ret)
    return ret

  def get_library(self, name, generated_libs, configure=['sh', './configure'],
                  configure_args=[], make=['make'], make_args=None,
                  env_init=None, cache_name_extra='', native=False):
    """Build (or fetch from the in-memory cache) a third-party support library.

    The cache key incorporates the current emcc args and env_init so that
    different configurations do not collide.  Returns the list of generated
    library file paths.
    """
    if env_init is None:
      env_init = {}
    if make_args is None:
      make_args = ['-j', str(shared.get_num_cores())]
    build_dir = self.get_build_dir()
    output_dir = self.get_dir()
    emcc_args = self.get_emcc_args()
    hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
    # Short flags are embedded readably; the rest is folded into an md5 digest.
    cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
    valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
    cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
    if self.library_cache.get(cache_name):
      print('<load %s from cache> ' % cache_name, file=sys.stderr)
      generated_libs = []
      for basename, contents in self.library_cache[cache_name]:
        bc_file = os.path.join(build_dir, cache_name + '_' + basename)
        with open(bc_file, 'wb') as f:
          f.write(contents)
        generated_libs.append(bc_file)
      return generated_libs
    print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
    if configure is not None:
      # Avoid += so we don't mutate the default arg
      configure = configure + configure_args
      cflags = ' '.join(self.get_emcc_args())
      env_init.setdefault('CFLAGS', cflags)
      env_init.setdefault('CXXFLAGS', cflags)
    return build_library(name, build_dir, output_dir, generated_libs, configure,
                         make, make_args, self.library_cache,
                         cache_name, env_init=env_init, native=native)

  def clear(self):
    """Delete the contents of the test directory and the emscripten temp dir."""
    delete_contents(self.get_dir())
    if EMSCRIPTEN_TEMP_DIR:
      delete_contents(EMSCRIPTEN_TEMP_DIR)

  def run_process(self, cmd, check=True, **args):
    # Wrapper around shared.run_process.  This is desirable so that the tests
    # can fail (in the unittest sense) rather than error'ing.
    # In the long run it would nice to completely remove the dependency on
    # core emscripten code (shared.py) here.
    #
    # NOTE(review): when the subprocess fails, self.fail() raises and nothing is
    # returned; the implicit None return is only reachable if fail() is bypassed.
    try:
      return shared.run_process(cmd, check=check, **args)
    except subprocess.CalledProcessError as e:
      if check and e.returncode != 0:
        self.fail('subprocess exited with non-zero return code(%d): `%s`' %
                  (e.returncode, shared.shlex_join(cmd)))

  def emcc(self, filename, args=[], output_filename=None, **kwargs):
    """Compile one file with emcc/em++ to `output_filename` (default: filename + '.o')."""
    if output_filename is None:
      output_filename = filename + '.o'
    try_delete(output_filename)
    self.run_process([compiler_for(filename), filename] + args + ['-o', output_filename], **kwargs)

  # Shared test code between main suite and others

  def expect_fail(self, cmd, **args):
    """Run a subprocess and assert that it returns non-zero.

    Return the stderr of the subprocess.
    """
    proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
    self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
    # When we check for failure we expect a user-visible error, not a traceback.
    # However, on windows a python traceback can happen randomly sometimes,
    # due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
    if not WINDOWS or 'Access is denied' not in proc.stderr:
      self.assertNotContained('Traceback', proc.stderr)
    return proc.stderr

  # excercise dynamic linker.
  #
  # test that linking to shared library B, which is linked to A, loads A as well.
  # main is also linked to C, which is also linked to A. A is loaded/initialized only once.
  #
  #          B
  #   main <   > A
  #          C
  #
  # this test is used by both test_core and test_browser.
  # when run under broswer it excercises how dynamic linker handles concurrency
  # - because B and C are loaded in parallel.
  def _test_dylink_dso_needed(self, do_run):
    """Shared dylink test body: builds side modules A, B, C (B and C both link A),
    then runs a main program linked to B and C, and a second main program that
    dlopen()s them.  `do_run` is the caller-supplied run helper."""
    create_file('liba.cpp', r'''
        #include <stdio.h>
        #include <emscripten.h>

        static const char *afunc_prev;

        extern "C" {
        EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
        }

        void afunc(const char *s) {
          printf("a: %s (prev: %s)\n", s, afunc_prev);
          afunc_prev = s;
        }

        struct ainit {
          ainit() {
            puts("a: loaded");
          }
        };

        static ainit _;
      ''')
    create_file('libb.c', r'''
        #include <emscripten.h>

        void afunc(const char *s);

        EMSCRIPTEN_KEEPALIVE void bfunc() {
          afunc("b");
        }
      ''')
    create_file('libc.c', r'''
        #include <emscripten.h>

        void afunc(const char *s);

        EMSCRIPTEN_KEEPALIVE void cfunc() {
          afunc("c");
        }
      ''')

    # _test_dylink_dso_needed can be potentially called several times by a test.
    # reset dylink-related options first.
    self.clear_setting('MAIN_MODULE')
    self.clear_setting('SIDE_MODULE')

    # XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
    self.set_setting('INITIAL_MEMORY', '32mb')

    # Side-module suffix depends on the output mode.
    so = '.wasm' if self.is_wasm() else '.js'

    # Helper: compile one source file as a SIDE_MODULE, optionally linking to
    # previously-built side modules.
    def ccshared(src, linkto=[]):
      cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so, '-s', 'SIDE_MODULE'] + self.get_emcc_args()
      cmdv += linkto
      self.run_process(cmdv)

    ccshared('liba.cpp')
    ccshared('libb.c', ['liba' + so])
    ccshared('libc.c', ['liba' + so])

    self.set_setting('MAIN_MODULE')
    extra_args = ['-L.', 'libb' + so, 'libc' + so]
    # First variant: main program linked directly against B and C.
    do_run(r'''
      #ifdef __cplusplus
      extern "C" {
      #endif
      void bfunc();
      void cfunc();
      #ifdef __cplusplus
      }
      #endif

      int test_main() {
        bfunc();
        cfunc();
        return 0;
      }
      ''',
           'a: loaded\na: b (prev: (null))\na: c (prev: b)\n', emcc_args=extra_args)

    # Second variant: load B and C at runtime via dlopen; the libraries are
    # embedded in the virtual filesystem.
    for libname in ['liba', 'libb', 'libc']:
      self.emcc_args += ['--embed-file', libname + so]
    do_run(r'''
      #include <assert.h>
      #include <dlfcn.h>
      #include <stddef.h>

      int test_main() {
        void *bdso, *cdso;
        void (*bfunc_ptr)(), (*cfunc_ptr)();

        // FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
        bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
        assert(bdso != NULL);
        cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
        assert(cdso != NULL);

        bfunc_ptr = (void (*)())dlsym(bdso, "bfunc");
        assert(bfunc_ptr != NULL);
        cfunc_ptr = (void (*)())dlsym(cdso, "cfunc");
        assert(cfunc_ptr != NULL);

        bfunc_ptr();
        cfunc_ptr();
        return 0;
      }
    ''' % locals(),
           'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')

  def filtered_js_engines(self, js_engines=None):
    """Return the configured JS engines minus any in self.banned_js_engines."""
    if js_engines is None:
      js_engines = self.js_engines
    for engine in js_engines:
      assert engine in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
      assert type(engine) == list
    for engine in self.banned_js_engines:
      assert type(engine) in (list, type(None))
    # Engines are command lists; ban by the executable (first element).
    banned = [b[0] for b in self.banned_js_engines if b]
    return [engine for engine in js_engines if engine and engine[0] not in banned]

  def do_run(self, src, expected_output, force_c=False, **kwargs):
    """Write `src` to src.c/src.cpp (unless no_build is requested) and build+run it."""
    if 'no_build' in kwargs:
      filename = src
    else:
      if force_c:
        filename = 'src.c'
      else:
        filename = 'src.cpp'
      with open(filename, 'w') as f:
        f.write(src)
    self._build_and_run(filename, expected_output, **kwargs)

  def do_runf(self, filename, expected_output=None, **kwargs):
    """Build and run an existing source file."""
    self._build_and_run(filename, expected_output, **kwargs)

  ## Just like `do_run` but with filename of expected output
  def do_run_from_file(self, filename, expected_output_filename, **kwargs):
    self._build_and_run(filename, read_file(expected_output_filename), **kwargs)

  def do_run_in_out_file_test(self, *path, **kwargs):
    """Build and run a test source, comparing against its sibling `.out` file."""
    srcfile = test_file(*path)
    out_suffix = kwargs.pop('out_suffix', '')
    outfile = shared.unsuffixed(srcfile) + out_suffix + '.out'
    expected = read_file(outfile)
    self._build_and_run(srcfile, expected, **kwargs)

  ## Does a complete test - builds, runs, checks output, etc.
  def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
                     no_build=False, js_engines=None, libraries=[], includes=[],
                     assert_returncode=0, assert_identical=False, assert_all=False,
                     check_for_error=True, force_c=False, emcc_args=[]):
    """Build `filename` (unless no_build), run it under each selected engine,
    and check the output against `expected_output`."""
    # NOTE(review): the literal '(unknown)' below looks like it should
    # interpolate {filename} — confirm against upstream before relying on it.
    logger.debug(f'_build_and_run: (unknown)')
    if no_build:
      js_file = filename
    else:
      self.build(filename, libraries=libraries, includes=includes, force_c=force_c, emcc_args=emcc_args)
      js_file = shared.unsuffixed(os.path.basename(filename)) + '.js'
    self.assertExists(js_file)

    engines = self.filtered_js_engines(js_engines)
    if len(engines) > 1 and not self.use_all_engines:
      engines = engines[:1]
    # In standalone mode, also add wasm vms as we should be able to run there too.
    if self.get_setting('STANDALONE_WASM'):
      # TODO once standalone wasm support is more stable, apply use_all_engines
      # like with js engines, but for now as we bring it up, test in all of them
      if not self.wasm_engines:
        logger.warning('no wasm engine was found to run the standalone part of this test')
      engines += self.wasm_engines
    if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
      # compile the c file to a native executable.
      c = shared.unsuffixed(js_file) + '.wasm.c'
      executable = shared.unsuffixed(js_file) + '.exe'
      cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
      self.run_process(cmd, env=clang_native.get_clang_native_env())
      # we can now run the executable directly, without an engine, which
      # we indicate with None as the engine
      engines += [[None]]
    if len(engines) == 0:
      self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % config.EM_CONFIG)
    for engine in engines:
      js_output = self.run_js(js_file, engine, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
      js_output = js_output.replace('\r\n', '\n')
      if expected_output:
        try:
          if assert_identical:
            self.assertIdentical(expected_output, js_output)
          elif assert_all:
            for o in expected_output:
              self.assertContained(o, js_output)
          else:
            self.assertContained(expected_output, js_output)
            if check_for_error:
              self.assertNotContained('ERROR', js_output)
        except Exception:
          print('(test did not pass in JS engine: %s)' % engine)
          raise

  def get_freetype_library(self):
    """Build (or load from cache) the bundled freetype library."""
    if '-Werror' in self.emcc_args:
      self.emcc_args.remove('-Werror')
    return self.get_library(os.path.join('third_party', 'freetype'),
                            os.path.join('objs', '.libs', 'libfreetype.a'),
                            configure_args=['--disable-shared', '--without-zlib'])

  def get_poppler_library(self, env_init=None):
    """Build (or load from cache) poppler + freetype; returns both libraries' paths."""
    # The fontconfig symbols are all missing from the poppler build
    # e.g. FcConfigSubstitute
    self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)

    self.emcc_args += [
      '-I' + test_file('third_party/freetype/include'),
      '-I' + test_file('third_party/poppler/include')
    ]

    freetype = self.get_freetype_library()

    # Poppler has some pretty glaring warning.  Suppress them to keep the
    # test output readable.
    if '-Werror' in self.emcc_args:
      self.emcc_args.remove('-Werror')
    self.emcc_args += [
      '-Wno-sentinel',
      '-Wno-logical-not-parentheses',
      '-Wno-unused-private-field',
      '-Wno-tautological-compare',
      '-Wno-unknown-pragmas',
    ]
    # Copy so the caller's dict is not mutated; blank out fontconfig probing.
    env_init = env_init.copy() if env_init else {}
    env_init['FONTCONFIG_CFLAGS'] = ' '
    env_init['FONTCONFIG_LIBS'] = ' '

    poppler = self.get_library(
        os.path.join('third_party', 'poppler'),
        [os.path.join('utils', 'pdftoppm.o'),
         os.path.join('utils', 'parseargs.o'),
         os.path.join('poppler', '.libs', 'libpoppler.a')],
        env_init=env_init,
        configure_args=['--disable-libjpeg', '--disable-libpng',
                        '--disable-poppler-qt', '--disable-poppler-qt4',
                        '--disable-cms', '--disable-cairo-output',
                        '--disable-abiword-output', '--disable-shared'])

    return poppler + freetype

  def get_zlib_library(self):
    """Build (or load from cache) zlib; uses cmake on Windows, make elsewhere."""
    if WINDOWS:
      return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
                              configure=['cmake', '.'],
                              make=['cmake', '--build', '.'],
                              make_args=[])
    return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])


# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
  class TestServerHandler(SimpleHTTPRequestHandler):
    # Request header handler for default do_GET() path in
    # SimpleHTTPRequestHandler.do_GET(self) below.
    def send_head(self):
      if self.path.endswith('.js'):
        path = self.translate_path(self.path)
        try:
          f = open(path, 'rb')
        except IOError:
          self.send_error(404, "File not found: " + path)
          return None
        self.send_response(200)
        self.send_header('Content-type', 'application/javascript')
        self.send_header('Connection', 'close')
        self.end_headers()
        return f
      else:
        return SimpleHTTPRequestHandler.send_head(self)

    # Add COOP, COEP, CORP, and no-caching headers
    def end_headers(self):
      self.send_header('Access-Control-Allow-Origin', '*')
      self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
      self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
      self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
      self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
      return SimpleHTTPRequestHandler.end_headers(self)

    def do_GET(self):
      if self.path == '/run_harness':
        # Serve the harness page that will poll /check and open test windows.
        if DEBUG:
          print('[server startup]')
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(read_binary(test_file('browser_harness.html')))
      elif 'report_' in self.path:
        # the test is reporting its result. first change dir away from the
        # test dir, as it will be deleted now that the test is finishing, and
        # if we got a ping at that time, we'd return an error
        os.chdir(path_from_root())
        # for debugging, tests may encode the result and their own url (window.location) as result|url
        if '|' in self.path:
          path, url = self.path.split('|', 1)
        else:
          path = self.path
          url = '?'
        if DEBUG:
          print('[server response:', path, url, ']')
        if out_queue.empty():
          out_queue.put(path)
        else:
          # a badly-behaving test may send multiple xhrs with reported results; we just care
          # about the first (if we queued the others, they might be read as responses for
          # later tests, or maybe the test sends more than one in a racy manner).
          # we place 'None' in the queue here so that the outside knows something went wrong
          # (none is not a valid value otherwise; and we need the outside to know because if we
          # raise an error in here, it is just swallowed in python's webserver code - we want
          # the test to actually fail, which a webserver response can't do).
          out_queue.put(None)
          raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
        self.send_response(200)
        self.send_header('Content-type', 'text/plain')
        self.send_header('Cache-Control', 'no-cache, must-revalidate')
        self.send_header('Connection', 'close')
        self.send_header('Expires', '-1')
        self.end_headers()
        self.wfile.write(b'OK')
      elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
        '''
          To get logging to the console from browser tests, add this to
          print/printErr/the exception handler in src/shell.html:

            var xhr = new XMLHttpRequest();
            xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
            xhr.send();
        '''
        print('[client logging:', unquote_plus(self.path), ']')
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
      elif self.path == '/check':
        # Harness poll: hand out the next test URL if one is queued.
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        if not in_queue.empty():
          # there is a new test ready to be served
          url, dir = in_queue.get()
          if DEBUG:
            print('[queue command:', url, dir, ']')
          assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
          assert out_queue.empty(), 'the single response from the last test was read'
          # tell the browser to load the test
          self.wfile.write(b'COMMAND:' + url.encode('utf-8'))
          # move us to the right place to serve the files for the new test
          os.chdir(dir)
        else:
          # the browser must keep polling
          self.wfile.write(b'(wait)')
      else:
        # Use SimpleHTTPServer default file serving operation for GET.
        if DEBUG:
          print('[simple HTTP serving:', unquote_plus(self.path), ']')
        SimpleHTTPRequestHandler.do_GET(self)

    # NOTE(review): this override is missing `self`; it works only because the
    # bound call's implicit instance lands in `code` and the HTTP status in
    # `size`, both of which are ignored — confirm intent before touching.
    def log_request(code=0, size=0):
      # don't log; too noisy
      pass

  # allows streaming compilation to work
  SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
  httpd = HTTPServer(('localhost', port), TestServerHandler)
  httpd.serve_forever()  # test runner will kill us


class Reporting(Enum):
  """When running browser tests we normally automatically include support
  code for reporting results back to the browser.  This enum allows tests
  to decide what type of support code they need/want.
  """
  NONE = 0
  # Include the JS helpers for reporting results
  JS_ONLY = 1
  # Include C/C++ reporting code (REPORT_RESULT mactros) as well as JS helpers
  FULL = 2


class BrowserCore(RunnerCore):
  # note how many tests hang / do not send an output. if many of these
  # happen, likely something is broken and it is best to abort the test
  # suite early, as otherwise we will wait for the timeout on every
  # single test (hundreds of minutes)
  MAX_UNRESPONSIVE_TESTS = 10
  unresponsive_tests = 0

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)

  @staticmethod
  def browser_open(url):
    """Open `url` in the browser selected by EMTEST_BROWSER (or the system default)."""
    if not EMTEST_BROWSER:
      logger.info('Using default system browser')
      webbrowser.open_new(url)
      return

    browser_args = shlex.split(EMTEST_BROWSER)
    # If the given browser is a scalar, treat it like one of the possible types
    # from https://docs.python.org/2/library/webbrowser.html
    if len(browser_args) == 1:
      try:
        # This throws if the type of browser isn't available
        webbrowser.get(browser_args[0]).open_new(url)
        logger.info('Using Emscripten browser: %s', browser_args[0])
        return
      except webbrowser.Error:
        # Ignore the exception and fallback to the custom command logic
        pass
    # Else assume the given browser is a specific program with additional
    # parameters and delegate to that
    logger.info('Using Emscripten browser: %s', str(browser_args))
    subprocess.Popen(browser_args + [url])

  @classmethod
  def setUpClass(cls):
    """Start the harness HTTP server in a subprocess and open the harness page."""
    super().setUpClass()
    cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
    cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
    if not has_browser():
      return
    cls.browser_timeout = 60
    cls.harness_in_queue = multiprocessing.Queue()
    cls.harness_out_queue = multiprocessing.Queue()
    cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
    cls.harness_server.start()
    print('[Browser harness server on process %d]' % cls.harness_server.pid)
    cls.browser_open('http://localhost:%s/run_harness' % cls.port)

  @classmethod
  def tearDownClass(cls):
    """Shut down the harness server started in setUpClass."""
    super().tearDownClass()
    if not has_browser():
      return
    cls.harness_server.terminate()
    print('[Browser harness server terminated]')
    if WINDOWS:
      # On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
      # WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
      time.sleep(0.1)

  def assert_out_queue_empty(self, who):
    """Drain and fail if the harness result queue unexpectedly has entries."""
    if not self.harness_out_queue.empty():
      while not self.harness_out_queue.empty():
        self.harness_out_queue.get()
      raise Exception('excessive responses from %s' % who)

  # @param extra_tries: how many more times to try this test, if it fails. browser tests have
  #                     many more causes of flakiness (in particular, they do not run
  #                     synchronously, so we have a timeout, which can be hit if the VM
  #                     we run on stalls temporarily), so we let each test try more than
  #                     once by default
  def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
    if not has_browser():
      return
    if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
      self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
    self.assert_out_queue_empty('previous test')
    if DEBUG:
      print('[browser launch:', html_file, ']')
    if expectedResult is not None:
      try:
        # Queue the test URL for the harness page to open.
        self.harness_in_queue.put((
          'http://localhost:%s/%s' % (self.port, html_file),
          self.get_dir()
        ))
        received_output = False
        output = '[no http server activity]'
        start = time.time()
        if timeout is None:
          timeout = self.browser_timeout
        # Poll for the reported result until the timeout expires.
        while time.time() - start < timeout:
          if not self.harness_out_queue.empty():
            output = self.harness_out_queue.get()
            received_output = True
            break
          time.sleep(0.1)
        if not received_output:
          BrowserCore.unresponsive_tests += 1
          print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
        if output is None:
          # the browser harness reported an error already, and sent a None to tell
          # us to also fail the test
          raise Exception('failing test due to browser harness error')
        if output.startswith('/report_result?skipped:'):
          self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
        else:
          # verify the result, and try again if we should do so
          output = unquote(output)
          try:
            self.assertContained(expectedResult, output)
          except Exception as e:
            if extra_tries > 0:
              print('[test error (see below), automatically retrying]')
              print(e)
              return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
            else:
              raise e
      finally:
        time.sleep(0.1)  # see comment about Windows above
      self.assert_out_queue_empty('this test')
    else:
      # No expected result: interactive mode, a human inspects the page.
      webbrowser.open_new(os.path.abspath(html_file))
      print('A web browser window should have opened a page containing the results of a part of this test.')
      print('You need to manually look at the page to see that it works ok: ' + message)
      print('(sleeping for a bit to keep the directory alive for the web browser..)')
      time.sleep(5)
      print('(moving on..)')

  # @manually_trigger If set, we do not assume we should run the reftest when main() is done.
  #                   Instead, call doReftest() in JS yourself at the right time.
  def reftest(self, expected, manually_trigger=False):
    """Write reftest.js, which compares the canvas against the `expected` PNG
    and reports the average per-pixel difference to the harness server."""
    # make sure the pngs used here have no color correction, using e.g.
    #   pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
    basename = os.path.basename(expected)
    shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
    reporting = read_file(test_file('browser_reporting.js'))
    with open('reftest.js', 'w') as out:
      out.write('''
      function doReftest() {
        if (doReftest.done) return;
        doReftest.done = true;
        var img = new Image();
        img.onload = function() {
          assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
          assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);

          var canvas = document.createElement('canvas');
          canvas.width = img.width;
          canvas.height = img.height;
          var ctx = canvas.getContext('2d');
          ctx.drawImage(img, 0, 0);
          var expected = ctx.getImageData(0, 0, img.width, img.height).data;

          var actualUrl = Module.canvas.toDataURL();
          var actualImage = new Image();
          actualImage.onload = function() {
            /*
            document.body.appendChild(img); // for comparisons
            var div = document.createElement('div');
            div.innerHTML = '^=expected, v=actual';
            document.body.appendChild(div);
            document.body.appendChild(actualImage); // to grab it for creating the test reference
            */

            var actualCanvas = document.createElement('canvas');
            actualCanvas.width = actualImage.width;
            actualCanvas.height = actualImage.height;
            var actualCtx = actualCanvas.getContext('2d');
            actualCtx.drawImage(actualImage, 0, 0);
            var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;

            var total = 0;
            var width = img.width;
            var height = img.height;
            for (var x = 0; x < width; x++) {
              for (var y = 0; y < height; y++) {
                total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
                total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
                total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
              }
            }
            var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing

            // If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
            if (typeof reportResultToServer === 'undefined') {
              (function() {
                %s
                reportResultToServer(wrong);
              })();
            } else {
              reportResultToServer(wrong);
            }
          };
          actualImage.src = actualUrl;
        }
        img.src = '%s';
      };

      // Automatically trigger the reftest?
      if (!%s) {
        // Yes, automatically

        Module['postRun'] = doReftest;

        if (typeof WebGLClient !== 'undefined') {
          // trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
          var realRAF = window.requestAnimationFrame;
          window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
            realRAF(function() {
              func();
              realRAF(doReftest);
            });
          });

          // trigger reftest from canvas render too, for workers not doing GL
          var realWOM = worker.onmessage;
          worker.onmessage = function(event) {
            realWOM(event);
            if (event.data.target === 'canvas' && event.data.op === 'render') {
              realRAF(doReftest);
            }
          };
        }

      } else {
        // Manually trigger the reftest.

        // The user will call it.
        // Add an event loop iteration to ensure rendering, so users don't need to bother.
        var realDoReftest = doReftest;
        doReftest = function() {
          setTimeout(realDoReftest, 1);
        };
      }
''' % (reporting, basename, int(manually_trigger)))

  def compile_btest(self, args, reporting=Reporting.FULL):
    # Inject support code for reporting results. This adds an include a header so testcases can
    # use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
    # contains the implementation of REPORT_RESULT (we can't just include that implementation in
    # the header as there may be multiple files being compiled here).
    args += ['-s', 'IN_TEST_HARNESS']
    if reporting != Reporting.NONE:
      # For basic reporting we inject JS helper funtions to report result back to server.
      args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
               '--pre-js', test_file('browser_reporting.js')]
      if reporting == Reporting.FULL:
        # If C reporting (i.e. REPORT_RESULT macro) is required
        # also compile in report_result.cpp and forice-include report_result.h
        args += ['-I' + TEST_ROOT,
                 '-include', test_file('report_result.h'),
                 test_file('report_result.cpp')]
    self.run_process([EMCC] + self.get_emcc_args() + args)

  def btest_exit(self, filename, assert_returncode=0, *args, **kwargs):
    """Special case of btest that reports its result solely via exiting
    with a give result code.

    In this case we set EXIT_RUNTIME and we don't need to provide the
    REPORT_RESULT macro to the C code.
    """
    self.set_setting('EXIT_RUNTIME')
    kwargs['reporting'] = Reporting.JS_ONLY
    kwargs['expected'] = 'exit:%d' % assert_returncode
    return self.btest(filename, *args, **kwargs)

  def btest(self, filename, expected=None, reference=None, reference_slack=0,
            manual_reference=False, post_build=None, args=None, message='.',
            also_proxied=False, url_suffix='', timeout=None, also_asmjs=False,
            manually_trigger_reftest=False, extra_tries=1,
            reporting=Reporting.FULL):
    """Compile a browser test to test.html, open it via the harness, and check
    the reported result (either an expected string or a reference image)."""
    assert expected or reference, 'a btest must either expect an output, or have a reference image'
    if args is None:
      args = []
    original_args = args.copy()
    if not os.path.exists(filename):
      filename = test_file(filename)
    if reference:
      self.reference = reference
      # Any pixel difference up to reference_slack is accepted as a pass.
      expected = [str(i) for i in range(0, reference_slack + 1)]
      self.reftest(test_file(reference), manually_trigger=manually_trigger_reftest)
      if not manual_reference:
        args += ['--pre-js', 'reftest.js', '-s', 'GL_TESTING']
    outfile = 'test.html'
    args += [filename, '-o', outfile]
    # print('all args:', args)
    try_delete(outfile)
    self.compile_btest(args, reporting=reporting)
    self.assertExists(outfile)
    if post_build:
      post_build()
    if not isinstance(expected, list):
      expected = [expected]
    self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)

    # Tests can opt into being run under asmjs as well
    if 'WASM=0' not in original_args and (also_asmjs or self.also_asmjs):
      print('WASM=0')
      self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
                 original_args + ['-s', 'WASM=0'], message, also_proxied=False, timeout=timeout)

    if also_proxied:
      print('proxied...')
      if reference:
        assert not manual_reference
        manual_reference = True
        assert not post_build
        post_build = self.post_manual_reftest
      # run proxied
      self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
                 original_args + ['--proxy-to-worker', '-s', 'GL_TESTING'], message, timeout=timeout)


###################################################################################################


def build_library(name,
                  build_dir,
                  output_dir,
                  generated_libs,
                  configure=['sh', './configure'],
                  make=['make'],
                  make_args=[],
                  cache=None,
                  cache_name=None,
                  env_init={},
                  native=False):
  """Build a library and cache the result.

  We build the library file once and cache it for all our tests. (We cache in
  memory since the test directory is destroyed and recreated for each test.
  Note that we cache separately for different compilers).  This cache is just
  during the test runner. There is a different concept of caching as well,
  see |Cache|.
  """
  if type(generated_libs) is not list:
    generated_libs = [generated_libs]
  source_dir = test_file(name.replace('_native', ''))

  # Build in a fresh copy of the source tree.
  project_dir = Path(build_dir, name)
  if os.path.exists(project_dir):
    shutil.rmtree(project_dir)
  # Useful in debugging sometimes to comment this out, and two lines above
  shutil.copytree(source_dir, project_dir)

  generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]

  if native:
    env = clang_native.get_clang_native_env()
  else:
    env = os.environ.copy()
  env.update(env_init)

  if configure:
    # Wrap the configure/cmake invocation so it picks up the emscripten toolchain.
    if configure[0] == 'cmake':
      configure = [EMCMAKE] + configure
    else:
      configure = [EMCONFIGURE] + configure
    try:
      with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
        with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
          stdout = out if EM_BUILD_VERBOSE < 2 else None
          stderr = err if EM_BUILD_VERBOSE < 1 else None
          shared.run_process(configure, env=env, stdout=stdout, stderr=stderr,
                             cwd=project_dir)
    except subprocess.CalledProcessError:
      print('-- configure stdout --')
      print(read_file(Path(project_dir, 'configure_out')))
      print('-- end configure stdout --')
      print('-- configure stderr --')
      print(read_file(Path(project_dir, 'configure_err')))
      print('-- end configure stderr --')
      raise
    # if we run configure or cmake we don't then need any kind
    # of special env when we run make below
    env = None

  def open_make_out(mode='r'):
    return open(os.path.join(project_dir, 'make.out'), mode)

  def open_make_err(mode='r'):
    return open(os.path.join(project_dir, 'make.err'), mode)

  if EM_BUILD_VERBOSE >= 3:
    # NOTE(review): `+=` mutates the default `make_args` list in place, so
    # 'VERBOSE=1' leaks into later calls that rely on the default — a classic
    # mutable-default-argument pitfall; confirm and consider `make_args = make_args + [...]`.
    make_args += ['VERBOSE=1']

  try:
    with open_make_out('w') as make_out:
      with open_make_err('w') as make_err:
        stdout = make_out if EM_BUILD_VERBOSE < 2 else None
        stderr = make_err if EM_BUILD_VERBOSE < 1 else None
        shared.run_process(make + make_args, stdout=stdout, stderr=stderr,
                           env=env, cwd=project_dir)
  except subprocess.CalledProcessError:
    with open_make_out() as f:
      print('-- make stdout --')
      print(f.read())
      print('-- end make stdout --')
    with open_make_err() as f:
      print('-- make stderr --')
      print(f.read())
      print('-- end stderr --')
    raise

  if cache is not None:
    # Store (basename, bytes) pairs so get_library() can rehydrate them later.
    cache[cache_name] = []
    for f in generated_libs:
      basename = os.path.basename(f)
      cache[cache_name].append((basename, read_binary(f)))

  return generated_libs
test_writer.py
"""Tests for the ddtrace trace writers (``AgentWriter`` and ``LogWriter``).

Covers metrics reporting, drop-reason accounting, the keep-rate moving
average, and the HTTP/UDS transport behavior against small in-process
test servers.
"""
import os
import socket
import tempfile
import threading
import time

import mock
import msgpack
import pytest
from six.moves import BaseHTTPServer
from six.moves import socketserver

from ddtrace.constants import KEEP_SPANS_RATE_KEY
from ddtrace.internal.compat import PY3
from ddtrace.internal.compat import get_connection_response
from ddtrace.internal.compat import httplib
from ddtrace.internal.uds import UDSHTTPConnection
from ddtrace.internal.writer import AgentWriter
from ddtrace.internal.writer import LogWriter
from ddtrace.internal.writer import Response
from ddtrace.internal.writer import _human_size
from ddtrace.span import Span
from tests.utils import AnyInt
from tests.utils import BaseTestCase
from tests.utils import override_env


class DummyOutput:
    """File-like sink that records every written message."""

    def __init__(self):
        self.entries = []

    def write(self, message):
        self.entries.append(message)

    def flush(self):
        pass


class AgentWriterTests(BaseTestCase):
    N_TRACES = 11

    def test_metrics_disabled(self):
        # With report_metrics=False no statsd calls should ever be made.
        statsd = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.stop()
        writer.join()

        statsd.increment.assert_not_called()
        statsd.distribution.assert_not_called()

    def test_metrics_bad_endpoint(self):
        statsd = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.stop()
        writer.join()

        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
                mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )

    def test_metrics_trace_too_big(self):
        statsd = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        # One oversized trace that cannot fit in the buffer.
        writer.write(
            [Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
        )
        writer.stop()
        writer.join()

        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
                mock.call("datadog.tracer.buffer.dropped.traces", 1, tags=["reason:t_too_big"]),
                mock.call("datadog.tracer.buffer.dropped.bytes", AnyInt(), tags=["reason:t_too_big"]),
                mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )

    def test_metrics_multi(self):
        # Metrics must be emitted both on explicit flush and on stop.
        statsd = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.flush_queue()
        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
                mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )

        statsd.reset_mock()

        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.stop()
        writer.join()

        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
                mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )

    def test_write_sync(self):
        statsd = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True, sync_mode=True)
        writer.write([Span(tracer=None, name="name", trace_id=1, span_id=j, parent_id=j - 1 or None) for j in range(5)])
        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 1, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 5, tags=[]),
                mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )

    def test_drop_reason_bad_endpoint(self):
        statsd = mock.Mock()
        writer_metrics_reset = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
        writer._metrics_reset = writer_metrics_reset
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.stop()
        writer.join()

        writer_metrics_reset.assert_called_once()

        assert 1 == writer._metrics["http.errors"]["count"]
        assert 10 == writer._metrics["http.dropped.traces"]["count"]

    def test_drop_reason_trace_too_big(self):
        statsd = mock.Mock()
        writer_metrics_reset = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
        writer._metrics_reset = writer_metrics_reset
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.write(
            [Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
        )
        writer.stop()
        writer.join()

        writer_metrics_reset.assert_called_once()

        assert 1 == writer._metrics["buffer.dropped.traces"]["count"]
        assert ["reason:t_too_big"] == writer._metrics["buffer.dropped.traces"]["tags"]

    def test_drop_reason_buffer_full(self):
        statsd = mock.Mock()
        writer_metrics_reset = mock.Mock()
        # Small buffer so the final write overflows it.
        writer = AgentWriter(agent_url="http://asdf:1234", buffer_size=5300, dogstatsd=statsd, report_metrics=False)
        writer._metrics_reset = writer_metrics_reset
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.write([Span(tracer=None, name="a", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
        writer.stop()
        writer.join()

        writer_metrics_reset.assert_called_once()

        assert 1 == writer._metrics["buffer.dropped.traces"]["count"]
        assert ["reason:full"] == writer._metrics["buffer.dropped.traces"]["tags"]

    def test_drop_reason_encoding_error(self):
        n_traces = 10
        statsd = mock.Mock()
        writer_encoder = mock.Mock()
        writer_encoder.__len__ = (lambda *args: n_traces).__get__(writer_encoder)
        writer_metrics_reset = mock.Mock()
        writer_encoder.encode.side_effect = Exception
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
        writer._encoder = writer_encoder
        writer._metrics_reset = writer_metrics_reset
        for i in range(n_traces):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )

        writer.stop()
        writer.join()

        writer_metrics_reset.assert_called_once()

        assert 10 == writer._metrics["encoder.dropped.traces"]["count"]

    def test_keep_rate(self):
        statsd = mock.Mock()
        writer_run_periodic = mock.Mock()
        writer_put = mock.Mock()
        writer_put.return_value = Response(status=200)
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
        writer.run_periodic = writer_run_periodic
        writer._put = writer_put

        traces = [
            [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            for i in range(4)
        ]

        traces_too_big = [
            [Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
            for i in range(4)
        ]

        # 1. We write 4 traces successfully.
        for trace in traces:
            writer.write(trace)
        writer.flush_queue()

        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # No previous drops.
        assert 0.0 == writer._drop_sma.get()
        # 4 traces written.
        assert 4 == len(payload)
        # 100% of traces kept (refers to the past).
        # No traces sent before now so 100% kept.
        for trace in payload:
            assert 1.0 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)

        # 2. We fail to write 4 traces because of size limitation.
        for trace in traces_too_big:
            writer.write(trace)
        writer.flush_queue()

        # 50% of traces were dropped historically.
        # 4 successfully written before and 4 dropped now.
        assert 0.5 == writer._drop_sma.get()
        # put not called since no new traces are available.
        writer_put.assert_called_once()

        # 3. We write 2 traces successfully.
        for trace in traces[:2]:
            writer.write(trace)
        writer.flush_queue()

        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # 40% of traces were dropped historically.
        assert 0.4 == writer._drop_sma.get()
        # 2 traces written.
        assert 2 == len(payload)
        # 50% of traces kept (refers to the past).
        # We had 4 successfully written and 4 dropped.
        for trace in payload:
            assert 0.5 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)

        # 4. We write 1 trace successfully and fail to write 3.
        writer.write(traces[0])
        for trace in traces_too_big[:3]:
            writer.write(trace)
        writer.flush_queue()

        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # 50% of traces were dropped historically.
        assert 0.5 == writer._drop_sma.get()
        # 1 trace written.
        assert 1 == len(payload)
        # 60% of traces kept (refers to the past).
        # We had 4 successfully written, then 4 dropped, then 2 written.
        for trace in payload:
            assert 0.6 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)


class LogWriterTests(BaseTestCase):
    N_TRACES = 11

    def create_writer(self):
        self.output = DummyOutput()
        writer = LogWriter(out=self.output)
        for i in range(self.N_TRACES):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(7)]
            )
        return writer

    def test_log_writer(self):
        self.create_writer()
        self.assertEqual(len(self.output.entries), self.N_TRACES)


def test_humansize():
    assert _human_size(0) == "0B"
    assert _human_size(999) == "999B"
    assert _human_size(1000) == "1KB"
    assert _human_size(10000) == "10KB"
    assert _human_size(100000) == "100KB"
    assert _human_size(1000000) == "1MB"
    assert _human_size(10000000) == "10MB"
    assert _human_size(1000000000) == "1GB"


class _BaseHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    error_message_format = "%(message)s\n"
    error_content_type = "text/plain"

    @staticmethod
    def log_message(format, *args):  # noqa: A002
        pass


class _APIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    expected_path_prefix = None

    def do_PUT(self):
        if self.expected_path_prefix is not None:
            assert self.path.startswith(self.expected_path_prefix)
        self.send_error(200, "OK")


class _TimeoutAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    def do_PUT(self):
        # This server sleeps longer than our timeout
        time.sleep(5)


class _ResetAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    def do_PUT(self):
        return


_HOST = "0.0.0.0"
_PORT = 8743
_TIMEOUT_PORT = _PORT + 1
_RESET_PORT = _TIMEOUT_PORT + 1


class UDSHTTPServer(socketserver.UnixStreamServer, BaseHTTPServer.HTTPServer):
    def server_bind(self):
        BaseHTTPServer.HTTPServer.server_bind(self)


def _make_uds_server(path, request_handler):
    server = UDSHTTPServer(path, request_handler)
    t = threading.Thread(target=server.serve_forever)
    # Set daemon just in case something fails
    t.daemon = True
    t.start()

    # Wait for the server to start
    resp = None
    while resp != 200:
        conn = UDSHTTPConnection(server.server_address, _HOST, 2019)
        try:
            conn.request("PUT", "/")
            resp = get_connection_response(conn).status
        finally:
            conn.close()
        time.sleep(0.01)

    return server, t


@pytest.fixture
def endpoint_uds_server():
    # NOTE: tempfile.mktemp() is deprecated and racy; derive the socket path
    # from a freshly created private directory instead.
    tmpdir = tempfile.mkdtemp()
    socket_name = os.path.join(tmpdir, "uds_test_socket")
    handler = _APIEndpointRequestHandlerTest
    server, thread = _make_uds_server(socket_name, handler)
    handler.expected_path_prefix = "/v0."
    try:
        yield server
    finally:
        handler.expected_path_prefix = None
        server.shutdown()
        thread.join()
        os.unlink(socket_name)
        os.rmdir(tmpdir)


def _make_server(port, request_handler):
    server = BaseHTTPServer.HTTPServer((_HOST, port), request_handler)
    t = threading.Thread(target=server.serve_forever)
    # Set daemon just in case something fails
    t.daemon = True
    t.start()
    return server, t


@pytest.fixture(scope="module")
def endpoint_test_timeout_server():
    server, thread = _make_server(_TIMEOUT_PORT, _TimeoutAPIEndpointRequestHandlerTest)
    try:
        yield thread
    finally:
        server.shutdown()
        thread.join()


@pytest.fixture(scope="module")
def endpoint_test_reset_server():
    server, thread = _make_server(_RESET_PORT, _ResetAPIEndpointRequestHandlerTest)
    try:
        yield thread
    finally:
        server.shutdown()
        thread.join()


@pytest.fixture
def endpoint_assert_path():
    handler = _APIEndpointRequestHandlerTest
    server, thread = _make_server(_PORT, handler)

    def configure(expected_path_prefix=None):
        handler.expected_path_prefix = expected_path_prefix
        return thread

    try:
        yield configure
    finally:
        handler.expected_path_prefix = None
        server.shutdown()
        thread.join()


def test_agent_url_path(endpoint_assert_path):
    # test without base path
    endpoint_assert_path("/v0.")
    writer = AgentWriter(agent_url="http://%s:%s/" % (_HOST, _PORT))
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)

    # test without base path nor trailing slash
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _PORT))
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)

    # test with a base path
    endpoint_assert_path("/test/v0.")
    writer = AgentWriter(agent_url="http://%s:%s/test/" % (_HOST, _PORT))
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)


def test_flush_connection_timeout_connect():
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, 2019))
    if PY3:
        exc_type = OSError
    else:
        exc_type = socket.error
    with pytest.raises(exc_type):
        writer._encoder.put([Span(None, "foobar")])
        writer.flush_queue(raise_exc=True)


def test_flush_connection_timeout(endpoint_test_timeout_server):
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _TIMEOUT_PORT))
    with pytest.raises(socket.timeout):
        writer._encoder.put([Span(None, "foobar")])
        writer.flush_queue(raise_exc=True)


def test_flush_connection_reset(endpoint_test_reset_server):
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _RESET_PORT))
    if PY3:
        exc_types = (httplib.BadStatusLine, ConnectionResetError)
    else:
        exc_types = (httplib.BadStatusLine,)
    with pytest.raises(exc_types):
        writer._encoder.put([Span(None, "foobar")])
        writer.flush_queue(raise_exc=True)


def test_flush_connection_uds(endpoint_uds_server):
    writer = AgentWriter(agent_url="unix://%s" % endpoint_uds_server.server_address)
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)


def test_flush_queue_raise():
    writer = AgentWriter(agent_url="http://dne:1234")

    # Should not raise
    writer.write([])
    writer.flush_queue(raise_exc=False)

    error = OSError if PY3 else IOError
    with pytest.raises(error):
        writer.write([])
        writer.flush_queue(raise_exc=True)


def test_racing_start():
    writer = AgentWriter(agent_url="http://dne:1234")

    def do_write(i):
        writer.write([Span(None, str(i))])

    ts = [threading.Thread(target=do_write, args=(i,)) for i in range(100)]
    for t in ts:
        t.start()

    for t in ts:
        t.join()

    assert len(writer._encoder) == 100


def test_additional_headers():
    with override_env(dict(_DD_TRACE_WRITER_ADDITIONAL_HEADERS="additional-header:additional-value,header2:value2")):
        writer = AgentWriter(agent_url="http://localhost:9126")
        assert writer._headers["additional-header"] == "additional-value"
        assert writer._headers["header2"] == "value2"
# --- file boundary: pydoc.py ---
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.

In the Python interpreter, do "from pydoc import help" to provide online
help.  Calling help(thing) on a Python object documents the object.

Or, at the shell command line outside of Python:

Run "pydoc <name>" to show documentation on something.  <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package.  If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.

Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.

Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.

For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.

Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".

Module docs for core modules are assumed to be in

    http://docs.python.org/X.Y/library/

This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""

__all__ = ['help']
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"

__version__ = "$Revision$"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""

# Known bugs that can't be fixed here:
#   - imp.load_module() cannot be prevented from clobbering existing
#     loaded modules, so calling synopsis() on a binary module file
#     changes the contents of any existing module with the same name.
#   - If the __file__ attribute on a module is a relative path and
#     the current directory is changed with os.chdir(), an incorrect
#     path will be displayed.

import sys
import imp
import os
import re
import inspect
import builtins
import pkgutil
from reprlib import Repr
from traceback import extract_tb as _extract_tb
from collections import deque

# --------------------------------------------------------- common routines

def pathdirs():
    """Convert sys.path into a list of absolute, existing, unique paths."""
    result = []
    seen = set()  # normcased paths already emitted
    for entry in sys.path:
        path = os.path.abspath(entry or '.')
        key = os.path.normcase(path)
        if key not in seen and os.path.isdir(path):
            result.append(path)
            seen.add(key)
    return result

def getdoc(object):
    """Get the doc string or comments for an object."""
    text = inspect.getdoc(object) or inspect.getcomments(object)
    if not text:
        return ''
    # Strip a single leading blank line, then trailing whitespace.
    return re.sub('^ *\n', '', text.rstrip()) or ''

def splitdoc(doc):
    """Split a doc string into a synopsis line (if any) and the rest."""
    lines = doc.strip().split('\n')
    if len(lines) == 1:
        return lines[0], ''
    if len(lines) >= 2 and not lines[1].rstrip():
        # First line followed by a blank line: classic synopsis layout.
        return lines[0], '\n'.join(lines[2:])
    return '', '\n'.join(lines)

def classname(object, modname):
    """Get a class name and qualify it with a module name if necessary."""
    name = object.__name__
    if object.__module__ != modname:
        name = '%s.%s' % (object.__module__, name)
    return name
+ name return name def isdata(object): """Check if an object is of a type that probably means it's data.""" return not (inspect.ismodule(object) or inspect.isclass(object) or inspect.isroutine(object) or inspect.isframe(object) or inspect.istraceback(object) or inspect.iscode(object)) def replace(text, *pairs): """Do a series of global replacements on a string.""" while pairs: text = pairs[1].join(text.split(pairs[0])) pairs = pairs[2:] return text def cram(text, maxlen): """Omit part of a string if needed to make it fit in a maximum length.""" if len(text) > maxlen: pre = max(0, (maxlen-3)//2) post = max(0, maxlen-3-pre) return text[:pre] + '...' + text[len(text)-post:] return text _re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE) def stripid(text): """Remove the hexadecimal id from a Python object representation.""" # The behaviour of %p is implementation-dependent in terms of case. return _re_stripid.sub(r'\1', text) def _is_some_method(obj): return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj) def allmethods(cl): methods = {} for key, value in inspect.getmembers(cl, _is_some_method): methods[key] = 1 for base in cl.__bases__: methods.update(allmethods(base)) # all your base are belong to us for key in methods.keys(): methods[key] = getattr(cl, key) return methods def _split_list(s, predicate): """Split sequence s via predicate, and return pair ([true], [false]). The return value is a 2-tuple of lists, ([x for x in s if predicate(x)], [x for x in s if not predicate(x)]) """ yes = [] no = [] for x in s: if predicate(x): yes.append(x) else: no.append(x) return yes, no def visiblename(name, all=None): """Decide whether to show documentation on a variable.""" # Certain special names are redundant. 
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__', '__module__', '__name__', '__slots__', '__package__', '__author__', '__credits__', '__date__', '__version__') if name in _hidden_names: return 0 # Private names are hidden, but special names are displayed. if name.startswith('__') and name.endswith('__'): return 1 if all is not None: # only document that which the programmer exported in __all__ return name in all else: return not name.startswith('_') def classify_class_attrs(object): """Wrap inspect.classify_class_attrs, with fixup for data descriptors.""" results = [] for (name, kind, cls, value) in inspect.classify_class_attrs(object): if inspect.isdatadescriptor(value): kind = 'data descriptor' results.append((name, kind, cls, value)) return results # ----------------------------------------------------- module manipulation def ispackage(path): """Guess whether a path refers to a package directory.""" if os.path.isdir(path): for ext in ('.py', '.pyc', '.pyo'): if os.path.isfile(os.path.join(path, '__init__' + ext)): return True return False def source_synopsis(file): line = file.readline() while line[:1] == '#' or not line.strip(): line = file.readline() if not line: break line = line.strip() if line[:4] == 'r"""': line = line[1:] if line[:3] == '"""': line = line[3:] if line[-1:] == '\\': line = line[:-1] while not line.strip(): line = file.readline() if not line: break result = line.split('"""')[0].strip() else: result = None return result def synopsis(filename, cache={}): """Get the one-line summary out of a module file.""" mtime = os.stat(filename).st_mtime lastupdate, result = cache.get(filename, (0, None)) if lastupdate < mtime: info = inspect.getmoduleinfo(filename) try: file = open(filename) except IOError: # module can't be opened, so skip it return None if info and 'b' in info[2]: # binary modules have to be imported try: module = imp.load_module('__temp__', file, filename, info[1:]) except: return None result = (module.__doc__ or 
class ErrorDuringImport(Exception):
    """Errors that occurred while trying to import something to document it."""

    def __init__(self, filename, exc_info):
        self.filename = filename
        self.exc, self.value, self.tb = exc_info

    def __str__(self):
        exc = self.exc.__name__
        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)

def importfile(path):
    """Import a Python source file or compiled file given its path."""
    magic = imp.get_magic()
    # BUGFIX: the magic number must be read in binary mode.  imp.get_magic()
    # returns bytes; reading in text mode yields str (so the comparison was
    # always false) and can raise UnicodeDecodeError on compiled files.
    with open(path, 'rb') as probe:
        is_compiled = probe.read(len(magic)) == magic
    kind = imp.PY_COMPILED if is_compiled else imp.PY_SOURCE
    filename = os.path.basename(path)
    name, ext = os.path.splitext(filename)
    file = open(path, 'r')
    try:
        module = imp.load_module(name, file, path, (ext, 'r', kind))
    except:
        raise ErrorDuringImport(path, sys.exc_info())
    finally:
        # Close the handle on both success and failure (was leaked on error).
        file.close()
    return module

def safeimport(path, forceload=0, cache={}):
    """Import a module; handle errors; return None if the module isn't found.

    If the module *is* found but an exception occurs, it's wrapped in an
    ErrorDuringImport exception and reraised.  Unlike __import__, if a
    package path is specified, the module at the end of the path is returned,
    not the package at the beginning.  If the optional 'forceload' argument
    is 1, we reload the module from disk (unless it's a dynamic extension)."""
    try:
        # If forceload is 1 and the module has been previously loaded from
        # disk, we always have to reload the module.  Checking the file's
        # mtime isn't good enough (e.g. the module could contain a class
        # that inherits from another module that has changed).
        if forceload and path in sys.modules:
            if path not in sys.builtin_module_names:
                # Remove the module from sys.modules and re-import to try
                # and avoid problems with partially loaded modules.
                # Also remove any submodules because they won't appear
                # in the newly loaded module's namespace if they're already
                # in sys.modules.
                subs = [m for m in sys.modules if m.startswith(path + '.')]
                for key in [path] + subs:
                    # Prevent garbage collection.
                    cache[key] = sys.modules[key]
                    del sys.modules[key]
        module = __import__(path)
    except:
        # Did the error occur before or after the module was found?
        (exc, value, tb) = info = sys.exc_info()
        if path in sys.modules:
            # An error occurred while executing the imported module.
            raise ErrorDuringImport(sys.modules[path].__file__, info)
        elif exc is SyntaxError:
            # A SyntaxError occurred before we could execute the module.
            raise ErrorDuringImport(value.filename, info)
        elif exc is ImportError and _extract_tb(tb)[-1][2] == 'safeimport':
            # The import error occurred directly in this function,
            # which means there is no such module in the path.
            return None
        else:
            # Some other error occurred during the importing process.
            raise ErrorDuringImport(path, sys.exc_info())
    for part in path.split('.')[1:]:
        try:
            module = getattr(module, part)
        except AttributeError:
            return None
    return module

# ---------------------------------------------------- formatter base class

class Doc:
    """Base class for pydoc formatters; dispatches on the documented object."""

    PYTHONDOCS = os.environ.get("PYTHONDOCS",
                                "http://docs.python.org/%d.%d/library"
                                % sys.version_info[:2])

    def document(self, object, name=None, *args):
        """Generate documentation for an object."""
        args = (object, name) + args
        # 'try' clause is to attempt to handle the possibility that inspect
        # identifies something in a way that pydoc itself has issues handling;
        # think 'super' and how it is a descriptor (which raises the exception
        # by lacking a __name__ attribute) and an instance.
        if inspect.isgetsetdescriptor(object): return self.docdata(*args)
        if inspect.ismemberdescriptor(object): return self.docdata(*args)
        try:
            if inspect.ismodule(object): return self.docmodule(*args)
            if inspect.isclass(object): return self.docclass(*args)
            if inspect.isroutine(object): return self.docroutine(*args)
        except AttributeError:
            pass
        if isinstance(object, property): return self.docproperty(*args)
        return self.docother(*args)

    def fail(self, object, name=None, *args):
        """Raise an exception for unimplemented types."""
        message = "don't know how to document object%s of type %s" % (
            name and ' ' + repr(name), type(object).__name__)
        raise TypeError(message)

    # Subclasses override these; the base class refuses everything.
    docmodule = docclass = docroutine = docother = docproperty = docdata = fail

    def getdocloc(self, object):
        """Return the location of module docs or None"""

        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'

        docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
        basedir = os.path.join(sys.exec_prefix, "lib",
                               "python%d.%d" % sys.version_info[:2])
        if (isinstance(object, type(os)) and
            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp', 'marshal',
                                 'posix', 'signal', 'sys', '_thread',
                                 'zipimport') or
             (file.startswith(basedir) and
              not file.startswith(os.path.join(basedir, 'site-packages')))) and
            object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
            if docloc.startswith("http://"):
                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
            else:
                docloc = os.path.join(docloc, object.__name__ + ".html")
        else:
            docloc = None
        return docloc
# -------------------------------------------- HTML documentation generator

class HTMLRepr(Repr):
    """Class for safely making an HTML representation of a Python object."""

    def __init__(self):
        Repr.__init__(self)
        # Truncation limits tuned for HTML pages.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')

    def repr(self, object):
        return Repr.repr(self, object)

    def repr1(self, x, level):
        # Dispatch to a repr_<typename> method when one exists.
        if hasattr(type(x), '__name__'):
            handler_name = 'repr_' + '_'.join(type(x).__name__.split())
            if hasattr(self, handler_name):
                return getattr(self, handler_name)(x, level)
        return self.escape(cram(stripid(repr(x)), self.maxother))

    def repr_string(self, x, level):
        clipped = cram(x, self.maxstring)
        clipped_repr = repr(clipped)
        if '\\' in clipped and '\\' not in replace(clipped_repr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + clipped_repr[0] + self.escape(clipped) + clipped_repr[0]
        return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
                      r'<font color="#c040c0">\1</font>',
                      self.escape(clipped_repr))

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return self.escape(cram(stripid(repr(x)), self.maxstring))
        except:
            return self.escape('<%s instance>' % x.__class__.__name__)

    repr_unicode = repr_string
class HTMLDoc(Doc):
    """Formatter class for HTML documentation."""

    # ------------------------------------------- HTML formatting utilities

    _repr_instance = HTMLRepr()
    repr = _repr_instance.repr
    escape = _repr_instance.escape

    def page(self, title, contents):
        """Format an HTML page."""
        return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)

    def heading(self, title, fgcol, bgcol, extras=''):
        """Format a page heading."""
        return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')

    def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
        """Format a section with a heading."""
        if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
        html = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
        if prelude:
            html += '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
        else:
            html += '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
        return html + '\n<td width="100%%">%s</td></tr></table>' % contents

    def bigsection(self, title, *args):
        """Format a section with a big heading."""
        title = '<big><strong>%s</strong></big>' % title
        return self.section(title, *args)

    def preformat(self, text):
        """Format literal preformatted text."""
        text = self.escape(text.expandtabs())
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                       ' ', '&nbsp;', '\n', '<br>\n')

    def multicolumn(self, list, format, cols=4):
        """Format a list of items into a multi-column list."""
        html = ''
        rows = (len(list) + cols - 1) // cols
        for col in range(cols):
            html += '<td width="%d%%" valign=top>' % (100 // cols)
            for idx in range(rows * col, rows * col + rows):
                if idx < len(list):
                    html += format(list[idx]) + '<br>\n'
            html += '</td>'
        return '<table width="100%%" summary="list"><tr>%s</tr></table>' % html

    def grey(self, text):
        return '<font color="#909090">%s</font>' % text

    def namelink(self, name, *dicts):
        """Make a link for an identifier, given name-to-URL mappings."""
        for dict in dicts:
            if name in dict:
                return '<a href="%s">%s</a>' % (dict[name], name)
        return name

    def classlink(self, object, modname):
        """Make a link for a class."""
        name, module = object.__name__, sys.modules.get(object.__module__)
        if hasattr(module, name) and getattr(module, name) is object:
            return '<a href="%s.html#%s">%s</a>' % (
                module.__name__, name, classname(object, modname))
        return classname(object, modname)
classname(object, modname) def modulelink(self, object): """Make a link for a module.""" return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__) def modpkglink(self, modpkginfo): """Make a link for a module or package to display in an index.""" name, path, ispackage, shadowed = modpkginfo if shadowed: return self.grey(name) if path: url = '%s.%s.html' % (path, name) else: url = '%s.html' % name if ispackage: text = '<strong>%s</strong>&nbsp;(package)' % name else: text = name return '<a href="%s">%s</a>' % (url, text) def markup(self, text, escape=None, funcs={}, classes={}, methods={}): """Mark up some plain text, given a context of symbols to look for. Each context dictionary maps object names to anchor names.""" escape = escape or self.escape results = [] here = 0 pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|' r'RFC[- ]?(\d+)|' r'PEP[- ]?(\d+)|' r'(self\.)?(\w+))') while True: match = pattern.search(text, here) if not match: break start, end = match.span() results.append(escape(text[here:start])) all, scheme, rfc, pep, selfdot, name = match.groups() if scheme: url = escape(all).replace('"', '&quot;') results.append('<a href="%s">%s</a>' % (url, url)) elif rfc: url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc) results.append('<a href="%s">%s</a>' % (url, escape(all))) elif pep: url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep) results.append('<a href="%s">%s</a>' % (url, escape(all))) elif text[end:end+1] == '(': results.append(self.namelink(name, methods, funcs, classes)) elif selfdot: results.append('self.<strong>%s</strong>' % name) else: results.append(self.namelink(name, classes)) here = end results.append(escape(text[here:])) return ''.join(results) # ---------------------------------------------- type-specific routines def formattree(self, tree, modname, parent=None): """Produce HTML for a class tree as given by inspect.getclasstree().""" result = '' for entry in tree: if type(entry) is type(()): c, bases = entry result 
= result + '<dt><font face="helvetica, arial">' result = result + self.classlink(c, modname) if bases and bases != (parent,): parents = [] for base in bases: parents.append(self.classlink(base, modname)) result = result + '(' + ', '.join(parents) + ')' result = result + '\n</font></dt>' elif type(entry) is type([]): result = result + '<dd>\n%s</dd>\n' % self.formattree( entry, modname, c) return '<dl>\n%s</dl>\n' % result def docmodule(self, object, name=None, mod=None, *ignored): """Produce HTML documentation for a module object.""" name = object.__name__ # ignore the passed-in name try: all = object.__all__ except AttributeError: all = None parts = name.split('.') links = [] for i in range(len(parts)-1): links.append( '<a href="%s.html"><font color="#ffffff">%s</font></a>' % ('.'.join(parts[:i+1]), parts[i])) linkedname = '.'.join(links + parts[-1:]) head = '<big><big><strong>%s</strong></big></big>' % linkedname try: path = inspect.getabsfile(object) url = path if sys.platform == 'win32': import nturl2path url = nturl2path.pathname2url(path) filelink = '<a href="file:%s">%s</a>' % (url, path) except TypeError: filelink = '(built-in)' info = [] if hasattr(object, '__version__'): version = str(object.__version__) if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': version = version[11:-1].strip() info.append('version %s' % self.escape(version)) if hasattr(object, '__date__'): info.append(self.escape(str(object.__date__))) if info: head = head + ' (%s)' % ', '.join(info) docloc = self.getdocloc(object) if docloc is not None: docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals() else: docloc = '' result = self.heading( head, '#ffffff', '#7799ee', '<a href=".">index</a><br>' + filelink + docloc) modules = inspect.getmembers(object, inspect.ismodule) classes, cdict = [], {} for key, value in inspect.getmembers(object, inspect.isclass): # if __all__ exists, believe it. Otherwise use old heuristic. 
if (all is not None or (inspect.getmodule(value) or object) is object): if visiblename(key, all): classes.append((key, value)) cdict[key] = cdict[value] = '#' + key for key, value in classes: for base in value.__bases__: key, modname = base.__name__, base.__module__ module = sys.modules.get(modname) if modname != name and module and hasattr(module, key): if getattr(module, key) is base: if not key in cdict: cdict[key] = cdict[base] = modname + '.html#' + key funcs, fdict = [], {} for key, value in inspect.getmembers(object, inspect.isroutine): # if __all__ exists, believe it. Otherwise use old heuristic. if (all is not None or inspect.isbuiltin(value) or inspect.getmodule(value) is object): if visiblename(key, all): funcs.append((key, value)) fdict[key] = '#-' + key if inspect.isfunction(value): fdict[value] = fdict[key] data = [] for key, value in inspect.getmembers(object, isdata): if visiblename(key, all): data.append((key, value)) doc = self.markup(getdoc(object), self.preformat, fdict, cdict) doc = doc and '<tt>%s</tt>' % doc result = result + '<p>%s</p>\n' % doc if hasattr(object, '__path__'): modpkgs = [] for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): modpkgs.append((modname, name, ispkg, 0)) modpkgs.sort() contents = self.multicolumn(modpkgs, self.modpkglink) result = result + self.bigsection( 'Package Contents', '#ffffff', '#aa55cc', contents) elif modules: contents = self.multicolumn( modules, lambda t: self.modulelink(t[1])) result = result + self.bigsection( 'Modules', '#ffffff', '#aa55cc', contents) if classes: classlist = [value for (key, value) in classes] contents = [ self.formattree(inspect.getclasstree(classlist, 1), name)] for key, value in classes: contents.append(self.document(value, key, name, fdict, cdict)) result = result + self.bigsection( 'Classes', '#ffffff', '#ee77aa', ' '.join(contents)) if funcs: contents = [] for key, value in funcs: contents.append(self.document(value, key, name, fdict, cdict)) result = result 
+ self.bigsection( 'Functions', '#ffffff', '#eeaa77', ' '.join(contents)) if data: contents = [] for key, value in data: contents.append(self.document(value, key)) result = result + self.bigsection( 'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents)) if hasattr(object, '__author__'): contents = self.markup(str(object.__author__), self.preformat) result = result + self.bigsection( 'Author', '#ffffff', '#7799ee', contents) if hasattr(object, '__credits__'): contents = self.markup(str(object.__credits__), self.preformat) result = result + self.bigsection( 'Credits', '#ffffff', '#7799ee', contents) return result def docclass(self, object, name=None, mod=None, funcs={}, classes={}, *ignored): """Produce HTML documentation for a class object.""" realname = object.__name__ name = name or realname bases = object.__bases__ contents = [] push = contents.append # Cute little class to pump out a horizontal rule between sections. class HorizontalRule: def __init__(self): self.needone = 0 def maybe(self): if self.needone: push('<hr>\n') self.needone = 1 hr = HorizontalRule() # List the mro, if non-trivial. 
mro = deque(inspect.getmro(object)) if len(mro) > 2: hr.maybe() push('<dl><dt>Method resolution order:</dt>\n') for base in mro: push('<dd>%s</dd>\n' % self.classlink(base, object.__module__)) push('</dl>\n') def spill(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: push(self.document(getattr(object, name), name, mod, funcs, classes, mdict, object)) push('\n') return attrs def spilldescriptors(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: push(self._docdescriptor(name, value, mod)) return attrs def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: base = self.docother(getattr(object, name), name, mod) if hasattr(value, '__call__') or inspect.isdatadescriptor(value): doc = getattr(value, "__doc__", None) else: doc = None if doc is None: push('<dl><dt>%s</dl>\n' % base) else: doc = self.markup(getdoc(value), self.preformat, funcs, classes, mdict) doc = '<dd><tt>%s</tt>' % doc push('<dl><dt>%s%s</dl>\n' % (base, doc)) push('\n') return attrs attrs = [(name, kind, cls, value) for name, kind, cls, value in classify_class_attrs(object) if visiblename(name)] mdict = {} for key, kind, homecls, value in attrs: mdict[key] = anchor = '#' + name + '-' + key value = getattr(object, key) try: # The value may not be hashable (e.g., a data attr with # a dict or list value). mdict[value] = anchor except TypeError: pass while attrs: if mro: thisclass = mro.popleft() else: thisclass = attrs[0][2] attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) if thisclass is builtins.object: attrs = inherited continue elif thisclass is object: tag = 'defined here' else: tag = 'inherited from %s' % self.classlink(thisclass, object.__module__) tag += ':<br>\n' # Sort attrs by name. 
attrs.sort(key=lambda t: t[0]) # Pump out the attrs, segregated by kind. attrs = spill('Methods %s' % tag, attrs, lambda t: t[1] == 'method') attrs = spill('Class methods %s' % tag, attrs, lambda t: t[1] == 'class method') attrs = spill('Static methods %s' % tag, attrs, lambda t: t[1] == 'static method') attrs = spilldescriptors('Data descriptors %s' % tag, attrs, lambda t: t[1] == 'data descriptor') attrs = spilldata('Data and other attributes %s' % tag, attrs, lambda t: t[1] == 'data') assert attrs == [] attrs = inherited contents = ''.join(contents) if name == realname: title = '<a name="%s">class <strong>%s</strong></a>' % ( name, realname) else: title = '<strong>%s</strong> = <a name="%s">class %s</a>' % ( name, name, realname) if bases: parents = [] for base in bases: parents.append(self.classlink(base, object.__module__)) title = title + '(%s)' % ', '.join(parents) doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict) doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc return self.section(title, '#000000', '#ffc8d8', contents, 3, doc) def formatvalue(self, object): """Format an argument default value as text.""" return self.grey('=' + self.repr(object)) def docroutine(self, object, name=None, mod=None, funcs={}, classes={}, methods={}, cl=None): """Produce HTML documentation for a function or method object.""" realname = object.__name__ name = name or realname anchor = (cl and cl.__name__ or '') + '-' + name note = '' skipdocs = 0 if inspect.ismethod(object): imclass = object.__self__.__class__ if cl: if imclass is not cl: note = ' from ' + self.classlink(imclass, mod) else: if object.__self__ is not None: note = ' method of %s instance' % self.classlink( object.__self__.__class__, mod) else: note = ' unbound %s method' % self.classlink(imclass,mod) object = object.__func__ if name == realname: title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname) else: if (cl and realname in cl.__dict__ and cl.__dict__[realname] is object): reallink 
= '<a href="#%s">%s</a>' % ( cl.__name__ + '-' + realname, realname) skipdocs = 1 else: reallink = realname title = '<a name="%s"><strong>%s</strong></a> = %s' % ( anchor, name, reallink) if inspect.isfunction(object): args, varargs, kwonlyargs, kwdefaults, varkw, defaults, ann = \ inspect.getfullargspec(object) argspec = inspect.formatargspec( args, varargs, kwonlyargs, kwdefaults, varkw, defaults, ann, formatvalue=self.formatvalue, formatannotation=inspect.formatannotationrelativeto(object)) if realname == '<lambda>': title = '<strong>%s</strong> <em>lambda</em> ' % name # XXX lambda's won't usually have func_annotations['return'] # since the syntax doesn't support but it is possible. # So removing parentheses isn't truly safe. argspec = argspec[1:-1] # remove parentheses else: argspec = '(...)' decl = title + argspec + (note and self.grey( '<font face="helvetica, arial">%s</font>' % note)) if skipdocs: return '<dl><dt>%s</dt></dl>\n' % decl else: doc = self.markup( getdoc(object), self.preformat, funcs, classes, methods) doc = doc and '<dd><tt>%s</tt></dd>' % doc return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc) def _docdescriptor(self, name, value, mod): results = [] push = results.append if name: push('<dl><dt><strong>%s</strong></dt>\n' % name) if value.__doc__ is not None: doc = self.markup(getdoc(value), self.preformat) push('<dd><tt>%s</tt></dd>\n' % doc) push('</dl>\n') return ''.join(results) def docproperty(self, object, name=None, mod=None, cl=None): """Produce html documentation for a property.""" return self._docdescriptor(name, object, mod) def docother(self, object, name=None, mod=None, *ignored): """Produce HTML documentation for a data object.""" lhs = name and '<strong>%s</strong> = ' % name or '' return lhs + self.repr(object) def docdata(self, object, name=None, mod=None, cl=None): """Produce html documentation for a data descriptor.""" return self._docdescriptor(name, object, mod) def index(self, dir, shadowed=None): """Generate an HTML index 
for a directory of modules.""" modpkgs = [] if shadowed is None: shadowed = {} for importer, name, ispkg in pkgutil.iter_modules([dir]): modpkgs.append((name, '', ispkg, name in shadowed)) shadowed[name] = 1 modpkgs.sort() contents = self.multicolumn(modpkgs, self.modpkglink) return self.bigsection(dir, '#ffffff', '#ee77aa', contents) # -------------------------------------------- text documentation generator class TextRepr(Repr): """Class for safely making a text representation of a Python object.""" def __init__(self): Repr.__init__(self) self.maxlist = self.maxtuple = 20 self.maxdict = 10 self.maxstring = self.maxother = 100 def repr1(self, x, level): if hasattr(type(x), '__name__'): methodname = 'repr_' + '_'.join(type(x).__name__.split()) if hasattr(self, methodname): return getattr(self, methodname)(x, level) return cram(stripid(repr(x)), self.maxother) def repr_string(self, x, level): test = cram(x, self.maxstring) testrepr = repr(test) if '\\' in test and '\\' not in replace(testrepr, r'\\', ''): # Backslashes are only literal in the string and are never # needed to make any special characters, so show a raw string. 
return 'r' + testrepr[0] + test + testrepr[0] return testrepr repr_str = repr_string def repr_instance(self, x, level): try: return cram(stripid(repr(x)), self.maxstring) except: return '<%s instance>' % x.__class__.__name__ class TextDoc(Doc): """Formatter class for text documentation.""" # ------------------------------------------- text formatting utilities _repr_instance = TextRepr() repr = _repr_instance.repr def bold(self, text): """Format a string in bold by overstriking.""" return ''.join(map(lambda ch: ch + '\b' + ch, text)) def indent(self, text, prefix=' '): """Indent text by prepending a given prefix to each line.""" if not text: return '' lines = [prefix + line for line in text.split('\n')] if lines: lines[-1] = lines[-1].rstrip() return '\n'.join(lines) def section(self, title, contents): """Format a section with a given heading.""" clean_contents = self.indent(contents).rstrip() return self.bold(title) + '\n' + clean_contents + '\n\n' # ---------------------------------------------- type-specific routines def formattree(self, tree, modname, parent=None, prefix=''): """Render in text a class tree as returned by inspect.getclasstree().""" result = '' for entry in tree: if type(entry) is type(()): c, bases = entry result = result + prefix + classname(c, modname) if bases and bases != (parent,): parents = map(lambda c, m=modname: classname(c, m), bases) result = result + '(%s)' % ', '.join(parents) result = result + '\n' elif type(entry) is type([]): result = result + self.formattree( entry, modname, c, prefix + ' ') return result def docmodule(self, object, name=None, mod=None): """Produce text documentation for a given module object.""" name = object.__name__ # ignore the passed-in name synop, desc = splitdoc(getdoc(object)) result = self.section('NAME', name + (synop and ' - ' + synop)) all = getattr(object, '__all__', None) docloc = self.getdocloc(object) if docloc is not None: result = result + self.section('MODULE REFERENCE', docloc + """ The 
following documentation is automatically generated from the Python source files. It may be incomplete, incorrect or include features that are considered implementation detail and may vary between Python implementations. When in doubt, consult the module reference at the location listed above. """) if desc: result = result + self.section('DESCRIPTION', desc) classes = [] for key, value in inspect.getmembers(object, inspect.isclass): # if __all__ exists, believe it. Otherwise use old heuristic. if (all is not None or (inspect.getmodule(value) or object) is object): if visiblename(key, all): classes.append((key, value)) funcs = [] for key, value in inspect.getmembers(object, inspect.isroutine): # if __all__ exists, believe it. Otherwise use old heuristic. if (all is not None or inspect.isbuiltin(value) or inspect.getmodule(value) is object): if visiblename(key, all): funcs.append((key, value)) data = [] for key, value in inspect.getmembers(object, isdata): if visiblename(key, all): data.append((key, value)) modpkgs = [] modpkgs_names = set() if hasattr(object, '__path__'): for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): modpkgs_names.add(modname) if ispkg: modpkgs.append(modname + ' (package)') else: modpkgs.append(modname) modpkgs.sort() result = result + self.section( 'PACKAGE CONTENTS', '\n'.join(modpkgs)) # Detect submodules as sometimes created by C extensions submodules = [] for key, value in inspect.getmembers(object, inspect.ismodule): if value.__name__.startswith(name + '.') and key not in modpkgs_names: submodules.append(key) if submodules: submodules.sort() result = result + self.section( 'SUBMODULES', '\n'.join(submodules)) if classes: classlist = [value for key, value in classes] contents = [self.formattree( inspect.getclasstree(classlist, 1), name)] for key, value in classes: contents.append(self.document(value, key, name)) result = result + self.section('CLASSES', '\n'.join(contents)) if funcs: contents = [] for key, value in 
funcs: contents.append(self.document(value, key, name)) result = result + self.section('FUNCTIONS', '\n'.join(contents)) if data: contents = [] for key, value in data: contents.append(self.docother(value, key, name, maxlen=70)) result = result + self.section('DATA', '\n'.join(contents)) if hasattr(object, '__version__'): version = str(object.__version__) if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': version = version[11:-1].strip() result = result + self.section('VERSION', version) if hasattr(object, '__date__'): result = result + self.section('DATE', str(object.__date__)) if hasattr(object, '__author__'): result = result + self.section('AUTHOR', str(object.__author__)) if hasattr(object, '__credits__'): result = result + self.section('CREDITS', str(object.__credits__)) try: file = inspect.getabsfile(object) except TypeError: file = '(built-in)' result = result + self.section('FILE', file) return result def docclass(self, object, name=None, mod=None, *ignored): """Produce text documentation for a given class object.""" realname = object.__name__ name = name or realname bases = object.__bases__ def makename(c, m=object.__module__): return classname(c, m) if name == realname: title = 'class ' + self.bold(realname) else: title = self.bold(name) + ' = class ' + realname if bases: parents = map(makename, bases) title = title + '(%s)' % ', '.join(parents) doc = getdoc(object) contents = doc and [doc + '\n'] or [] push = contents.append # List the mro, if non-trivial. mro = deque(inspect.getmro(object)) if len(mro) > 2: push("Method resolution order:") for base in mro: push(' ' + makename(base)) push('') # Cute little class to pump out a horizontal rule between sections. 
class HorizontalRule: def __init__(self): self.needone = 0 def maybe(self): if self.needone: push('-' * 70) self.needone = 1 hr = HorizontalRule() def spill(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: push(self.document(getattr(object, name), name, mod, object)) return attrs def spilldescriptors(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: push(self._docdescriptor(name, value, mod)) return attrs def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: if hasattr(value, '__call__') or inspect.isdatadescriptor(value): doc = getdoc(value) else: doc = None push(self.docother(getattr(object, name), name, mod, maxlen=70, doc=doc) + '\n') return attrs attrs = [(name, kind, cls, value) for name, kind, cls, value in classify_class_attrs(object) if visiblename(name)] while attrs: if mro: thisclass = mro.popleft() else: thisclass = attrs[0][2] attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) if thisclass is builtins.object: attrs = inherited continue elif thisclass is object: tag = "defined here" else: tag = "inherited from %s" % classname(thisclass, object.__module__) # Sort attrs by name. attrs.sort() # Pump out the attrs, segregated by kind. 
attrs = spill("Methods %s:\n" % tag, attrs, lambda t: t[1] == 'method') attrs = spill("Class methods %s:\n" % tag, attrs, lambda t: t[1] == 'class method') attrs = spill("Static methods %s:\n" % tag, attrs, lambda t: t[1] == 'static method') attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs, lambda t: t[1] == 'data descriptor') attrs = spilldata("Data and other attributes %s:\n" % tag, attrs, lambda t: t[1] == 'data') assert attrs == [] attrs = inherited contents = '\n'.join(contents) if not contents: return title + '\n' return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n' def formatvalue(self, object): """Format an argument default value as text.""" return '=' + self.repr(object) def docroutine(self, object, name=None, mod=None, cl=None): """Produce text documentation for a function or method object.""" realname = object.__name__ name = name or realname note = '' skipdocs = 0 if inspect.ismethod(object): imclass = object.__self__.__class__ if cl: if imclass is not cl: note = ' from ' + classname(imclass, mod) else: if object.__self__ is not None: note = ' method of %s instance' % classname( object.__self__.__class__, mod) else: note = ' unbound %s method' % classname(imclass,mod) object = object.__func__ if name == realname: title = self.bold(realname) else: if (cl and realname in cl.__dict__ and cl.__dict__[realname] is object): skipdocs = 1 title = self.bold(name) + ' = ' + realname if inspect.isfunction(object): args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \ inspect.getfullargspec(object) argspec = inspect.formatargspec( args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann, formatvalue=self.formatvalue, formatannotation=inspect.formatannotationrelativeto(object)) if realname == '<lambda>': title = self.bold(name) + ' lambda ' # XXX lambda's won't usually have func_annotations['return'] # since the syntax doesn't support but it is possible. # So removing parentheses isn't truly safe. 
argspec = argspec[1:-1] # remove parentheses else: argspec = '(...)' decl = title + argspec + note if skipdocs: return decl + '\n' else: doc = getdoc(object) or '' return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n') def _docdescriptor(self, name, value, mod): results = [] push = results.append if name: push(self.bold(name)) push('\n') doc = getdoc(value) or '' if doc: push(self.indent(doc)) push('\n') return ''.join(results) def docproperty(self, object, name=None, mod=None, cl=None): """Produce text documentation for a property.""" return self._docdescriptor(name, object, mod) def docdata(self, object, name=None, mod=None, cl=None): """Produce text documentation for a data descriptor.""" return self._docdescriptor(name, object, mod) def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None): """Produce text documentation for a data object.""" repr = self.repr(object) if maxlen: line = (name and name + ' = ' or '') + repr chop = maxlen - len(line) if chop < 0: repr = repr[:chop] + '...' 
line = (name and self.bold(name) + ' = ' or '') + repr if doc is not None: line += '\n' + self.indent(str(doc)) return line # --------------------------------------------------------- user interfaces def pager(text): """The first time this is called, determine what kind of pager to use.""" global pager pager = getpager() pager(text) def getpager(): """Decide what method to use for paging through text.""" if not hasattr(sys.stdout, "isatty"): return plainpager if not sys.stdin.isatty() or not sys.stdout.isatty(): return plainpager if 'PAGER' in os.environ: if sys.platform == 'win32': # pipes completely broken in Windows return lambda text: tempfilepager(plain(text), os.environ['PAGER']) elif os.environ.get('TERM') in ('dumb', 'emacs'): return lambda text: pipepager(plain(text), os.environ['PAGER']) else: return lambda text: pipepager(text, os.environ['PAGER']) if os.environ.get('TERM') in ('dumb', 'emacs'): return plainpager if sys.platform == 'win32' or sys.platform.startswith('os2'): return lambda text: tempfilepager(plain(text), 'more <') if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0: return lambda text: pipepager(text, 'less') import tempfile (fd, filename) = tempfile.mkstemp() os.close(fd) try: if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0: return lambda text: pipepager(text, 'more') else: return ttypager finally: os.unlink(filename) def plain(text): """Remove boldface formatting from text.""" return re.sub('.\b', '', text) def pipepager(text, cmd): """Page through text by feeding it to another program.""" pipe = os.popen(cmd, 'w') try: pipe.write(text) pipe.close() except IOError: pass # Ignore broken pipes caused by quitting the pager program. 
def tempfilepager(text, cmd): """Page through text by invoking a program on a temporary file.""" import tempfile filename = tempfile.mktemp() file = open(filename, 'w') file.write(text) file.close() try: os.system(cmd + ' "' + filename + '"') finally: os.unlink(filename) def ttypager(text): """Page through text on a text terminal.""" lines = plain(text).split('\n') try: import tty fd = sys.stdin.fileno() old = tty.tcgetattr(fd) tty.setcbreak(fd) getchar = lambda: sys.stdin.read(1) except (ImportError, AttributeError): tty = None getchar = lambda: sys.stdin.readline()[:-1][:1] try: r = inc = os.environ.get('LINES', 25) - 1 sys.stdout.write('\n'.join(lines[:inc]) + '\n') while lines[r:]: sys.stdout.write('-- more --') sys.stdout.flush() c = getchar() if c in ('q', 'Q'): sys.stdout.write('\r \r') break elif c in ('\r', '\n'): sys.stdout.write('\r \r' + lines[r] + '\n') r = r + 1 continue if c in ('b', 'B', '\x1b'): r = r - inc - inc if r < 0: r = 0 sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n') r = r + inc finally: if tty: tty.tcsetattr(fd, tty.TCSAFLUSH, old) def plainpager(text): """Simply print unformatted text. 
This is the ultimate fallback.""" sys.stdout.write(plain(text)) def describe(thing): """Produce a short description of the given thing.""" if inspect.ismodule(thing): if thing.__name__ in sys.builtin_module_names: return 'built-in module ' + thing.__name__ if hasattr(thing, '__path__'): return 'package ' + thing.__name__ else: return 'module ' + thing.__name__ if inspect.isbuiltin(thing): return 'built-in function ' + thing.__name__ if inspect.isgetsetdescriptor(thing): return 'getset descriptor %s.%s.%s' % ( thing.__objclass__.__module__, thing.__objclass__.__name__, thing.__name__) if inspect.ismemberdescriptor(thing): return 'member descriptor %s.%s.%s' % ( thing.__objclass__.__module__, thing.__objclass__.__name__, thing.__name__) if inspect.isclass(thing): return 'class ' + thing.__name__ if inspect.isfunction(thing): return 'function ' + thing.__name__ if inspect.ismethod(thing): return 'method ' + thing.__name__ return type(thing).__name__ def locate(path, forceload=0): """Locate an object by name or dotted path, importing as necessary.""" parts = [part for part in path.split('.') if part] module, n = None, 0 while n < len(parts): nextmodule = safeimport('.'.join(parts[:n+1]), forceload) if nextmodule: module, n = nextmodule, n + 1 else: break if module: object = module for part in parts[n:]: try: object = getattr(object, part) except AttributeError: return None return object else: if hasattr(builtins, path): return getattr(builtins, path) # --------------------------------------- interactive interpreter interface text = TextDoc() html = HTMLDoc() def resolve(thing, forceload=0): """Given an object or a path to an object, get the object and its name.""" if isinstance(thing, str): object = locate(thing, forceload) if not object: raise ImportError('no Python documentation found for %r' % thing) return object, thing else: return thing, getattr(thing, '__name__', None) def render_doc(thing, title='Python Library Documentation: %s', forceload=0): """Render text 
documentation, given an object or a path to an object.""" object, name = resolve(thing, forceload) desc = describe(object) module = inspect.getmodule(object) if name and '.' in name: desc += ' in ' + name[:name.rfind('.')] elif module and module is not object: desc += ' in module ' + module.__name__ if not (inspect.ismodule(object) or inspect.isclass(object) or inspect.isroutine(object) or inspect.isgetsetdescriptor(object) or inspect.ismemberdescriptor(object) or isinstance(object, property)): # If the passed object is a piece of data or an instance, # document its available methods instead of its value. object = type(object) desc += ' object' return title % desc + '\n\n' + text.document(object, name) def doc(thing, title='Python Library Documentation: %s', forceload=0): """Display text documentation, given an object or a path to an object.""" try: pager(render_doc(thing, title, forceload)) except (ImportError, ErrorDuringImport) as value: print(value) def writedoc(thing, forceload=0): """Write HTML documentation to a file in the current directory.""" try: object, name = resolve(thing, forceload) page = html.page(describe(object), html.document(object, name)) file = open(name + '.html', 'w', encoding='utf-8') file.write(page) file.close() print('wrote', name + '.html') except (ImportError, ErrorDuringImport) as value: print(value) def writedocs(dir, pkgpath='', done=None): """Write out HTML documentation for all modules in a directory tree.""" if done is None: done = {} for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath): writedoc(modname) return class Helper: # These dictionaries map a topic name to either an alias, or a tuple # (label, seealso-items). The "label" is the label of the corresponding # section in the .rst file under Doc/ and an index into the dictionary # in pydoc_data/topics.py. 
# # CAUTION: if you change one of these dictionaries, be sure to adapt the # list of needed labels in Doc/tools/sphinxext/pyspecific.py and # regenerate the pydoc_data/topics.py file by running # make pydoc-topics # in Doc/ and copying the output file into the Lib/ directory. keywords = { 'False': '', 'None': '', 'True': '', 'and': 'BOOLEAN', 'as': 'with', 'assert': ('assert', ''), 'break': ('break', 'while for'), 'class': ('class', 'CLASSES SPECIALMETHODS'), 'continue': ('continue', 'while for'), 'def': ('function', ''), 'del': ('del', 'BASICMETHODS'), 'elif': 'if', 'else': ('else', 'while for'), 'except': 'try', 'finally': 'try', 'for': ('for', 'break continue while'), 'from': 'import', 'global': ('global', 'nonlocal NAMESPACES'), 'if': ('if', 'TRUTHVALUE'), 'import': ('import', 'MODULES'), 'in': ('in', 'SEQUENCEMETHODS'), 'is': 'COMPARISON', 'lambda': ('lambda', 'FUNCTIONS'), 'nonlocal': ('nonlocal', 'global NAMESPACES'), 'not': 'BOOLEAN', 'or': 'BOOLEAN', 'pass': ('pass', ''), 'raise': ('raise', 'EXCEPTIONS'), 'return': ('return', 'FUNCTIONS'), 'try': ('try', 'EXCEPTIONS'), 'while': ('while', 'break continue if TRUTHVALUE'), 'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'), 'yield': ('yield', ''), } # Either add symbols to this dictionary or to the symbols dictionary # directly: Whichever is easier. They are merged later. 
_symbols_inverse = { 'STRINGS' : ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'), 'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&', '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'), 'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'), 'UNARY' : ('-', '~'), 'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=', '**=', '//='), 'BITWISE' : ('<<', '>>', '&', '|', '^', '~'), 'COMPLEX' : ('j', 'J') } symbols = { '%': 'OPERATORS FORMATTING', '**': 'POWER', ',': 'TUPLES LISTS FUNCTIONS', '.': 'ATTRIBUTES FLOAT MODULES OBJECTS', '...': 'ELLIPSIS', ':': 'SLICINGS DICTIONARYLITERALS', '@': 'def class', '\\': 'STRINGS', '_': 'PRIVATENAMES', '__': 'PRIVATENAMES SPECIALMETHODS', '`': 'BACKQUOTES', '(': 'TUPLES FUNCTIONS CALLS', ')': 'TUPLES FUNCTIONS CALLS', '[': 'LISTS SUBSCRIPTS SLICINGS', ']': 'LISTS SUBSCRIPTS SLICINGS' } for topic, symbols_ in _symbols_inverse.items(): for symbol in symbols_: topics = symbols.get(symbol, topic) if topic not in topics: topics = topics + ' ' + topic symbols[symbol] = topics topics = { 'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS ' 'FUNCTIONS CLASSES MODULES FILES inspect'), 'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS ' 'FORMATTING TYPES'), 'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'), 'FORMATTING': ('formatstrings', 'OPERATORS'), 'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS ' 'FORMATTING TYPES'), 'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'), 'INTEGER': ('integers', 'int range'), 'FLOAT': ('floating', 'float math'), 'COMPLEX': ('imaginary', 'complex cmath'), 'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'), 'MAPPINGS': 'DICTIONARIES', 'FUNCTIONS': ('typesfunctions', 'def TYPES'), 'METHODS': ('typesmethods', 'class def CLASSES TYPES'), 'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'), 'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'), 'FRAMEOBJECTS': 'TYPES', 'TRACEBACKS': 
'TYPES', 'NONE': ('bltin-null-object', ''), 'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'), 'FILES': ('bltin-file-objects', ''), 'SPECIALATTRIBUTES': ('specialattrs', ''), 'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'), 'MODULES': ('typesmodules', 'import'), 'PACKAGES': 'import', 'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN ' 'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER ' 'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES ' 'LISTS DICTIONARIES'), 'OPERATORS': 'EXPRESSIONS', 'PRECEDENCE': 'EXPRESSIONS', 'OBJECTS': ('objects', 'TYPES'), 'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS ' 'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS ' 'NUMBERMETHODS CLASSES'), 'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'), 'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'), 'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'), 'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS ' 'SPECIALMETHODS'), 'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'), 'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT ' 'SPECIALMETHODS'), 'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'), 'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'), 'DYNAMICFEATURES': ('dynamic-features', ''), 'SCOPING': 'NAMESPACES', 'FRAMES': 'NAMESPACES', 'EXCEPTIONS': ('exceptions', 'try except finally raise'), 'CONVERSIONS': ('conversions', ''), 'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'), 'SPECIALIDENTIFIERS': ('id-classes', ''), 'PRIVATENAMES': ('atom-identifiers', ''), 'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS ' 'LISTLITERALS DICTIONARYLITERALS'), 'TUPLES': 'SEQUENCES', 'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'), 'LISTS': ('typesseq-mutable', 'LISTLITERALS'), 'LISTLITERALS': ('lists', 'LISTS LITERALS'), 'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'), 'DICTIONARYLITERALS': 
('dict', 'DICTIONARIES LITERALS'), 'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'), 'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'), 'SLICINGS': ('slicings', 'SEQUENCEMETHODS'), 'CALLS': ('calls', 'EXPRESSIONS'), 'POWER': ('power', 'EXPRESSIONS'), 'UNARY': ('unary', 'EXPRESSIONS'), 'BINARY': ('binary', 'EXPRESSIONS'), 'SHIFTING': ('shifting', 'EXPRESSIONS'), 'BITWISE': ('bitwise', 'EXPRESSIONS'), 'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'), 'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'), 'ASSERTION': 'assert', 'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'), 'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'), 'DELETION': 'del', 'RETURNING': 'return', 'IMPORTING': 'import', 'CONDITIONAL': 'if', 'LOOPING': ('compound', 'for while break continue'), 'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'), 'DEBUGGING': ('debugger', 'pdb'), 'CONTEXTMANAGERS': ('context-managers', 'with'), } def __init__(self, input=None, output=None): self._input = input self._output = output input = property(lambda self: self._input or sys.stdin) output = property(lambda self: self._output or sys.stdout) def __repr__(self): if inspect.stack()[1][3] == '?': self() return '' return '<pydoc.Helper instance>' _GoInteractive = object() def __call__(self, request=_GoInteractive): if request is not self._GoInteractive: self.help(request) else: self.intro() self.interact() self.output.write(''' You are now leaving help and returning to the Python interpreter. If you want to ask for help on a particular object directly from the interpreter, you can type "help(object)". Executing "help('string')" has the same effect as typing a particular string at the help> prompt. 
''') def interact(self): self.output.write('\n') while True: try: request = self.getline('help> ') if not request: break except (KeyboardInterrupt, EOFError): break request = replace(request, '"', '', "'", '').strip() if request.lower() in ('q', 'quit'): break self.help(request) def getline(self, prompt): """Read one line, using input() when appropriate.""" if self.input is sys.stdin: return input(prompt) else: self.output.write(prompt) self.output.flush() return self.input.readline() def help(self, request): if type(request) is type(''): request = request.strip() if request == 'help': self.intro() elif request == 'keywords': self.listkeywords() elif request == 'symbols': self.listsymbols() elif request == 'topics': self.listtopics() elif request == 'modules': self.listmodules() elif request[:8] == 'modules ': self.listmodules(request.split()[1]) elif request in self.symbols: self.showsymbol(request) elif request in ['True', 'False', 'None']: # special case these keywords since they are objects too doc(eval(request), 'Help on %s:') elif request in self.keywords: self.showtopic(request) elif request in self.topics: self.showtopic(request) elif request: doc(request, 'Help on %s:') elif isinstance(request, Helper): self() else: doc(request, 'Help on %s:') self.output.write('\n') def intro(self): self.output.write(''' Welcome to Python %s! This is the online help utility. If this is your first time using Python, you should definitely check out the tutorial on the Internet at http://docs.python.org/tutorial/. Enter the name of any module, keyword, or topic to get help on writing Python programs and using Python modules. To quit this help utility and return to the interpreter, just type "quit". To get a list of available modules, keywords, or topics, type "modules", "keywords", or "topics". Each module also comes with a one-line summary of what it does; to list the modules whose summaries contain a given word such as "spam", type "modules spam". 
''' % sys.version[:3]) def list(self, items, columns=4, width=80): items = list(sorted(items)) colw = width // columns rows = (len(items) + columns - 1) // columns for row in range(rows): for col in range(columns): i = col * rows + row if i < len(items): self.output.write(items[i]) if col < columns - 1: self.output.write(' ' + ' ' * (colw - 1 - len(items[i]))) self.output.write('\n') def listkeywords(self): self.output.write(''' Here is a list of the Python keywords. Enter any keyword to get more help. ''') self.list(self.keywords.keys()) def listsymbols(self): self.output.write(''' Here is a list of the punctuation symbols which Python assigns special meaning to. Enter any symbol to get more help. ''') self.list(self.symbols.keys()) def listtopics(self): self.output.write(''' Here is a list of available topics. Enter any topic name to get more help. ''') self.list(self.topics.keys()) def showtopic(self, topic, more_xrefs=''): try: import pydoc_data.topics except ImportError: self.output.write(''' Sorry, topic and keyword documentation is not available because the module "pydoc_data.topics" could not be found. 
''') return target = self.topics.get(topic, self.keywords.get(topic)) if not target: self.output.write('no documentation found for %s\n' % repr(topic)) return if type(target) is type(''): return self.showtopic(target, more_xrefs) label, xrefs = target try: doc = pydoc_data.topics.topics[label] except KeyError: self.output.write('no documentation found for %s\n' % repr(topic)) return pager(doc.strip() + '\n') if more_xrefs: xrefs = (xrefs or '') + ' ' + more_xrefs if xrefs: import io, formatter buffer = io.StringIO() formatter.DumbWriter(buffer).send_flowing_data( 'Related help topics: ' + ', '.join(xrefs.split()) + '\n') self.output.write('\n%s\n' % buffer.getvalue()) def showsymbol(self, symbol): target = self.symbols[symbol] topic, _, xrefs = target.partition(' ') self.showtopic(topic, xrefs) def listmodules(self, key=''): if key: self.output.write(''' Here is a list of matching modules. Enter any module name to get more help. ''') apropos(key) else: self.output.write(''' Please wait a moment while I gather a list of all available modules... ''') modules = {} def callback(path, modname, desc, modules=modules): if modname and modname[-9:] == '.__init__': modname = modname[:-9] + ' (package)' if modname.find('.') < 0: modules[modname] = 1 def onerror(modname): callback(None, modname, None) ModuleScanner().run(callback, onerror=onerror) self.list(modules.keys()) self.output.write(''' Enter any module name to get more help. Or, type "modules spam" to search for modules whose descriptions contain the word "spam". 
''') help = Helper() class Scanner: """A generic tree iterator.""" def __init__(self, roots, children, descendp): self.roots = roots[:] self.state = [] self.children = children self.descendp = descendp def next(self): if not self.state: if not self.roots: return None root = self.roots.pop(0) self.state = [(root, self.children(root))] node, children = self.state[-1] if not children: self.state.pop() return self.next() child = children.pop(0) if self.descendp(child): self.state.append((child, self.children(child))) return child class ModuleScanner: """An interruptible scanner that searches module synopses.""" def run(self, callback, key=None, completer=None, onerror=None): if key: key = key.lower() self.quit = False seen = {} for modname in sys.builtin_module_names: if modname != '__main__': seen[modname] = 1 if key is None: callback(None, modname, '') else: name = __import__(modname).__doc__ or '' desc = name.split('\n')[0] name = modname + ' - ' + desc if name.lower().find(key) >= 0: callback(None, modname, desc) for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror): if self.quit: break if key is None: callback(None, modname, '') else: try: loader = importer.find_module(modname) except SyntaxError: # raised by tests for bad coding cookies or BOM continue if hasattr(loader, 'get_source'): try: source = loader.get_source(modname) except UnicodeDecodeError: if onerror: onerror(modname) continue import io desc = source_synopsis(io.StringIO(source)) or '' if hasattr(loader, 'get_filename'): path = loader.get_filename(modname) else: path = None else: try: module = loader.load_module(modname) except ImportError: if onerror: onerror(modname) continue desc = (module.__doc__ or '').splitlines()[0] path = getattr(module,'__file__',None) name = modname + ' - ' + desc if name.lower().find(key) >= 0: callback(path, modname, desc) if completer: completer() def apropos(key): """Print all the one-line module summaries that contain a substring.""" def callback(path, 
modname, desc): if modname[-9:] == '.__init__': modname = modname[:-9] + ' (package)' print(modname, desc and '- ' + desc) def onerror(modname): pass try: import warnings except ImportError: pass else: warnings.filterwarnings('ignore') # ignore problems during import ModuleScanner().run(callback, key, onerror=onerror) # --------------------------------------------------- web browser interface def serve(port, callback=None, completer=None): import http.server, email.message, select class DocHandler(http.server.BaseHTTPRequestHandler): def send_document(self, title, contents): try: self.send_response(200) self.send_header('Content-Type', 'text/html; charset=UTF-8') self.end_headers() self.wfile.write(html.page(title, contents).encode('utf-8')) except IOError: pass def do_GET(self): path = self.path if path[-5:] == '.html': path = path[:-5] if path[:1] == '/': path = path[1:] if path and path != '.': try: obj = locate(path, forceload=1) except ErrorDuringImport as value: self.send_document(path, html.escape(str(value))) return if obj: self.send_document(describe(obj), html.document(obj, path)) else: self.send_document(path, 'no Python documentation found for %s' % repr(path)) else: heading = html.heading( '<big><big><strong>Python: Index of Modules</strong></big></big>', '#ffffff', '#7799ee') def bltinlink(name): return '<a href="%s.html">%s</a>' % (name, name) names = [x for x in sys.builtin_module_names if x != '__main__'] contents = html.multicolumn(names, bltinlink) indices = ['<p>' + html.bigsection( 'Built-in Modules', '#ffffff', '#ee77aa', contents)] seen = {} for dir in sys.path: indices.append(html.index(dir, seen)) contents = heading + ' '.join(indices) + '''<p align=right> <font color="#909090" face="helvetica, arial"><strong> pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>''' self.send_document('Index of Modules', contents) def log_message(self, *args): pass class DocServer(http.server.HTTPServer): def __init__(self, port, callback): host = 
(sys.platform == 'mac') and '127.0.0.1' or 'localhost' self.address = ('', port) self.url = 'http://%s:%d/' % (host, port) self.callback = callback self.base.__init__(self, self.address, self.handler) def serve_until_quit(self): import select self.quit = False while not self.quit: rd, wr, ex = select.select([self.socket.fileno()], [], [], 1) if rd: self.handle_request() def server_activate(self): self.base.server_activate(self) if self.callback: self.callback(self) DocServer.base = http.server.HTTPServer DocServer.handler = DocHandler DocHandler.MessageClass = email.message.Message try: try: DocServer(port, callback).serve_until_quit() except (KeyboardInterrupt, select.error): pass finally: if completer: completer() # ----------------------------------------------------- graphical interface def gui(): """Graphical interface (starts web server and pops up a control window).""" class GUI: def __init__(self, window, port=7464): self.window = window self.server = None self.scanner = None import tkinter self.server_frm = tkinter.Frame(window) self.title_lbl = tkinter.Label(self.server_frm, text='Starting server...\n ') self.open_btn = tkinter.Button(self.server_frm, text='open browser', command=self.open, state='disabled') self.quit_btn = tkinter.Button(self.server_frm, text='quit serving', command=self.quit, state='disabled') self.search_frm = tkinter.Frame(window) self.search_lbl = tkinter.Label(self.search_frm, text='Search for') self.search_ent = tkinter.Entry(self.search_frm) self.search_ent.bind('<Return>', self.search) self.stop_btn = tkinter.Button(self.search_frm, text='stop', pady=0, command=self.stop, state='disabled') if sys.platform == 'win32': # Trying to hide and show this button crashes under Windows. 
self.stop_btn.pack(side='right') self.window.title('pydoc') self.window.protocol('WM_DELETE_WINDOW', self.quit) self.title_lbl.pack(side='top', fill='x') self.open_btn.pack(side='left', fill='x', expand=1) self.quit_btn.pack(side='right', fill='x', expand=1) self.server_frm.pack(side='top', fill='x') self.search_lbl.pack(side='left') self.search_ent.pack(side='right', fill='x', expand=1) self.search_frm.pack(side='top', fill='x') self.search_ent.focus_set() font = ('helvetica', sys.platform == 'win32' and 8 or 10) self.result_lst = tkinter.Listbox(window, font=font, height=6) self.result_lst.bind('<Button-1>', self.select) self.result_lst.bind('<Double-Button-1>', self.goto) self.result_scr = tkinter.Scrollbar(window, orient='vertical', command=self.result_lst.yview) self.result_lst.config(yscrollcommand=self.result_scr.set) self.result_frm = tkinter.Frame(window) self.goto_btn = tkinter.Button(self.result_frm, text='go to selected', command=self.goto) self.hide_btn = tkinter.Button(self.result_frm, text='hide results', command=self.hide) self.goto_btn.pack(side='left', fill='x', expand=1) self.hide_btn.pack(side='right', fill='x', expand=1) self.window.update() self.minwidth = self.window.winfo_width() self.minheight = self.window.winfo_height() self.bigminheight = (self.server_frm.winfo_reqheight() + self.search_frm.winfo_reqheight() + self.result_lst.winfo_reqheight() + self.result_frm.winfo_reqheight()) self.bigwidth, self.bigheight = self.minwidth, self.bigminheight self.expanded = 0 self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight)) self.window.wm_minsize(self.minwidth, self.minheight) self.window.tk.willdispatch() import threading threading.Thread( target=serve, args=(port, self.ready, self.quit)).start() def ready(self, server): self.server = server self.title_lbl.config( text='Python documentation server at\n' + server.url) self.open_btn.config(state='normal') self.quit_btn.config(state='normal') def open(self, event=None, url=None): url = 
url or self.server.url try: import webbrowser webbrowser.open(url) except ImportError: # pre-webbrowser.py compatibility if sys.platform == 'win32': os.system('start "%s"' % url) elif sys.platform == 'mac': try: import ic except ImportError: pass else: ic.launchurl(url) else: rc = os.system('netscape -remote "openURL(%s)" &' % url) if rc: os.system('netscape "%s" &' % url) def quit(self, event=None): if self.server: self.server.quit = 1 self.window.quit() def search(self, event=None): key = self.search_ent.get() self.stop_btn.pack(side='right') self.stop_btn.config(state='normal') self.search_lbl.config(text='Searching for "%s"...' % key) self.search_ent.forget() self.search_lbl.pack(side='left') self.result_lst.delete(0, 'end') self.goto_btn.config(state='disabled') self.expand() import threading if self.scanner: self.scanner.quit = 1 self.scanner = ModuleScanner() threading.Thread(target=self.scanner.run, args=(self.update, key, self.done)).start() def update(self, path, modname, desc): if modname[-9:] == '.__init__': modname = modname[:-9] + ' (package)' self.result_lst.insert('end', modname + ' - ' + (desc or '(no description)')) def stop(self, event=None): if self.scanner: self.scanner.quit = 1 self.scanner = None def done(self): self.scanner = None self.search_lbl.config(text='Search for') self.search_lbl.pack(side='left') self.search_ent.pack(side='right', fill='x', expand=1) if sys.platform != 'win32': self.stop_btn.forget() self.stop_btn.config(state='disabled') def select(self, event=None): self.goto_btn.config(state='normal') def goto(self, event=None): selection = self.result_lst.curselection() if selection: modname = self.result_lst.get(selection[0]).split()[0] self.open(url=self.server.url + modname + '.html') def collapse(self): if not self.expanded: return self.result_frm.forget() self.result_scr.forget() self.result_lst.forget() self.bigwidth = self.window.winfo_width() self.bigheight = self.window.winfo_height() self.window.wm_geometry('%dx%d' % 
(self.minwidth, self.minheight)) self.window.wm_minsize(self.minwidth, self.minheight) self.expanded = 0 def expand(self): if self.expanded: return self.result_frm.pack(side='bottom', fill='x') self.result_scr.pack(side='right', fill='y') self.result_lst.pack(side='top', fill='both', expand=1) self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight)) self.window.wm_minsize(self.minwidth, self.bigminheight) self.expanded = 1 def hide(self, event=None): self.stop() self.collapse() import tkinter try: root = tkinter.Tk() # Tk will crash if pythonw.exe has an XP .manifest # file and the root has is not destroyed explicitly. # If the problem is ever fixed in Tk, the explicit # destroy can go. try: gui = GUI(root) root.mainloop() finally: root.destroy() except KeyboardInterrupt: pass # -------------------------------------------------- command-line interface def ispath(x): return isinstance(x, str) and x.find(os.sep) >= 0 def cli(): """Command-line interface (looks at sys.argv to decide what to do).""" import getopt class BadUsage(Exception): pass # Scripts don't get the current directory in their path by default # unless they are run with the '-m' switch if '' not in sys.path: scriptdir = os.path.dirname(sys.argv[0]) if scriptdir in sys.path: sys.path.remove(scriptdir) sys.path.insert(0, '.') try: opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w') writing = 0 for opt, val in opts: if opt == '-g': gui() return if opt == '-k': apropos(val) return if opt == '-p': try: port = int(val) except ValueError: raise BadUsage def ready(server): print('pydoc server ready at %s' % server.url) def stopped(): print('pydoc server stopped') serve(port, ready, stopped) return if opt == '-w': writing = 1 if not args: raise BadUsage for arg in args: if ispath(arg) and not os.path.exists(arg): print('file %r does not exist' % arg) break try: if ispath(arg) and os.path.isfile(arg): arg = importfile(arg) if writing: if ispath(arg) and os.path.isdir(arg): writedocs(arg) else: 
writedoc(arg) else: help.help(arg) except ErrorDuringImport as value: print(value) except (getopt.error, BadUsage): cmd = os.path.basename(sys.argv[0]) print("""pydoc - the Python documentation tool %s <name> ... Show text documentation on something. <name> may be the name of a Python keyword, topic, function, module, or package, or a dotted reference to a class or function within a module or module in a package. If <name> contains a '%s', it is used as the path to a Python source file to document. If name is 'keywords', 'topics', or 'modules', a listing of these things is displayed. %s -k <keyword> Search for a keyword in the synopsis lines of all available modules. %s -p <port> Start an HTTP server on the given port on the local machine. %s -g Pop up a graphical interface for finding and serving documentation. %s -w <name> ... Write out the HTML documentation for a module to a file in the current directory. If <name> contains a '%s', it is treated as a filename; if it names a directory, documentation is written for all the contents. """ % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)) if __name__ == '__main__': cli()
# ===== file boundary: zhihu.py (concatenation artifact; a bare filename here is not valid Python) =====
#!/usr/bin/env python
# encoding: utf-8

"""
@version: 1.0
@author: lizheming
@contact: nkdudu@126.com
@site: lizheming.top
@file: zhihu.py
"""

# Terminal-based Zhihu (zhihu.com) timeline reader.
# NOTE(review): this script is Python 2 only (print statements, raw_input,
# cookielib, reload(sys)/setdefaultencoding). Porting to Python 3 would be a
# separate, behavior-changing task.
# NOTE(review): the original source was flattened onto single lines; the
# block nesting below was reconstructed from context — confirm against the
# upstream repository where marked.

from login import islogin, login
#from logo import logo
import requests
import cookielib
from bs4 import BeautifulSoup
import re
import os
import json
import termcolor
import threading
import time
import random
import sys

# Python 2 idiom: force UTF-8 as the default codec so the Chinese UI strings
# survive encode/decode round-trips. Has no Python 3 equivalent (nor need).
reload(sys)
sys.setdefaultencoding('utf-8')

# ASCII-art startup banner ("知乎" rendered in dollar signs).
# NOTE(review): the first two fragments carry no trailing '\n', so the first
# two banner pieces are fused onto one row — presumably intentional, verify
# against the rendered output.
logo = ''\
    ' $$'\
    ' $$$ &&&&$$$$ ##$$$$$$$$$$$$$$$$$$#$$$ \n'\
    ' $$$ $$$$$$$$$$$$$$$ ##$$$$$$$$$$$$$$$$$$o; ;\n'\
    ' $$$$$$$$$$$$$$$ $$$$$$$$$$$$$$$ *$$o #\n'\
    ' $$$ $$$ $$$ $$$ $$$ *$$o $$$$\n'\
    '$$* $$$ $$$ $$$ $$$$ *$$o $$$$\n'\
    ' $$$ $$$ $$$ $$$$ *$$o $$$$\n'\
    ' $$o $$$ $$$ $$$ *$$o $$$o\n'\
    ';$$$$$$$$$$$$$$$$ $$$ $$$ *$$o\n'\
    '$$$$$$$$$$$$$$$$$* $$$ $$$ ;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n'\
    ' $$$ $$$ $$$ *$$o\n'\
    ' $$$ $$$ $$$ *$$o\n'\
    ' $$$$$$$ $$$ $$$ *$$o\n'\
    ' $$$; $$$$ $$$ $$$ *$$o\n'\
    ' $$$$ $$$ $$$$$ $$$$$$$$$ *$$o\n'\
    ' $$$$! $$ $$$$* $$$;\n'\
    '$$$$$ ; $$$$$$$$$$$\n'\
    '$$$$$$\n'

zhihu = "https://www.zhihu.com"

# One shared HTTP session; cookies are persisted to the file "cookies".
session = requests.Session()
session.cookies = cookielib.LWPCookieJar("cookies")

# Shared state between the main (UI) thread and the background fetcher:
datas = []        # list of [text-lines, link, item-id] per timeline entry
tlitems = []      # parallel list of TLItem objects (same index as datas)
flag = True       # global run flag; False => all loops/threads wind down
op_stop = False   # set True by a page command to leave the input loop
offset = 0        # index of the next entry to print
temp = 0          # index of the first entry on the currently shown page
limit = 5         # entries per page
tid = None        # id of the last fetched timeline item (pagination cursor)
_xsrf = None      # CSRF token scraped from the homepage

headers = {
    'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
    'Host': "www.zhihu.com",
    #"Referer": "www.zhihu.com"
}


def mul_get_request(session, url, headers, timeout=10, n=5, **kwargs):
    """GET `url`, retrying up to `n` times; warn once the 3rd attempt starts.

    Returns the response, or calls the module-level exit() helper (which
    flags the program to shut down) when all retries fail.
    NOTE(review): bare `except:` swallows KeyboardInterrupt too; and exit()
    here resolves to the custom exit() defined below, shadowing the builtin.
    """
    t = 0
    while n:
        if t == 2:
            # third attempt: tell the user the network is slow
            print termcolor.colored("网络缓慢,请稍后..", "red")
        try:
            res = session.get(url, headers=headers, timeout=timeout, **kwargs)
            return res
        except:
            n -= 1
            t += 1
    # all retries exhausted
    # NOTE(review): indentation reconstructed — exit() is assumed to sit
    # after the retry loop, not inside the except clause; confirm upstream.
    exit()
    return None


def mul_post_request(session, url, headers, timeout=10, n=5, **kwargs):
    """POST counterpart of mul_get_request; identical retry/exit behavior."""
    t = 0
    while n:
        if t == 2:
            print termcolor.colored("网络缓慢,请稍后..", "red")
        try:
            res = session.post(url, headers=headers, timeout=timeout, **kwargs)
            return res
        except:
            n -= 1
            t += 1
    # NOTE(review): same reconstructed placement as in mul_get_request.
    exit()
    return None


def loadsession():
    """Load persisted cookies into the shared session (best effort)."""
    global session
    try:
        session.cookies.load(ignore_discard="true")
    except:
        # NOTE(review): colored() only builds a string — its result is
        # discarded here (missing print), so the failure is silent.
        termcolor.colored("加载异常", "red")
        pass


# --- module-level startup: restore cookies, log in if needed -------------
loadsession()
l, username = islogin()
if not l:
    if not login():
        sys.exit()
    # NOTE(review): nesting reconstructed — reload cookies after a fresh
    # login; confirm this sits inside `if not l:` upstream.
    loadsession()


def index():
    """Fetch the homepage: scrape the _xsrf token and the first feed page.

    Populates datas/tlitems and sets the pagination cursor `tid`.
    """
    global tid
    global _xsrf
    global session
    #res = session.get(zhihu, headers=headers)
    res = mul_get_request(session=session, url=zhihu, headers=headers)
    if not res:
        sys.exit()
    #print res.content
    _xsrf = re.findall(r'name="_xsrf" value="(\S+)"', res.content)[0]
    # NOTE(review): this dumps the whole homepage HTML to the terminal —
    # looks like leftover debug output.
    print res.content
    soup = BeautifulSoup(res.content, "html.parser")
    items = soup.select(".feed-item.folding.feed-item-hook")
    for item in items:
        #tid, t, l = get_item_info_another(item)
        from TimeLine import TLItem
        iitem = TLItem(item, _xsrf)
        tid, t, l = iitem.get_item_info()
        datas.append([t, l, tid])
        tlitems.append(iitem)


def worker():
    """Background thread: keep paging the feed endpoint while `flag` holds.

    Throttles itself when the prefetch buffer is more than 10 pages ahead
    of what the user has read, and sleeps 6s between fetches.
    """
    global tid
    global datas
    global offset
    global session
    url = "https://www.zhihu.com/node/HomeFeedListV2"
    params = {
        "start": tid,
        "offset": 21
    }
    data = {
        "method":"next",
        "_xsrf":_xsrf,
        "params":json.dumps(params)
    }
    while flag:
        # enough unread entries buffered — back off
        if len(datas) - offset > 10 * limit:
            time.sleep(6)
            continue
        try:
            res = session.post(url, data, headers=headers)
        except:
            continue
        msgs = None
        try:
            msgs = res.json()["msg"]
        except:
            # print res.content
            # print "link error 1326"
            continue
        for msg in msgs:
            soup = BeautifulSoup(msg, "html.parser")
            item = soup.select(".feed-item.folding.feed-item-hook")[0]
            from TimeLine import TLItem
            iitem = TLItem(item, _xsrf)
            tid, t, l = iitem.get_item_info()
            datas.append([t, l, tid])
            tlitems.append(iitem)
        # NOTE(review): indentation reconstructed — cursor/offset advance is
        # assumed to happen once per fetched batch (after the for loop).
        params["start"] = tid
        params["offset"] += 21
        data["params"] = json.dumps(params)
        time.sleep(6)


def welcome():
    """Clear the screen and print the banner plus a greeting."""
    clear()
    print termcolor.colored(logo, "cyan")
    print termcolor.colored("Hello {}, 欢迎使用终端版知乎".format(username), "yellow")


def next_page(**kwargs):
    """Command '' / 'next': leave the input loop so the next page prints."""
    clear()
    global op_stop
    op_stop = True


def pre_page(**kwargs):
    """Command 'pre': rewind the cursor two pages, then reprint one page."""
    clear()
    global offset
    global op_stop
    op_stop = True
    offset = max(0, offset - limit*2)


def pwd():
    """Command 'pwd': reprint the current page in place."""
    global temp
    global offset
    clear()
    offset -= limit
    temp = offset
    for x in range(limit):
        data = datas[temp + x]
        print offset
        # data[0] is the list of text lines for the entry; skip empty ones
        print "\n".join(i for i in data[0] if i) + "\n"
        offset += 1


def bye():
    """Command 'quit'/'exit': stop all loops and say goodbye."""
    global flag
    global op_stop
    flag = False
    op_stop = True
    print termcolor.colored("Bye", "cyan")
    print termcolor.colored("有任何建议欢迎与我联系: nkdudu@126.com", "cyan")


def clear():
    # Shell out to clear the terminal (POSIX only; `clear` is not on Windows).
    i = os.system("clear")


def help():
    # NOTE(review): shadows the builtin help() for the rest of this module.
    """Command 'help': print the command reference."""
    info = "\n" \
        "**********************************************************\n" \
        "**\n" \
        "** 回车: 下一页\n" \
        "** next: 下一页\n" \
        "** pre: 上一页\n" \
        "** pwd: 当前页\n" \
        "** #Num.: 选中具体TL条目进行操作(只限当前页中的条目)\n" \
        "** clear: 清屏\n" \
        "** quit: 退出系统\n" \
        "**\n" \
        "**********************************************************\n"
    print termcolor.colored(info, "green")


def error():
    """Fallback for an unrecognized command: print a usage hint."""
    print termcolor.colored("输入错误, 可通过", "red") + termcolor.colored("help", "cyan") + termcolor.colored("查看", "red")


def exit():
    # NOTE(review): shadows the builtin exit(); called by the mul_*_request
    # helpers when every retry failed.
    """Flag the program to shut down after a network failure."""
    global flag
    global op_stop
    flag = False
    op_stop = True
    print termcolor.colored("因网络故障程序退出,请检查您的网络设置", "yellow")


# Dispatch table: command string -> handler (error() handles anything else).
main_ops = {
    "": next_page,
    "next": next_page,
    "pre": pre_page,
    "pwd": pwd,
    "clear": clear,
    "quit": bye,
    "exit": bye,
    "help": help
}


def main():
    """UI loop: print pages of `limit` entries, then read commands.

    A numeric command selects an entry on the current page and delegates to
    TLItem.operate(); anything else goes through the main_ops table.
    """
    global flag
    global offset
    global temp
    global op_stop
    global thread
    # fetch the first page synchronously, showing the banner meanwhile
    ithread = threading.Thread(target=index)
    ithread.start()
    welcome()
    ithread.join()
    # then keep prefetching in the background
    thread = threading.Thread(target=worker)
    thread.start()
    mode = re.compile(r"^\d+$")
    while flag:
        temp = offset
        x = 0
        while x < limit:
            if (temp + x) >= len(datas):
                # NOTE(review): two defects here — colored() result is never
                # printed, and `continue` without advancing x busy-spins
                # until the worker catches up.
                termcolor.colored("访问速度过快,请稍候", "magenta")
                continue
            data = datas[temp + x]
            print offset
            print "\n".join(i for i in data[0] if i) + "\n"
            offset += 1
            x += 1
        x = 0
        op_stop = False
        while not op_stop:
            op = raw_input("Time Line$ ")
            if not re.match(mode, op.strip()):
                # non-numeric: dispatch through the command table
                main_ops.get(op, error)()
            else:
                opn = int(op)
                # only entries on the page just shown are selectable
                if temp <= opn < offset:
                    item = tlitems[opn]
                    if item.operate():
                        bye()
                        flag = False
                        break
                else:
                    print termcolor.colored("请输入正确的序号", "red")
    thread.join()


if __name__ == "__main__":
    main()
# ===== file boundary: _test_multiprocessing.py (concatenation artifact; a bare filename here is not valid Python) =====
# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import time import io import itertools import sys import os import gc import errno import signal import array import socket import random import logging import subprocess import struct import operator import pickle import weakref import warnings import test.support import test.support.script_helper from test import support from test.support import hashlib_helper from test.support import import_helper from test.support import os_helper from test.support import socket_helper from test.support import threading_helper from test.support import warnings_helper # Skip tests if _multiprocessing wasn't built. _multiprocessing = import_helper.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. support.skip_if_broken_multiprocessing_synchronize() import threading import multiprocessing.connection import multiprocessing.dummy import multiprocessing.heap import multiprocessing.managers import multiprocessing.pool import multiprocessing.queues from multiprocessing import util try: from multiprocessing import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocessing.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocessing import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused threading_helper.join_thread(process) if os.name == "posix": from multiprocessing import resource_tracker def _resource_unlink(name, rtype): 
resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") from multiprocessing.connection import wait def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." 
                            % nsems_min)


#
# Creates a wrapper for a function which records the time it takes to finish
#

class TimingWrapper(object):
    """Callable proxy that records the wall-clock duration of the last call."""

    def __init__(self, func):
        self.func = func
        self.elapsed = None

    def __call__(self, *args, **kwds):
        t = time.monotonic()
        try:
            return self.func(*args, **kwds)
        finally:
            # elapsed is set even if func raised, so timeout tests can assert it
            self.elapsed = time.monotonic() - t

#
# Base class for test cases
#

class BaseTestCase(object):
    """Mixin with helpers shared by all multiprocessing test flavours."""

    ALLOWED_TYPES = ('processes', 'manager', 'threads')

    def assertTimingAlmostEqual(self, a, b):
        # Timing assertions are only meaningful when CHECK_TIMINGS is on.
        if CHECK_TIMINGS:
            self.assertAlmostEqual(a, b, 1)

    def assertReturnsIfImplemented(self, value, func, *args):
        # Skip the check silently when the platform raises NotImplementedError
        # (e.g. sem_getvalue on macOS).
        try:
            res = func(*args)
        except NotImplementedError:
            pass
        else:
            return self.assertEqual(value, res)

    # For the sanity of Windows users, rather than crashing or freezing in
    # multiple ways.
    def __reduce__(self, *args):
        raise NotImplementedError("shouldn't try to pickle a test case")

    __reduce_ex__ = __reduce__

#
# Return the value of a semaphore
#

def get_value(self):
    # *self* is a semaphore-like object; probe the public accessor first,
    # then implementation-private attributes of the various backends.
    try:
        return self.get_value()
    except AttributeError:
        try:
            return self._Semaphore__value
        except AttributeError:
            try:
                return self._value
            except AttributeError:
                raise NotImplementedError

#
# Testcases
#

class DummyCallable:
    """Picklable callable used to check target references are released."""
    def __call__(self, q, c):
        assert isinstance(c, DummyCallable)
        q.put(5)


class _TestProcess(BaseTestCase):

    ALLOWED_TYPES = ('processes', 'threads')

    def test_current(self):
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        current = self.current_process()
        authkey = current.authkey

        self.assertTrue(current.is_alive())
        self.assertTrue(not current.daemon)
        self.assertIsInstance(authkey, bytes)
        self.assertTrue(len(authkey) > 0)
        self.assertEqual(current.ident, os.getpid())
        self.assertEqual(current.exitcode, None)

    def test_daemon_argument(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # By default uses the current process's daemon flag.
        proc0 = self.Process(target=self._test)
        self.assertEqual(proc0.daemon, self.current_process().daemon)
        proc1 = self.Process(target=self._test, daemon=True)
        self.assertTrue(proc1.daemon)
        proc2 = self.Process(target=self._test, daemon=False)
        self.assertFalse(proc2.daemon)

    @classmethod
    def _test(cls, q, *args, **kwds):
        # Child-side helper: echo args/kwargs and identity back over queue q.
        current = cls.current_process()
        q.put(args)
        q.put(kwds)
        q.put(current.name)
        if cls.TYPE != 'threads':
            q.put(bytes(current.authkey))
            q.put(current.pid)

    def test_parent_process_attributes(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # The main process has no parent in the multiprocessing sense.
        self.assertIsNone(self.parent_process())

        rconn, wconn = self.Pipe(duplex=False)
        p = self.Process(target=self._test_send_parent_process, args=(wconn,))
        p.start()
        p.join()
        parent_pid, parent_name = rconn.recv()
        self.assertEqual(parent_pid, self.current_process().pid)
        self.assertEqual(parent_pid, os.getpid())
        self.assertEqual(parent_name, self.current_process().name)

    @classmethod
    def _test_send_parent_process(cls, wconn):
        from multiprocessing.process import parent_process
        wconn.send([parent_process().pid, parent_process().name])

    def test_parent_process(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # Launch a child process. Make it launch a grandchild process. Kill the
        # child process and make sure that the grandchild notices the death of
        # its parent (a.k.a the child process).
        rconn, wconn = self.Pipe(duplex=False)
        p = self.Process(
            target=self._test_create_grandchild_process, args=(wconn, ))
        p.start()

        if not rconn.poll(timeout=support.LONG_TIMEOUT):
            raise AssertionError("Could not communicate with child process")
        parent_process_status = rconn.recv()
        self.assertEqual(parent_process_status, "alive")

        p.terminate()
        p.join()

        if not rconn.poll(timeout=support.LONG_TIMEOUT):
            raise AssertionError("Could not communicate with child process")
        parent_process_status = rconn.recv()
        self.assertEqual(parent_process_status, "not alive")

    @classmethod
    def _test_create_grandchild_process(cls, wconn):
        # Spawn the grandchild, then sleep so the parent can be terminated.
        p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
        p.start()
        time.sleep(300)

    @classmethod
    def _test_report_parent_status(cls, wconn):
        # Report parent liveness before and after waiting for its death.
        from multiprocessing.process import parent_process
        wconn.send("alive" if parent_process().is_alive() else "not alive")
        parent_process().join(timeout=support.SHORT_TIMEOUT)
        wconn.send("alive" if parent_process().is_alive() else "not alive")

    def test_process(self):
        # End-to-end check of the Process lifecycle and attribute surface.
        q = self.Queue(1)
        e = self.Event()
        args = (q, 1, 2)
        kwargs = {'hello':23, 'bye':2.54}
        name = 'SomeProcess'
        p = self.Process(
            target=self._test, args=args, kwargs=kwargs, name=name
            )
        p.daemon = True
        current = self.current_process()

        if self.TYPE != 'threads':
            self.assertEqual(p.authkey, current.authkey)
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.daemon, True)
        self.assertNotIn(p, self.active_children())
        self.assertTrue(type(self.active_children()) is list)
        self.assertEqual(p.exitcode, None)

        p.start()

        self.assertEqual(p.exitcode, None)
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())

        # _test() puts args[1:] because args[0] is the queue itself.
        self.assertEqual(q.get(), args[1:])
        self.assertEqual(q.get(), kwargs)
        self.assertEqual(q.get(), p.name)
        if self.TYPE != 'threads':
            self.assertEqual(q.get(), current.authkey)
            self.assertEqual(q.get(), p.pid)

        p.join()

        self.assertEqual(p.exitcode, 0)
        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())
        close_queue(q)

    @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
    def test_process_mainthread_native_id(self):
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        current_mainthread_native_id = threading.main_thread().native_id

        q = self.Queue(1)
        p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
        p.start()

        child_mainthread_native_id = q.get()
        p.join()
        close_queue(q)

        # A real child process must have its own main-thread native id.
        self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)

    @classmethod
    def _test_process_mainthread_native_id(cls, q):
        mainthread_native_id = threading.main_thread().native_id
        q.put(mainthread_native_id)

    @classmethod
    def _sleep_some(cls):
        time.sleep(100)

    @classmethod
    def _test_sleep(cls, delay):
        time.sleep(delay)

    def _kill_process(self, meth):
        # Shared driver for test_terminate/test_kill: start a sleeping child,
        # apply *meth* (terminate or kill) and return its exit code.
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        p = self.Process(target=self._sleep_some)
        p.daemon = True
        p.start()

        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        self.assertEqual(p.exitcode, None)

        join = TimingWrapper(p.join)

        self.assertEqual(join(0), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), True)

        self.assertEqual(join(-1), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), True)

        # XXX maybe terminating too soon causes the problems on Gentoo...
        time.sleep(1)

        meth(p)

        if hasattr(signal, 'alarm'):
            # On the Gentoo buildbot waitpid() often seems to block forever.
            # We use alarm() to interrupt it if it blocks for too long.
            def handler(*args):
                raise RuntimeError('join took too long: %s' % p)
            old_handler = signal.signal(signal.SIGALRM, handler)
            try:
                signal.alarm(10)
                self.assertEqual(join(), None)
            finally:
                signal.alarm(0)
                signal.signal(signal.SIGALRM, old_handler)
        else:
            self.assertEqual(join(), None)

        self.assertTimingAlmostEqual(join.elapsed, 0.0)

        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())

        p.join()

        return p.exitcode

    def test_terminate(self):
        exitcode = self._kill_process(multiprocessing.Process.terminate)
        if os.name != 'nt':
            self.assertEqual(exitcode, -signal.SIGTERM)

    def test_kill(self):
        exitcode = self._kill_process(multiprocessing.Process.kill)
        if os.name != 'nt':
            self.assertEqual(exitcode, -signal.SIGKILL)

    def test_cpu_count(self):
        try:
            cpus = multiprocessing.cpu_count()
        except NotImplementedError:
            cpus = 1
        self.assertTrue(type(cpus) is int)
        self.assertTrue(cpus >= 1)

    def test_active_children(self):
        self.assertEqual(type(self.active_children()), list)

        p = self.Process(target=time.sleep, args=(DELTA,))
        self.assertNotIn(p, self.active_children())

        p.daemon = True
        p.start()
        self.assertIn(p, self.active_children())

        p.join()
        self.assertNotIn(p, self.active_children())

    @classmethod
    def _test_recursion(cls, wconn, id):
        # Each call reports its path *id* and spawns two children up to depth 2.
        wconn.send(id)
        if len(id) < 2:
            for i in range(2):
                p = cls.Process(
                    target=cls._test_recursion, args=(wconn, id+[i])
                    )
                p.start()
                p.join()

    def test_recursion(self):
        rconn, wconn = self.Pipe(duplex=False)
        self._test_recursion(wconn, [])

        time.sleep(DELTA)
        result = []
        while rconn.poll():
            result.append(rconn.recv())

        # Depth-first order of the spawned process tree.
        expected = [
            [],
            [0],
            [0, 0], [0, 1],
            [1],
            [1, 0], [1, 1]
            ]
        self.assertEqual(result, expected)

    @classmethod
    def _test_sentinel(cls, event):
        event.wait(10.0)

    def test_sentinel(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        event = self.Event()
        p = self.Process(target=self._test_sentinel, args=(event,))
        # sentinel is only available once the process has started.
        with self.assertRaises(ValueError):
            p.sentinel
        p.start()
        self.addCleanup(p.join)
        sentinel = p.sentinel
        self.assertIsInstance(sentinel, int)
        # Not ready while the child is still waiting on the event.
        self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
        event.set()
        p.join()
        self.assertTrue(wait_for_handle(sentinel, timeout=1))

    @classmethod
    def _test_close(cls, rc=0, q=None):
        if q is not None:
            q.get()
        sys.exit(rc)

    def test_close(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        q = self.Queue()
        p = self.Process(target=self._test_close, kwargs={'q': q})
        p.daemon = True
        p.start()
        self.assertEqual(p.is_alive(), True)
        # Child is still alive, cannot close
        with self.assertRaises(ValueError):
            p.close()

        q.put(None)
        p.join()
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.exitcode, 0)
        p.close()
        # Every Process API must refuse to work on a closed object.
        with self.assertRaises(ValueError):
            p.is_alive()
        with self.assertRaises(ValueError):
            p.join()
        with self.assertRaises(ValueError):
            p.terminate()
        p.close()

        wr = weakref.ref(p)
        del p
        gc.collect()
        self.assertIs(wr(), None)

        close_queue(q)

    def test_many_processes(self):
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        sm = multiprocessing.get_start_method()
        N = 5 if sm == 'spawn' else 100

        # Try to overwhelm the forkserver loop with events
        procs = [self.Process(target=self._test_sleep, args=(0.01,))
                 for i in range(N)]
        for p in procs:
            p.start()
        for p in procs:
            join_process(p)
        for p in procs:
            self.assertEqual(p.exitcode, 0)

        procs = [self.Process(target=self._sleep_some)
                 for i in range(N)]
        for p in procs:
            p.start()
        time.sleep(0.001)  # let the children start...
        for p in procs:
            p.terminate()
        for p in procs:
            join_process(p)
        if os.name != 'nt':
            exitcodes = [-signal.SIGTERM]
            if sys.platform == 'darwin':
                # bpo-31510: On macOS, killing a freshly started process with
                # SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() gc.collect() # For PyPy or other GCs. self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(os_helper.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = 
None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) @classmethod def _sleep_and_set_event(self, evt, delay=0.0): time.sleep(delay) evt.set() def check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). 
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        if sm != 'forkserver':
            # The fork method by design inherits all fds from the parent,
            # trying to go against it is a lost battle
            self.skipTest('test not appropriate for {}'.format(sm))

        from multiprocessing.forkserver import _forkserver
        _forkserver.ensure_running()

        # First process sleeps 500 ms
        delay = 0.5

        evt = self.Event()
        proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
        proc.start()

        pid = _forkserver._forkserver_pid
        os.kill(pid, signum)
        # give time to the fork server to die and time to proc to complete
        time.sleep(delay * 2.0)

        evt2 = self.Event()
        proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
        proc2.start()
        proc2.join()
        self.assertTrue(evt2.is_set())
        self.assertEqual(proc2.exitcode, 0)

        proc.join()
        self.assertTrue(evt.is_set())
        self.assertIn(proc.exitcode, (0, 255))

    def test_forkserver_sigint(self):
        # Catchable signal
        self.check_forkserver_death(signal.SIGINT)

    def test_forkserver_sigkill(self):
        # Uncatchable signal
        if os.name != 'nt':
            self.check_forkserver_death(signal.SIGKILL)


#
#
#

class _UpperCaser(multiprocessing.Process):
    """Worker process that upper-cases strings sent over a pipe."""

    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.child_conn, self.parent_conn = multiprocessing.Pipe()

    def run(self):
        # Child side: close the parent end, serve until None is received.
        self.parent_conn.close()
        for s in iter(self.child_conn.recv, None):
            self.child_conn.send(s.upper())
        self.child_conn.close()

    def submit(self, s):
        assert type(s) is str
        self.parent_conn.send(s)
        return self.parent_conn.recv()

    def stop(self):
        # None is the sentinel that terminates run()'s loop.
        self.parent_conn.send(None)
        self.parent_conn.close()
        self.child_conn.close()

class _TestSubclassingProcess(BaseTestCase):

    ALLOWED_TYPES = ('processes',)

    def test_subclassing(self):
        uppercaser = _UpperCaser()
        uppercaser.daemon = True
        uppercaser.start()
        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
        self.assertEqual(uppercaser.submit('world'), 'WORLD')
        uppercaser.stop()
        uppercaser.join()

    def test_stderr_flush(self):
        # sys.stderr is flushed at process shutdown (issue #13812)
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        testfn = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, testfn)
        proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
        proc.start()
        proc.join()
        with open(testfn, encoding="utf-8") as f:
            err = f.read()
            # The whole traceback was printed
            self.assertIn("ZeroDivisionError", err)
            self.assertIn("test_multiprocessing.py", err)
            self.assertIn("1/0 # MARKER", err)

    @classmethod
    def _test_stderr_flush(cls, testfn):
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
        1/0 # MARKER

    @classmethod
    def _test_sys_exit(cls, reason, testfn):
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
        sys.exit(reason)

    def test_sys_exit(self):
        # See Issue 13854
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        testfn = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, testfn)

        # Non-integer exit reasons are printed to stderr and map to code 1.
        for reason in (
            [1, 2, 3],
            'ignore this',
        ):
            p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
            p.daemon = True
            p.start()
            join_process(p)
            self.assertEqual(p.exitcode, 1)

            with open(testfn, encoding="utf-8") as f:
                content = f.read()
            self.assertEqual(content.rstrip(), str(reason))

            os.unlink(testfn)

        cases = [
            ((True,), 1),
            ((False,), 0),
            ((8,), 8),
            ((None,), 0),
            ((), 0),
            ]

        for args, expected in cases:
            with self.subTest(args=args):
                p = self.Process(target=sys.exit, args=args)
                p.daemon = True
                p.start()
                join_process(p)
                self.assertEqual(p.exitcode, expected)

#
#
#

def queue_empty(q):
    # Manager proxies may lack empty(); fall back to qsize().
    if hasattr(q, 'empty'):
        return q.empty()
    else:
        return q.qsize() == 0

def queue_full(q, maxsize):
    # Manager proxies may lack full(); fall back to qsize().
    if hasattr(q, 'full'):
        return q.full()
    else:
        return q.qsize() == maxsize

class _TestQueue(BaseTestCase):

    @classmethod
    def _test_put(cls, queue, child_can_start, parent_can_continue):
        # Child-side consumer: drain the six items the parent put.
        child_can_start.wait()
        for i in range(6):
            queue.get()
        parent_can_continue.set()

    def test_put(self):
        MAXSIZE = 6
        queue = self.Queue(maxsize=MAXSIZE)
        child_can_start = self.Event()
        parent_can_continue = self.Event()

        proc = self.Process(
            target=self._test_put,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()

        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)

        # Exercise every put() calling convention.
        queue.put(1)
        queue.put(2, True)
        queue.put(3, True, None)
        queue.put(4, False)
        queue.put(5, False, None)
        queue.put_nowait(6)

        # the values may be in buffer but not yet in pipe so sleep a bit
        time.sleep(DELTA)

        self.assertEqual(queue_empty(queue), False)
        self.assertEqual(queue_full(queue, MAXSIZE), True)

        put = TimingWrapper(queue.put)
        put_nowait = TimingWrapper(queue.put_nowait)

        self.assertRaises(pyqueue.Full, put, 7, False)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put, 7, False, None)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put_nowait, 7)
        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)

        self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)

        self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)

        child_can_start.set()
        parent_can_continue.wait()

        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)

        proc.join()
        close_queue(queue)

    @classmethod
    def _test_get(cls, queue, child_can_start, parent_can_continue):
        child_can_start.wait()
        #queue.put(1)
        queue.put(2)
        queue.put(3)
        queue.put(4)
        queue.put(5)
        parent_can_continue.set()

    def test_get(self):
        queue = self.Queue()
        child_can_start = self.Event()
        parent_can_continue = self.Event()

        proc = self.Process(
            target=self._test_get,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()

        self.assertEqual(queue_empty(queue), True)

        child_can_start.set()
        parent_can_continue.wait()

        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)

        # Hangs unexpectedly, remove for now
        #self.assertEqual(queue.get(), 1)
        # Exercise every get() calling convention.
        self.assertEqual(queue.get(True, None), 2)
        self.assertEqual(queue.get(True), 3)
        self.assertEqual(queue.get(timeout=1), 4)
        self.assertEqual(queue.get_nowait(), 5)

        self.assertEqual(queue_empty(queue), True)

        get = TimingWrapper(queue.get)
        get_nowait = TimingWrapper(queue.get_nowait)

        self.assertRaises(pyqueue.Empty, get, False)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(pyqueue.Empty, get, False, None)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(pyqueue.Empty, get_nowait)
        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)

        self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

        self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)

        proc.join()
        close_queue(queue)

    @classmethod
    def _test_fork(cls, queue):
        for i in range(10, 20):
            queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shutdown until the feeder thread has finished
        # pushing items onto the pipe.

    def test_fork(self):
        # Old versions of Queue would fail to create a new feeder
        # thread for a forked process if the original process had its
        # own feeder thread.  This test checks that this no longer
        # happens.
queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with os_helper.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w', encoding="utf-8") as f: f.write("""if 1: import multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with import_helper.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = time.monotonic() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = time.monotonic() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). 
        # x86 Windows7 3.x once failed because the delta was only 135.8 ms.
        self.assertGreaterEqual(delta, 0.100)
        close_queue(q)

    def test_queue_feeder_donot_stop_onexc(self):
        # bpo-30414: verify feeder handles exceptions correctly
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            def __reduce__(self):
                raise AttributeError
        with test.support.captured_stderr():
            q = self.Queue()
            q.put(NotSerializable())
            # The feeder must survive the pickling failure and deliver
            # the next item.
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            close_queue(q)

        with test.support.captured_stderr():
            # bpo-33078: verify that the queue size is correctly handled
            # on errors.
            q = self.Queue(maxsize=1)
            q.put(NotSerializable())
            q.put(True)
            try:
                self.assertEqual(q.qsize(), 1)
            except NotImplementedError:
                # qsize is not available on all platform as it
                # relies on sem_getvalue
                pass
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            # Check that the size of the queue is correct
            self.assertTrue(q.empty())
            close_queue(q)

    def test_queue_feeder_on_queue_feeder_error(self):
        # bpo-30006: verify feeder handles exceptions using the
        # _on_queue_feeder_error hook.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            """Mock unserializable object"""
            def __init__(self):
                self.reduce_was_called = False
                self.on_queue_feeder_error_was_called = False

            def __reduce__(self):
                self.reduce_was_called = True
                raise AttributeError

        class SafeQueue(multiprocessing.queues.Queue):
            """Queue with overloaded _on_queue_feeder_error hook"""
            @staticmethod
            def _on_queue_feeder_error(e, obj):
                if (isinstance(e, AttributeError) and
                        isinstance(obj, NotSerializable)):
                    obj.on_queue_feeder_error_was_called = True

        not_serializable_obj = NotSerializable()
        # The captured_stderr reduces the noise in the test report
        with test.support.captured_stderr():
            q = SafeQueue(ctx=multiprocessing.get_context())
            q.put(not_serializable_obj)

            # Verify that q is still functioning correctly
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))

        # Assert that the serialization and the hook have been called correctly
        self.assertTrue(not_serializable_obj.reduce_was_called)
        self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)

    def test_closed_queue_put_get_exceptions(self):
        for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
            q.close()
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.put('foo')
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.get()
#
#
#

class _TestLock(BaseTestCase):

    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        # Non-blocking acquire on a held lock fails immediately.
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        self.assertRaises((ValueError, threading.ThreadError),
                          lock.release)

    def test_rlock(self):
        lock = self.RLock()
        # Recursive acquisition by the same owner must succeed, and each
        # acquire needs a matching release.
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertRaises((AssertionError, RuntimeError), lock.release)

    def test_lock_context(self):
        with self.Lock():
            pass


class _TestSemaphore(BaseTestCase):

    def _test_semaphore(self, sem):
        # Walk the counter down to 0 and back up to 2, checking it at
        # every step (where get_value is implemented).
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)

    def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        # A plain Semaphore may be released past its initial value.
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)

    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #    self.assertReturnsIfImplemented(2, get_value, sem)

    def test_timeout(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)

        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        # Non-blocking acquire ignores the timeout argument.
        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)

        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)

        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)


class _TestCondition(BaseTestCase):

    @classmethod
    def f(cls, cond, sleeping, woken, timeout=None):
        # Worker: announce sleep via *sleeping*, wait on the condition,
        # then announce wake-up via *woken*.
        cond.acquire()
        sleeping.release()
        cond.wait(timeout)
        woken.release()
        cond.release()

    def assertReachesEventually(self,
                                func, value):
        # Poll func() up to 10 times until it returns *value*, then assert.
        for i in range(10):
            try:
                if func() == value:
                    break
            except NotImplementedError:
                break
            time.sleep(DELTA)
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(value, func)

    def check_invariant(self, cond):
        # this is only supposed to succeed when there are no sleepers
        if self.TYPE == 'processes':
            try:
                sleepers = (cond._sleeping_count.get_value() -
                            cond._woken_count.get_value())
                self.assertEqual(sleepers, 0)
                self.assertEqual(cond._wait_semaphore.get_value(), 0)
            except NotImplementedError:
                pass

    def test_notify(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()

        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)

        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()

        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)
        p.join()

    def test_notify_all(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # start some threads/processes which will timeout
        for i in range(3):
            p = self.Process(target=self.f,
                             args=(cond, sleeping, woken, TIMEOUT1))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f,
                                 args=(cond, sleeping, woken, TIMEOUT1))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them all to sleep
        for i in range(6):
            sleeping.acquire()

        # check they have all timed out
        for i in range(6):
            woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) 
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) @classmethod def _test_waitfor_f(cls, cond, state): with cond: state.value = 0 cond.notify() result = cond.wait_for(lambda : state.value==4) if not result or state.value != 4: sys.exit(1) @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', -1) p = self.Process(target=self._test_waitfor_f, args=(cond, state)) p.daemon = True p.start() with cond: result = cond.wait_for(lambda : state.value==0) self.assertTrue(result) self.assertEqual(state.value, 0) for i in range(4): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertEqual(p.exitcode, 0) @classmethod def _test_waitfor_timeout_f(cls, cond, state, success, sem): sem.release() with cond: expected = 0.1 dt = time.monotonic() result = cond.wait_for(lambda : state.value==4, timeout=expected) dt = time.monotonic() - dt # borrow logic in assertTimeout() from test/lock_tests.py if not result and expected * 0.6 < dt < expected * 10.0: success.value = True @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor_timeout(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', 0) success = self.Value('i', False) sem = self.Semaphore(0) p = self.Process(target=self._test_waitfor_timeout_f, args=(cond, state, success, sem)) p.daemon = True p.start() self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) # Only increment 3 times, so state == 4 is never reached. 
for i in range(3): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertTrue(success.value) @classmethod def _test_wait_result(cls, c, pid): with c: c.notify() time.sleep(1) if pid is not None: os.kill(pid, signal.SIGINT) def test_wait_result(self): if isinstance(self, ProcessesMixin) and sys.platform != 'win32': pid = os.getpid() else: pid = None c = self.Condition() with c: self.assertFalse(c.wait(0)) self.assertFalse(c.wait(0.1)) p = self.Process(target=self._test_wait_result, args=(c, pid)) p.start() self.assertTrue(c.wait(60)) if pid is not None: self.assertRaises(KeyboardInterrupt, c.wait, 60) p.join() class _TestEvent(BaseTestCase): @classmethod def _test_event(cls, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporarily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. 
API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) p = self.Process(target=self._test_event, args=(event,)) p.daemon = True p.start() self.assertEqual(wait(), True) p.join() # # Tests for Barrier - adapted from tests in test/lock_tests.py # # Many of the tests for threading.Barrier use a list as an atomic # counter: a value is appended to increment the counter, and the # length of the list gives the value. We use the class DummyList # for the same purpose. class _DummyList(object): def __init__(self): wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) lock = multiprocessing.Lock() self.__setstate__((wrapper, lock)) self._lengthbuf[0] = 0 def __setstate__(self, state): (self._wrapper, self._lock) = state self._lengthbuf = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._wrapper, self._lock) def append(self, _): with self._lock: self._lengthbuf[0] += 1 def __len__(self): with self._lock: return self._lengthbuf[0] def _wait(): # A crude wait/yield function not relying on synchronization primitives. time.sleep(0.01) class Bunch(object): """ A bunch of threads. """ def __init__(self, namespace, f, args, n, wait_before_exit=False): """ Construct a bunch of `n` threads running the same function `f`. If `wait_before_exit` is True, the threads won't terminate until do_finish() is called. 
""" self.f = f self.args = args self.n = n self.started = namespace.DummyList() self.finished = namespace.DummyList() self._can_exit = namespace.Event() if not wait_before_exit: self._can_exit.set() threads = [] for i in range(n): p = namespace.Process(target=self.task) p.daemon = True p.start() threads.append(p) def finalize(threads): for p in threads: p.join() self._finalizer = weakref.finalize(self, finalize, threads) def task(self): pid = os.getpid() self.started.append(pid) try: self.f(*self.args) finally: self.finished.append(pid) self._can_exit.wait(30) assert self._can_exit.is_set() def wait_for_started(self): while len(self.started) < self.n: _wait() def wait_for_finished(self): while len(self.finished) < self.n: _wait() def do_finish(self): self._can_exit.set() def close(self): self._finalizer() class AppendTrue(object): def __init__(self, obj): self.obj = obj def __call__(self): self.obj.append(True) class _TestBarrier(BaseTestCase): """ Tests for Barrier objects. """ N = 5 defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout def setUp(self): self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) def tearDown(self): self.barrier.abort() self.barrier = None def DummyList(self): if self.TYPE == 'threads': return [] elif self.TYPE == 'manager': return self.manager.list() else: return _DummyList() def run_threads(self, f, args): b = Bunch(self, f, args, self.N-1) try: f(*args) b.wait_for_finished() finally: b.close() @classmethod def multipass(cls, barrier, results, n): m = barrier.parties assert m == cls.N for i in range(n): results[0].append(True) assert len(results[1]) == i * m barrier.wait() results[1].append(True) assert len(results[0]) == (i + 1) * m barrier.wait() try: assert barrier.n_waiting == 0 except NotImplementedError: pass assert not barrier.broken def test_barrier(self, passes=1): """ Test that a barrier is passed in lockstep """ results = [self.DummyList(), self.DummyList()] self.run_threads(self.multipass, 
(self.barrier, results, passes))

    def test_barrier_10(self):
        """
        Test that a barrier works for 10 consecutive runs
        """
        return self.test_barrier(10)

    @classmethod
    def _test_wait_return_f(cls, barrier, queue):
        res = barrier.wait()
        queue.put(res)

    def test_wait_return(self):
        """
        test the return value from barrier.wait
        """
        queue = self.Queue()
        self.run_threads(self._test_wait_return_f, (self.barrier, queue))
        results = [queue.get() for i in range(self.N)]
        # Exactly one waiter receives index 0 from Barrier.wait().
        self.assertEqual(results.count(0), 1)
        close_queue(queue)

    @classmethod
    def _test_action_f(cls, barrier, results):
        barrier.wait()
        if len(results) != 1:
            raise RuntimeError

    def test_action(self):
        """
        Test the 'action' callback
        """
        results = self.DummyList()
        barrier = self.Barrier(self.N, action=AppendTrue(results))
        self.run_threads(self._test_action_f, (barrier, results))
        self.assertEqual(len(results), 1)

    @classmethod
    def _test_abort_f(cls, barrier, results1, results2):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()

    def test_abort(self):
        """
        Test that an abort will put the barrier in a broken state
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        self.run_threads(self._test_abort_f,
                         (self.barrier, results1, results2))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertTrue(self.barrier.broken)

    @classmethod
    def _test_reset_f(cls, barrier, results1, results2, results3):
        i = barrier.wait()
        if i == cls.N//2:
            # Wait until the other threads are all in the barrier.
            while barrier.n_waiting < cls.N-1:
                time.sleep(0.001)
            barrier.reset()
        else:
            try:
                barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
        # Now, pass the barrier again
        barrier.wait()
        results3.append(True)

    def test_reset(self):
        """
        Test that a 'reset' on a barrier frees the waiting threads
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        self.run_threads(self._test_reset_f,
                         (self.barrier, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_abort_and_reset_f(cls, barrier, barrier2,
                                results1, results2, results3):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()
        # Synchronize and reset the barrier.  Must synchronize first so
        # that everyone has left it when we reset, and after so that no
        # one enters it before the reset.
        if barrier2.wait() == cls.N//2:
            barrier.reset()
        barrier2.wait()
        barrier.wait()
        results3.append(True)

    def test_abort_and_reset(self):
        """
        Test that a barrier can be reset after being broken.
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        barrier2 = self.Barrier(self.N)
        self.run_threads(self._test_abort_and_reset_f,
                         (self.barrier, barrier2, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_timeout_f(cls, barrier, results):
        i = barrier.wait()
        if i == cls.N//2:
            # One thread is late!
            time.sleep(1.0)
        try:
            barrier.wait(0.5)
        except threading.BrokenBarrierError:
            results.append(True)

    def test_timeout(self):
        """
        Test wait(timeout)
        """
        results = self.DummyList()
        self.run_threads(self._test_timeout_f,
                         (self.barrier, results))
        self.assertEqual(len(results), self.barrier.parties)

    @classmethod
    def _test_default_timeout_f(cls, barrier, results):
        i = barrier.wait(cls.defaultTimeout)
        if i == cls.N//2:
            # One thread is later than the default timeout
            time.sleep(1.0)
        try:
            barrier.wait()
        except threading.BrokenBarrierError:
            results.append(True)

    def test_default_timeout(self):
        """
        Test the barrier's default timeout
        """
        barrier = self.Barrier(self.N, timeout=0.5)
        results = self.DummyList()
        self.run_threads(self._test_default_timeout_f, (barrier, results))
        self.assertEqual(len(results), barrier.parties)

    def test_single_thread(self):
        b = self.Barrier(1)
        b.wait()
        b.wait()

    @classmethod
    def _test_thousand_f(cls, barrier, passes, conn, lock):
        for i in range(passes):
            barrier.wait()
            with lock:
                conn.send(i)

    def test_thousand(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                             args=(self.barrier, passes, child_conn, lock))
            p.start()
            self.addCleanup(p.join)

        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)

#
#
#

class _TestValue(BaseTestCase):

    ALLOWED_TYPES = ('processes',)

    # (typecode, initial value, value set by the child process)
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('q', 2 ** 33, 2 ** 34),
        ('c', latin('x'), latin('y'))
        ]

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _test(cls, values):
        for sv, cv in zip(values, cls.codes_values):
            sv.value = cv[2]

    def test_value(self, raw=False):
        if raw:
            values = [self.RawValue(code, value)
                      for code, value, _ in self.codes_values]
        else:
            values = [self.Value(code, value) for code,
value, _ in self.codes_values]

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[1])

        proc = self.Process(target=self._test, args=(values,))
        proc.daemon = True
        proc.start()
        proc.join()

        # Child writes cv[2] into each shared value; verify it is visible here.
        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[2])

    def test_rawvalue(self):
        self.test_value(raw=True)

    def test_getobj_getlock(self):
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))


class _TestArray(BaseTestCase):

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def f(cls, seq):
        # In-place prefix sum; used to check mutation of a shared array.
        for i in range(1, len(seq)):
            seq[i] += seq[i-1]

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array(self, raw=False):
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)

        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])

        self.assertEqual(list(arr[:]), seq)

        self.f(seq)

        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(list(arr[:]), seq)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # The repetition below strengthens the test by increasing the chances
        # of previously allocated non-zero memory being used for the new array
        # on the 2nd and 3rd loops.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), list(range(10)))
            del arr

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_rawarray(self):
        self.test_array(raw=True)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_getobj_getlock_obj(self):
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))

#
#
#

class _TestContainers(BaseTestCase):

    ALLOWED_TYPES = ('manager',)

    def test_list(self):
        a = self.list(list(range(10)))
        self.assertEqual(a[:], list(range(10)))

        b = self.list()
        self.assertEqual(b[:], [])

        b.extend(list(range(5)))
        self.assertEqual(b[:], list(range(5)))

        self.assertEqual(b[2], 2)
        self.assertEqual(b[2:10], [2,3,4])

        b *= 2
        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])

        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])

        self.assertEqual(a[:], list(range(10)))

        d = [a, b]
        e = self.list(d)
        self.assertEqual(
            [element[:] for element in e],
            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
            )

        f = self.list([a])
        a.append('hello')
        self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])

    def test_list_iter(self):
        a = self.list(list(range(10)))
        it = iter(a)
        self.assertEqual(list(it), list(range(10)))
        self.assertEqual(list(it), [])  # exhausted
        # list modified during iteration
        it = iter(a)
        a[0] = 100
        self.assertEqual(next(it), 100)

    def test_list_proxy_in_list(self):
        a = self.list([self.list(range(3)) for _i in range(3)])
        self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)

        a[0][-1] = 55
        self.assertEqual(a[0][:], [0, 1, 55])
        for i in range(1, 3):
            self.assertEqual(a[i][:], [0, 1, 2])

        self.assertEqual(a[1].pop(), 2)
        self.assertEqual(len(a[1]), 2)
        for i in range(0, 3, 2):
            self.assertEqual(len(a[i]), 3)

        del a

        b = self.list()
        b.append(b)
        del b

    def test_dict(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
        self.assertEqual(sorted(d.keys()), indices)
        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])

    def test_dict_iter(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        it = iter(d)
        self.assertEqual(list(it), indices)
        self.assertEqual(list(it), [])  # exhausted
        # dictionary changed size during iteration
        it = iter(d)
        d.clear()
        self.assertRaises(RuntimeError, next, it)

    def test_dict_proxy_nested(self):
        pets = self.dict(ferrets=2, hamsters=4)
        supplies = self.dict(water=10, feed=3)
        d = self.dict(pets=pets, supplies=supplies)

        self.assertEqual(supplies['water'], 10)
        self.assertEqual(d['supplies']['water'], 10)

        d['supplies']['blankets'] = 5
        self.assertEqual(supplies['blankets'], 5)
        self.assertEqual(d['supplies']['blankets'], 5)

        d['supplies']['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        del pets
        del supplies
        self.assertEqual(d['pets']['ferrets'], 2)
        d['supplies']['blankets'] = 11
        self.assertEqual(d['supplies']['blankets'], 11)

        pets = d['pets']
        supplies = d['supplies']
        supplies['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        d.clear()
        self.assertEqual(len(d), 0)
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(pets['hamsters'], 4)

        l = self.list([pets, supplies])
        l[0]['marmots'] = 1
        self.assertEqual(pets['marmots'], 1)
        self.assertEqual(l[0]['marmots'], 1)

        del pets
        del supplies
        self.assertEqual(l[0]['marmots'], 1)

        outer = self.list([[88, 99], l])
        self.assertIsInstance(outer[0], list)  # Not a ListProxy
        self.assertEqual(outer[-1][-1]['feed'], 3)

    def test_nested_queue(self):
        a = self.list() # Test queue inside list
        a.append(self.Queue())
        a[0].put(123)
        self.assertEqual(a[0].get(), 123)
        b = self.dict() # Test queue inside dict
        b[0] = self.Queue()
        b[0].put(456)
        self.assertEqual(b[0].get(), 456)

    def test_namespace(self):
        n = self.Namespace()
        n.name = 'Bob'
        n.job = 'Builder'
        n._hidden = 'hidden'
        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
        del n.job
        self.assertEqual(str(n), "Namespace(name='Bob')")
        self.assertTrue(hasattr(n, 'name'))
        self.assertTrue(not hasattr(n, 'job'))

#
#
#

def sqr(x, wait=0.0):
    time.sleep(wait)
    return x*x

def mul(x, y):
    return x*y

def raise_large_valuerror(wait):
    time.sleep(wait)
    raise ValueError("x" * 1024**2)

def identity(x):
    return x

class CountedObject(object):
    # Tracks the number of live instances; used by test_release_task_refs.
    n_instances = 0

    def __new__(cls):
        cls.n_instances += 1
        return object.__new__(cls)

    def __del__(self):
        type(self).n_instances -= 1

class SayWhenError(ValueError): pass

def exception_throwing_generator(total, when):
    # Yield 0..total-1, raising SayWhenError at index `when`
    # (or immediately if when == -1).
    if when == -1:
        raise SayWhenError("Somebody said when")
    for i in range(total):
        if i == when:
            raise SayWhenError("Somebody said when")
        yield i


class _TestPool(BaseTestCase):

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.pool = cls.Pool(4)

    @classmethod
    def tearDownClass(cls):
        cls.pool.terminate()
        cls.pool.join()
        cls.pool = None
        super().tearDownClass()

    def test_apply(self):
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), sqr(5))
        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))

    def test_map(self):
        pmap = self.pool.map
        self.assertEqual(pmap(sqr, list(range(10))),
                         list(map(sqr, list(range(10)))))
        self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
                         list(map(sqr,
list(range(100)))))

    def test_starmap(self):
        psmap = self.pool.starmap
        tuples = list(zip(range(10), range(9,-1, -1)))
        self.assertEqual(psmap(mul, tuples),
                         list(itertools.starmap(mul, tuples)))
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(psmap(mul, tuples, chunksize=20),
                         list(itertools.starmap(mul, tuples)))

    def test_starmap_async(self):
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
                         list(itertools.starmap(mul, tuples)))

    def test_map_async(self):
        self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
                         list(map(sqr, list(range(10)))))

    def test_map_async_callbacks(self):
        call_args = self.manager.list() if self.TYPE == 'manager' else []
        self.pool.map_async(int, ['1'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(1, len(call_args))
        self.assertEqual([1], call_args[0])
        self.pool.map_async(int, ['a'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(2, len(call_args))
        self.assertIsInstance(call_args[1], ValueError)

    def test_map_unplicklable(self):
        # Issue #19425 -- failure to pickle should not cause a hang
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class A(object):
            def __reduce__(self):
                raise RuntimeError('cannot pickle')
        with self.assertRaises(RuntimeError):
            self.pool.map(sqr, [A()]*10)

    def test_map_chunksize(self):
        try:
            self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
        except multiprocessing.TimeoutError:
            self.fail("pool.map_async with chunksize stalled on null list")

    def test_map_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
        # again, make sure it's reentrant
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)

        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(10, 3), 1)

        class SpecialIterable:
            def __iter__(self):
                return self
            def __next__(self):
                raise SayWhenError
            def __len__(self):
                return 1
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)

    def test_async(self):
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

    def test_async_timeout(self):
        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
        get = TimingWrapper(res.get)
        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)

    def test_imap(self):
        it = self.pool.imap(sqr, list(range(10)))
        self.assertEqual(list(it), list(map(sqr, list(range(10)))))

        it = self.pool.imap(sqr, list(range(10)))
        for i in range(10):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)

        it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
        for i in range(1000):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)

    def test_imap_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)

        it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
        for i in range(3):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

        # SayWhenError seen at start of problematic chunk's results
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
        for i in range(6):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
        for i in range(4):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

    def test_imap_unordered(self):
        it = self.pool.imap_unordered(sqr, list(range(10)))
        self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))

        it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
        self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))

    def test_imap_unordered_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)

        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(10, 3),
                                      1)
        expected_values = list(map(sqr, list(range(10))))
        with self.assertRaises(SayWhenError):
            # imap_unordered makes it difficult to anticipate the SayWhenError
            for i in range(10):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(20, 7),
                                      2)
        expected_values = list(map(sqr, list(range(20))))
        with self.assertRaises(SayWhenError):
            for i in range(20):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

    def test_make_pool(self):
        expected_error = (RemoteError if self.TYPE == 'manager'
                          else ValueError)

        self.assertRaises(expected_error, self.Pool, -1)
        self.assertRaises(expected_error, self.Pool, 0)

        if self.TYPE != 'manager':
            p = self.Pool(3)
            try:
                self.assertEqual(3, len(p._pool))
            finally:
                p.close()
                p.join()

    def test_terminate(self):
        result = self.pool.map_async(
            time.sleep, [0.1 for i in
range(10000)], chunksize=1 )
        self.pool.terminate()
        join = TimingWrapper(self.pool.join)
        join()
        # Sanity check the pool didn't wait for all tasks to finish
        self.assertLess(join.elapsed, 2.0)

    def test_empty_iterable(self):
        # See Issue 12157
        p = self.Pool(1)

        self.assertEqual(p.map(sqr, []), [])
        self.assertEqual(list(p.imap(sqr, [])), [])
        self.assertEqual(list(p.imap_unordered(sqr, [])), [])
        self.assertEqual(p.map_async(sqr, []).get(), [])

        p.close()
        p.join()

    def test_context(self):
        if self.TYPE == 'processes':
            L = list(range(10))
            expected = [sqr(i) for i in L]
            with self.Pool(2) as p:
                r = p.map_async(sqr, L)
                self.assertEqual(r.get(), expected)
            p.join()
            self.assertRaises(ValueError, p.map_async, sqr, L)

    @classmethod
    def _test_traceback(cls):
        raise RuntimeError(123) # some comment

    def test_traceback(self):
        # We want ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
        if self.TYPE == 'processes':
            with self.Pool(1) as p:
                try:
                    p.apply(self._test_traceback)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected RuntimeError')
            p.join()
            self.assertIs(type(exc), RuntimeError)
            self.assertEqual(exc.args, (123,))
            cause = exc.__cause__
            self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
            self.assertIn('raise RuntimeError(123) # some comment', cause.tb)

            with test.support.captured_stderr() as f1:
                try:
                    raise exc
                except RuntimeError:
                    sys.excepthook(*sys.exc_info())
            self.assertIn('raise RuntimeError(123) # some comment',
                          f1.getvalue())
            # _helper_reraises_exception should not make the error
            # a remote exception
            with self.Pool(1) as p:
                try:
                    p.map(sqr, exception_throwing_generator(1, -1), 1)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected SayWhenError')
                self.assertIs(type(exc), SayWhenError)
                self.assertIs(exc.__cause__, None)
            p.join()

    @classmethod
    def _test_wrapped_exception(cls):
        raise RuntimeError('foo')

    def test_wrapped_exception(self):
        # Issue #20980: Should not wrap exception when using thread pool
        with self.Pool(1) as p:
            with self.assertRaises(RuntimeError):
                p.apply(self._test_wrapped_exception)
        p.join()

    def test_map_no_failfast(self):
        # Issue #23992: the fail-fast behaviour when an exception is raised
        # during map() would make Pool.join() deadlock, because a worker
        # process would fill the result queue (after the result handler thread
        # terminated, hence not draining it anymore).
        t_start = time.monotonic()

        with self.assertRaises(ValueError):
            with self.Pool(2) as p:
                try:
                    p.map(raise_large_valuerror, [0, 1])
                finally:
                    time.sleep(0.5)
                    p.close()
                    p.join()

        # check that we indeed waited for all jobs
        self.assertGreater(time.monotonic() - t_start, 0.9)

    def test_release_task_refs(self):
        # Issue #29861: task arguments and results should not be kept
        # alive after we are done with them.
        objs = [CountedObject() for i in range(10)]
        refs = [weakref.ref(o) for o in objs]
        self.pool.map(identity, objs)
        del objs
        gc.collect()  # For PyPy or other GCs.
        time.sleep(DELTA)  # let threaded cleanup code run
        self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned, check
        # they were released too.
        self.assertEqual(CountedObject.n_instances, 0)

    def test_enter(self):
        if self.TYPE == 'manager':
            self.skipTest("test not applicable to manager")

        pool = self.Pool(1)
        with pool:
            pass
            # call pool.terminate()
        # pool is no longer running

        with self.assertRaises(ValueError):
            # bpo-35477: pool.__enter__() fails if the pool is not running
            with pool:
                pass
        pool.join()

    def test_resource_warning(self):
        if self.TYPE == 'manager':
            self.skipTest("test not applicable to manager")

        pool = self.Pool(1)
        pool.terminate()
        pool.join()

        # force state to RUN to emit ResourceWarning in __del__()
        pool._state = multiprocessing.pool.RUN

        with warnings_helper.check_warnings(
                ('unclosed running multiprocessing pool', ResourceWarning)):
            pool = None
            support.gc_collect()

def raising():
    raise KeyError("key")

def unpickleable_result():
    # Lambdas cannot be pickled, so returning one from a worker must fail.
    return lambda: 42

class _TestPoolWorkerErrors(BaseTestCase):
    ALLOWED_TYPES = ('processes', )

    def test_async_error_callback(self):
        p = multiprocessing.Pool(2)

        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc

        res = p.apply_async(raising, error_callback=errback)
        self.assertRaises(KeyError, res.get)
        self.assertTrue(scratchpad[0])
        self.assertIsInstance(scratchpad[0], KeyError)

        p.close()
        p.join()

    def test_unpickleable_result(self):
        from multiprocessing.pool import MaybeEncodingError
        p = multiprocessing.Pool(2)

        # Make sure we don't lose pool processes because of encoding errors.
        for iteration in range(20):

            scratchpad = [None]
            def errback(exc):
                scratchpad[0] = exc

            res = p.apply_async(unpickleable_result, error_callback=errback)
            self.assertRaises(MaybeEncodingError, res.get)
            wrapped = scratchpad[0]
            self.assertTrue(wrapped)
            self.assertIsInstance(scratchpad[0], MaybeEncodingError)
            self.assertIsNotNone(wrapped.exc)
            self.assertIsNotNone(wrapped.value)

        p.close()
        p.join()

class _TestPoolWorkerLifetime(BaseTestCase):
    ALLOWED_TYPES = ('processes', )

    def test_pool_worker_lifetime(self):
        p = multiprocessing.Pool(3, maxtasksperchild=10)
        self.assertEqual(3, len(p._pool))
        origworkerpids = [w.pid for w in p._pool]
        # Run many tasks so each worker gets replaced (hopefully)
        results = []
        for i in range(100):
            results.append(p.apply_async(sqr, (i, )))
        # Fetch the results and verify we got the right answers,
        # also ensuring all the tasks have completed.
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
        # Refill the pool
        p._repopulate_pool()
        # Wait until all workers are alive
        # (countdown * DELTA = 5 seconds max startup process time)
        countdown = 50
        while countdown and not all(w.is_alive() for w in p._pool):
            countdown -= 1
            time.sleep(DELTA)
        finalworkerpids = [w.pid for w in p._pool]
        # All pids should be assigned.  See issue #7805.
        self.assertNotIn(None, origworkerpids)
        self.assertNotIn(None, finalworkerpids)
        # Finally, check that the worker pids have changed
        self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
        p.close()
        p.join()

    def test_pool_worker_lifetime_early_close(self):
        # Issue #10332: closing a pool whose workers have limited lifetimes
        # before all the tasks completed would make join() hang.
        p = multiprocessing.Pool(3, maxtasksperchild=1)
        results = []
        for i in range(6):
            results.append(p.apply_async(sqr, (i, 0.3)))
        p.close()
        p.join()
        # check the results
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))

    def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
        # tests cases against bpo-38744 and bpo-39360
        # NOTE(review): this string literal appears whitespace-mangled; as a
        # single line it is not valid Python for the child interpreter.
        # Verify against the upstream source before relying on this test.
        cmd = '''if 1: from multiprocessing import Pool problem = None class A: def __init__(self): self.pool = Pool(processes=1) def test(): global problem problem = A() problem.pool.map(float, tuple(range(10))) if __name__ == "__main__": test() '''
        rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
        self.assertEqual(rc, 0)

#
# Test of creating a customized manager class
#

from multiprocessing.managers import BaseManager, BaseProxy, RemoteError

class FooBar(object):
    def f(self):
        return 'f()'
    def g(self):
        raise ValueError
    def _h(self):
        return '_h()'

def baz():
    for i in range(10):
        yield i*i


class IteratorProxy(BaseProxy):
    _exposed_ = ('__next__',)
    def __iter__(self):
        return self
    def __next__(self):
        return self._callmethod('__next__')


class MyManager(BaseManager):
    pass

MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)


class _TestMyManager(BaseTestCase):

    ALLOWED_TYPES = ('manager',)

    def test_mymanager(self):
        manager = MyManager()
        manager.start()
        self.common(manager)
        manager.shutdown()

        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context(self):
        with MyManager() as manager:
            self.common(manager)
        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context_prestarted(self):
        manager = MyManager()
        manager.start()
        with manager:
            self.common(manager)
        self.assertEqual(manager._process.exitcode, 0)

    def common(self, manager):
        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()

        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]

        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])

        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        self.assertEqual(foo._callmethod('f'), 'f()')
        self.assertRaises(RemoteError, foo._callmethod, '_h')

        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')

        self.assertEqual(list(baz), [i*i for i in range(10)])

#
# Test of connecting to a remote server and using xmlrpclib for serialization
#

_queue = pyqueue.Queue()
def get_queue():
    return _queue

class QueueManager(BaseManager):
    '''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)

class QueueManager2(BaseManager):
    '''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')


SERIALIZER = 'xmlrpclib'

class _TestRemoteManager(BaseTestCase):

    ALLOWED_TYPES = ('manager',)
    values = ['hello world', None, True, 2.25,
              'hall\xe5 v\xe4rlden',
              '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
              b'hall\xe5 v\xe4rlden',
             ]
    result = values[:]

    @classmethod
    def _putter(cls, address, authkey):
        manager = QueueManager2(
            address=address, authkey=authkey, serializer=SERIALIZER
            )
        manager.connect()
        queue = manager.get_queue()
        # Note that xmlrpclib will deserialize object as a list not a tuple
        queue.put(tuple(cls.values))

    def test_remote(self):
        authkey = os.urandom(32)

        manager = QueueManager(
            address=(socket_helper.HOST, 0),
authkey=authkey, serializer=SERIALIZER ) manager.start() self.addCleanup(manager.shutdown) p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() self.assertEqual(queue.get(), self.result) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue @hashlib_helper.requires_hashdigest('md5') class _TestManagerRestart(BaseTestCase): @classmethod def _putter(cls, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER) try: srvr = manager.get_server() addr = srvr.address # Close the connection.Listener socket which gets opened as a part # of manager.get_server(). It's not needed for the test. 
srvr.listener.close() manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() p.join() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue finally: if hasattr(manager, "shutdown"): manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) try: manager.start() self.addCleanup(manager.shutdown) except OSError as e: if e.errno != errno.EADDRINUSE: raise # Retry after some time, in case the old socket was lingering # (sporadic failure on buildbots) time.sleep(1.0) manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) if hasattr(manager, "shutdown"): self.addCleanup(manager.shutdown) # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _echo(cls, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', list(range(4))) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) 
        # recv_bytes_into() with a too-small buffer must raise
        # BufferTooShort carrying the whole undelivered message in args.
        self.assertEqual(conn.send_bytes(longmsg), None)
        try:
            res = conn.recv_bytes_into(buffer)
        except multiprocessing.BufferTooShort as e:
            self.assertEqual(e.args, (longmsg,))
        else:
            self.fail('expected BufferTooShort, got %s' % res)

        # poll() should return promptly when no data is pending; the
        # TimingWrapper records how long each call actually took.
        poll = TimingWrapper(conn.poll)

        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        # A negative timeout behaves like zero: no blocking.
        self.assertEqual(poll(-1), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        # With no data, poll(TIMEOUT1) blocks for the full timeout.
        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)

        conn.send(None)
        time.sleep(.1)

        # Data is now pending, so poll() returns immediately.
        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(conn.recv(), None)

        # Large payloads must round-trip through the echoing child intact.
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16Mb
        conn.send_bytes(really_big_msg)
        self.assertEqual(conn.recv_bytes(), really_big_msg)

        conn.send_bytes(SENTINEL)    # tell child to quit
        child_conn.close()

        if self.TYPE == 'processes':
            self.assertEqual(conn.readable, True)
            self.assertEqual(conn.writable, True)
            # Child closed its end, so further reads hit EOF.
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recv_bytes)

        p.join()

    def test_duplex_false(self):
        # A simplex pipe: the reader end cannot send, the writer end
        # cannot receive or poll.
        reader, writer = self.Pipe(duplex=False)
        self.assertEqual(writer.send(1), None)
        self.assertEqual(reader.recv(), 1)
        if self.TYPE == 'processes':
            self.assertEqual(reader.readable, True)
            self.assertEqual(reader.writable, False)
            self.assertEqual(writer.readable, False)
            self.assertEqual(writer.writable, True)
            self.assertRaises(OSError, reader.send, 2)
            self.assertRaises(OSError, writer.recv)
            self.assertRaises(OSError, writer.poll)

    def test_spawn_close(self):
        # We test that a pipe connection can be closed by parent
        # process immediately after child is spawned.  On Windows this
        # would have sometimes failed on old versions because
        # child_conn would be closed before the child got a chance to
        # duplicate it.
conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) @classmethod def _is_fd_assigned(cls, fd): try: os.fstat(fd) except OSError as e: if e.errno == errno.EBADF: return False raise else: return True @classmethod def _writefd(cls, conn, data, create_dummy_fds=False): if create_dummy_fds: for i in range(0, 256): if not cls._is_fd_assigned(i): os.dup2(conn.fileno(), i) fd = reduction.recv_handle(conn) if msvcrt: fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) os.write(fd, data) os.close(fd) @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") def test_fd_transfer(self): if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"foo")) p.daemon = True p.start() self.addCleanup(os_helper.unlink, os_helper.TESTFN) with open(os_helper.TESTFN, "wb") as f: fd = f.fileno() if msvcrt: fd = msvcrt.get_osfhandle(fd) 
reduction.send_handle(conn, fd, p.pid) p.join() with open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"foo") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") @unittest.skipIf(MAXFD <= 256, "largest assignable fd number is too small") @unittest.skipUnless(hasattr(os, "dup2"), "test needs os.dup2()") def test_large_fd_transfer(self): # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() self.addCleanup(os_helper.unlink, os_helper.TESTFN) with open(os_helper.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data. 
if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) @unittest.skipUnless(util.abstract_sockets_supported, "test needs abstract socket support") def test_abstract_socket(self): with self.connection.Listener("\0something") as listener: with self.connection.Client(listener.address) as client: with listener.accept() as d: client.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, listener.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def 
test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. 
r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @hashlib_helper.requires_hashdigest('md5') class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocessing import resource_sharer resource_sharer.stop(timeout=support.LONG_TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((socket_helper.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp 
= self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) 
conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block 
self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). # Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocessing.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() 
p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory") @hashlib_helper.requires_hashdigest('md5') class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() @unittest.skipIf(sys.platform == "win32", "test is broken on Windows") def test_shared_memory_basics(self): sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, 'test01_tsmb') self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Verify __repr__ self.assertIn(sms.name, str(sms)) self.assertIn(str(sms.size), str(sms)) # Test pickling sms.buf[0:6] = b'pickle' pickled_sms = pickle.dumps(sms) sms2 = pickle.loads(pickled_sms) self.assertEqual(sms.name, sms2.name) self.assertEqual(sms.size, sms2.size) self.assertEqual(bytes(sms.buf[0:6]), bytes(sms2.buf[0:6]), b'pickle') # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. 
also_sms = shared_memory.SharedMemory('test01_tsmb') self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. same_sms.close() # Creating Shared Memory Segment with -ve size with self.assertRaises(ValueError): shared_memory.SharedMemory(create=True, size=-2) # Attaching Shared Memory Segment without a name with self.assertRaises(ValueError): shared_memory.SharedMemory(create=False) # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocessing.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = ['test01_fn', 'test02_fn'] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. 
with self.assertRaises(FileNotFoundError): sms_uno = shared_memory.SharedMemory( 'test01_dblunlink', create=True, size=5000 ) try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory('test01_dblunlink') sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( 'test01_tsmb', create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb') self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. 
sms.close() # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocessing tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. 
with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. 
absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify __repr__ self.assertIn(sl.shm.name, str(sl)) self.assertIn(str(list(sl)), str(sl)) # Index Out of Range (get) with self.assertRaises(IndexError): sl[7] # Index Out of Range (set) with self.assertRaises(IndexError): sl[7] = 2 # Assign value without format change (str -> str) current_format = sl._get_packing_format(0) sl[0] = 'howdy' self.assertEqual(current_format, sl._get_packing_format(0)) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. 
self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate') try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual('test03_duplicate', sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. 
empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl) deserialized_sl = pickle.loads(serialized_sl) self.assertTrue( isinstance(deserialized_sl, shared_memory.ShareableList) ) self.assertTrue(deserialized_sl[-1], 9) self.assertFalse(sl is deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") # Verify data is not being put into the pickled representation. name = 'a' * len(sl.shm.name) larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl) self.assertTrue(len(serialized_sl) == len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. 
p.terminate() p.wait() deadline = time.monotonic() + support.LONG_TIMEOUT t = 0.1 while time.monotonic() < deadline: time.sleep(t) t = min(t*2, 5) try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break else: raise AssertionError("A SharedMemory segment was leaked after" " a process was abruptly terminated.") if os.name == 'posix': # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) # # # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): gc.collect() # For PyPy or other GCs. self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a gc.collect() # For PyPy or other GCs. b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called gc.collect() # For PyPy or other GCs. 
c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with threading_helper.start_threads(threads): time.sleep(4.0) # Wait a bit to 
trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocessing.' + m for m in modules] modules.remove('multiprocessing.__init__') modules.append('multiprocessing') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocessing.popen_fork') modules.remove('multiprocessing.popen_forkserver') modules.remove('multiprocessing.popen_spawn_posix') else: modules.remove('multiprocessing.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocessing.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocessing.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = 
multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = multiprocessing.connection.Connection(44977608) # check that poll() doesn't crash try: conn.poll() except (ValueError, OSError): 
            pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            conn._handle = None
        # A clearly invalid handle (-1) must be rejected at construction time.
        self.assertRaises((ValueError, OSError),
                          multiprocessing.connection.Connection, -1)


@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
    """Tests for the HMAC challenge/response handshake used by
    multiprocessing.connection to authenticate peers.
    """
    # TODO: add more tests for deliver/answer challenge.

    def test_deliver_challenge_auth_failure(self):
        # deliver_challenge() must raise AuthenticationError when the peer
        # answers the challenge with garbage instead of a valid HMAC digest.
        class _FakeConnection(object):
            # Minimal stand-in for a Connection: always answers with junk.
            def recv_bytes(self, size):
                return b'something bogus'
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.deliver_challenge,
                          _FakeConnection(), b'abc')

    def test_answer_challenge_auth_failure(self):
        # answer_challenge() must raise AuthenticationError when, after a
        # well-formed CHALLENGE message, the server's follow-up is bogus.
        class _FakeConnection(object):
            # Scripted peer: first recv returns a valid CHALLENGE header,
            # the second returns junk, anything after that is empty.
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                elif self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.answer_challenge,
                          _FakeConnection(), b'abc')

#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#

def initializer(ns):
    # Worker-side initializer: bump a counter on the shared Namespace proxy
    # so the parent can observe that the initializer actually ran.
    ns.test += 1


@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
    """Verify that the ``initializer`` callable passed to Manager.start()
    and Pool() is invoked in the child process (issue 5585).
    """

    def setUp(self):
        # Shared namespace with a counter the initializer increments.
        self.mgr = multiprocessing.Manager()
        self.ns = self.mgr.Namespace()
        self.ns.test = 0

    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()

    def test_manager_initializer(self):
        m = multiprocessing.managers.SyncManager()
        # A non-callable initializer must be rejected up front.
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        # The manager process ran the initializer exactly once.
        self.assertEqual(self.ns.test, 1)
        m.shutdown()
        m.join()

    def test_pool_initializer(self):
        # A non-callable initializer must be rejected up front.
        self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
        p = multiprocessing.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        # The single pool worker ran the initializer exactly once.
        self.assertEqual(self.ns.test, 1)

#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs.
# sys.stdin.close() behavior
#

def _this_sub_process(q):
    # Grandchild body: a non-blocking get on an empty queue is expected to
    # raise Empty, which is deliberately swallowed.
    try:
        item = q.get(block=False)
    except pyqueue.Empty:
        pass

def _test_process():
    # Child body: spawn a daemon grandchild that touches a Queue, proving a
    # Queue can be used from within a child process.
    queue = multiprocessing.Queue()
    subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
    subProc.daemon = True
    subProc.start()
    subProc.join()


def _afunc(x):
    # Trivial picklable worker function for Pool.map.
    return x*x

def pool_in_process():
    # Child body: create and drive a Pool from inside a child process.
    pool = multiprocessing.Pool(processes=4)
    x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
    pool.close()
    pool.join()


class _file_like(object):
    """Write-buffering file-like wrapper whose buffer is keyed on the
    current pid, so a forked child starts with a fresh (empty) cache
    instead of inheriting the parent's pending writes.
    """

    def __init__(self, delegate):
        self._delegate = delegate
        self._pid = None

    @property
    def cache(self):
        pid = os.getpid()
        # There are no race conditions since fork keeps only the running thread
        if pid != self._pid:
            self._pid = pid
            self._cache = []
        return self._cache

    def write(self, data):
        self.cache.append(data)

    def flush(self):
        # Push the buffered writes through to the real delegate in one call.
        self._delegate.write(''.join(self.cache))
        self._cache = []


class TestStdinBadfiledescriptor(unittest.TestCase):
    """Regression tests (issues 5155/5313/5331) for using Queue and Pool
    from inside child processes, plus flushing through a forked file-like
    object.
    """

    def test_queue_in_process(self):
        proc = multiprocessing.Process(target=_test_process)
        proc.start()
        proc.join()

    def test_pool_in_process(self):
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()

    def test_flushing(self):
        sio = io.StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): `proc` is constructed but never started or joined
        # here — only the parent-side flush below is actually exercised.
        # Looks unintentional; confirm against upstream history.
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        assert sio.getvalue() == 'foo'


class TestWait(unittest.TestCase):
    """Tests for multiprocessing.connection.wait() over pipes and sockets."""

    @classmethod
    def _child_test_wait(cls, w, slow):
        # Child body: send ten (index, pid) pairs, optionally with random
        # delays to exercise the slow/interleaved path, then close the end.
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            w.send((i, os.getpid()))
        w.close()

    def test_wait(self, slow=False):
        from multiprocessing.connection import wait
        readers = []
        procs = []
        messages = []

        # Start four writers; the parent keeps only the read ends (the
        # parent's copy of each write end is closed so EOF is observable).
        for i in range(4):
            r, w = multiprocessing.Pipe(duplex=False)
            p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
            p.daemon = True
            p.start()
            w.close()
            readers.append(r)
            procs.append(p)
            self.addCleanup(p.join)

        # Drain every reader until all hit EOF; wait() tells us which are
        # ready so recv() never blocks indefinitely.
        while readers:
            for r in wait(readers):
                try:
                    msg = r.recv()
                except EOFError:
                    readers.remove(r)
                    r.close()
                else:
                    messages.append(msg)

        messages.sort()
        # Every child must have delivered all ten of its messages.
        expected = sorted((i, p.pid) for i in range(10) for p in procs)
        self.assertEqual(messages, expected)

    @classmethod
    def _child_test_wait_socket(cls, address, slow):
        # Child body: connect to the parent's listener and stream ten
        # newline-terminated counters, optionally with random delays.
        s = socket.socket()
        s.connect(address)
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            s.sendall(('%s\n' % i).encode('ascii'))
        s.close()

    def test_wait_socket(self, slow=False):
        # Same shape as test_wait() but with raw sockets as waitables.
        from multiprocessing.connection import wait
        l = socket.create_server((socket_helper.HOST, 0))
        addr = l.getsockname()
        readers = []
        procs = []
        dic = {}

        for i in range(4):
            p = multiprocessing.Process(target=self._child_test_wait_socket, args=(addr, slow))
            p.daemon = True
            p.start()
            procs.append(p)
            self.addCleanup(p.join)

        # Accept one connection per child, then close the listener.
        for i in range(4):
            r, _ = l.accept()
            readers.append(r)
            dic[r] = []
        l.close()

        # Drain each socket until it reports EOF (empty recv).
        while readers:
            for r in wait(readers):
                msg = r.recv(32)
                if not msg:
                    readers.remove(r)
                    r.close()
                else:
                    dic[r].append(msg)

        # Chunk boundaries are arbitrary, so compare the joined stream.
        expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
        for v in dic.values():
            self.assertEqual(b''.join(v), expected)

    def test_wait_slow(self):
        self.test_wait(True)

    def test_wait_socket_slow(self):
        self.test_wait_socket(True)

    def test_wait_timeout(self):
        from multiprocessing.connection import wait
        expected = 5
        a, b = multiprocessing.Pipe()

        # Nothing ready: wait() must block for roughly the timeout and
        # return an empty list (generous bounds for slow buildbots).
        start = time.monotonic()
        res = wait([a, b], expected)
        delta = time.monotonic() - start
        self.assertEqual(res, [])
        self.assertLess(delta, expected * 2)
        self.assertGreater(delta, expected * 0.5)

        # Data pending on `a`: wait() must return promptly, well before
        # the (large) timeout expires.
        b.send(None)
        start = time.monotonic()
        res = wait([a, b], 20)
        delta = time.monotonic() - start
        self.assertEqual(res, [a])
        self.assertLess(delta, 0.4)

    @classmethod
    def signal_and_sleep(cls, sem, period):
        # Child body: announce readiness then stay alive for `period`
        # seconds so the parent can time waiting on the process sentinel.
        sem.release()
        time.sleep(period)

    def test_wait_integer(self):
        from multiprocessing.connection import wait
        expected = 3
        sorted_ = lambda l: sorted(l, key=lambda x: id(x))
        sem = multiprocessing.Semaphore(0)
        a, b = multiprocessing.Pipe()
        p = multiprocessing.Process(target=self.signal_and_sleep, args=(sem, expected))

        p.start()
        # Process sentinels are plain integer handles/fds usable in wait().
        self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20)) start = time.monotonic() res = wait([a, p.sentinel, b], expected + 20) delta = time.monotonic() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocessing.connection import wait a, b = multiprocessing.Pipe() t = time.monotonic() res = wait([a], timeout=-1) t = time.monotonic() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls): import json r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def test_flags(self): import json # start child process using unusual flags prog = ('from test._test_multiprocessing import TestFlags; ' + 
'TestFlags.run_in_child()') data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() self.assertEqual(conn.recv(), 456) conn.close() l.close() join_process(p) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocessing.get_start_method() name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = test.support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = test.support.script_helper.assert_python_ok(name, sm) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. 
    @classmethod
    def child(cls, n, conn):
        # Recurse n generations deep; the innermost child reports the size
        # of the after-fork registry back to the original parent.
        if n > 1:
            p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
            p.start()
            conn.close()
            join_process(p)
        else:
            conn.send(len(util._afterfork_registry))
            conn.close()

    def test_lock(self):
        r, w = multiprocessing.Pipe(False)
        l = util.ForkAwareThreadLock()
        old_size = len(util._afterfork_registry)
        p = multiprocessing.Process(target=self.child, args=(5, w))
        p.start()
        w.close()
        new_size = r.recv()
        join_process(p)
        # Issue #17555: the registry must not accumulate duplicate entries
        # (pre-fix it grew ~2**n across n forked generations).
        self.assertLessEqual(new_size, old_size)

#
# Check that non-forked child processes do not inherit unneeded fds/handles
#

class TestCloseFds(unittest.TestCase):
    """Verify that a file descriptor not explicitly passed to a child is
    invalid there (for spawn/forkserver starts), i.e. fds are not leaked.
    """

    def get_high_socket_fd(self):
        # Returns an open socket fd the test will probe from the child.
        if WIN32:
            # The child process will not have any socket handles, so
            # calling socket.fromfd() should produce WSAENOTSOCK even
            # if there is a handle of the same number.
            return socket.socket().detach()
        else:
            # We want to produce a socket with an fd high enough that a
            # freshly created child process will not have any fds as high.
            fd = socket.socket().detach()
            to_close = []
            while fd < 50:
                to_close.append(fd)
                fd = os.dup(fd)
            for x in to_close:
                os.close(x)
            return fd

    def close(self, fd):
        # Platform-appropriate disposal of the fd from get_high_socket_fd().
        if WIN32:
            socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
        else:
            os.close(fd)

    @classmethod
    def _test_closefds(cls, conn, fd):
        # Child body: try to adopt `fd` as a socket and report the outcome
        # (the exception on failure, None on success) back to the parent.
        try:
            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        except Exception as e:
            conn.send(e)
        else:
            s.close()
            conn.send(None)

    def test_closefd(self):
        if not HAS_REDUCTION:
            raise unittest.SkipTest('requires fd pickling')

        reader, writer = multiprocessing.Pipe()
        fd = self.get_high_socket_fd()
        try:
            p = multiprocessing.Process(target=self._test_closefds,
                                        args=(writer, fd))
            p.start()
            writer.close()
            e = reader.recv()
            join_process(p)
        finally:
            self.close(fd)
            writer.close()
            reader.close()

        if multiprocessing.get_start_method() == 'fork':
            # fork inherits all fds, so the probe must have succeeded.
            self.assertIs(e, None)
        else:
            # spawn/forkserver: fd must be absent in the child (EBADF on
            # POSIX, WSAENOTSOCK on Windows).
            WSAENOTSOCK = 10038
            self.assertIsInstance(e, OSError)
            self.assertTrue(e.errno == errno.EBADF or
                            e.winerror == WSAENOTSOCK, e)

#
# Issue #17097: EINTR
should be ignored by recv(), send(), accept() etc # class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocessing.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocessing.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocessing.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = 
                       r.recv()
        r.close()
        p.join()
        # The child must report the same start method as its context.
        self.assertEqual(child_method, ctx.get_start_method())

    def test_context(self):
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                ctx = multiprocessing.get_context(method)
            except ValueError:
                # Method unavailable on this platform; skip it.
                continue
            self.assertEqual(ctx.get_start_method(), method)
            self.assertIs(ctx.get_context(), ctx)
            # A concrete context is immutable: its start method is fixed.
            self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
            self.assertRaises(ValueError, ctx.set_start_method, None)
            self.check_context(ctx)

    def test_set_get(self):
        multiprocessing.set_forkserver_preload(PRELOAD)
        count = 0
        old_method = multiprocessing.get_start_method()
        try:
            for method in ('fork', 'spawn', 'forkserver'):
                try:
                    multiprocessing.set_start_method(method, force=True)
                except ValueError:
                    # Method unavailable on this platform; skip it.
                    continue
                self.assertEqual(multiprocessing.get_start_method(), method)
                ctx = multiprocessing.get_context()
                self.assertEqual(ctx.get_start_method(), method)
                # Context class and Process class names encode the method.
                self.assertTrue(type(ctx).__name__.lower().startswith(method))
                self.assertTrue(
                    ctx.Process.__name__.lower().startswith(method))
                self.check_context(multiprocessing)
                count += 1
        finally:
            # Always restore the process-global start method.
            multiprocessing.set_start_method(old_method, force=True)
        # At least one method must be usable everywhere.
        self.assertGreaterEqual(count, 1)

    def test_get_all(self):
        methods = multiprocessing.get_all_start_methods()
        if sys.platform == 'win32':
            self.assertEqual(methods, ['spawn'])
        else:
            # Order is unspecified; forkserver is optional.
            self.assertTrue(methods == ['fork', 'spawn'] or
                            methods == ['spawn', 'fork'] or
                            methods == ['fork', 'spawn', 'forkserver'] or
                            methods == ['spawn', 'fork', 'forkserver'])

    def test_preload_resources(self):
        if multiprocessing.get_start_method() != 'forkserver':
            self.skipTest("test only relevant for 'forkserver' method")
        # Run the helper script; it prints 'ok' iff forkserver preload and
        # the grandchild both came up cleanly.
        name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
        rc, out, err = test.support.script_helper.assert_python_ok(name)
        out = out.decode()
        err = err.decode()
        if out.rstrip() != 'ok' or err != '':
            print(out)
            print(err)
            self.fail("failed spawning forkserver or grandchild")


@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
class
TestResourceTracker(unittest.TestCase): def test_resource_tracker(self): # # Check that killing process does not leak named semaphores # cmd = '''if 1: import time, os, tempfile import multiprocessing as mp from multiprocessing import resource_tracker from multiprocessing.shared_memory import SharedMemory mp.set_start_method("spawn") rand = tempfile._RandomNameSequence() def create_and_register_resource(rtype): if rtype == "semaphore": lock = mp.Lock() return lock, lock._semlock.name elif rtype == "shared_memory": sm = SharedMemory(create=True, size=10) return sm, sm._name else: raise ValueError( "Resource type {{}} not understood".format(rtype)) resource1, rname1 = create_and_register_resource("{rtype}") resource2, rname2 = create_and_register_resource("{rtype}") os.write({w}, rname1.encode("ascii") + b"\\n") os.write({w}, rname2.encode("ascii") + b"\\n") time.sleep(10) ''' for rtype in resource_tracker._CLEANUP_FUNCS: with self.subTest(rtype=rtype): if rtype == "noop": # Artefact resource type used by the resource_tracker continue r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd.format(w=w, rtype=rtype)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _resource_unlink(name1, rtype) p.terminate() p.wait() deadline = time.monotonic() + support.LONG_TIMEOUT while time.monotonic() < deadline: time.sleep(.5) try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, but OSX seems to give # EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) break else: raise AssertionError( f"A {rtype} resource was leaked after a process was " f"abruptly terminated.") err = p.stderr.read().decode('utf-8') p.stderr.close() expected = ('resource_tracker: There appear to be 2 leaked {} ' 'objects'.format( rtype)) self.assertRegex(err, expected) self.assertRegex(err, r'resource_tracker: %r: 
\[Errno' % name1) def check_resource_tracker_death(self, signum, should_die): # bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocessing.resource_tracker import _resource_tracker pid = _resource_tracker._pid if pid is not None: os.kill(pid, signal.SIGKILL) support.wait_process(pid, exitcode=-signal.SIGKILL) with warnings.catch_warnings(): warnings.simplefilter("ignore") _resource_tracker.ensure_running() pid = _resource_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocessing.get_context("spawn") with warnings.catch_warnings(record=True) as all_warn: warnings.simplefilter("always") sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) if should_die: self.assertEqual(len(all_warn), 1) the_warn = all_warn[0] self.assertTrue(issubclass(the_warn.category, UserWarning)) self.assertTrue("resource_tracker: process died" in str(the_warn.message)) else: self.assertEqual(len(all_warn), 0) def test_resource_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGINT, False) def test_resource_tracker_sigterm(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGTERM, False) def test_resource_tracker_sigkill(self): # Uncatchable signal. self.check_resource_tracker_death(signal.SIGKILL, True) @staticmethod def _is_resource_tracker_reused(conn, pid): from multiprocessing.resource_tracker import _resource_tracker _resource_tracker.ensure_running() # The pid should be None in the child process, expect for the fork # context. It should not be a new value. 
reused = _resource_tracker._pid in (None, pid) reused &= _resource_tracker._check_alive() conn.send(reused) def test_resource_tracker_reused(self): from multiprocessing.resource_tracker import _resource_tracker _resource_tracker.ensure_running() pid = _resource_tracker._pid r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._is_resource_tracker_reused, args=(w, pid)) p.start() is_resource_tracker_reused = r.recv() # Clean up p.join() w.close() r.close() self.assertTrue(is_resource_tracker_reused) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocessing.SimpleQueue() child_can_start = multiprocessing.Event() parent_can_continue = multiprocessing.Event() proc = multiprocessing.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() def test_close(self): queue = multiprocessing.SimpleQueue() queue.close() # closing a queue twice should not fail queue.close() # Test specific to CPython since it tests private attributes @test.support.cpython_only def test_closed(self): queue = multiprocessing.SimpleQueue() queue.close() self.assertTrue(queue._reader.closed) self.assertTrue(queue._writer.closed) class TestPoolNotLeakOnFailure(unittest.TestCase): def test_release_unused_processes(self): # Issue #19675: During pool creation, if we can't create a process, # don't leak already created ones. 
will_fail_in = 3 forked_processes = [] class FailingForkProcess: def __init__(self, **kwargs): self.name = 'Fake Process' self.exitcode = None self.state = None forked_processes.append(self) def start(self): nonlocal will_fail_in if will_fail_in <= 0: raise OSError("Manually induced OSError") will_fail_in -= 1 self.state = 'started' def terminate(self): self.state = 'stopping' def join(self): if self.state == 'stopping': self.state = 'stopped' def is_alive(self): return self.state == 'started' or self.state == 'stopping' with self.assertRaisesRegex(OSError, 'Manually induced OSError'): p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( Process=FailingForkProcess)) p.close() p.join() self.assertFalse( any(process.is_alive() for process in forked_processes)) @hashlib_helper.requires_hashdigest('md5') class TestSyncManagerTypes(unittest.TestCase): """Test all the types which can be shared between a parent and a child process by using a manager which acts as an intermediary between them. In the following unit-tests the base type is created in the parent process, the @classmethod represents the worker process and the shared object is readable and editable between the two. # The child. @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.append(6) # The parent. 
    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        assert o[1] == 6
    """
    # Manager class under test; subclasses may override to exercise other
    # manager implementations.
    manager_class = multiprocessing.managers.SyncManager

    def setUp(self):
        self.manager = self.manager_class()
        self.manager.start()
        self.proc = None

    def tearDown(self):
        if self.proc is not None and self.proc.is_alive():
            self.proc.terminate()
            self.proc.join()
        self.manager.shutdown()
        self.manager = None
        self.proc = None

    @classmethod
    def setUpClass(cls):
        support.reap_children()

    tearDownClass = setUpClass

    def wait_proc_exit(self):
        # Only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395).
        join_process(self.proc)
        start_time = time.monotonic()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = time.monotonic() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                support.print_warning(f"multiprocessing.Manager still has "
                                      f"{multiprocessing.active_children()} "
                                      f"active children after {dt} seconds")
                break

    def run_worker(self, worker, obj):
        # Run `worker(obj)` in a child process and require a clean exit
        # (exitcode 0, i.e. none of the worker's asserts fired).
        self.proc = multiprocessing.Process(target=worker, args=(obj, ))
        self.proc.daemon = True
        self.proc.start()
        self.wait_proc_exit()
        self.assertEqual(self.proc.exitcode, 0)

    @classmethod
    def _test_event(cls, obj):
        assert obj.is_set()
        obj.wait()
        obj.clear()
        obj.wait(0.001)

    def test_event(self):
        o = self.manager.Event()
        o.set()
        self.run_worker(self._test_event, o)
        assert not o.is_set()
        o.wait(0.001)

    @classmethod
    def _test_lock(cls, obj):
        obj.acquire()

    def test_lock(self, lname="Lock"):
        o = getattr(self.manager, lname)()
        self.run_worker(self._test_lock, o)
        o.release()
        self.assertRaises(RuntimeError, o.release)  # already released

    @classmethod
    def _test_rlock(cls, obj):
        obj.acquire()
        obj.release()

    def test_rlock(self, lname="Lock"):
        o = getattr(self.manager, lname)()
        self.run_worker(self._test_rlock, o)

    @classmethod
    def _test_semaphore(cls, obj):
        obj.acquire()

    def test_semaphore(self, sname="Semaphore"):
        o = getattr(self.manager, sname)()
        self.run_worker(self._test_semaphore, o)
        o.release()

    def test_bounded_semaphore(self):
        self.test_semaphore(sname="BoundedSemaphore")

    @classmethod
    def _test_condition(cls, obj):
        obj.acquire()
        obj.release()

    def test_condition(self):
        o = self.manager.Condition()
        self.run_worker(self._test_condition, o)

    @classmethod
    def _test_barrier(cls, obj):
        assert obj.parties == 5
        obj.reset()

    def test_barrier(self):
        o = self.manager.Barrier(5)
        self.run_worker(self._test_barrier, o)

    @classmethod
    def _test_pool(cls, obj):
        # TODO: fix https://bugs.python.org/issue35919
        with obj:
            pass

    def test_pool(self):
        o = self.manager.Pool(processes=4)
        self.run_worker(self._test_pool, o)

    @classmethod
    def _test_queue(cls, obj):
        assert obj.qsize() == 2
        assert obj.full()
        assert not obj.empty()
        assert obj.get() == 5
        assert not obj.empty()
        assert obj.get() == 6
        assert obj.empty()

    def test_queue(self, qname="Queue"):
        o = getattr(self.manager, qname)(2)
        o.put(5)
        o.put(6)
        self.run_worker(self._test_queue, o)
        assert o.empty()
        assert not o.full()

    def test_joinable_queue(self):
        self.test_queue("JoinableQueue")

    @classmethod
    def _test_list(cls, obj):
        assert obj[0] == 5
        assert obj.count(5) == 1
        assert obj.index(5) == 0
        obj.sort()
        obj.reverse()
        for x in obj:
            pass
        assert len(obj) == 1
        assert obj.pop(0) == 5

    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        # Child popped the only element, so the proxy list is now empty.
        assert not o
        self.assertEqual(len(o), 0)

    @classmethod
    def _test_dict(cls, obj):
        assert len(obj) == 1
        assert obj['foo'] == 5
        assert obj.get('foo') == 5
        assert list(obj.items()) == [('foo', 5)]
        assert list(obj.keys()) == ['foo']
        assert list(obj.values()) == [5]
        assert obj.copy() == {'foo': 5}
        assert obj.popitem() == ('foo', 5)

    def test_dict(self):
        o = self.manager.dict()
        o['foo'] = 5
        self.run_worker(self._test_dict, o)
        assert not o
        self.assertEqual(len(o), 0)

    @classmethod
    def _test_value(cls, obj):
        assert obj.value == 1
        assert obj.get() == 1
        obj.set(2)

    def test_value(self):
        o = self.manager.Value('i', 1)
        self.run_worker(self._test_value, o)
        # Child called obj.set(2); the change is visible through the proxy.
        self.assertEqual(o.value, 2)
        self.assertEqual(o.get(), 2)

    @classmethod
    def _test_array(cls, obj):
        assert obj[0] == 0
        assert obj[1] == 1
        assert len(obj) == 2
        assert list(obj) == [0, 1]

    def test_array(self):
        o = self.manager.Array('i', [0, 1])
        self.run_worker(self._test_array, o)

    @classmethod
    def _test_namespace(cls, obj):
        assert obj.x == 0
        assert obj.y == 1

    def test_namespace(self):
        o = self.manager.Namespace()
        o.x = 0
        o.y = 1
        self.run_worker(self._test_namespace, o)


class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # Just make sure names in not_exported are excluded
        support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
                             not_exported=['SUBDEBUG', 'SUBWARNING'])


#
# Mixins
#

class BaseMixin(object):
    # Snapshots the dangling process/thread sets at class setup so
    # tearDownClass can warn about anything the tests leaked.
    @classmethod
    def setUpClass(cls):
        cls.dangling = (multiprocessing.process._dangling.copy(),
                        threading._dangling.copy())

    @classmethod
    def tearDownClass(cls):
        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        # Warn about (and flag as environment-altering) any processes or
        # threads created during the tests that were never cleaned up.
        processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
        if processes:
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None

        threads = set(threading._dangling) - set(cls.dangling[1])
        if threads:
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None


class ProcessesMixin(BaseMixin):
    # Binds the test-case base classes to the real process-backed
    # multiprocessing API.
    TYPE = 'processes'
    Process = multiprocessing.Process
    connection = multiprocessing.connection
    current_process = staticmethod(multiprocessing.current_process)
    parent_process = staticmethod(multiprocessing.parent_process)
    active_children = staticmethod(multiprocessing.active_children)
    Pool = staticmethod(multiprocessing.Pool)
    Pipe = staticmethod(multiprocessing.Pipe)
    Queue = staticmethod(multiprocessing.Queue)
    JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
    Lock = staticmethod(multiprocessing.Lock)
    RLock = staticmethod(multiprocessing.RLock)
    Semaphore = staticmethod(multiprocessing.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.Condition)
    Event = staticmethod(multiprocessing.Event)
    Barrier = staticmethod(multiprocessing.Barrier)
    Value = staticmethod(multiprocessing.Value)
    Array = staticmethod(multiprocessing.Array)
    RawValue = staticmethod(multiprocessing.RawValue)
    RawArray = staticmethod(multiprocessing.RawArray)


class ManagerMixin(BaseMixin):
    # Binds the test-case base classes to manager-proxied objects; the
    # properties defer attribute lookup to the class-level `manager`
    # created in setUpClass.
    TYPE = 'manager'
    Process = multiprocessing.Process
    Queue = property(operator.attrgetter('manager.Queue'))
    JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
    Lock = property(operator.attrgetter('manager.Lock'))
    RLock = property(operator.attrgetter('manager.RLock'))
    Semaphore = property(operator.attrgetter('manager.Semaphore'))
    BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
    Condition = property(operator.attrgetter('manager.Condition'))
    Event = property(operator.attrgetter('manager.Event'))
    Barrier = property(operator.attrgetter('manager.Barrier'))
    Value = property(operator.attrgetter('manager.Value'))
    Array = property(operator.attrgetter('manager.Array'))
    list = property(operator.attrgetter('manager.list'))
    dict = property(operator.attrgetter('manager.dict'))
    Namespace = property(operator.attrgetter('manager.Namespace'))

    @classmethod
    def Pool(cls, *args, **kwds):
        return cls.manager.Pool(*args, **kwds)

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.manager = multiprocessing.Manager()

    @classmethod
    def tearDownClass(cls):
        # only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395)
        start_time = time.monotonic()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = time.monotonic() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                support.print_warning(f"multiprocessing.Manager still has "
                                      f"{multiprocessing.active_children()} "
                                      f"active children after {dt} seconds")
                break

        gc.collect()                       # do garbage collection
        if cls.manager._number_of_objects() != 0:
            # This is not really an error since some tests do not
            # ensure that all processes which hold a reference to a
            # managed object have been joined.
            test.support.environment_altered = True
            support.print_warning('Shared objects which still exist '
                                  'at manager shutdown:')
            support.print_warning(cls.manager._debug_info())
        cls.manager.shutdown()
        cls.manager.join()
        cls.manager = None

        super().tearDownClass()


class ThreadsMixin(BaseMixin):
    # Binds the test-case base classes to multiprocessing.dummy, the
    # thread-backed API-compatible implementation.
    TYPE = 'threads'
    Process = multiprocessing.dummy.Process
    connection = multiprocessing.dummy.connection
    current_process = staticmethod(multiprocessing.dummy.current_process)
    active_children = staticmethod(multiprocessing.dummy.active_children)
    Pool = staticmethod(multiprocessing.dummy.Pool)
    Pipe = staticmethod(multiprocessing.dummy.Pipe)
    Queue = staticmethod(multiprocessing.dummy.Queue)
    JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
    Lock = staticmethod(multiprocessing.dummy.Lock)
    RLock = staticmethod(multiprocessing.dummy.RLock)
    Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.dummy.Condition)
    Event = staticmethod(multiprocessing.dummy.Event)
    Barrier = staticmethod(multiprocessing.dummy.Barrier)
    Value = staticmethod(multiprocessing.dummy.Value)
    Array = staticmethod(multiprocessing.dummy.Array)

#
# Functions used to create test cases from the base ones in this module
#

def install_tests_in_module_dict(remote_globs, start_method):
    # For every BaseTestCase subclass defined here, synthesize one concrete
    # TestCase per allowed backend (processes/threads/manager) and inject it
    # into the calling module's globals.
    __module__ = remote_globs['__name__']
    local_globs = globals()
    ALL_TYPES = {'processes', 'threads', 'manager'}

    for name, base in local_globs.items():
        if not isinstance(base, type):
            continue
        if issubclass(base, BaseTestCase):
            if base is BaseTestCase:
                continue
            assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
            for type_ in base.ALLOWED_TYPES:
                newname = 'With' + type_.capitalize() + name[1:]
                Mixin = local_globs[type_.capitalize() + 'Mixin']

                class Temp(base, Mixin, unittest.TestCase):
                    pass

                if type_ == 'manager':
                    Temp = hashlib_helper.requires_hashdigest('md5')(Temp)

                Temp.__name__ = Temp.__qualname__ = newname
                Temp.__module__ = __module__
                remote_globs[newname] = Temp
        elif issubclass(base, unittest.TestCase):
            # Plain TestCase subclasses are re-exported unchanged under
            # their own name.
            class Temp(base, object):
                pass
            Temp.__name__ = Temp.__qualname__ = name
            Temp.__module__ = __module__
            remote_globs[name] = Temp

    dangling = [None, None]
    old_start_method = [None]

    def setUpModule():
        # Force the requested start method and record prior state so
        # tearDownModule can restore it and detect leaked children.
        multiprocessing.set_forkserver_preload(PRELOAD)
        multiprocessing.process._cleanup()
        dangling[0] = multiprocessing.process._dangling.copy()
        dangling[1] = threading._dangling.copy()
        old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
        try:
            multiprocessing.set_start_method(start_method, force=True)
        except ValueError:
            raise unittest.SkipTest(start_method +
                                    ' start method not supported')

        if sys.platform.startswith("linux"):
            try:
                lock = multiprocessing.RLock()
            except OSError:
                raise unittest.SkipTest("OSError raises on RLock creation, "
                                        "see issue 3111!")
        check_enough_semaphores()
        util.get_temp_dir()     # creates temp directory
        multiprocessing.get_logger().setLevel(LOG_LEVEL)

    def tearDownModule():
        need_sleep = False

        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        multiprocessing.set_start_method(old_start_method[0], force=True)
        # pause a bit so we don't get warning about dangling threads/processes
        processes = set(multiprocessing.process._dangling) - set(dangling[0])
        if processes:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None

        threads = set(threading._dangling) - set(dangling[1])
        if threads:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None

        # Sleep 500 ms to give time to child processes to complete.
        if need_sleep:
            time.sleep(0.5)

        multiprocessing.util._cleanup_tests()

    remote_globs['setUpModule'] = setUpModule
    remote_globs['tearDownModule'] = tearDownModule
# ==== debug_data_multiplexer.py ====
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A wrapper around DebugDataReader used for retrieving tfdbg v2 data.""" import threading from tensorboard import errors # Dummy run name for the debugger. # Currently, the `DebuggerV2ExperimentMultiplexer` class is tied to a single # logdir, which holds at most one DebugEvent file set in the tfdbg v2 (tfdbg2 # for short) format. # TODO(cais): When tfdbg2 allows there to be multiple DebugEvent file sets in # the same logdir, replace this magic string with actual run names. DEFAULT_DEBUGGER_RUN_NAME = "__default_debugger_run__" # Default number of alerts per monitor type. # Limiting the number of alerts is based on the consideration that usually # only the first few alerting events are the most critical and the subsequent # ones are either repetitions of the earlier ones or caused by the earlier ones. DEFAULT_PER_TYPE_ALERT_LIMIT = 1000 # Default interval between successive calls to `DebugDataReader.update()``. DEFAULT_RELOAD_INTERVAL_SEC = 30 def run_repeatedly_in_background(target, interval_sec): """Run a target task repeatedly in the background. In the context of this module, `target` is the `update()` method of the underlying reader for tfdbg2-format data. This method is mocked by unit tests for deterministic behaviors during testing. 
    Args:
      target: The target task to run in the background, a callable with no
        args.
      interval_sec: Time interval between repeats, in seconds.

    Returns:
      - A `threading.Event` object that can be used to interrupt an ongoing
        waiting interval between successive runs of `target`. To interrupt
        the interval, call the `set()` method of the object.
      - The `threading.Thread` object on which `target` is run repeatedly.
    """
    event = threading.Event()

    def _run_repeatedly():
        # Run target, then sleep up to interval_sec; setting `event` wakes
        # the loop early so the next target() call happens immediately.
        while True:
            target()
            event.wait(interval_sec)
            event.clear()

    # Use `daemon=True` to make sure the thread doesn't block program exit.
    thread = threading.Thread(target=_run_repeatedly, daemon=True)
    thread.start()
    return event, thread


def _alert_to_json(alert):
    """Convert a debug-monitor alert object to a JSON-serializable dict."""
    # TODO(cais): Replace this with Alert.to_json() when supported by the
    # backend.
    from tensorflow.python.debug.lib import debug_events_monitors

    if isinstance(alert, debug_events_monitors.InfNanAlert):
        return {
            "alert_type": "InfNanAlert",
            "op_type": alert.op_type,
            "output_slot": alert.output_slot,
            # TODO(cais): Once supported by backend, add 'op_name' key
            # for intra-graph execution events.
            "size": alert.size,
            "num_neg_inf": alert.num_neg_inf,
            "num_pos_inf": alert.num_pos_inf,
            "num_nan": alert.num_nan,
            "execution_index": alert.execution_index,
            "graph_execution_trace_index": alert.graph_execution_trace_index,
        }
    else:
        raise TypeError("Unrecognized alert subtype: %s" % type(alert))


def parse_tensor_name(tensor_name):
    """Helper function that extracts op name and slot from tensor name.

    E.g., "op:1" -> ("op", 1); a bare op name defaults to output slot 0.
    """
    output_slot = 0
    if ":" in tensor_name:
        op_name, output_slot = tensor_name.split(":")
        output_slot = int(output_slot)
    else:
        op_name = tensor_name
    return op_name, output_slot


class DebuggerV2EventMultiplexer(object):
    """A class used for accessing tfdbg v2 DebugEvent data on local filesystem.

    This class is a short-term hack, mirroring the EventMultiplexer for the
    main TensorBoard plugins (e.g., scalar, histogram and graphs.)
    As such, it only implements the methods relevant to the Debugger V2
    plugin.

    TODO(cais): Integrate it with EventMultiplexer and use the integrated
    class from MultiplexerDataProvider for a single path of accessing
    debugger and non-debugger data.
    """

    def __init__(self, logdir):
        """Constructor for the `DebuggerV2EventMultiplexer`.

        Args:
          logdir: Path to the directory to load the tfdbg v2 data from.
        """
        self._logdir = logdir
        self._reader = None
        # Guards lazy creation of self._reader across threads.
        self._reader_lock = threading.Lock()
        self._reload_needed_event = None
        # Create the reader for the tfdbg2 data in the logdir as soon as
        # the backend of the debugger-v2 plugin is created, so it doesn't need
        # to wait for the first request from the FE to start loading data.
        self._tryCreateReader()

    def _tryCreateReader(self):
        """Try creating reader for tfdbg2 data in the logdir.

        If the reader has already been created, a new one will not be created
        and this function is a no-op.

        If a reader has not been created, create it and start periodic calls
        to `update()` on a separate thread.
        """
        if self._reader:
            return
        with self._reader_lock:
            if not self._reader:
                try:
                    # TODO(cais): Avoid conditional imports and instead use
                    # plugin loader to gate the loading of this entire plugin.
                    from tensorflow.python.debug.lib import debug_events_reader
                    from tensorflow.python.debug.lib import (
                        debug_events_monitors,
                    )
                except ImportError:
                    # This ensures graceful behavior when tensorflow install
                    # is unavailable or when the installed tensorflow version
                    # does not contain the required modules.
                    return

                try:
                    self._reader = debug_events_reader.DebugDataReader(
                        self._logdir
                    )
                except AttributeError:
                    # Gracefully fail for users without the required API
                    # changes to debug_events_reader.DebugDataReader
                    # introduced in TF 2.1.0.dev20200103. This should be safe
                    # to remove when TF 2.2 is released.
                    return
                except ValueError:
                    # When no DebugEvent file set is found in the logdir, a
                    # `ValueError` is thrown.
                    return

                # Monitors scan the loaded data for alert conditions
                # (currently only Inf/NaN values).
                self._monitors = [
                    debug_events_monitors.InfNanMonitor(
                        self._reader, limit=DEFAULT_PER_TYPE_ALERT_LIMIT
                    )
                ]
                self._reload_needed_event, _ = run_repeatedly_in_background(
                    self._reader.update, DEFAULT_RELOAD_INTERVAL_SEC
                )

    def _reloadReader(self):
        """If a reader exists and has started periodic updating, unblock the update.

        The updates are performed periodically with a sleep interval between
        successive calls to the reader's update() method. Calling this method
        interrupts the sleep immediately if one is ongoing.
        """
        if self._reload_needed_event:
            self._reload_needed_event.set()

    def FirstEventTimestamp(self, run):
        """Return the timestamp of the first DebugEvent of the given run.

        This may perform I/O if no events have been loaded yet for the run.

        Args:
          run: A string name of the run for which the timestamp is retrieved.
            This currently must be hardcoded as `DEFAULT_DEBUGGER_RUN_NAME`,
            as each logdir contains at most one DebugEvent file set (i.e., a
            run of a tfdbg2-instrumented TensorFlow program.)

        Returns:
          The wall_time of the first event of the run, which will be in
            seconds since the epoch as a `float`.
        """
        if self._reader is None:
            raise ValueError("No tfdbg2 runs exists.")
        if run != DEFAULT_DEBUGGER_RUN_NAME:
            raise ValueError(
                "Expected run name to be %s, but got %s"
                % (DEFAULT_DEBUGGER_RUN_NAME, run)
            )
        return self._reader.starting_wall_time()

    def PluginRunToTagToContent(self, plugin_name):
        raise NotImplementedError(
            "DebugDataMultiplexer.PluginRunToTagToContent() has not been "
            "implemented yet."
        )

    def Runs(self):
        """Return all the tfdbg2 run names in the logdir watched by this instance.

        The `Run()` method of this class is specialized for the tfdbg2-format
        DebugEvent files.

        As a side effect, this method unblocks the underlying reader's
        periodic reloading if a reader exists. This lets the reader update at
        a higher frequency than the default one with 30-second sleeping period
        between reloading when data is being queried actively from this
        instance.
        Note that this `Runs()` method is used by all other public data-access
        methods of this class (e.g., `ExecutionData()`,
        `GraphExecutionData()`). Hence calls to those methods will lead to
        accelerated data reloading of the reader.

        Returns:
          If tfdbg2-format data exists in the `logdir` of this object,
          returns:
              ```
              {runName: { "debugger-v2": [tag1, tag2, tag3] } }
              ```
              where `runName` is the hard-coded string
              `DEFAULT_DEBUGGER_RUN_NAME` string. This is related to the fact
              that tfdbg2 currently contains at most one DebugEvent file set
              per directory.
          If no tfdbg2-format data exists in the `logdir`, an empty `dict`.
        """
        # Call `_tryCreateReader()` here to cover the possibility of tfdbg2
        # data start being written to the logdir after the tensorboard
        # backend starts.
        self._tryCreateReader()
        if self._reader:
            # If a _reader exists, unblock its reloading (on a separate
            # thread) immediately.
            self._reloadReader()
            return {
                DEFAULT_DEBUGGER_RUN_NAME: {
                    # TODO(cais): Add the semantically meaningful tag names
                    # such as 'execution_digests_book', 'alerts_book'
                    "debugger-v2": []
                }
            }
        else:
            return {}

    def _checkBeginEndIndices(self, begin, end, total_count):
        # Validate a [begin, end) range against total_count; a negative `end`
        # means "everything" and is normalized to total_count.
        if begin < 0:
            raise errors.InvalidArgumentError(
                "Invalid begin index (%d)" % begin
            )
        if end > total_count:
            raise errors.InvalidArgumentError(
                "end index (%d) out of bounds (%d)" % (end, total_count)
            )
        if end >= 0 and end < begin:
            raise errors.InvalidArgumentError(
                "end index (%d) is unexpectedly less than begin index (%d)"
                % (end, begin)
            )
        if end < 0:  # This means all digests.
            end = total_count
        return end

    def Alerts(self, run, begin, end, alert_type_filter=None):
        """Get alerts from the debugged TensorFlow program.

        Args:
          run: The tfdbg2 run to get Alerts from.
          begin: Beginning alert index.
          end: Ending alert index.
          alert_type_filter: Optional filter string for alert type, used to
            restrict retrieved alerts data to a single type. If used, `begin`
            and `end` refer to the beginning and ending indices within the
            filtered alert type.
""" from tensorflow.python.debug.lib import debug_events_monitors runs = self.Runs() if run not in runs: # TODO(cais): This should generate a 400 response instead. return None alerts = [] alerts_breakdown = dict() alerts_by_type = dict() for monitor in self._monitors: monitor_alerts = monitor.alerts() if not monitor_alerts: continue alerts.extend(monitor_alerts) # TODO(cais): Replace this with Alert.to_json() when # monitor.alert_type() is available. if isinstance(monitor, debug_events_monitors.InfNanMonitor): alert_type = "InfNanAlert" else: alert_type = "__MiscellaneousAlert__" alerts_breakdown[alert_type] = len(monitor_alerts) alerts_by_type[alert_type] = monitor_alerts num_alerts = len(alerts) if alert_type_filter is not None: if alert_type_filter not in alerts_breakdown: raise errors.InvalidArgumentError( "Filtering of alerts failed: alert type %s does not exist" % alert_type_filter ) alerts = alerts_by_type[alert_type_filter] end = self._checkBeginEndIndices(begin, end, len(alerts)) return { "begin": begin, "end": end, "alert_type": alert_type_filter, "num_alerts": num_alerts, "alerts_breakdown": alerts_breakdown, "per_type_alert_limit": DEFAULT_PER_TYPE_ALERT_LIMIT, "alerts": [_alert_to_json(alert) for alert in alerts[begin:end]], } def ExecutionDigests(self, run, begin, end): """Get ExecutionDigests. Args: run: The tfdbg2 run to get `ExecutionDigest`s from. begin: Beginning execution index. end: Ending execution index. 
        Returns:
          A JSON-serializable object containing the `ExecutionDigest`s and
          related meta-information
        """
        runs = self.Runs()
        if run not in runs:
            return None
        # TODO(cais): For scalability, use begin and end kwargs when available
        # in `DebugDataReader.execution()`.`
        execution_digests = self._reader.executions(digest=True)
        end = self._checkBeginEndIndices(begin, end, len(execution_digests))
        return {
            "begin": begin,
            "end": end,
            "num_digests": len(execution_digests),
            "execution_digests": [
                digest.to_json() for digest in execution_digests[begin:end]
            ],
        }

    def ExecutionData(self, run, begin, end):
        """Get Execution data objects (Detailed, non-digest form).

        Args:
          run: The tfdbg2 run to get `ExecutionDigest`s from.
          begin: Beginning execution index.
          end: Ending execution index.

        Returns:
          A JSON-serializable object containing the `ExecutionDigest`s and
          related meta-information
        """
        runs = self.Runs()
        if run not in runs:
            return None
        # Digests are fetched only to validate the [begin, end) range.
        execution_digests = self._reader.executions(digest=True)
        end = self._checkBeginEndIndices(begin, end, len(execution_digests))
        # NOTE(review): this sliced list appears unused below; presumably a
        # leftover from before the begin/end kwargs existed — confirm.
        execution_digests = execution_digests[begin:end]
        executions = self._reader.executions(digest=False, begin=begin, end=end)
        return {
            "begin": begin,
            "end": end,
            "executions": [execution.to_json() for execution in executions],
        }

    def GraphExecutionDigests(self, run, begin, end, trace_id=None):
        """Get `GraphExecutionTraceDigest`s.

        Args:
          run: The tfdbg2 run to get `GraphExecutionTraceDigest`s from.
          begin: Beginning graph-execution index.
          end: Ending graph-execution index.

        Returns:
          A JSON-serializable object containing the `ExecutionDigest`s and
          related meta-information
        """
        runs = self.Runs()
        if run not in runs:
            return None
        # TODO(cais): Implement support for trace_id once the joining of eager
        # execution and intra-graph execution is supported by DebugDataReader.
        if trace_id is not None:
            raise NotImplementedError(
                "trace_id support for GraphExecutionTraceDigest is "
                "not implemented yet."
            )
        graph_exec_digests = self._reader.graph_execution_traces(digest=True)
        end = self._checkBeginEndIndices(begin, end, len(graph_exec_digests))
        return {
            "begin": begin,
            "end": end,
            "num_digests": len(graph_exec_digests),
            "graph_execution_digests": [
                digest.to_json() for digest in graph_exec_digests[begin:end]
            ],
        }

    def GraphExecutionData(self, run, begin, end, trace_id=None):
        """Get `GraphExecutionTrace`s.

        Args:
          run: The tfdbg2 run to get `GraphExecutionTrace`s from.
          begin: Beginning graph-execution index.
          end: Ending graph-execution index.

        Returns:
          A JSON-serializable object containing the `ExecutionDigest`s and
          related meta-information
        """
        runs = self.Runs()
        if run not in runs:
            return None
        # TODO(cais): Implement support for trace_id once the joining of eager
        # execution and intra-graph execution is supported by DebugDataReader.
        if trace_id is not None:
            raise NotImplementedError(
                "trace_id support for GraphExecutionTraceData is "
                "not implemented yet."
            )
        # Digests are fetched only to validate the [begin, end) range before
        # loading the full (non-digest) trace objects.
        digests = self._reader.graph_execution_traces(digest=True)
        end = self._checkBeginEndIndices(begin, end, len(digests))
        graph_executions = self._reader.graph_execution_traces(
            digest=False, begin=begin, end=end
        )
        return {
            "begin": begin,
            "end": end,
            "graph_executions": [
                graph_exec.to_json() for graph_exec in graph_executions
            ],
        }

    def GraphInfo(self, run, graph_id):
        """Get the information regarding a TensorFlow graph.

        Args:
          run: Name of the run.
          graph_id: Debugger-generated ID of the graph in question.
            This information is available in the return values
            of `GraphOpInfo`, `GraphExecution`, etc.

        Returns:
          A JSON-serializable object containing the information regarding
            the TensorFlow graph.

        Raises:
          NotFoundError if the graph_id is not known to the debugger.
""" runs = self.Runs() if run not in runs: return None try: graph = self._reader.graph_by_id(graph_id) except KeyError: raise errors.NotFoundError( 'There is no graph with ID "%s"' % graph_id ) return graph.to_json() def GraphOpInfo(self, run, graph_id, op_name): """Get the information regarding a graph op's creation. Args: run: Name of the run. graph_id: Debugger-generated ID of the graph that contains the op in question. This ID is available from other methods of this class, e.g., the return value of `GraphExecutionDigests()`. op_name: Name of the op. Returns: A JSON-serializable object containing the information regarding the op's creation and its immediate inputs and consumers. Raises: NotFoundError if the graph_id or op_name does not exist. """ runs = self.Runs() if run not in runs: return None try: graph = self._reader.graph_by_id(graph_id) except KeyError: raise errors.NotFoundError( 'There is no graph with ID "%s"' % graph_id ) try: op_creation_digest = graph.get_op_creation_digest(op_name) except KeyError: raise errors.NotFoundError( 'There is no op named "%s" in graph with ID "%s"' % (op_name, graph_id) ) data_object = self._opCreationDigestToDataObject( op_creation_digest, graph ) # Populate data about immediate inputs. for input_spec in data_object["inputs"]: try: input_op_digest = graph.get_op_creation_digest( input_spec["op_name"] ) except KeyError: input_op_digest = None if input_op_digest: input_spec["data"] = self._opCreationDigestToDataObject( input_op_digest, graph ) # Populate data about immediate consuming ops. 
        # Same tolerance as for inputs: consumers without a creation digest
        # get no "data" field.
        for slot_consumer_specs in data_object["consumers"]:
            for consumer_spec in slot_consumer_specs:
                try:
                    digest = graph.get_op_creation_digest(
                        consumer_spec["op_name"]
                    )
                except KeyError:
                    digest = None
                if digest:
                    consumer_spec["data"] = self._opCreationDigestToDataObject(
                        digest, graph
                    )
        return data_object

    def _opCreationDigestToDataObject(self, op_creation_digest, graph):
        # Convert an op-creation digest to the JSON shape used by the
        # frontend: replaces "graph_id" with the full "graph_ids" stack and
        # expands input tensor names into {op_name, output_slot} specs.
        if op_creation_digest is None:
            return None
        json_object = op_creation_digest.to_json()
        del json_object["graph_id"]
        json_object["graph_ids"] = self._getGraphStackIds(
            op_creation_digest.graph_id
        )
        # TODO(cais): "num_outputs" should be populated in to_json() instead.
        json_object["num_outputs"] = op_creation_digest.num_outputs
        del json_object["input_names"]
        json_object["inputs"] = []
        for input_tensor_name in op_creation_digest.input_names or []:
            input_op_name, output_slot = parse_tensor_name(input_tensor_name)
            json_object["inputs"].append(
                {"op_name": input_op_name, "output_slot": output_slot}
            )
        # "consumers" is one list per output slot.
        json_object["consumers"] = []
        for _ in range(json_object["num_outputs"]):
            json_object["consumers"].append([])
        for src_slot, consumer_op_name, dst_slot in graph.get_op_consumers(
            json_object["op_name"]
        ):
            json_object["consumers"][src_slot].append(
                {"op_name": consumer_op_name, "input_slot": dst_slot}
            )
        return json_object

    def _getGraphStackIds(self, graph_id):
        """Retrieve the IDs of all outer graphs of a graph.

        Args:
          graph_id: Id of the graph being queried with respect to its
            outer graphs context.

        Returns:
          A list of graph_ids, ordered from outermost to innermost, including
            the input `graph_id` argument as the last item.
""" graph_ids = [graph_id] graph = self._reader.graph_by_id(graph_id) while graph.outer_graph_id: graph_ids.insert(0, graph.outer_graph_id) graph = self._reader.graph_by_id(graph.outer_graph_id) return graph_ids def SourceFileList(self, run): runs = self.Runs() if run not in runs: return None return self._reader.source_file_list() def SourceLines(self, run, index): runs = self.Runs() if run not in runs: return None try: host_name, file_path = self._reader.source_file_list()[index] except IndexError: raise errors.NotFoundError( "There is no source-code file at index %d" % index ) return { "host_name": host_name, "file_path": file_path, "lines": self._reader.source_lines(host_name, file_path), } def StackFrames(self, run, stack_frame_ids): runs = self.Runs() if run not in runs: return None stack_frames = [] for stack_frame_id in stack_frame_ids: if stack_frame_id not in self._reader._stack_frame_by_id: raise errors.NotFoundError( "Cannot find stack frame with ID %s" % stack_frame_id ) # TODO(cais): Use public method (`stack_frame_by_id()`) when # available. # pylint: disable=protected-access stack_frames.append(self._reader._stack_frame_by_id[stack_frame_id]) # pylint: enable=protected-access return {"stack_frames": stack_frames}
# ==== HardwareThread.py ====
# -*- coding: utf-8 -*- # # Singleton Thread class to control the alarm alert hardware # # Copyright (c) 2015 carlosperate http://carlosperate.github.io # # Licensed under The MIT License (MIT), a copy can be found in the LICENSE file # from __future__ import (unicode_literals, absolute_import, print_function, division) import sys import time import types import random import threading try: from LightUpHardware import HardwareLightBulb from LightUpHardware import HardwareSwitch from LightUpHardware import HardwareLamp except ImportError: import HardwareLightBulb import HardwareSwitch import HardwareLamp class HardwareThread(object): """ This class uses the singleton pattern to control the hardware connected to the system. The class static public variables are controlled using accessors, and are not accessible from class instances on purpose to highlight the fact that the data belongs to the class, so: instance = HardwareThread() <-- returns a singleton instance HardwareThread.lamp_time <-- correct instance.lamp_time <-- incorrect, AttributeError instance._HardwareThread__lamp_time <-- works, but naughty """ __singleton = None __lamp_time = None __lamp_duration = None __room_light_time = None __room_light_duration = None __coffee_time = None __total_time = None __running = False __thread = None __threads = [] # # metaclass methods to apply singleton pattern and set accessors # class __HardwareThreadMetaclass(type): """ The property accesors of the HardwareThread class would only be applied to instance variables and not to the class static variables, so we set the variables with accessors in the metaclass and they will be able act as HardwareThread class static variables with accessors for input sanitation and to stop editing the data while the thread is running. 
""" def __new__(mcs, name, bases, dct): hw_th_instance = type.__new__(mcs, name, bases, dct) # Set the metaclass variables and attach accessors mcs.lamp_time = property( hw_th_instance._HardwareThread__get_lamp_time.im_func, hw_th_instance._HardwareThread__set_lamp_time.im_func) mcs.lamp_duration = property( hw_th_instance._HardwareThread__get_lamp_duration.im_func, hw_th_instance._HardwareThread__set_lamp_duration.im_func) mcs.room_light_time = property( hw_th_instance._HardwareThread__get_room_light_time.im_func, hw_th_instance._HardwareThread__set_room_light_time.im_func) mcs.room_light_duration = property( hw_th_instance._HardwareThread__get_room_light_duration.im_func, hw_th_instance._HardwareThread__set_room_light_duration.im_func) mcs.coffee_time = property( hw_th_instance._HardwareThread__get_coffee_time.im_func, hw_th_instance._HardwareThread__set_coffee_time.im_func) mcs.total_time = property( hw_th_instance._HardwareThread__get_total_time.im_func, hw_th_instance._HardwareThread__set_total_time.im_func) return hw_th_instance __metaclass__ = __HardwareThreadMetaclass def __new__(cls, lamp=None, room_light=None, coffee_time=None, total_time=None): """ The new constructor is edited directly to be able to control this class instance creation and apply a singleton pattern. Set strict control of the constructor arguments to: :param lamp: List or Tuple with the ime, in seconds, for the lamp procedure to start and its duration. :param room_light: List or Tuple with the ime, in seconds, for the room lights procedure to start and its duration :param coffee_time: Integer time, in seconds, for the coffee procedure to start. :param total_time: Integer, total time for the entire entire hardware control process to take. :return: HardwareThread singleton instance. 
""" # Create singleton instance if __singleton is None if not cls.__singleton: cls.__singleton = super(HardwareThread, cls).__new__(cls) # Stop users from adding attributes, as they can accidentally add # the class static variables as instance attributes cls.__original_setattr = cls.__setattr__ def set_attribute_filter(self, key, value): if key in ('lamp_time', 'lamp_duration', 'room_light_time', 'room_light_duration', 'coffee_time', 'total_time'): raise AttributeError( 'Cannot add %s attribute to HardwareThread instance.' % key) else: self.__original_setattr(key, value) cls.__setattr__ = set_attribute_filter # The constructor arguments are optional and might change class # variables every time the singleton instance in invoked if lamp is not None: if (isinstance(lamp, types.TupleType) or isinstance(lamp, types.ListType)) and (len(lamp) == 2): cls.lamp_time = lamp[0] cls.lamp_duration = lamp[1] else: print('ERROR: Provided lamp data is not list/tuple of the ' + ('right format (launch time, duration): %s' % str(lamp)) + ('\nKept default: (%s, %s)' % (cls.lamp_time, cls.lamp_duration)), file=sys.stderr) if room_light is not None: if (isinstance(room_light, types.TupleType) or isinstance(room_light, types.ListType)) and \ (len(room_light) == 2): cls.room_light_time = room_light[0] cls.room_light_duration = room_light[1] else: print('ERROR: Provided room light is not list/tuple of the ' + ('right format (launch time, duration): %s' % str(lamp)) + ('\nKept default: (%s, %s)' % (cls.room_light_time, cls.room_light_duration)), file=sys.stderr) if coffee_time is not None: cls.coffee_time = coffee_time if total_time is not None: cls.total_time = total_time return cls.__singleton def __init__(self, *args, **kwargs): """ No Initiliser, as everything taken care of in the constructor. """ pass @classmethod def _drop(cls): """ Drop the instance and restore the set attribute method. 
""" try: cls.__setattr__ = cls.__original_setattr except AttributeError: print('ERROR: Trying to drop singleton not initialised (setattr).', file=sys.stderr) if cls.__singleton: cls.__singleton = None else: print('ERROR: Trying to drop singleton not initialised (instance).', file=sys.stderr) cls.__lamp_time = None cls.__lamp_duration = None cls.__room_light_time = None cls.__room_light_duration = None cls.__coffee_time = None cls.__total_time = None cls.__running = False cls.__thread = None cls.__threads = [] # # Accesors # @classmethod def __get_lamp_time(cls): return cls.__lamp_time @classmethod def __set_lamp_time(cls, new_lamp_time): """ Only sets value if thread is not running. Checks input is an integer. :param new_lamp_time: New lamp time, in seconds, to trigger. """ if cls.__thread and cls.__thread.isAlive(): print('Cannot change properties while thread is running.') else: if isinstance(new_lamp_time, types.IntType): cls.__lamp_time = new_lamp_time else: print('ERROR: Provided lamp_time is not an integer: %s' % new_lamp_time, file=sys.stderr) @classmethod def __get_lamp_duration(cls): return cls.__lamp_duration @classmethod def __set_lamp_duration(cls, new_lamp_duration): """ Only sets value if thread is not running. Checks input is an integer. :param new_lamp_duration: New lamp duration, in seconds, to last. """ if cls.__thread and cls.__thread.isAlive(): print('Cannot change properties while thread is running.') else: if isinstance(new_lamp_duration, types.IntType): cls.__lamp_duration = new_lamp_duration else: print('ERROR: Provided lamp_duration is not an integer: %s' % new_lamp_duration, file=sys.stderr) @classmethod def __get_room_light_time(cls): return cls.__room_light_time @classmethod def __set_room_light_time(cls, new_room_light_time): """ Only sets value if thread is not running. Checks input is an integer. :param new_room_light_time: New room light time, in seconds, to trigger. 
""" if cls.__thread and cls.__thread.isAlive(): print('Cannot change properties while thread is running.') else: if isinstance(new_room_light_time, types.IntType): cls.__room_light_time = new_room_light_time else: print('ERROR: Provided room_light_time is not an integer: %s' % new_room_light_time, file=sys.stderr) @classmethod def __get_room_light_duration(cls): return cls.__room_light_duration @classmethod def __set_room_light_duration(cls, new_room_light_duration): """ Only sets value if thread is not running. Checks input is an integer. :param new_room_light_duration: New room light duration, in seconds, to last. """ if cls.__thread and cls.__thread.isAlive(): print('Cannot change properties while thread is running.') else: if isinstance(new_room_light_duration, types.IntType): cls.__room_light_duration = new_room_light_duration else: print('ERROR: Provided room_light_duration is not an integer:' + ' %s' % new_room_light_duration, file=sys.stderr) @classmethod def __get_coffee_time(cls): return cls.__coffee_time @classmethod def __set_coffee_time(cls, new_coffee_time): """ Only sets value if thread is not running. Checks input is an integer. :param new_coffee_time: New coffee time, in seconds, to trigger. """ if cls.__thread and cls.__thread.isAlive(): print('Cannot change properties while thread is running.') else: if isinstance(new_coffee_time, types.IntType): cls.__coffee_time = new_coffee_time else: print('ERROR: Provided coffee_time is not an integer: %s' % new_coffee_time, file=sys.stderr) @classmethod def __get_total_time(cls): return cls.__total_time @classmethod def __set_total_time(cls, new_total_time): """ Only sets value if thread is not running. Checks input is an integer. :param new_total_time: New total runtime, in seconds. 
""" if cls.__thread and cls.__thread.isAlive(): print('Cannot change properties while thread is running.') else: if isinstance(new_total_time, types.IntType): cls.__total_time = new_total_time else: print('ERROR: Provided total_time is not an integer: %s' % new_total_time, file=sys.stderr) # # class member methods # @classmethod def check_variables(cls): """ Checks that all variables are set to something :return: Boolean indicating the good state of the variables """ all_good = True if cls.lamp_time is None: print('HardwareThread ERROR: Variable lamp_time has not been set.', file=sys.stderr) all_good = False if cls.lamp_duration is None: print('HardwareThread ERROR: Variable lamp_duration has not been ' 'set.', file=sys.stderr) all_good = False if cls.room_light_time is None: print('HardwareThread ERROR: Variable room_light_time has not been ' 'set.', file=sys.stderr) all_good = False if cls.room_light_duration is None: print('HardwareThread ERROR: Variable room_light_duration has not ' 'been set.', file=sys.stderr) all_good = False if cls.coffee_time is None: print('HardwareThread ERROR: Variable coffee_time has not been ' 'set.', file=sys.stderr) all_good = False if cls.total_time is None: print('HardwareThread ERROR: Variable total_time has not been set.', file=sys.stderr) all_good = False # Check that the total running time is == or >= than total_time if all_good is True and \ ((cls.total_time < (cls.lamp_time + cls.lamp_duration)) or (cls.total_time < (cls.room_light_time + cls.room_light_duration)) or (cls.total_time < cls.coffee_time)): print('WARNING: The total runtime of the HardwareThread is lower' + 'than the sum of its components !', file=sys.stderr) return all_good # # Thread methods # @classmethod def _launch_lamp(cls): """ Creates and starts the thread to gradually turn on lamp. 
""" t = threading.Thread( name='LampThread', target=HardwareLamp.gradual_light_on, args=(cls.lamp_duration,)) t.daemon = True cls.__threads.append(t) t.start() @classmethod def _launch_room_light(cls): """ Creates and starts the thread to gradually turn on the room light. """ t = threading.Thread( name='LightThread', target=HardwareLightBulb.gradual_light_on, args=(cls.room_light_duration,)) t.daemon = True cls.__threads.append(t) t.start() @classmethod def _launch_coffee(cls): """ Creates and starts the thread to turn on the coffee machine. """ t = threading.Thread( name='SwitchThread', target=HardwareSwitch.safe_on) t.daemon = True cls.__threads.append(t) t.start() @classmethod def __run(cls): """ Loop function to run as long as total_time indicates, in seconds. It launches the individual hardware threads at the times indicated by their variables. """ start_time = time.time() time_lamp = start_time + cls.lamp_time time_room = start_time + cls.room_light_time time_coffee = start_time + cls.coffee_time end_time = start_time + cls.total_time lamp_launched = False room_launched = False coffee_launched = False # Time controlled loop to launch the required hardware functions current_time = time.time() while current_time < end_time: if time_lamp < current_time and lamp_launched is False: lamp_launched = True cls._launch_lamp() if time_room < current_time and room_launched is False: room_launched = True cls._launch_room_light() if time_coffee < current_time and coffee_launched is False: coffee_launched = True cls._launch_coffee() time.sleep(0.01) current_time = time.time() # Don't wait for the threads to join, as it would overrun the requested # runtime. Ending this thread will kill its children (daemon=True). print('HardwareThread run finished.') cls.__running = False @classmethod def start(cls): """ Launches the HardwareThread thread only if the variables have been set and there is no other thread running already. 
This method is not re-entrant by design, as it locks relaunching until previous threads are done. """ # Check if required variables are set variables_ok = cls.check_variables() if variables_ok is False: return # Setting a lock for safe reentry, not unlocked here, as it will exit as # soon as the thread is launched, so unlocked at the end of cls.__run() if cls.__running is True: print("WARNING: LightUp Hardware already running, thread waiting.", file=sys.stderr) while cls.__running is True: time.sleep(float(random.randint(1, 100)) / 1000.0) cls.__running = True # Launch thread print('Running the Hardware Thread:\n\t' 'Lamp will gradually increase brightness in %s seconds, for %s ' 'seconds\n\t' 'Room light will gradually increase brightness in %s secs, for %s' ' seconds\n\t' 'Coffee machine will start brewing in %s seconds\n\t' 'Total runtime will be %s seconds' % (cls.lamp_time, cls.lamp_duration, cls.room_light_time, cls.room_light_duration, cls.coffee_time, cls.total_time)) cls.__thread = threading.Thread( name='HardwareThread run', target=cls.__run) cls.__thread.daemon = True cls.__thread.start() @classmethod def isAlive(cls): """ Provides easy and familiar check of the main thread Alive state. :return: Boolean indicating the Alive state of the Hardware thread """ if cls.__thread: return cls.__thread.isAlive() else: return False
custom.py
# pylint: disable=too-many-lines # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function import binascii import datetime import errno import json import os import os.path import platform import re import ssl import stat import subprocess import sys import tempfile import threading import time import uuid import base64 import webbrowser from six.moves.urllib.request import urlopen # pylint: disable=import-error from six.moves.urllib.error import URLError # pylint: disable=import-error from math import isnan import requests from knack.log import get_logger from knack.util import CLIError from knack.prompting import prompt_pass, NoTTYException import yaml # pylint: disable=import-error from dateutil.relativedelta import relativedelta # pylint: disable=import-error from dateutil.parser import parse # pylint: disable=import-error from msrestazure.azure_exceptions import CloudError import colorama # pylint: disable=import-error from tabulate import tabulate # pylint: disable=import-error from azure.cli.core.api import get_config_dir from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id from azure.cli.core.keys import is_valid_ssh_rsa_public_key from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait from azure.cli.core.commands import LongRunningOperation from azure.graphrbac.models import (ApplicationCreateParameters, PasswordCredential, KeyCredential, ServicePrincipalCreateParameters, GetObjectsParameters) from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ContainerServiceLinuxProfile from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models 
import ManagedClusterWindowsProfile from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ContainerServiceNetworkProfile from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ManagedClusterServicePrincipalProfile from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ContainerServiceSshConfiguration from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ContainerServiceSshPublicKey from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ManagedCluster from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ManagedClusterAADProfile from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ManagedClusterAddonProfile from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ManagedClusterAgentPoolProfile from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import AgentPool from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ContainerServiceStorageProfileTypes from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ManagedClusterIdentity from .vendored_sdks.azure_mgmt_preview_aks.v2020_02_01.models import ManagedClusterAPIServerAccessProfile from ._client_factory import cf_resource_groups from ._client_factory import get_auth_management_client from ._client_factory import get_graph_rbac_management_client from ._client_factory import cf_resources from ._client_factory import get_resource_by_name from ._client_factory import cf_container_registry_service from ._client_factory import cf_storage from ._helpers import _populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided, update_load_balancer_profile, create_load_balancer_profile) from ._consts import CONST_INGRESS_APPGW_ADDON_NAME from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME from ._consts import 
CONST_INGRESS_APPGW_SUBNET_PREFIX, CONST_INGRESS_APPGW_SUBNET_ID from ._consts import CONST_INGRESS_APPGW_SHARED, CONST_INGRESS_APPGW_WATCH_NAMESPACE from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE logger = get_logger(__name__) def which(binary): path_var = os.getenv('PATH') if platform.system() == 'Windows': binary = binary + '.exe' parts = path_var.split(';') else: parts = path_var.split(':') for part in parts: bin_path = os.path.join(part, binary) if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK): return bin_path return None def wait_then_open(url): """ Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL. """ for _ in range(1, 10): try: urlopen(url, context=_ssl_context()) except URLError: time.sleep(1) break webbrowser.open_new_tab(url) def wait_then_open_async(url): """ Spawns a thread that waits for a bit then opens a URL. """ t = threading.Thread(target=wait_then_open, args=({url})) t.daemon = True t.start() def _ssl_context(): if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'): try: return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6 except AttributeError: return ssl.SSLContext(ssl.PROTOCOL_TLSv1) return ssl.create_default_context() def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret): # use get_progress_controller hook = cli_ctx.get_progress_controller(True) hook.add(messsage='Creating service principal', value=0, total_val=1.0) logger.info('Creating service principal') # always create application with 5 years expiration start_date = datetime.datetime.utcnow() end_date = start_date + relativedelta(years=5) result = create_application(rbac_client.applications, name, url, [url], password=client_secret, start_date=start_date, end_date=end_date) service_principal = result.app_id # pylint: disable=no-member for x in range(0, 
10): hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0) try: create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client) break # TODO figure out what exception AAD throws here sometimes. except Exception as ex: # pylint: disable=broad-except logger.info(ex) time.sleep(2 + 2 * x) else: return False hook.add(message='Finished service principal creation', value=1.0, total_val=1.0) logger.info('Finished service principal creation') return service_principal def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None): # AAD can have delays in propagating data, so sleep and retry hook = cli_ctx.get_progress_controller(True) hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0) logger.info('Waiting for AAD role to propagate') for x in range(0, 10): hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0) try: # TODO: break this out into a shared utility library create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope) break except CloudError as ex: if ex.message == 'The role assignment already exists.': break logger.info(ex.message) except: # pylint: disable=bare-except pass time.sleep(delay + delay * x) else: return False hook.add(message='AAD role propagation done', value=1.0, total_val=1.0) logger.info('AAD role propagation done') return True def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None): # AAD can have delays in propagating data, so sleep and retry hook = cli_ctx.get_progress_controller(True) hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0) logger.info('Waiting for AAD role to delete') for x in range(0, 10): hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0) try: delete_role_assignments(cli_ctx, role=role, assignee=service_principal, scope=scope) break except CLIError as ex: raise ex except 
CloudError as ex: logger.info(ex) time.sleep(delay + delay * x) else: return False hook.add(message='AAD role deletion done', value=1.0, total_val=1.0) logger.info('AAD role deletion done') return True def _get_default_dns_prefix(name, resource_group_name, subscription_id): # Use subscription id to provide uniqueness and prevent DNS name clashes name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10] if not name_part[0].isalpha(): name_part = (str('a') + name_part)[0:10] resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16] return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6]) # pylint: disable=too-many-locals def store_acs_service_principal(subscription_id, client_secret, service_principal, file_name='acsServicePrincipal.json'): obj = {} if client_secret: obj['client_secret'] = client_secret if service_principal: obj['service_principal'] = service_principal config_path = os.path.join(get_config_dir(), file_name) full_config = load_service_principals(config_path=config_path) if not full_config: full_config = {} full_config[subscription_id] = obj with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600), 'w+') as spFile: json.dump(full_config, spFile) def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'): config_path = os.path.join(get_config_dir(), file_name) config = load_service_principals(config_path) if not config: return None return config.get(subscription_id) def load_service_principals(config_path): if not os.path.exists(config_path): return None fd = os.open(config_path, os.O_RDONLY) try: with os.fdopen(fd) as f: return shell_safe_json_parse(f.read()) except: # pylint: disable=bare-except return None def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait, subscription_id=None): from azure.mgmt.resource.resources import ResourceManagementClient from azure.mgmt.resource.resources.models import 
DeploymentProperties properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental') smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments if validate: logger.info('==== BEGIN TEMPLATE ====') logger.info(json.dumps(template, indent=2)) logger.info('==== END TEMPLATE ====') return smc.validate(resource_group_name, deployment_name, properties) return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties) def create_application(client, display_name, homepage, identifier_uris, available_to_other_tenants=False, password=None, reply_urls=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None): from azure.graphrbac.models import GraphErrorException password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type, key_usage=key_usage, start_date=start_date, end_date=end_date) app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants, display_name=display_name, identifier_uris=identifier_uris, homepage=homepage, reply_urls=reply_urls, key_credentials=key_creds, password_credentials=password_creds) try: return client.create(app_create_param) except GraphErrorException as ex: if 'insufficient privileges' in str(ex).lower(): link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long raise CLIError("Directory permission is needed for the current user to register the application. " "For how to configure, please refer '{}'. 
Original error: {}".format(link, ex)) raise def _build_application_creds(password=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None): if password and key_value: raise CLIError('specify either --password or --key-value, but not both.') if not start_date: start_date = datetime.datetime.utcnow() elif isinstance(start_date, str): start_date = parse(start_date) if not end_date: end_date = start_date + relativedelta(years=1) elif isinstance(end_date, str): end_date = parse(end_date) key_type = key_type or 'AsymmetricX509Cert' key_usage = key_usage or 'Verify' password_creds = None key_creds = None if password: password_creds = [PasswordCredential(start_date=start_date, end_date=end_date, key_id=str(uuid.uuid4()), value=password)] elif key_value: key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value, key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)] return (password_creds, key_creds) def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None): if rbac_client is None: rbac_client = get_graph_rbac_management_client(cli_ctx) if resolve_app: try: uuid.UUID(identifier) result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier))) except ValueError: result = list(rbac_client.applications.list( filter="identifierUris/any(s:s eq '{}')".format(identifier))) if not result: # assume we get an object id result = [rbac_client.applications.get(identifier)] app_id = result[0].app_id else: app_id = identifier return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True)) def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None): return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope, resolve_assignee=is_service_principal) def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, 
resolve_assignee=True): from azure.cli.core.profiles import ResourceType, get_sdk factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments definitions_client = factory.role_definitions scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id) role_id = _resolve_role_id(role, scope, definitions_client) # If the cluster has service principal resolve the service principal client id to get the object id, # if not use MSI object id. object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION, 'RoleAssignmentCreateParameters', mod='models', operation_group='role_assignments') parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id) assignment_name = uuid.uuid4() custom_headers = None return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers) def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None, scope=None, include_inherited=False, yes=None): factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments definitions_client = factory.role_definitions ids = ids or [] if ids: if assignee or role or resource_group_name or scope or include_inherited: raise CLIError('When assignment ids are used, other parameter values are not required') for i in ids: assignments_client.delete_by_id(i) return if not any([ids, assignee, role, resource_group_name, scope, assignee, yes]): from knack.prompting import prompt_y_n msg = 'This will delete all role assignments under the subscription. Are you sure?' 
if not prompt_y_n(msg, default="n"): return scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id) assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client, scope, assignee, role, include_inherited, include_groups=False) if assignments: for a in assignments: assignments_client.delete_by_id(a.id) def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None): # AAD can have delays in propagating data, so sleep and retry hook = cli_ctx.get_progress_controller(True) hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0) logger.info('Waiting for AAD role to delete') for x in range(0, 10): hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0) try: delete_role_assignments(cli_ctx, role=role, assignee=service_principal, scope=scope) break except CLIError as ex: raise ex except CloudError as ex: logger.info(ex) time.sleep(delay + delay * x) else: return False hook.add(message='AAD role deletion done', value=1.0, total_val=1.0) logger.info('AAD role deletion done') return True def _search_role_assignments(cli_ctx, assignments_client, definitions_client, scope, assignee, role, include_inherited, include_groups): assignee_object_id = None if assignee: assignee_object_id = _resolve_object_id(cli_ctx, assignee) # always use "scope" if provided, so we can get assignments beyond subscription e.g. 
management groups if scope: assignments = list(assignments_client.list_for_scope( scope=scope, filter='atScope()')) elif assignee_object_id: if include_groups: f = "assignedTo('{}')".format(assignee_object_id) else: f = "principalId eq '{}'".format(assignee_object_id) assignments = list(assignments_client.list(filter=f)) else: assignments = list(assignments_client.list()) if assignments: assignments = [a for a in assignments if ( not scope or include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or _get_role_property(a, 'scope').lower() == scope.lower() )] if role: role_id = _resolve_role_id(role, scope, definitions_client) assignments = [i for i in assignments if _get_role_property( i, 'role_definition_id') == role_id] if assignee_object_id: assignments = [i for i in assignments if _get_role_property( i, 'principal_id') == assignee_object_id] return assignments def _get_role_property(obj, property_name): if isinstance(obj, dict): return obj[property_name] return getattr(obj, property_name) def _build_role_scope(resource_group_name, scope, subscription_id): subscription_scope = '/subscriptions/' + subscription_id if scope: if resource_group_name: err = 'Resource group "{}" is redundant because scope is supplied' raise CLIError(err.format(resource_group_name)) elif resource_group_name: scope = subscription_scope + '/resourceGroups/' + resource_group_name else: scope = subscription_scope return scope def _resolve_role_id(role, scope, definitions_client): role_id = None try: uuid.UUID(role) role_id = role except ValueError: pass if not role_id: # retrieve role id role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role))) if not role_defs: raise CLIError("Role '{}' doesn't exist.".format(role)) if len(role_defs) > 1: ids = [r.id for r in role_defs] err = "More than one role matches the given name '{}'. 
Please pick a value from '{}'" raise CLIError(err.format(role, ids)) role_id = role_defs[0].id return role_id def _resolve_object_id(cli_ctx, assignee): client = get_graph_rbac_management_client(cli_ctx) result = None if assignee.find('@') >= 0: # looks like a user principal name result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee))) if not result: result = list(client.service_principals.list( filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee))) if not result: # assume an object id, let us verify it result = _get_object_stubs(client, [assignee]) # 2+ matches should never happen, so we only check 'no match' here if not result: raise CLIError("No matches in graph database for '{}'".format(assignee)) return result[0].object_id def _get_object_stubs(graph_client, assignees): params = GetObjectsParameters(include_directory_object_references=True, object_ids=assignees) return list(graph_client.objects.get_objects_by_object_ids(params)) def subnet_role_assignment_exists(cli_ctx, scope): network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7" factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'): if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id): return True return False def _update_dict(dict1, dict2): cp = dict1.copy() cp.update(dict2) return cp def aks_browse(cmd, # pylint: disable=too-many-statements client, resource_group_name, name, disable_browser=False, listen_address='127.0.0.1', listen_port='8001'): if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') # verify the kube-dashboard addon was not disabled instance = client.get(resource_group_name, name) addon_profiles = instance.addon_profiles or {} addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True)) if not addon_profile.enabled: raise 
CLIError('The kube-dashboard addon was disabled for this managed cluster.\n' 'To use "az aks browse" first enable the add-on\n' 'by running "az aks enable-addons --addons kube-dashboard".') _, browse_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path) # find the dashboard pod's name try: dashboard_pod = subprocess.check_output( ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"], universal_newlines=True) except subprocess.CalledProcessError as err: raise CLIError('Could not find dashboard pod: {}'.format(err)) if dashboard_pod: # remove any "pods/" or "pod/" prefix from the name dashboard_pod = str(dashboard_pod).split('/')[-1].strip() else: raise CLIError("Couldn't find the Kubernetes dashboard pod.") # find the port try: dashboard_port = subprocess.check_output( ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--selector", "k8s-app=kubernetes-dashboard", "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"] ) # output format: b"'{port}'" dashboard_port = int((dashboard_port.decode('utf-8').replace("'", ""))) except subprocess.CalledProcessError as err: raise CLIError('Could not find dashboard port: {}'.format(err)) # use https if dashboard container is using https if dashboard_port == 8443: protocol = 'https' else: protocol = 'http' proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port) dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url, protocol) # launch kubectl port-forward locally to access the remote dashboard if in_cloud_console(): # TODO: better error handling here. 
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port)) result = json.loads(response.text) dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format( result['url'], protocol) term_id = os.environ.get('ACC_TERM_ID') if term_id: response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id), json={"url": dashboardURL}) logger.warning('To view the console, please open %s in a new tab', dashboardURL) else: logger.warning('Proxy running on %s', proxy_url) logger.warning('Press CTRL+C to close the tunnel...') if not disable_browser: wait_then_open_async(dashboardURL) try: try: subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address", listen_address, "--port", listen_port], stderr=subprocess.STDOUT) except subprocess.CalledProcessError as err: if err.output.find(b'unknown flag: --address'): if listen_address != '127.0.0.1': logger.warning('"--address" is only supported in kubectl v1.13 and later.') logger.warning('The "--listen-address" argument will be ignored.') subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port]) except KeyboardInterrupt: # Let command processing finish gracefully after the user presses [Ctrl+C] pass finally: if in_cloud_console(): requests.post('http://localhost:8888/closeport/8001') def _trim_nodepoolname(nodepool_name): if not nodepool_name: return "nodepool1" return nodepool_name[:12] def _add_monitoring_role_assignment(result, cluster_resource_id, cmd): service_principal_msi_id = None # Check if service principal exists, if it does, assign permissions to service principal # Else, provide permissions to MSI if ( hasattr(result, 'service_principal_profile') and hasattr(result.service_principal_profile, 'client_id') and result.service_principal_profile.client_id != 'msi' ): logger.info('valid service principal exists, using it') service_principal_msi_id = result.service_principal_profile.client_id 
is_service_principal = True elif ( (hasattr(result, 'addon_profiles')) and ('omsagent' in result.addon_profiles) and (hasattr(result.addon_profiles['omsagent'], 'identity')) and (hasattr(result.addon_profiles['omsagent'].identity, 'object_id')) ): logger.info('omsagent MSI exists, using it') service_principal_msi_id = result.addon_profiles['omsagent'].identity.object_id is_service_principal = False if service_principal_msi_id is not None: if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher', service_principal_msi_id, is_service_principal, scope=cluster_resource_id): logger.warning('Could not create a role assignment for Monitoring addon. ' 'Are you an Owner on this subscription?') else: logger.warning('Could not find service principal or user assigned MSI for role' 'assignment') def aks_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches client, resource_group_name, name, ssh_key_value, dns_name_prefix=None, location=None, admin_username="azureuser", windows_admin_username=None, windows_admin_password=None, kubernetes_version='', node_vm_size="Standard_DS2_v2", node_osdisk_size=0, node_osdisk_diskencryptionset_id=None, node_count=3, nodepool_name="nodepool1", nodepool_tags=None, nodepool_labels=None, service_principal=None, client_secret=None, no_ssh_key=False, disable_rbac=None, enable_rbac=None, enable_vmss=None, vm_set_type=None, skip_subnet_role_assignment=False, enable_cluster_autoscaler=False, cluster_autoscaler_profile=None, network_plugin=None, network_policy=None, pod_cidr=None, service_cidr=None, dns_service_ip=None, docker_bridge_address=None, load_balancer_sku=None, load_balancer_managed_outbound_ip_count=None, load_balancer_outbound_ips=None, load_balancer_outbound_ip_prefixes=None, load_balancer_outbound_ports=None, load_balancer_idle_timeout=None, outbound_type=None, enable_addons=None, workspace_resource_id=None, min_count=None, max_count=None, vnet_subnet_id=None, max_pods=0, 
aad_client_app_id=None, aad_server_app_id=None, aad_server_app_secret=None, aad_tenant_id=None, tags=None, node_zones=None, generate_ssh_keys=False, # pylint: disable=unused-argument enable_pod_security_policy=False, node_resource_group=None, attach_acr=None, enable_private_cluster=False, enable_managed_identity=False, api_server_authorized_ip_ranges=None, aks_custom_headers=None, appgw_name=None, appgw_subnet_prefix=None, appgw_id=None, appgw_subnet_id=None, appgw_shared=None, appgw_watch_namespace=None, no_wait=False): if not no_ssh_key: try: if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value): raise ValueError() except (TypeError, ValueError): shortened_key = truncate_text(ssh_key_value) raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key)) subscription_id = get_subscription_id(cmd.cli_ctx) if not dns_name_prefix: dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id) rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) if location is None: location = rg_location # Flag to be removed, kept for back-compatibility only. Remove the below section # when we deprecate the enable-vmss flag if enable_vmss: if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower(): raise CLIError('enable-vmss and provided vm_set_type ({}) are conflicting with each other'. 
format(vm_set_type)) vm_set_type = "VirtualMachineScaleSets" vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version) load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version) if api_server_authorized_ip_ranges and load_balancer_sku == "basic": raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer') agent_pool_profile = ManagedClusterAgentPoolProfile( name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it tags=nodepool_tags, node_labels=nodepool_labels, count=int(node_count), vm_size=node_vm_size, os_type="Linux", vnet_subnet_id=vnet_subnet_id, availability_zones=node_zones, max_pods=int(max_pods) if max_pods else None, type=vm_set_type ) if node_osdisk_size: agent_pool_profile.os_disk_size_gb = int(node_osdisk_size) _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile) linux_profile = None # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified. 
if not no_ssh_key: ssh_config = ContainerServiceSshConfiguration( public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)]) linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config) windows_profile = None if windows_admin_username: if windows_admin_password is None: try: windows_admin_password = prompt_pass(msg='windows-admin-password: ', confirm=True) except NoTTYException: raise CLIError('Please specify both username and password in non-interactive mode.') windows_profile = ManagedClusterWindowsProfile( admin_username=windows_admin_username, admin_password=windows_admin_password) principal_obj = _ensure_aks_service_principal(cmd.cli_ctx, service_principal=service_principal, client_secret=client_secret, subscription_id=subscription_id, dns_name_prefix=dns_name_prefix, location=location, name=name) service_principal_profile = ManagedClusterServicePrincipalProfile( client_id=principal_obj.get("service_principal"), secret=principal_obj.get("client_secret")) if attach_acr: if enable_managed_identity: if no_wait: raise CLIError('When --attach-acr and --enable-managed-identity are both specified, ' '--no-wait is not allowed, please wait until the whole operation succeeds.') else: # Attach acr operation will be handled after the cluster is created pass else: _ensure_aks_acr(cmd.cli_ctx, client_id=service_principal_profile.client_id, acr_name_or_id=attach_acr, subscription_id=subscription_id) if (vnet_subnet_id and not skip_subnet_role_assignment and not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)): scope = vnet_subnet_id if not _add_role_assignment( cmd.cli_ctx, 'Network Contributor', service_principal_profile.client_id, scope=scope): logger.warning('Could not create a role assignment for subnet. 
' 'Are you an Owner on this subscription?') load_balancer_profile = create_load_balancer_profile( load_balancer_managed_outbound_ip_count, load_balancer_outbound_ips, load_balancer_outbound_ip_prefixes, load_balancer_outbound_ports, load_balancer_idle_timeout) outbound_type = _set_outbound_type(outbound_type, network_plugin, load_balancer_sku, load_balancer_profile) network_profile = None if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]): if not network_plugin: raise CLIError('Please explicitly specify the network plugin type') if pod_cidr and network_plugin == "azure": raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified') network_profile = ContainerServiceNetworkProfile( network_plugin=network_plugin, pod_cidr=pod_cidr, service_cidr=service_cidr, dns_service_ip=dns_service_ip, docker_bridge_cidr=docker_bridge_address, network_policy=network_policy, load_balancer_sku=load_balancer_sku.lower(), load_balancer_profile=load_balancer_profile, outbound_type=outbound_type ) else: if load_balancer_sku.lower() == "standard" or load_balancer_profile: network_profile = ContainerServiceNetworkProfile( network_plugin="kubenet", load_balancer_sku=load_balancer_sku.lower(), load_balancer_profile=load_balancer_profile, outbound_type=outbound_type, ) addon_profiles = _handle_addons_args( cmd, enable_addons, subscription_id, resource_group_name, {}, workspace_resource_id, appgw_name, appgw_subnet_prefix, appgw_id, appgw_subnet_id, appgw_shared, appgw_watch_namespace ) monitoring = False if 'omsagent' in addon_profiles: monitoring = True _ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent']) if CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles: if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config: appgw_id = addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] from msrestazure.tools 
import parse_resource_id, resource_id appgw_id_dict = parse_resource_id(appgw_id) appgw_group_id = resource_id( subscription=appgw_id_dict["subscription"], resource_group=appgw_id_dict["resource_group"]) if not _add_role_assignment(cmd.cli_ctx, 'Contributor', service_principal_profile.client_id, scope=appgw_group_id): logger.warning('Could not create a role assignment for application gateway: {appgw_id} ' 'specified in {CONST_INGRESS_APPGW_ADDON_NAME} addon. ' 'Are you an Owner on this subscription?') if CONST_INGRESS_APPGW_SUBNET_ID in addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config: subnet_id = addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_SUBNET_ID] from msrestazure.tools import parse_resource_id, resource_id if not _add_role_assignment(cmd.cli_ctx, 'Contributor', service_principal_profile.client_id, scope=subnet_id): logger.warning('Could not create a role assignment for subnet: {subnet_id} ' 'specified in {CONST_INGRESS_APPGW_ADDON_NAME} addon. ' 'Are you an Owner on this subscription?') aad_profile = None if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]): aad_profile = ManagedClusterAADProfile( client_app_id=aad_client_app_id, server_app_id=aad_server_app_id, server_app_secret=aad_server_app_secret, tenant_id=aad_tenant_id ) # Check that both --disable-rbac and --enable-rbac weren't provided if all([disable_rbac, enable_rbac]): raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.') api_server_access_profile = None if api_server_authorized_ip_ranges: api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges) identity = None if enable_managed_identity: identity = ManagedClusterIdentity( type="SystemAssigned" ) enable_rbac = True if disable_rbac: enable_rbac = False mc = ManagedCluster( location=location, tags=tags, dns_prefix=dns_name_prefix, kubernetes_version=kubernetes_version, enable_rbac=enable_rbac, 
agent_pool_profiles=[agent_pool_profile], linux_profile=linux_profile, windows_profile=windows_profile, service_principal_profile=service_principal_profile, network_profile=network_profile, addon_profiles=addon_profiles, aad_profile=aad_profile, auto_scaler_profile=cluster_autoscaler_profile, enable_pod_security_policy=bool(enable_pod_security_policy), identity=identity, disk_encryption_set_id=node_osdisk_diskencryptionset_id, api_server_access_profile=api_server_access_profile) if node_resource_group: mc.node_resource_group = node_resource_group if enable_private_cluster: if load_balancer_sku.lower() != "standard": raise CLIError("Please use standard load balancer for private cluster") mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile( enable_private_cluster=True ) headers = {} if aks_custom_headers is not None: if aks_custom_headers != "": for pair in aks_custom_headers.split(','): parts = pair.split('=') if len(parts) != 2: raise CLIError('custom headers format is incorrect') headers[parts[0]] = parts[1] # Due to SPN replication latency, we do a few retries here max_retry = 30 retry_exception = Exception(None) for _ in range(0, max_retry): try: logger.info('AKS cluster is creating, please wait...') if monitoring: # adding a wait here since we rely on the result for role assignment created_cluster = LongRunningOperation(cmd.cli_ctx)(client.create_or_update( resource_group_name=resource_group_name, resource_name=name, parameters=mc, custom_headers=headers)) cloud_name = cmd.cli_ctx.cloud.name # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud if cloud_name.lower() == 'azurecloud': from msrestazure.tools import resource_id cluster_resource_id = resource_id( subscription=subscription_id, resource_group=resource_group_name, namespace='Microsoft.ContainerService', type='managedClusters', name=name ) 
_add_monitoring_role_assignment(created_cluster, cluster_resource_id, cmd) else: created_cluster = sdk_no_wait(no_wait, client.create_or_update, resource_group_name=resource_group_name, resource_name=name, parameters=mc, custom_headers=headers).result() if enable_managed_identity and attach_acr: # Attach ACR to cluster enabled managed identity if created_cluster.identity_profile is None or \ created_cluster.identity_profile["kubeletidentity"] is None: logger.warning('Your cluster is successfully created, but we failed to attach acr to it, ' 'you can manually grant permission to the identity named <ClUSTER_NAME>-agentpool ' 'in MC_ resource group to give it permission to pull from ACR.') else: kubelet_identity_client_id = created_cluster.identity_profile["kubeletidentity"].client_id _ensure_aks_acr(cmd.cli_ctx, client_id=kubelet_identity_client_id, acr_name_or_id=attach_acr, subscription_id=subscription_id) return created_cluster except CloudError as ex: retry_exception = ex if 'not found in Active Directory tenant' in ex.message: time.sleep(3) else: raise ex raise retry_exception def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals client, resource_group_name, name, enable_cluster_autoscaler=False, disable_cluster_autoscaler=False, update_cluster_autoscaler=False, cluster_autoscaler_profile=None, min_count=None, max_count=None, no_wait=False, load_balancer_managed_outbound_ip_count=None, load_balancer_outbound_ips=None, load_balancer_outbound_ip_prefixes=None, load_balancer_outbound_ports=None, load_balancer_idle_timeout=None, api_server_authorized_ip_ranges=None, enable_pod_security_policy=False, disable_pod_security_policy=False, attach_acr=None, detach_acr=None): update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler update_acr = attach_acr is not None or detach_acr is not None update_pod_security = enable_pod_security_policy or disable_pod_security_policy update_lb_profile 
= is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count, load_balancer_outbound_ips, load_balancer_outbound_ip_prefixes, load_balancer_outbound_ports, load_balancer_idle_timeout) # pylint: disable=too-many-boolean-expressions if not update_autoscaler and \ cluster_autoscaler_profile is None and \ not update_acr and \ not update_lb_profile \ and api_server_authorized_ip_ranges is None and \ not update_pod_security and \ not update_lb_profile: raise CLIError('Please specify "--enable-cluster-autoscaler" or ' '"--disable-cluster-autoscaler" or ' '"--update-cluster-autoscaler" or ' '"--cluster-autoscaler-profile" or ' '"--enable-pod-security-policy" or ' '"--disable-pod-security-policy" or ' '"--api-server-authorized-ip-ranges" or ' '"--attach-acr" or ' '"--detach-acr" or ' '"--load-balancer-managed-outbound-ip-count" or ' '"--load-balancer-outbound-ips" or ' '"--load-balancer-outbound-ip-prefixes"') instance = client.get(resource_group_name, name) if update_autoscaler and len(instance.agent_pool_profiles) > 1: raise CLIError('There is more than one node pool in the cluster. 
Please use "az aks nodepool" command ' 'to update per node pool auto scaler settings') node_count = instance.agent_pool_profiles[0].count if min_count is None or max_count is None: if enable_cluster_autoscaler or update_cluster_autoscaler: raise CLIError('Please specifying both min-count and max-count when --enable-cluster-autoscaler or ' '--update-cluster-autoscaler set.') if min_count is not None and max_count is not None: if int(min_count) > int(max_count): raise CLIError('value of min-count should be less than or equal to value of max-count.') if int(node_count) < int(min_count) or int(node_count) > int(max_count): raise CLIError("current node count '{}' is not in the range of min-count and max-count.".format(node_count)) if enable_cluster_autoscaler: if instance.agent_pool_profiles[0].enable_auto_scaling: logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n' 'Please run "az aks update --update-cluster-autoscaler" ' 'if you want to update min-count or max-count.') return None instance.agent_pool_profiles[0].min_count = int(min_count) instance.agent_pool_profiles[0].max_count = int(max_count) instance.agent_pool_profiles[0].enable_auto_scaling = True if update_cluster_autoscaler: if not instance.agent_pool_profiles[0].enable_auto_scaling: raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n' 'Run "az aks update --enable-cluster-autoscaler" ' 'to enable cluster with min-count and max-count.') instance.agent_pool_profiles[0].min_count = int(min_count) instance.agent_pool_profiles[0].max_count = int(max_count) if disable_cluster_autoscaler: if not instance.agent_pool_profiles[0].enable_auto_scaling: logger.warning('Cluster autoscaler is already disabled for this managed cluster.') return None instance.agent_pool_profiles[0].enable_auto_scaling = False instance.agent_pool_profiles[0].min_count = None instance.agent_pool_profiles[0].max_count = None if not cluster_autoscaler_profile: instance.auto_scaler_profile = 
{} else: instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__, dict((key.replace("-", "_"), value) for (key, value) in cluster_autoscaler_profile.items())) \ if instance.auto_scaler_profile else cluster_autoscaler_profile if enable_pod_security_policy and disable_pod_security_policy: raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy ' 'at the same time.') if enable_pod_security_policy: instance.enable_pod_security_policy = True if disable_pod_security_policy: instance.enable_pod_security_policy = False if update_lb_profile: instance.network_profile.load_balancer_profile = update_load_balancer_profile( load_balancer_managed_outbound_ip_count, load_balancer_outbound_ips, load_balancer_outbound_ip_prefixes, load_balancer_outbound_ports, load_balancer_idle_timeout, instance.network_profile.load_balancer_profile) if attach_acr and detach_acr: raise CLIError('Cannot specify "--attach-acr" and "--detach-acr" at the same time.') subscription_id = get_subscription_id(cmd.cli_ctx) client_id = "" if instance.identity is not None and instance.identity.type == "SystemAssigned": if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None: raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. ' 'Please do not set --attach-acr or --detach-acr. 
' 'You can manually grant or revoke permission to the identity named ' '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.') client_id = instance.identity_profile["kubeletidentity"].client_id else: client_id = instance.service_principal_profile.client_id if not client_id: raise CLIError('Cannot get the AKS cluster\'s service principal.') if attach_acr: _ensure_aks_acr(cmd.cli_ctx, client_id=client_id, acr_name_or_id=attach_acr, subscription_id=subscription_id) if detach_acr: _ensure_aks_acr(cmd.cli_ctx, client_id=client_id, acr_name_or_id=detach_acr, subscription_id=subscription_id, detach=True) # empty string is valid as it disables ip whitelisting if api_server_authorized_ip_ranges is not None: instance.api_server_access_profile = \ _populate_api_server_access_profile(api_server_authorized_ip_ranges, instance) return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def aks_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument mc = client.get(resource_group_name, name) return _remove_nulls([mc])[0] def _remove_nulls(managed_clusters): """ Remove some often-empty fields from a list of ManagedClusters, so the JSON representation doesn't contain distracting null fields. This works around a quirk of the SDK for python behavior. These fields are not sent by the server, but get recreated by the CLI's own "to_dict" serialization. 
""" attrs = ['tags'] ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id'] sp_attrs = ['secret'] for managed_cluster in managed_clusters: for attr in attrs: if getattr(managed_cluster, attr, None) is None: delattr(managed_cluster, attr) if managed_cluster.agent_pool_profiles is not None: for ap_profile in managed_cluster.agent_pool_profiles: for attr in ap_attrs: if getattr(ap_profile, attr, None) is None: delattr(ap_profile, attr) for attr in sp_attrs: if getattr(managed_cluster.service_principal_profile, attr, None) is None: delattr(managed_cluster.service_principal_profile, attr) return managed_clusters def aks_get_credentials(cmd, # pylint: disable=unused-argument client, resource_group_name, name, admin=False, user='clusterUser', path=os.path.join(os.path.expanduser('~'), '.kube', 'config'), overwrite_existing=False, context_name=None): credentialResults = None if admin: credentialResults = client.list_cluster_admin_credentials(resource_group_name, name) else: if user.lower() == 'clusteruser': credentialResults = client.list_cluster_user_credentials(resource_group_name, name) elif user.lower() == 'clustermonitoringuser': credentialResults = client.list_cluster_monitoring_user_credentials(resource_group_name, name) else: raise CLIError("The user is invalid.") if not credentialResults: raise CLIError("No Kubernetes credentials found.") try: kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8') _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name) except (IndexError, ValueError): raise CLIError("Fail to find kubeconfig file.") ADDONS = { 'http_application_routing': 'httpApplicationRouting', 'monitoring': 'omsagent', 'virtual-node': 'aciConnector', 'azure-policy': 'azurepolicy', 'kube-dashboard': 'kubeDashboard', 'ingress-appgw': CONST_INGRESS_APPGW_ADDON_NAME } # pylint: disable=line-too-long def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals client, resource_group_name, name, 
storage_account=None, sas_token=None, container_logs=None, kube_objects=None, node_logs=None): colorama.init() mc = client.get(resource_group_name, name) if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') storage_account_id = None if storage_account is None: print("No storage account specified. Try getting storage account from diagnostic settings") storage_account_id = get_storage_account_from_diag_settings(cmd.cli_ctx, resource_group_name, name) if storage_account_id is None: raise CLIError("A storage account must be specified, since there isn't one in the diagnostic settings.") from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id if storage_account_id is None: if not is_valid_resource_id(storage_account): storage_account_id = resource_id( subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, namespace='Microsoft.Storage', type='storageAccounts', name=storage_account ) else: storage_account_id = storage_account if is_valid_resource_id(storage_account_id): try: parsed_storage_account = parse_resource_id(storage_account_id) except CloudError as ex: raise CLIError(ex.message) else: raise CLIError("Invalid storage account id %s" % storage_account_id) storage_account_name = parsed_storage_account['name'] readonly_sas_token = None if sas_token is None: storage_client = cf_storage(cmd.cli_ctx, parsed_storage_account['subscription']) storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'], storage_account_name) kwargs = { 'account_name': storage_account_name, 'account_key': storage_account_keys.keys[0].value } cloud_storage_client = cloud_storage_account_service_factory(cmd.cli_ctx, kwargs) sas_token = cloud_storage_client.generate_shared_access_signature( 'b', 'sco', 'rwdlacup', datetime.datetime.utcnow() + datetime.timedelta(days=1)) readonly_sas_token = cloud_storage_client.generate_shared_access_signature( 'b', 'sco', 'rl', 
datetime.datetime.utcnow() + datetime.timedelta(days=1)) readonly_sas_token = readonly_sas_token.strip('?') from knack.prompting import prompt_y_n print() print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and ' f'save them to the storage account ' f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as ' f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.') print() print('If you share access to that storage account to Azure support, you consent to the terms outlined' f' in {format_hyperlink("http://aka.ms/DiagConsent")}.') print() if not prompt_y_n('Do you confirm?', default="n"): return print() print("Getting credentials for cluster %s " % name) _, temp_kubeconfig_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path) print() print("Starts collecting diag info for cluster %s " % name) sas_token = sas_token.strip('?') deployment_yaml = urlopen( "https://raw.githubusercontent.com/Azure/aks-periscope/v0.2/deployment/aks-periscope.yaml").read().decode() deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>", (base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii')) deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>", (base64.b64encode(bytes("?" 
+ sas_token, 'ascii'))).decode('ascii')) yaml_lines = deployment_yaml.splitlines() for index, line in enumerate(yaml_lines): if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None: yaml_lines[index] = line + ' ' + container_logs if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None: yaml_lines[index] = line + ' ' + kube_objects if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None: yaml_lines[index] = line + ' ' + node_logs deployment_yaml = '\n'.join(yaml_lines) fd, temp_yaml_path = tempfile.mkstemp() temp_yaml_file = os.fdopen(fd, 'w+t') try: temp_yaml_file.write(deployment_yaml) temp_yaml_file.flush() temp_yaml_file.close() try: print() print("Cleaning up aks-periscope resources if existing") subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "serviceaccount,configmap,daemonset,secret", "--all", "-n", "aks-periscope", "--ignore-not-found"], stderr=subprocess.STDOUT) subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "ClusterRoleBinding", "aks-periscope-role-binding", "--ignore-not-found"], stderr=subprocess.STDOUT) subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "ClusterRoleBinding", "aks-periscope-role-binding-view", "--ignore-not-found"], stderr=subprocess.STDOUT) subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "ClusterRole", "aks-periscope-role", "--ignore-not-found"], stderr=subprocess.STDOUT) subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "--all", "apd", "-n", "aks-periscope", "--ignore-not-found"], stderr=subprocess.DEVNULL) subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "CustomResourceDefinition", "diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"], stderr=subprocess.STDOUT) print() print("Deploying aks-periscope") subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f", temp_yaml_path, "-n", 
"aks-periscope"], stderr=subprocess.STDOUT) except subprocess.CalledProcessError as err: raise CLIError(err.output) finally: os.remove(temp_yaml_path) print() normalized_fqdn = mc.fqdn.replace('.', '-') token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \ f"{normalized_fqdn}?{token_in_storage_account_url}" print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}') print() print(f'You can download Azure Stroage Explorer here ' f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}' f' to check the logs by adding the storage account using the following URL:') print(f'{format_hyperlink(log_storage_account_url)}') print() if not prompt_y_n('Do you want to see analysis results now?', default="n"): print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' " f"anytime to check the analysis results.") return display_diagnostics_report(temp_kubeconfig_path) return def aks_kanalyze(cmd, client, resource_group_name, name): colorama.init() client.get(resource_group_name, name) _, temp_kubeconfig_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path) display_diagnostics_report(temp_kubeconfig_path) def aks_scale(cmd, # pylint: disable=unused-argument client, resource_group_name, name, node_count, nodepool_name="", no_wait=False): instance = client.get(resource_group_name, name) if len(instance.agent_pool_profiles) > 1 and nodepool_name == "": raise CLIError('There are more than one node pool in the cluster. 
' 'Please specify nodepool name or use az aks nodepool command to scale node pool') if node_count == 0: raise CLIError("Can't scale down to 0 nodes.") for agent_profile in instance.agent_pool_profiles: if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1): agent_profile.count = int(node_count) # pylint: disable=no-member # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name)) def aks_upgrade(cmd, # pylint: disable=unused-argument client, resource_group_name, name, kubernetes_version, control_plane_only=False, no_wait=False, **kwargs): # pylint: disable=unused-argument instance = client.get(resource_group_name, name) if instance.kubernetes_version == kubernetes_version: if instance.provisioning_state == "Succeeded": logger.warning("The cluster is already on version %s and is not in a failed state. No operations " "will occur when upgrading to the same version if the cluster is not in a failed state.", instance.kubernetes_version) elif instance.provisioning_state == "Failed": logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to " "attempt resolution of failed cluster state.", instance.kubernetes_version) from knack.prompting import prompt_y_n upgrade_all = False instance.kubernetes_version = kubernetes_version vmas_cluster = False for agent_profile in instance.agent_pool_profiles: if agent_profile.type.lower() == "availabilityset": vmas_cluster = True break # for legacy clusters, we always upgrade node pools with CCP. if instance.max_agent_pools < 8 or vmas_cluster: if control_plane_only: msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be " "upgraded to {} as well. 
Continue?").format(instance.kubernetes_version) if not prompt_y_n(msg, default="n"): return None upgrade_all = True else: if not control_plane_only: msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane " "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version) if not prompt_y_n(msg, default="n"): return None upgrade_all = True else: msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. " "Node pool will not change. Continue?").format(instance.kubernetes_version) if not prompt_y_n(msg, default="n"): return None if upgrade_all: for agent_profile in instance.agent_pool_profiles: agent_profile.orchestrator_version = kubernetes_version # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None, workspace_resource_id=None, appgw_name=None, appgw_subnet_prefix=None, appgw_id=None, appgw_subnet_id=None, appgw_shared=False, appgw_watch_namespace=None): if not addon_profiles: addon_profiles = {} addons = addons_str.split(',') if addons_str else [] if 'http_application_routing' in addons: addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True) addons.remove('http_application_routing') if 'kube-dashboard' in addons: addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True) addons.remove('kube-dashboard') # TODO: can we help the user find a workspace resource ID? 
if 'monitoring' in addons: if not workspace_resource_id: # use default workspace if exists else create default workspace workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring( cmd, subscription_id, resource_group_name) workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') addon_profiles['omsagent'] = ManagedClusterAddonProfile( enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id}) addons.remove('monitoring') # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is elif workspace_resource_id: raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".') if 'azure-policy' in addons: addon_profiles['azurepolicy'] = ManagedClusterAddonProfile(enabled=True) addons.remove('azure-policy') if 'ingress-appgw' in addons: addon_profile = ManagedClusterAddonProfile(enabled=True, config={}) if appgw_name is not None: addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name if appgw_subnet_prefix is not None: addon_profile.config[CONST_INGRESS_APPGW_SUBNET_PREFIX] = appgw_subnet_prefix if appgw_id is not None: addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id if appgw_subnet_id is not None: addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id if appgw_shared: addon_profile.config[CONST_INGRESS_APPGW_SHARED] = "true" if appgw_watch_namespace is not None: addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile addons.remove('ingress-appgw') # error out if any (unrecognized) addons remain if addons: raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format( ",".join(addons), "are" if len(addons) > 1 else "is")) return 
addon_profiles def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name): # mapping for azure public cloud # log analytics workspaces cannot be created in WCUS region due to capacity limits # so mapped to EUS per discussion with log analytics team AzureCloudLocationToOmsRegionCodeMap = { "australiasoutheast": "ASE", "australiaeast": "EAU", "australiacentral": "CAU", "canadacentral": "CCA", "centralindia": "CIN", "centralus": "CUS", "eastasia": "EA", "eastus": "EUS", "eastus2": "EUS2", "eastus2euap": "EAP", "francecentral": "PAR", "japaneast": "EJP", "koreacentral": "SE", "northeurope": "NEU", "southcentralus": "SCUS", "southeastasia": "SEA", "uksouth": "SUK", "usgovvirginia": "USGV", "westcentralus": "EUS", "westeurope": "WEU", "westus": "WUS", "westus2": "WUS2" } AzureCloudRegionToOmsRegionMap = { "australiacentral": "australiacentral", "australiacentral2": "australiacentral", "australiaeast": "australiaeast", "australiasoutheast": "australiasoutheast", "brazilsouth": "southcentralus", "canadacentral": "canadacentral", "canadaeast": "canadacentral", "centralus": "centralus", "centralindia": "centralindia", "eastasia": "eastasia", "eastus": "eastus", "eastus2": "eastus2", "francecentral": "francecentral", "francesouth": "francecentral", "japaneast": "japaneast", "japanwest": "japaneast", "koreacentral": "koreacentral", "koreasouth": "koreacentral", "northcentralus": "eastus", "northeurope": "northeurope", "southafricanorth": "westeurope", "southafricawest": "westeurope", "southcentralus": "southcentralus", "southeastasia": "southeastasia", "southindia": "centralindia", "uksouth": "uksouth", "ukwest": "uksouth", "westcentralus": "eastus", "westeurope": "westeurope", "westindia": "centralindia", "westus": "westus", "westus2": "westus2" } # mapping for azure china cloud # log analytics only support China East2 region AzureChinaLocationToOmsRegionCodeMap = { "chinaeast": "EAST2", "chinaeast2": "EAST2", "chinanorth": "EAST2", 
"chinanorth2": "EAST2" } AzureChinaRegionToOmsRegionMap = { "chinaeast": "chinaeast2", "chinaeast2": "chinaeast2", "chinanorth": "chinaeast2", "chinanorth2": "chinaeast2" } # mapping for azure us governmner cloud AzureFairfaxLocationToOmsRegionCodeMap = { "usgovvirginia": "USGV" } AzureFairfaxRegionToOmsRegionMap = { "usgovvirginia": "usgovvirginia" } rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) cloud_name = cmd.cli_ctx.cloud.name if cloud_name.lower() == 'azurecloud': workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus") workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS") elif cloud_name.lower() == 'azurechinacloud': workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2") workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2") elif cloud_name.lower() == 'azureusgovernment': workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia") workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV") else: logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name) default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code) default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \ '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name) resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id) resources = cf_resources(cmd.cli_ctx, subscription_id) # check if default RG exists if resource_groups.check_existence(default_workspace_resource_group): try: resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview') return resource.id except CloudError as ex: if ex.status_code != 404: raise ex else: 
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region}) default_workspace_params = { 'location': workspace_region, 'properties': { 'sku': { 'name': 'standalone' } } } async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview', default_workspace_params) ws_resource_id = '' while True: result = async_poller.result(15) if async_poller.done(): ws_resource_id = result.id break return ws_resource_id def _ensure_container_insights_for_monitoring(cmd, addon): if not addon.enabled: return None # workaround for this addon key which has been seen lowercased in the wild if 'loganalyticsworkspaceresourceid' in addon.config: addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid') workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID'].strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') # extract subscription ID and resource group from workspace_resource_id URL try: subscription_id = workspace_resource_id.split('/')[2] resource_group = workspace_resource_id.split('/')[4] except IndexError: raise CLIError('Could not locate resource group in workspace-resource-id URL.') # region of workspace can be different from region of RG so find the location of the workspace_resource_id resources = cf_resources(cmd.cli_ctx, subscription_id) try: resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview') location = resource.location except CloudError as ex: raise ex unix_time_in_millis = int( (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0) solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis) # pylint: disable=line-too-long template = { "$schema": 
"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "workspaceResourceId": { "type": "string", "metadata": { "description": "Azure Monitor Log Analytics Resource ID" } }, "workspaceRegion": { "type": "string", "metadata": { "description": "Azure Monitor Log Analytics workspace region" } }, "solutionDeploymentName": { "type": "string", "metadata": { "description": "Name of the solution deployment" } } }, "resources": [ { "type": "Microsoft.Resources/deployments", "name": "[parameters('solutionDeploymentName')]", "apiVersion": "2017-05-10", "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", "properties": { "mode": "Incremental", "template": { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": {}, "variables": {}, "resources": [ { "apiVersion": "2015-11-01-preview", "type": "Microsoft.OperationsManagement/solutions", "location": "[parameters('workspaceRegion')]", "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", "properties": { "workspaceResourceId": "[parameters('workspaceResourceId')]" }, "plan": { "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", "product": "[Concat('OMSGallery/', 'ContainerInsights')]", "promotionCode": "", "publisher": "Microsoft" } } ] }, "parameters": {} } } ] } params = { "workspaceResourceId": { "value": workspace_resource_id }, "workspaceRegion": { "value": location }, "solutionDeploymentName": { "value": solution_deployment_name } } deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis) # publish the Container Insights solution to the Log Analytics workspace return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params, validate=False, no_wait=False, 
subscription_id=subscription_id) def _ensure_aks_service_principal(cli_ctx, service_principal=None, client_secret=None, subscription_id=None, dns_name_prefix=None, location=None, name=None): file_name_aks = 'aksServicePrincipal.json' # TODO: This really needs to be unit tested. rbac_client = get_graph_rbac_management_client(cli_ctx) if not service_principal: # --service-principal not specified, try to load it from local disk principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks) if principal_obj: service_principal = principal_obj.get('service_principal') client_secret = principal_obj.get('client_secret') else: # Nothing to load, make one. if not client_secret: client_secret = _create_client_secret() salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8') url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location) service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret) if not service_principal: raise CLIError('Could not create a service principal with the right permissions. ' 'Are you an Owner on this project?') logger.info('Created a service principal: %s', service_principal) # We don't need to add role assignment for this created SPN else: # --service-principal specfied, validate --client-secret was too if not client_secret: raise CLIError('--client-secret is required if --service-principal is specified') store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks) return load_acs_service_principal(subscription_id, file_name=file_name_aks) def _get_rg_location(ctx, resource_group_name, subscription_id=None): groups = cf_resource_groups(ctx, subscription_id=subscription_id) # Just do the get, we don't need the result, it will error out if the group doesn't exist. 
rg = groups.get(resource_group_name) return rg.location def _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile): if enable_cluster_autoscaler: if min_count is None or max_count is None: raise CLIError('Please specifying both min-count and max-count when --enable-cluster-autoscaler enabled') if int(min_count) > int(max_count): raise CLIError('value of min-count should be less than or equal to value of max-count') if int(node_count) < int(min_count) or int(node_count) > int(max_count): raise CLIError('node-count is not in the range of min-count and max-count') agent_pool_profile.min_count = int(min_count) agent_pool_profile.max_count = int(max_count) agent_pool_profile.enable_auto_scaling = True else: if min_count is not None or max_count is not None: raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag') def _create_client_secret(): # Add a special character to satsify AAD SP secret requirements special_char = '$' client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char return client_secret def _ensure_aks_acr(cli_ctx, client_id, acr_name_or_id, subscription_id, # pylint: disable=unused-argument detach=False): from msrestazure.tools import is_valid_resource_id, parse_resource_id # Check if the ACR exists by resource ID. if is_valid_resource_id(acr_name_or_id): try: parsed_registry = parse_resource_id(acr_name_or_id) acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription']) registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name']) except CloudError as ex: raise CLIError(ex.message) _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach) return # Check if the ACR exists by name accross all resource groups. 
registry_name = acr_name_or_id registry_resource = 'Microsoft.ContainerRegistry/registries' try: registry = get_resource_by_name(cli_ctx, registry_name, registry_resource) except CloudError as ex: if 'was not found' in ex.message: raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name)) raise CLIError(ex.message) _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach) return def _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry_id, detach=False): if detach: if not _delete_role_assignments(cli_ctx, 'acrpull', client_id, scope=registry_id): raise CLIError('Could not delete role assignments for ACR. ' 'Are you an Owner on this subscription?') return if not _add_role_assignment(cli_ctx, 'acrpull', client_id, scope=registry_id): raise CLIError('Could not create a role assignment for ACR. ' 'Are you an Owner on this subscription?') return def aks_agentpool_show(cmd, # pylint: disable=unused-argument client, resource_group_name, cluster_name, nodepool_name): instance = client.get(resource_group_name, cluster_name, nodepool_name) return instance def aks_agentpool_list(cmd, # pylint: disable=unused-argument client, resource_group_name, cluster_name): return client.list(resource_group_name, cluster_name) def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals client, resource_group_name, cluster_name, nodepool_name, tags=None, kubernetes_version=None, node_zones=None, node_vm_size=None, node_osdisk_size=0, node_count=3, vnet_subnet_id=None, max_pods=0, os_type="Linux", min_count=None, max_count=None, enable_cluster_autoscaler=False, node_taints=None, priority=CONST_SCALE_SET_PRIORITY_REGULAR, eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE, spot_max_price=float('nan'), public_ip_per_vm=False, labels=None, no_wait=False): instances = client.list(resource_group_name, cluster_name) for agentpool_profile in instances: if agentpool_profile.name == nodepool_name: raise CLIError("Node pool {} 
already exists, please try a different name, " "use 'aks nodepool list' to get current list of node pool".format(nodepool_name)) taints_array = [] if node_taints is not None: for taint in node_taints.split(','): try: taint = taint.strip() taints_array.append(taint) except ValueError: raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".') if node_vm_size is None: if os_type == "Windows": node_vm_size = "Standard_D2s_v3" else: node_vm_size = "Standard_DS2_v2" agent_pool = AgentPool( name=nodepool_name, tags=tags, node_labels=labels, count=int(node_count), vm_size=node_vm_size, os_type=os_type, storage_profile=ContainerServiceStorageProfileTypes.managed_disks, vnet_subnet_id=vnet_subnet_id, agent_pool_type="VirtualMachineScaleSets", max_pods=int(max_pods) if max_pods else None, orchestrator_version=kubernetes_version, availability_zones=node_zones, node_taints=taints_array, scale_set_priority=priority, enable_node_public_ip=public_ip_per_vm ) if priority == CONST_SCALE_SET_PRIORITY_SPOT: agent_pool.scale_set_eviction_policy = eviction_policy if isnan(spot_max_price): spot_max_price = -1 agent_pool.spot_max_price = spot_max_price _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool) if node_osdisk_size: agent_pool.os_disk_size_gb = int(node_osdisk_size) return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool) def aks_agentpool_scale(cmd, # pylint: disable=unused-argument client, resource_group_name, cluster_name, nodepool_name, node_count=3, no_wait=False): instance = client.get(resource_group_name, cluster_name, nodepool_name) new_node_count = int(node_count) if new_node_count == 0: raise CLIError("Can't scale down to 0 nodes.") if new_node_count == instance.count: raise CLIError("The new node count is the same as the current node count.") instance.count = new_node_count # pylint: disable=no-member return 
sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance) def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument client, resource_group_name, cluster_name, kubernetes_version, nodepool_name, no_wait=False): instance = client.get(resource_group_name, cluster_name, nodepool_name) instance.orchestrator_version = kubernetes_version return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance) def aks_agentpool_update(cmd, # pylint: disable=unused-argument client, resource_group_name, cluster_name, nodepool_name, tags=None, enable_cluster_autoscaler=False, disable_cluster_autoscaler=False, update_cluster_autoscaler=False, min_count=None, max_count=None, no_wait=False): update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler if update_flags != 1: if update_flags != 0 or tags is None: raise CLIError('Please specify "--enable-cluster-autoscaler" or ' '"--disable-cluster-autoscaler" or ' '"--update-cluster-autoscaler"') instance = client.get(resource_group_name, cluster_name, nodepool_name) node_count = instance.count if min_count is None or max_count is None: if enable_cluster_autoscaler or update_cluster_autoscaler: raise CLIError('Please specifying both min-count and max-count when --enable-cluster-autoscaler or ' '--update-cluster-autoscaler set.') if min_count is not None and max_count is not None: if int(min_count) > int(max_count): raise CLIError('value of min-count should be less than or equal to value of max-count.') if int(node_count) < int(min_count) or int(node_count) > int(max_count): raise CLIError("current node count '{}' is not in the range of min-count and max-count.".format(node_count)) if enable_cluster_autoscaler: if instance.enable_auto_scaling: logger.warning('Autoscaler is already enabled for this node pool.\n' 'Please run "az aks nodepool update --update-cluster-autoscaler" ' 'if you want to update 
min-count or max-count.') return None instance.min_count = int(min_count) instance.max_count = int(max_count) instance.enable_auto_scaling = True if update_cluster_autoscaler: if not instance.enable_auto_scaling: raise CLIError('Autoscaler is not enabled for this node pool.\n' 'Run "az aks nodepool update --enable-cluster-autoscaler" ' 'to enable cluster with min-count and max-count.') instance.min_count = int(min_count) instance.max_count = int(max_count) if disable_cluster_autoscaler: if not instance.enable_auto_scaling: logger.warning('Autoscaler is already disabled for this node pool.') return None instance.enable_auto_scaling = False instance.min_count = None instance.max_count = None instance.tags = tags return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance) def aks_agentpool_delete(cmd, # pylint: disable=unused-argument client, resource_group_name, cluster_name, nodepool_name, no_wait=False): agentpool_exists = False instances = client.list(resource_group_name, cluster_name) for agentpool_profile in instances: if agentpool_profile.name.lower() == nodepool_name.lower(): agentpool_exists = True break if not agentpool_exists: raise CLIError("Node pool {} doesnt exist, " "use 'aks nodepool list' to get current node pool list".format(nodepool_name)) return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name) def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False): instance = client.get(resource_group_name, name) subscription_id = get_subscription_id(cmd.cli_ctx) instance = _update_addons( cmd, instance, subscription_id, resource_group_name, name, addons, enable=False, no_wait=no_wait ) # send the managed cluster representation to update the addon profiles return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None, 
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_id=None, appgw_subnet_id=None, appgw_shared=False, appgw_watch_namespace=None, no_wait=False): instance = client.get(resource_group_name, name) subscription_id = get_subscription_id(cmd.cli_ctx) service_principal_client_id = instance.service_principal_profile.client_id instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True, workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_shared=appgw_shared, appgw_watch_namespace=appgw_watch_namespace, no_wait=no_wait) if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled: _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent']) if CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles: if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config: appgw_id = instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] from msrestazure.tools import parse_resource_id, resource_id appgw_id_dict = parse_resource_id(appgw_id) appgw_group_id = resource_id(subscription=appgw_id_dict["subscription"], resource_group=appgw_id_dict["resource_group"]) if not _add_role_assignment(cmd.cli_ctx, 'Contributor', service_principal_client_id, scope=appgw_group_id): logger.warning('Could not create a role assignment for application gateway: {appgw_id} ' 'specified in {CONST_INGRESS_APPGW_ADDON_NAME} addon. 
' 'Are you an Owner on this subscription?') if CONST_INGRESS_APPGW_SUBNET_ID in instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config: subnet_id = instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_SUBNET_ID] from msrestazure.tools import parse_resource_id, resource_id if not _add_role_assignment(cmd.cli_ctx, 'Contributor', service_principal_client_id, scope=subnet_id): logger.warning('Could not create a role assignment for subnet: {subnet_id} ' 'specified in {CONST_INGRESS_APPGW_ADDON_NAME} addon. ' 'Are you an Owner on this subscription?') if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled: # adding a wait here since we rely on the result for role assignment result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance)) cloud_name = cmd.cli_ctx.cloud.name # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud if cloud_name.lower() == 'azurecloud': from msrestazure.tools import resource_id cluster_resource_id = resource_id( subscription=subscription_id, resource_group=resource_group_name, namespace='Microsoft.ContainerService', type='managedClusters', name=name ) _add_monitoring_role_assignment(result, cluster_resource_id, cmd) else: result = sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) return result def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name) def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements instance, subscription_id, resource_group_name, name, addons, enable, workspace_resource_id=None, subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_id=None, appgw_subnet_id=None, appgw_shared=False, appgw_watch_namespace=None, no_wait=False): # pylint: disable=unused-argument # 
parse the comma-separated addons argument addon_args = addons.split(',') addon_profiles = instance.addon_profiles or {} if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles: addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True) os_type = 'Linux' # for each addons argument for addon_arg in addon_args: addon = ADDONS[addon_arg] if addon == 'aciConnector': # only linux is supported for now, in the future this will be a user flag addon += os_type # addon name is case insensitive addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon) if enable: # add new addons or update existing ones and enable them addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False)) # special config handling for certain addons if addon == 'omsagent': if addon_profile.enabled: raise CLIError('The monitoring addon is already enabled for this managed cluster.\n' 'To change monitoring configuration, run "az aks disable-addons -a monitoring"' 'before enabling it again.') if not workspace_resource_id: workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring( cmd, subscription_id, resource_group_name) workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id} elif addon.lower() == ('aciConnector' + os_type).lower(): if addon_profile.enabled: raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n' 'To change virtual-node configuration, run ' '"az aks disable-addons -a virtual-node -g {resource_group_name}" ' 'before enabling it again.') if not subnet_name: raise CLIError('The aci-connector addon requires setting a subnet name.') addon_profile.config = {'SubnetName': subnet_name} elif 
addon.lower() == CONST_INGRESS_APPGW_ADDON_NAME: if addon_profile.enabled: raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n' 'To change ingress-appgw configuration, run ' f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" ' 'before enabling it again.') addon_profile = ManagedClusterAddonProfile(enabled=True, config={}) if appgw_name is not None: addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name if appgw_subnet_prefix is not None: addon_profile.config[CONST_INGRESS_APPGW_SUBNET_PREFIX] = appgw_subnet_prefix if appgw_id is not None: addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id if appgw_subnet_id is not None: addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id if appgw_shared: addon_profile.config[CONST_INGRESS_APPGW_SHARED] = "true" if appgw_watch_namespace is not None: addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace addon_profiles[addon] = addon_profile else: if addon not in addon_profiles: raise CLIError("The addon {} is not installed.".format(addon)) addon_profiles[addon].config = None addon_profiles[addon].enabled = enable instance.addon_profiles = addon_profiles # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return instance def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument return client.list_orchestrators(location, resource_type='managedClusters') def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name): """Merge an unencrypted kubeconfig into the file at the specified path, or print it to stdout if the path is "-". 
""" # Special case for printing to stdout if path == "-": print(kubeconfig) return # ensure that at least an empty ~/.kube/config exists directory = os.path.dirname(path) if directory and not os.path.exists(directory): try: os.makedirs(directory) except OSError as ex: if ex.errno != errno.EEXIST: raise if not os.path.exists(path): with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'): pass # merge the new kubeconfig into the existing one fd, temp_path = tempfile.mkstemp() additional_file = os.fdopen(fd, 'w+t') try: additional_file.write(kubeconfig) additional_file.flush() merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name) except yaml.YAMLError as ex: logger.warning('Failed to merge credentials to kube config file: %s', ex) finally: additional_file.close() os.remove(temp_path) def _handle_merge(existing, addition, key, replace): if not addition[key]: return if existing[key] is None: existing[key] = addition[key] return for i in addition[key]: for j in existing[key]: if i['name'] == j['name']: if replace or i == j: existing[key].remove(j) else: from knack.prompting import prompt_y_n msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?' overwrite = False try: overwrite = prompt_y_n(msg.format(i['name'])) except NoTTYException: pass if overwrite: existing[key].remove(j) else: msg = 'A different object named {} already exists in {} in your kubeconfig file.' 
raise CLIError(msg.format(i['name'], key)) existing[key].append(i) def load_kubernetes_configuration(filename): try: with open(filename) as stream: return yaml.safe_load(stream) except (IOError, OSError) as ex: if getattr(ex, 'errno', 0) == errno.ENOENT: raise CLIError('{} does not exist'.format(filename)) except (yaml.parser.ParserError, UnicodeDecodeError) as ex: raise CLIError('Error parsing {} ({})'.format(filename, str(ex))) def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None): existing = load_kubernetes_configuration(existing_file) addition = load_kubernetes_configuration(addition_file) if context_name is not None: addition['contexts'][0]['name'] = context_name addition['contexts'][0]['context']['cluster'] = context_name addition['clusters'][0]['name'] = context_name addition['current-context'] = context_name # rename the admin context so it doesn't overwrite the user context for ctx in addition.get('contexts', []): try: if ctx['context']['user'].startswith('clusterAdmin'): admin_name = ctx['name'] + '-admin' addition['current-context'] = ctx['name'] = admin_name break except (KeyError, TypeError): continue if addition is None: raise CLIError('failed to load additional configuration from {}'.format(addition_file)) if existing is None: existing = addition else: _handle_merge(existing, addition, 'clusters', replace) _handle_merge(existing, addition, 'users', replace) _handle_merge(existing, addition, 'contexts', replace) existing['current-context'] = addition['current-context'] # check that ~/.kube/config is only read- and writable by its owner if platform.system() != 'Windows': existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode)) if not existing_file_perms.endswith('600'): logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.', existing_file, existing_file_perms) with open(existing_file, 'w+') as stream: yaml.safe_dump(existing, stream, 
default_flow_style=False) current_context = addition.get('current-context', 'UNKNOWN') msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file) print(msg) def cloud_storage_account_service_factory(cli_ctx, kwargs): from azure.cli.core.profiles import ResourceType, get_sdk t_cloud_storage_account = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount') account_name = kwargs.pop('account_name', None) account_key = kwargs.pop('account_key', None) sas_token = kwargs.pop('sas_token', None) kwargs.pop('connection_string', None) return t_cloud_storage_account(account_name, account_key, sas_token) def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name): from azure.mgmt.monitor import MonitorManagementClient diag_settings_client = get_mgmt_service_client(cli_ctx, MonitorManagementClient).diagnostic_settings subscription_id = get_subscription_id(cli_ctx) aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \ '/managedClusters/{2}'.format(subscription_id, resource_group_name, name) diag_settings = diag_settings_client.list(aks_resource_id) if diag_settings.value: return diag_settings.value[0].storage_account_id print("No diag settings specified") return None def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') nodes = subprocess.check_output( ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "node", "--no-headers"], universal_newlines=True) logger.debug(nodes) node_lines = nodes.splitlines() ready_nodes = {} for node_line in node_lines: columns = node_line.split() logger.debug(node_line) if columns[1] != "Ready": logger.warning("Node %s is not Ready. 
Current state is: %s.", columns[0], columns[1]) else: ready_nodes[columns[0]] = False logger.debug('There are %s ready nodes in the cluster', str(len(ready_nodes))) if not ready_nodes: logger.warning('No nodes are ready in the current cluster. Diagnostics info might not be available.') network_config_array = [] network_status_array = [] apds_created = False max_retry = 10 for retry in range(0, max_retry): if not apds_created: apd = subprocess.check_output( ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "apd", "-n", "aks-periscope", "--no-headers"], universal_newlines=True ) apd_lines = apd.splitlines() if apd_lines and 'No resources found' in apd_lines[0]: apd_lines.pop(0) print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines), len(ready_nodes), '.' * retry), end='') if len(apd_lines) < len(ready_nodes): time.sleep(3) else: apds_created = True print() else: for node_name in ready_nodes: if ready_nodes[node_name]: continue apdName = "aks-periscope-diagnostic-" + node_name try: network_config = subprocess.check_output( ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "apd", apdName, "-n", "aks-periscope", "-o=jsonpath={.spec.networkconfig}"], universal_newlines=True) logger.debug('Dns status for node %s is %s', node_name, network_config) network_status = subprocess.check_output( ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "apd", apdName, "-n", "aks-periscope", "-o=jsonpath={.spec.networkoutbound}"], universal_newlines=True) logger.debug('Network status for node %s is %s', node_name, network_status) if not network_config or not network_status: print("The diagnostics information for node {} is not ready yet. 
" "Will try again in 10 seconds.".format(node_name)) time.sleep(10) break network_config_array += json.loads('[' + network_config + ']') network_status_object = json.loads(network_status) network_status_array += format_diag_status(network_status_object) ready_nodes[node_name] = True except subprocess.CalledProcessError as err: raise CLIError(err.output) print() if network_config_array: print("Below are the network configuration for each node: ") print() print(tabulate(network_config_array, headers="keys", tablefmt='simple')) print() else: logger.warning("Could not get network config. " "Please run 'az aks kanalyze' command later to get the analysis results.") if network_status_array: print("Below are the network connectivity results for each node:") print() print(tabulate(network_status_array, headers="keys", tablefmt='simple')) else: logger.warning("Could not get networking status. " "Please run 'az aks kanalyze' command later to get the analysis results.") def format_diag_status(diag_status): for diag in diag_status: if diag["Status"]: if "Error:" in diag["Status"]: diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}' else: diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}' return diag_status def format_bright(msg): return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}' def format_hyperlink(the_link): return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
main.py
# -*- coding: utf-8 -*-
# A3C-style training driver: spawns PARALLEL_SIZE trainer threads that share one
# global network and a global step counter, with checkpoint save/restore.
import tensorflow as tf
import threading
import numpy as np
import signal
import random
import math
import os
import time

from model import SomaticSimpleNetwork, SomaticRecurrentNetwork
from model import create_model
from trainer import Trainer
from rmsprop_applier import RMSPropApplier
from constants import *

CHECKPOINT_DIR = "checkpoints"

# Training parameters
env_type = 2  # (sin 30:20)
#model_type = "plain"
#model_type = "rnn"
#model_type = "rnn_action"
#model_type = "plain_cnn"
model_type = "rnn_cnn"
#max_train_step = 200 * 1000
#max_train_step = 500 * 1000
max_train_step = 1000 * 1000
log_file_name = model_type


def log_uniform(lo, hi, rate):
    """Interpolate between lo and hi in log-space; rate in [0, 1]."""
    log_lo = math.log(lo)
    log_hi = math.log(hi)
    v = log_lo * (1-rate) + log_hi * rate
    return math.exp(v)


device = "/gpu:0"
initial_learning_rate = log_uniform(INITIAL_ALPHA_LOW, INITIAL_ALPHA_HIGH, INITIAL_ALPHA_LOG_RATE)

# shared across all trainer threads; updated without a lock
# NOTE(review): concurrent `global_t += diff` from multiple threads is not atomic —
# step counts may be slightly off; confirm whether this is acceptable here.
global_t = 0
stop_requested = False

# model id -1 marks the shared/global copy (per create_model's convention — TODO confirm)
global_network = create_model(-1, model_type)

trainers = []
learning_rate_input = tf.placeholder("float")
grad_applier = RMSPropApplier(learning_rate = learning_rate_input,
                              decay = RMSP_ALPHA,
                              momentum = 0.0,
                              epsilon = RMSP_EPSILON,
                              clip_norm = GRAD_NORM_CLIP,
                              device = device)
for i in range(PARALLEL_SIZE):
    trainer = Trainer(i, model_type, env_type, global_network, initial_learning_rate,
                      learning_rate_input, grad_applier, MAX_TIME_STEP, device = device)
    trainers.append(trainer)

# prepare session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False, allow_soft_placement=True))
init = tf.global_variables_initializer()
sess.run(init)

# summary for tensorboard
score_input = tf.placeholder(tf.int32)
step_size_input = tf.placeholder(tf.int32)
tf.summary.scalar("score", score_input)
tf.summary.scalar("step_size", step_size_input)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter("log/" + log_file_name, sess.graph)

# init or load checkpoint with saver
saver = tf.train.Saver()
checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if checkpoint and checkpoint.model_checkpoint_path:
    saver.restore(sess, checkpoint.model_checkpoint_path)
    print("checkpoint loaded:", checkpoint.model_checkpoint_path)
    # checkpoint paths look like ".../checkpoint-<step>"; recover the step suffix
    tokens = checkpoint.model_checkpoint_path.split("-")
    # set global step
    global_t = int(tokens[1])
    print(">>> global step set: ", global_t)
    # set wall time (persisted alongside the checkpoint)
    wall_t_fname = CHECKPOINT_DIR + '/' + 'wall_t.' + str(global_t)
    with open(wall_t_fname, 'r') as f:
        wall_t = float(f.read())
else:
    print("Could not find old checkpoint")
    # set wall time
    wall_t = 0.0


def train_function(parallel_index):
    """Thread body: run one trainer until stop/step limits are hit."""
    global global_t

    trainer = trainers[parallel_index]
    # set start_time (shifted back by previously accumulated wall time)
    start_time = time.time() - wall_t
    trainer.set_start_time(start_time)

    while True:
        if stop_requested:
            break
        if global_t > MAX_TIME_STEP:
            break
        if global_t > max_train_step:
            break
        diff_global_t = trainer.process(sess, global_t, summary_writer,
                                        summary_op, score_input, step_size_input)
        global_t += diff_global_t


def signal_handler(signal, frame):
    """SIGINT handler: ask the trainer threads to wind down.

    NOTE(review): the parameter `signal` shadows the imported module of the same name.
    """
    global stop_requested
    print('You pressed Ctrl+C!')
    stop_requested = True


train_threads = []
for i in range(PARALLEL_SIZE):
    train_threads.append(threading.Thread(target=train_function, args=(i,)))

signal.signal(signal.SIGINT, signal_handler)

# set start time
start_time = time.time() - wall_t

for t in train_threads:
    t.start()

print('Press Ctrl+C to stop')
signal.pause()

print('Now saving data. Please wait')

for t in train_threads:
    t.join()

if not os.path.exists(CHECKPOINT_DIR):
    os.mkdir(CHECKPOINT_DIR)

# write wall time so a later restore can resume elapsed-time accounting
wall_t = time.time() - start_time
wall_t_fname = CHECKPOINT_DIR + '/' + 'wall_t.' + str(global_t)
with open(wall_t_fname, 'w') as f:
    f.write(str(wall_t))

saver.save(sess, CHECKPOINT_DIR + '/' + 'checkpoint', global_step = global_t)
print("Training finished.")
handler.py
# Event-driven live-stream watcher: checks configured URLs, downloads streams that
# just went live, and uploads finished recordings, all via an EventManager bus.
import multiprocessing
import time
import requests
import engine
import common
from engine import CHECK, BE_MODIFIED, DOWNLOAD_UPLOAD, TO_MODIFY, UPLOAD, urls, url_status, url_status_base
from engine.downloader import download, Extractor
from engine.upload import Upload
from common import logger
from common.event import Event

# Initialize the event manager
event_manager = common.event.EventManager()


@event_manager.register(DOWNLOAD_UPLOAD, block=True)
def process(name, url, mod):
    """Download ('dl') and/or upload ('up') a stream, then emit BE_MODIFIED for *url*.

    NOTE(review): `return event` inside `finally` swallows any exception raised in
    the try body (and overrides the early `return url`) — confirm this is intended.
    """
    try:
        now = common.time_now()
        if mod == 'dl':
            # download in a separate process so a crash can't take the worker down
            p = multiprocessing.Process(target=download, args=(name, url))
            p.start()
            p.join()
            # download(name, url)
            Upload(name).start(url, now)
        elif mod == 'up':
            Upload(name).start(url, now)
        else:
            return url
    finally:
        event = Event(BE_MODIFIED)
        event.args = (url,)
        # return url
        return event


@event_manager.server(urls, url_status, url_status_base)
class KernelFunc:
    """Holds shared URL state and the event handlers registered on the manager."""

    def __init__(self, _urls, _url_status, _url_status_base):
        self.urls = _urls
        # url -> 0 idle, 1 downloading, 2 uploading (inferred from usage below — TODO confirm)
        self.url_status = _url_status
        self.url_status_base = _url_status_base
        # split checkers into batch-capable ones and one-by-one ones
        self.batches, self.onebyone = Extractor().sorted_checker(_urls)

    @event_manager.register(CHECK, block=True)
    def all_check(self):
        """Probe every configured URL; return UPLOAD and TO_MODIFY events with the live list."""
        live = []
        try:
            for batch in self.batches:
                res = batch.check()
                if res:
                    live.extend(res)
            for one in self.onebyone:
                for url in one.url_list:
                    if one('检测' + url, url).check_stream():
                        live.append(url)
                    if url != one.url_list[-1]:
                        # rest between probes to avoid hammering the site
                        logger.debug('歇息会')
                        time.sleep(15)
        except requests.exceptions.ReadTimeout as timeout:
            logger.error("ReadTimeout:" + str(timeout))
        except requests.exceptions.SSLError as sslerr:
            logger.error("SSLError:" + str(sslerr))
        except requests.exceptions.ConnectTimeout as timeout:
            logger.error("ConnectTimeout:" + str(timeout))
        except requests.exceptions.ConnectionError as connerr:
            logger.error("ConnectionError:" + str(connerr))
        except requests.exceptions.ChunkedEncodingError as ceer:
            logger.error("ChunkedEncodingError:" + str(ceer))
        except requests.exceptions.RequestException:
            logger.exception("unknown")
        finally:
            # always emit both events, even after a network failure (live may be partial)
            event_t = Event(TO_MODIFY)
            event_t.args = (live,)
            event_u = Event(UPLOAD)
            event_u.args = (live,)
            return event_u, event_t

    @event_manager.register(engine.TO_MODIFY)
    def modify(self, live_m):
        """For each newly-live URL not already downloading, emit a 'dl' event and mark status 1."""
        live_d = {}
        # print(live_m)
        if live_m:
            event = []
            for live in live_m:
                if self.url_status[live] == 1:
                    # already live and downloading — nothing to do
                    # print('已开播正在下载')
                    pass
                else:
                    name = engine.find_name(live)
                    logger.debug(name + '刚刚开播,去下载')
                    event_d = Event(DOWNLOAD_UPLOAD)
                    event_d.args = (name, live, 'dl')
                    event.append(event_d)
                    live_d[live] = 1
            self.url_status.update(live_d)
            # url_status = {**url_status_base, **live_d}
            return tuple(event)
        else:
            logger.debug('无人直播')

    def free(self, list_url):
        """True when none of *list_url* is busy (downloading=1 or uploading=2)."""
        status_num = list(map(lambda x: self.url_status.get(x), list_url))
        # print(status_num)
        if 1 in status_num or 2 in status_num:
            return False
        else:
            return True

    @event_manager.register(engine.UPLOAD)
    def free_upload(self, _urls):
        """Emit 'up' events for idle titles with pending files; mark their URL status 2."""
        logger.debug(_urls)
        event = []
        for title, v in engine.links_id.items():
            # names = list(map(find_name, urls))
            url = v[0]
            # if title not in names and url_status[url] == 0 and Upload(title, url).filter_file():
            if self.free(v) and Upload(title).filter_file():
                event_d = Event(DOWNLOAD_UPLOAD)
                event_d.args = (title, url, 'up')
                event.append(event_d)
                # self.event_manager.send_event(event_d)
                self.url_status[url] = 2
                # print('up')
        return tuple(event)
        # Upload(title, url).start()
        # except:
        #     logger.exception()
        # print('寻找结束')

    @event_manager.register(engine.BE_MODIFIED)
    def revise(self, url):
        """Reset *url*'s status to idle (0) after a download/upload cycle completes."""
        if url:
            # url_status = {**url_status, **{url: 0}}
            self.url_status.update({url: 0})
            # print('更新字典')
            # print(url_status)
test_examples.py
import itertools
import multiprocessing
import runpy
import sys
from os import path as osp

import pytest


def run_main(*args):
    """Execute the script at ``args[0]`` as ``__main__``, with *args* as its argv."""
    # make the target script see the intended command line
    sys.argv = list(args)
    script = args[0]
    # run_path differs from command-line invocation in one way: a file target's
    # parent directory is not added to sys.path, so imports of sibling modules
    # would break unless we patch sys.path here ourselves.
    if osp.isfile(script):
        sys.path.insert(0, osp.dirname(script))
    runpy.run_path(script, run_name="__main__")


def powerset(iterable):
    """Yield every subset of *iterable* as a tuple, in order of increasing size."""
    pool = list(iterable)
    return (
        combo
        for size in range(len(pool) + 1)
        for combo in itertools.combinations(pool, size)
    )


def run_main_subproc(args):
    """Run ``run_main(*args)`` in a freshly spawned process and require success."""
    # This test needs its own process: otherwise there is a potential
    # OpenGL context clash between consecutive examples.
    spawn_ctx = multiprocessing.get_context("spawn")
    worker = spawn_ctx.Process(target=run_main, args=args)
    worker.start()
    worker.join()
    assert worker.exitcode == 0


@pytest.mark.gfxtest
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
    or not osp.exists("data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"),
    reason="Requires the habitat-test-scenes",
)
@pytest.mark.parametrize(
    "args",
    [
        ("examples/tutorials/stereo_agent.py", "--no-display"),
        ("examples/tutorials/lighting_tutorial.py", "--no-show-images"),
        ("examples/tutorials/new_actions.py",),
        (
            "examples/tutorials/nb_python/rigid_object_tutorial.py",
            "--no-show-video",
            "--no-make-video",
        ),
        (
            "examples/tutorials/nb_python/ECCV_2020_Navigation.py",
            "--no-make-video",
            "--no-display",
        ),
        (
            "examples/tutorials/nb_python/ECCV_2020_Interactivity.py",
            "--no-make-video",
            "--no-display",
        ),
        ("examples/tutorials/semantic_id_tutorial.py", "--no-show-images"),
    ],
)
def test_example_modules(args):
    run_main_subproc(args)


@pytest.mark.gfxtest
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"),
    reason="Requires the habitat-test-scenes",
)
@pytest.mark.parametrize(
    "args",
    [
        ["examples/example.py"] + list(p)
        for p in powerset(
            [
                "--compute_shortest_path",
                "--compute_action_shortest_path",
                "--enable_physics",
                "--semantic_sensor",
                "--depth_sensor",
                "--recompute_navmesh",
            ]
        )
        if not (("--compute_action_shortest_path" in p) and ("--enable_physics" in p))
    ],
)
def test_example_script(args):
    run_main_subproc(args)
simple_watchtower.py
# JSON-RPC watchtower: accepts contract-watch requests, polls an upstream
# notification service for on-chain updates, and notifies the requester when the
# counterparty's close entrypoint is called.
from werkzeug.wrappers import Request, Response
from werkzeug.serving import run_simple
from threading import Thread, Lock
from time import sleep
import requests
import argparse
import json

from jsonrpc import JSONRPCResponseManager, dispatcher

# contract_hash -> {"contract_hash", "status", "user_id", "bound_search"}
notification_dict = {}
ntf_service = None   # upstream notification-service URI (set in main())
verbose = 0
counter = 1          # JSON-RPC request id counter
lock = None          # guards notification_dict (created in main())


def contract_all_update_search(contract_hash, start=None, end=None):
    """Poll the notification service for *contract_hash* until the counterparty's
    close entrypoint appears (or the request is cancelled), then notify and clean up.

    Runs in its own thread, started by contract_update().
    """
    global counter
    global notification_dict
    global lock

    # Variable where to store contract storage
    storage = None
    # Variable where to store RPC response
    response = ""

    # Notification request status for which we need to continue scanning
    status = ["in progress"]
    if not notification_dict[contract_hash]["bound_search"]:
        status.append("past scanned - in progress")

    # Which entrypoint we need to notify the contractor about: a customer watches
    # for merchClose, a merchant for custClose.
    # This could actually be set by the contractor, and his identity would be irrelevant.
    entrypoint = "custClose"
    if notification_dict[contract_hash]["user_id"] == "customer":
        entrypoint = "merchClose"

    try:
        # Format RPC request payload
        payload = {}
        payload["method"] = "contract_update"
        payload["jsonrpc"] = "2.0"
        payload["id"] = counter
        payload["params"] = {"contract_hash": contract_hash}
        if start is not None:
            payload["params"]["start"] = start
        if end is not None:
            payload["params"]["end"] = end
        # Update counter for new requests
        counter += 1

        # Send RPC request and receive response
        response = requests.post(ntf_service, json=payload)
        response = response.json()
        # Extract response result
        storage = json.loads(response["result"])
        if verbose:
            print(contract_hash)
            print(storage)
            print()

        # While the notification request exists and the notification service is still scanning
        while contract_hash in notification_dict.keys() and storage["status"] in status:
            try:
                # Get response and extract storage
                response = requests.post(ntf_service, json=payload)
                response = response.json()
                storage = json.loads(response["result"])
                if verbose:
                    print(contract_hash)
                    print(storage)
                    print()
                # If the entrypoint of interest was called, drop the request so
                # the while-condition ends the loop.
                notifications = storage["data"]
                for key in notifications.keys():
                    if notifications[key]["entrypoint"] == entrypoint:
                        with lock:
                            del notification_dict[contract_hash]
            except Exception as e:
                print("Error", e)
            # Delay to avoid spamming the notification service
            sleep(2)

        # Send notification to contractor
        # TODO update to send this to communication channel specified by contractor
        print("-------------- Sending response --------------")
        print(storage)
        print("----------------------------------------------")
        print()

        # Delete notification request
        if contract_hash in notification_dict.keys():
            with lock:
                del notification_dict[contract_hash]
    except Exception as e:
        print("Error", e)


def get_storage(contract_hash):
    """Fetch a contract's storage via the notification service; None on failure."""
    storage = None
    response = ""
    try:
        # Format RPC request payload
        payload = {}
        payload["method"] = "get_storage"
        payload["jsonrpc"] = "2.0"
        payload["id"] = 0
        payload["params"] = {"contract_hash": contract_hash}
        # Send RPC request, receive response
        response = requests.post(ntf_service, json=payload)
        response = response.json()
        # Extract and return result
        storage = json.loads(response["result"])
        return storage
    except Exception as e:
        print("Error", response, e)
        return None


def clean_notifications():
    """Delete every notification request whose status is already 'sent'."""
    global notification_dict
    global lock
    with lock:
        notifications = list(notification_dict.keys())
        for ntf in notifications:
            if notification_dict[ntf]["status"] == "sent":
                del notification_dict[ntf]


@dispatcher.add_method
def list_requests():
    """RPC: return all pending request hashes as one space-separated string."""
    global lock
    ntf_list = ""
    try:
        with lock:
            for ntf in notification_dict.keys():
                ntf_list += ntf + " "
        return {"success": ntf_list}
    except Exception as e:
        print("Error", e)
        return {"error": str(e)}


@dispatcher.add_method
def get_request(contract_hash):
    """RPC: return the stored notification request for *contract_hash*."""
    global notification_dict
    global lock
    response = {}
    try:
        if contract_hash in notification_dict.keys():
            with lock:
                response = notification_dict[contract_hash]
        else:
            response = {"error": "Notification request not found."}
        return response
    except Exception as e:
        print("Error", e)
        return {"error": str(e)}


@dispatcher.add_method
def clear_request(contract_hash):
    """RPC: delete the notification request for *contract_hash* if present."""
    global notification_dict
    global lock
    try:
        if contract_hash in notification_dict.keys():
            with lock:
                del notification_dict[contract_hash]
            return {"success": "Notification request " + str(contract_hash) + " successfully deleted."}
        else:
            return {"success": "Notification request " + str(contract_hash) + " not found."}
    except Exception as e:
        print("Error", e)
        return {"error": str(e)}


@dispatcher.add_method
def clear_all():
    """RPC: delete every notification request."""
    global lock
    try:
        with lock:
            notification_dict.clear()
        return {"success": "All requests were successfully deleted."}
    except Exception as e:
        print("Error", e)
        return {"error": str(e)}


@dispatcher.add_method
def contract_update(contract_hash, user_id, start=None, end=None):
    """RPC: register (or re-query) a watch request for *contract_hash*.

    Returns the stored request as JSON; a finished request is marked 'sent' so the
    cleaner can drop it.
    """
    # FIX: the original declared `global contract_dict`, a name that exists nowhere
    # in this module — the function actually works on notification_dict.
    global notification_dict
    global lock
    response = ""
    try:
        # If notification request already present, send it to contractor and set request to "sent"
        # TODO: a contract atm can only have one request; we can change that by using the RPC ids instead
        if contract_hash in notification_dict:
            response = json.dumps(notification_dict[contract_hash])
            with lock:
                if notification_dict[contract_hash]["status"] == "finished":
                    notification_dict[contract_hash]["status"] = "sent"
            return response
        else:
            # Check contract exists
            contract_storage = get_storage(contract_hash)
            if contract_storage is None:
                return {"error": "Contract " + contract_hash + " does not exist."}
            # Check user identity
            if user_id not in ["customer", "merchant"]:
                return {"error": "Expected 'customer' or 'merchant' as identity, received: " + user_id + "."}
            bound_search = 0
            if end is not None:
                bound_search = 1
            # Make new notification request to notification service
            with lock:
                notification_dict[contract_hash] = {"contract_hash": contract_hash,
                                                    "status": "in progress",
                                                    "user_id": user_id,
                                                    "bound_search": bound_search}
            response = json.dumps(notification_dict[contract_hash])
            Thread(target=contract_all_update_search,
                   args=(contract_hash,),
                   kwargs={"start": start, "end": end}).start()
            # Forward notification service as token the request was accepted
            return response
    except Exception as e:
        print("Error", e)
        return {"error": str(e)}


@Request.application
def application(request):
    """WSGI entry point: dispatch JSON-RPC, then sweep already-sent requests."""
    try:
        if verbose:
            print("---- New request:", request.data, "\n")
        # Handle request
        response = JSONRPCResponseManager.handle(request.data, dispatcher)
        if verbose:
            print("---- Response:", response.json, "\n")
        # Clean sent requests
        clean_notifications()
        return Response(response.json, mimetype='application/json')
    except Exception as e:
        print("Error", e)
        # FIX: werkzeug Response cannot take a dict body (raises TypeError), which
        # made the error path itself crash — serialize to JSON first.
        return Response(json.dumps({"error": str(e)}), mimetype='application/json')


def main():
    """Parse CLI options, configure globals, and serve on localhost:40000."""
    global ntf_service
    global verbose
    global lock

    parser = argparse.ArgumentParser(description='Optional app description')
    parser.add_argument("-net", "--network", type=str,
                        help="the network, such as mainnet, or a RPC node uri", default=None)
    parser.add_argument("-v", "--verbose", help="print notification updates", action="store_true")
    args = parser.parse_args()

    # Set network and get head's level
    if args.network is None:
        return "Network needed"
    ntf_service = args.network
    if args.verbose:
        verbose = True

    lock = Lock()
    run_simple('localhost', 40000, application)


if __name__ == '__main__':
    main()
main.py
# Experiment driver for ToPS/ToPL: sweeps a privacy parameter, runs each setting
# exp_round times in parallel processes, and records results to JSON files.
import argparse
from loguru import logger
import multiprocessing as mp
import sys
import numpy as np

import recorder
from evaluator import Evaluator
from user.user_factory import UserFactory

# NOTE(review): this module-level queue is shadowed by the local `q = mp.Queue()`
# in parallel_run() and appears unused — candidate for removal.
q = mp.Queue()

##################################
# uncomment the following to run #
##################################
# ToPS visualization
# general_main('eps_medium', 'precise', user_types=['dns_1k'], exp_round=1, methods=['svt_hie'])
# general_main('eps', 'precise', user_types=['dns_1k'], exp_round=1, methods=['pak'])
# Overall performance comparison
# general_main('eps_medium', 'query_mse', methods=['svt_hie', 'ss_hie', 'svt_bt', 'pak'])
# Smoother comparison
# general_main('eps_medium', 'smooth',)
# Hierarchy comparison
# general_main('eps_medium', 'range',)
# There is no optimal p
# general_main('eps_range_medium', 'ell_est', user_types=['dns_1k'], percentile_methods=['np_p85', 'np_p90', 'np_p95', 'np_p99.5', 'np_p99.9', 'em_mse'])
# general_main('eps_range_small', 'ell_est', user_types=['dns_1k'], percentile_methods=['np_p85', 'np_p90', 'np_p95', 'np_p99.5', 'np_p99.9', 'em_mse'])
# general_main('eps_range_medium', 'ell_est', m=4096, user_types=['dns_1k'], percentile_methods=['np_p85', 'np_p90', 'np_p95', 'np_p99.5', 'np_p99.9', 'em_mse'])
# general_main('eps_range_small', 'ell_est', m=4096, user_types=['dns_1k'], percentile_methods=['np_p85', 'np_p90', 'np_p95', 'np_p99.5', 'np_p99.9', 'em_mse'])
# Compare different method to find the threshold
# general_main('eps_medium', 'ell_est', percentile_methods=['nm_mse', 'smooth', 'smooth_pak'])
# Compare different method to find threshold in LDP
# general_main('eps_ldp', 'ell_est', range_epsilon=1, percentile_methods=['sw_mse', 'sws_mse'])
# ToPL visualization
# general_main('eps_ldp', 'precise', user_types=['dns_1k'], exp_round=1, methods=['sw_hm'])


def general_main(vary, metric,
                 user_types=('dns_1k', 'pos', 'fare', 'kosarak'),
                 epsilon=0.05, range_epsilon=0.05, m=65536, exp_round=10,
                 methods=None, percentile_methods=None,
                 range_methods=None, smooth_methods=None
                 ):
    """Run the sweep named *vary* under *metric* for each dataset in *user_types*.

    Mutates the module-level `args` namespace in place before delegating to main().
    """
    args.metric = metric
    args.exp_round = exp_round
    args.epsilon = epsilon
    args.range_epsilon = range_epsilon
    for config_i, user_type in enumerate(user_types):
        args.user_type = user_type
        args.m = m
        # total stream length: 2^20 query steps plus the m held-out users
        args.n = 2 ** 20 + args.m
        args.vary = vary
        logger.info('=== vary %s on %s (%d / %d) ===' % (args.vary, args.user_type, config_i + 1, len(user_types)))
        main(methods, percentile_methods, range_methods, smooth_methods)


def main(methods, percentile_methods, range_methods, smooth_methods):
    """Resolve the sweep grid from args.vary, then evaluate each grid point."""
    # echo the full configuration plus the command line for reproducibility
    for arg in vars(args):
        print(arg, '=', getattr(args, arg), end=', ')
    print('# %s' % (' '.join(sys.argv),))

    # pick the epsilon grid for this sweep
    if args.vary in ['eps', 'eps_range']:
        user_str = '%s_m%d' % (args.user_type, args.m)
        varys = np.round(np.linspace(0.1, 1, 10), 2)
    elif args.vary in ['eps_ldp', 'eps_range_ldp']:
        user_str = '%s_m%d' % (args.user_type, args.m)
        varys = np.round(np.linspace(0.2, 2, 10), 1)
    elif args.vary in ['eps_medium', 'eps_range_medium']:
        user_str = '%s_m%d' % (args.user_type, args.m)
        varys = np.round(np.linspace(0.01, 0.1, 10), 3)
    else:
        raise NotImplementedError(args.vary)

    if args.metric == 'ell_est' and 'range' not in args.vary:
        user_str += '_er%d' % (10 * args.range_epsilon)
    filename = 'results/%s/%s_%s.json' % (args.vary, user_str, args.metric)
    recorder.write_file_head(args, filename, varys)
    users = UserFactory.create_user(args.user_type, args)

    for i, param in enumerate(varys):
        # route the swept value into the matching args field
        if args.vary in ['eps', 'eps_medium', 'eps_small', 'eps_large', 'eps_ldp']:
            args.epsilon = param
        elif args.vary in ['eps_range', 'eps_range_medium', 'eps_range_small', 'eps_range_large', 'eps_range_ldp']:
            args.range_epsilon = param
        elif args.vary in ['p']:
            args.p = param
        logger.info('%s = %s (%d / %d)' % (args.vary, param, i + 1, len(varys)))
        parallel_run(users, filename, param, methods, percentile_methods, range_methods, smooth_methods)


def parallel_run(users, filename, param, methods, percentile_methods, range_methods, smooth_methods):
    """Evaluate one grid point args.exp_round times in separate processes and record results."""
    evaluator = Evaluator(args, users)

    def local_process(evaluator, q):
        # reseed per process so the rounds are independent
        np.random.seed()
        result = evaluator.eval(methods, percentile_methods, range_methods, smooth_methods)
        q.put(result)

    q = mp.Queue()
    processes = [mp.Process(target=local_process, args=(evaluator, q)) for _ in range(args.exp_round)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    results = [q.get() for _ in processes]
    recorder.append_to_file(args, results, filename, param)


parser = argparse.ArgumentParser(description='Exp of ToPS/ToPL')
# method parameter
parser.add_argument('--epsilon', type=float, default=0.05,
                    help='specify the differential privacy parameter, epsilon')
parser.add_argument('--range_epsilon', type=float, default=1,
                    help='specify epsilon for hierarchy (if not the same as the overall epsilon)')
parser.add_argument('--p', type=float, default=0.995,
                    help='specify the percentile')
parser.add_argument('--s', type=float, default=1,
                    help='specify the step size of NM/EM/SVT (default 1)')
parser.add_argument('--m', type=int, default=65536,
                    help='specify the number of users to hold')
parser.add_argument('--g', type=int, default=0,
                    help='specify the guessed leaf nodes (if 0, calculate online)')
parser.add_argument('--r', type=int, default=1048576,
                    help='specify the max interested range')
parser.add_argument('--hie_fanout', type=int, default=16,
                    help='specify the fanout of the hierarchy')
parser.add_argument('--exp_smooth_a', type=float, default=0.6,
                    help='specify the parameter of exponential smoothing')
# experiment parameter
parser.add_argument('--vary', type=str, default='none',
                    help='specify which parameter to vary')
parser.add_argument('--exp_round', type=int, default=16,
                    help='specify the number of iterations for varying frequency')
parser.add_argument('--write_file', type=bool, default=True,
                    help='write to file or not?')
parser.add_argument('--overwrite', type=bool, default=False,
                    help='overwrite existing results or append to existing results?')
parser.add_argument('--overwrite_method', type=bool, default=True,
                    help='overwrite existing results of a specific method (or append)')
parser.add_argument("--multi_process", type=bool, default=True,
                    help="whether to run single-process or multiple")
parser.add_argument("--metric", type=str, default='mse',
                    help="evaluation metric")
parser.add_argument('--user_type', type=str, default='adult',
                    help='specify the type of the data [synthesize, password, url]')
args = parser.parse_args()
datasets.py
# YOLOv5 dataset utilities: inference loaders (images / webcam / streams) and
# the training dataset class.  Reconstructed formatting; code tokens unchanged.
import glob
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first

help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']

# Get orientation exif tag (module-level `orientation` is used by exif_size below)
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break


def get_hash(files):
    # Returns a single hash value of a list of files
    # (sum of sizes of the files that exist; cheap dataset change detector)
    return sum(os.path.getsize(f) for f in files if os.path.isfile(f))


def exif_size(img):
    # Returns exif-corrected PIL size.
    # NOTE(review): bare except deliberately swallows any failure (missing
    # EXIF, non-JPEG) and falls back to the raw size.
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except:
        pass

    return s


def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8):
    # Build a LoadImagesAndLabels dataset plus an InfiniteDataLoader over it.
    # Make sure only the first process in DDP process the dataset first, and the following others can use the cache.
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=opt.single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      rank=rank)

    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    dataloader = InfiniteDataLoader(dataset,
                                    batch_size=batch_size,
                                    num_workers=nw,
                                    sampler=sampler,
                                    pin_memory=True,
                                    collate_fn=LoadImagesAndLabels.collate_fn)  # torch.utils.data.DataLoader()
    return dataloader, dataset


class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers.

    Uses same syntax as vanilla DataLoader.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # batch_sampler is read-only on DataLoader; bypass with object.__setattr__
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Draw len(self) batches from the single, never-exhausted worker iterator.
        for i in range(len(self)):
            yield next(self.iterator)


class _RepeatSampler(object):
    """ Sampler that repeats forever.

    Args:
        sampler (Sampler)
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)


class LoadImages:  # for inference
    # Iterates over a glob pattern, directory, or single file of images/videos,
    # yielding (path, letterboxed CHW RGB image, original BGR image, video capture).
    def __init__(self, path, img_size=640):
        p = str(Path(path))  # os-agnostic
        p = os.path.abspath(p)  # absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception('ERROR: %s does not exist' % p)

        images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
        videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'images'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
                            (p, img_formats, vid_formats)

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video; advance to the next file when the current one is exhausted
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: ' % (self.count, self.nf, path), end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1])  # save letterbox image
        return path, img, img0, self.cap

    def new_video(self, path):
        # Open a new video file and reset the frame counter.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files


class LoadWebcam:  # for inference
    # Single local or IP camera; yields one letterboxed frame per iteration.
    def __init__(self, pipe=0, img_size=640):
        self.img_size = img_size

        if pipe == '0':
            pipe = 0  # local camera
        # Other accepted pipe forms (examples, kept for reference):
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera
        # GStreamer pipelines also work, e.g.:
        # pipe = "rtspsrc location=rtsp://... ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"
        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0


class LoadStreams:  # multiple IP or RTSP cameras
    # One daemon thread per stream keeps self.imgs fresh; iteration batches
    # the latest frame from every stream.
    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'images'
        self.img_size = img_size

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            # NOTE(review): eval() on a numeric source string — acceptable only
            # because isnumeric() gates it, but still worth replacing with int(s).
            cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years


class LoadImagesAndLabels(Dataset):  # for training/testing
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        # image_weights requires per-index sampling, which rect batching breaks
        self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride def img2label_paths(img_paths): # Define label paths as a function of image paths sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings return [x.replace(sa, sb, 1).replace(os.path.splitext(x)[-1], '.txt') for x in img_paths] try: f = [] # image files for p in path if isinstance(path, list) else [path]: p = str(Path(p)) # os-agnostic parent = str(Path(p).parent) + os.sep if os.path.isfile(p): # file with open(p, 'r') as t: t = t.read().splitlines() f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path elif os.path.isdir(p): # folder f += glob.iglob(p + os.sep + '*.*') else: raise Exception('%s does not exist' % p) self.img_files = sorted( [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]) assert len(self.img_files) > 0, 'No images found' except Exception as e: raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url)) # Check cache self.label_files = img2label_paths(self.img_files) # labels cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels if os.path.isfile(cache_path): cache = torch.load(cache_path) # load if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed cache = self.cache_labels(cache_path) # re-cache else: cache = self.cache_labels(cache_path) # cache # Read cache cache.pop('hash') # remove hash labels, shapes = zip(*cache.values()) self.labels = list(labels) self.shapes = np.array(shapes, dtype=np.float64) self.img_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update n = len(shapes) # number of images bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index nb = bi[-1] + 1 # number of batches self.batch = bi # batch index of image 
        self.n = n  # total image count

        # Rectangular Training: sort by aspect ratio and pick one padded shape
        # per batch so minimal letterbox padding is needed.
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            # NOTE(review): np.int is removed in NumPy >= 1.24; should be int.
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride

        # Check labels
        create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
        nm, nf, ne, ns, nd = 0, 0, 0, 0, 0  # number missing, found, empty, datasubset, duplicate
        pbar = enumerate(self.label_files)
        if rank in [-1, 0]:
            pbar = tqdm(pbar)
        for i, file in pbar:
            l = self.labels[i]  # label
            if l is not None and l.shape[0]:
                # Each row must be [class, x, y, w, h] with normalized coords.
                assert l.shape[1] == 5, '> 5 label columns: %s' % file
                assert (l >= 0).all(), 'negative labels: %s' % file
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                if np.unique(l, axis=0).shape[0] < l.shape[0]:  # duplicate rows
                    nd += 1  # print('WARNING: duplicate rows in %s' % self.label_files[i])  # duplicate rows
                if single_cls:
                    l[:, 0] = 0  # force dataset into single-class mode
                self.labels[i] = l
                nf += 1  # file found

                # Create subdataset (a smaller dataset) — dead code unless
                # create_datasubset is flipped to True above.
                if create_datasubset and ns < 1E4:
                    if ns == 0:
                        create_folder(path='./datasubset')
                        os.makedirs('./datasubset/images')
                    exclude_classes = 43
                    if exclude_classes not in l[:, 0]:
                        ns += 1
                        # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image
                        with open('./datasubset/images.txt', 'a') as f:
                            f.write(self.img_files[i] + '\n')

                # Extract object detection boxes for a second stage classifier
                # (also dead code unless extract_bounding_boxes is enabled).
                if extract_bounding_boxes:
                    p = Path(self.img_files[i])
                    img = cv2.imread(str(p))
                    h, w = img.shape[:2]
                    for j, x in enumerate(l):
                        f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                        if not os.path.exists(Path(f).parent):
                            os.makedirs(Path(f).parent)  # make new output folder

                        b = x[1:] * [w, h, w, h]  # box
                        b[2:] = b[2:].max()  # rectangle to square
                        b[2:] = b[2:] * 1.3 + 30  # pad
                        # NOTE(review): np.int removed in NumPy >= 1.24.
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)

                        b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                        b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                        assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
            else:
                ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty
                # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i]))  # remove

            if rank in [-1, 0]:
                pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
                    cache_path, nf, nm, ne, nd, n)
        if nf == 0:
            s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
            print(s)
            assert not augment, '%s. Can not train without labels.' % s

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))  # 8 threads
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # img, hw_original, hw_resized = load_image(self, i)
                gb += self.imgs[i].nbytes
                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)

    def cache_labels(self, path='labels.cache'):
        # Cache dataset labels, check images and read shapes.
        # Corrupt entries are skipped (with a warning), not fatal.
        x = {}  # dict
        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
        for (img, label) in pbar:
            try:
                l = []
                im = Image.open(img)
                im.verify()  # PIL verify
                shape = exif_size(im)  # image size
                assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
                if os.path.isfile(label):
                    with open(label, 'r') as f:
                        l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)  # labels
                if len(l) == 0:
                    l = np.zeros((0, 5), dtype=np.float32)
                x[img] = [l, shape]
            except Exception as e:
                print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e))

        x['hash'] = get_hash(self.label_files + self.img_files)
        torch.save(x, path)  # save for next time
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        # Returns (CHW RGB tensor, labels tensor (nL, 6), image path, shapes).
        # Column 0 of labels is left zero for collate_fn to fill with the
        # in-batch image index.
        if self.image_weights:
            index = self.indices[index]

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            # Load labels: normalized xywh -> pixel xyxy in letterboxed coords
            labels = []
            x = self.labels[index]
            if x.size > 0:
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]

        if self.augment:
            # Augment imagespace (mosaic already applied random_perspective)
            if not mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes


# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    # loads 1 image from dataset, returns img, original hw, resized hw
    img = self.imgs[index]
    if img is None:  # not cached
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized


def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    # In-place random HSV jitter via per-channel lookup tables.
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

    # Histogram equalization
    # if random.random() < 0.2:
    #     for i in range(3):
    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])


def load_mosaic(self, index):
    # loads images in a mosaic: the given image plus 3 random ones, placed
    # around a random center in a 2s x 2s canvas.
    labels4 = []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels: shift into mosaic-canvas pixel coordinates
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_perspective
        # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    img4, labels4 = random_perspective(img4, labels4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4


def replicate(img, labels):
    # Replicate labels: copy the smaller half of the boxes to random free
    # positions and append matching label rows.
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)
    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y
        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)

    return img, labels


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)


def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]
    # Composes center/perspective/rotation-scale/shear/translation into one
    # 3x3 matrix, warps the image, then maps the boxes through the same matrix.

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points: all 4 corners of each box
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        if perspective:
            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
        else:  # affine
            xy = xy[:, :2].reshape(n, 8)

        # create new boxes (axis-aligned hull of the warped corners)
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # clip boxes
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets


def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1):  # box1(4,n), box2(4,n)
    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))  # aspect ratio
    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr)  # candidates


def cutout(image, labels):
    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
    # Mutates `image` in place; returns labels with heavily-obscured boxes removed.
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
        box2 = box2.transpose()

        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]

        # Intersection area
        inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                     (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)

        # box2 area
        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16

        # Intersection over box2 area
        return inter_area / box2_area

    # create random masks
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # box
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # apply random color mask
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # return unobscured labels
        if len(labels) and s > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            labels = labels[ioa < 0.60]  # remove >60% obscured labels

    return labels


def reduce_img_size(path='path/images', img_size=1024):  # from utils.datasets import *; reduce_img_size()
    # creates a new ./images_reduced folder with reduced size images of maximum size img_size
    path_new = path + '_reduced'  # reduced images path
    create_folder(path_new)
    for f in tqdm(glob.glob('%s/*.*' % path)):
        try:
            img = cv2.imread(f)
            h, w = img.shape[:2]
            r = img_size / max(h, w)  # size ratio
            if r < 1.0:
                img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)  # _LINEAR fastest
            fnew = f.replace(path, path_new)  # .replace(Path(f).suffix, '.jpg')
            cv2.imwrite(fnew, img)
        except:
            print('WARNING: image failure %s' % f)


def recursive_dataset2bmp(dataset='path/dataset_bmp'):  # from utils.datasets import *; recursive_dataset2bmp()
    # Converts dataset to bmp (for faster training), rewriting label .txt
    # references and deleting the original images.
    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
    for a, b, files in os.walk(dataset):
        for file in tqdm(files, desc=a):
            p = a + '/' + file
            s = Path(file).suffix
            if s == '.txt':  # replace text
                with open(p, 'r') as f:
                    lines = f.read()
                for f in formats:
                    lines = lines.replace(f, '.bmp')
                with open(p, 'w') as f:
                    f.write(lines)
            elif s in formats:  # replace image
                cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
                if s != '.bmp':
                    os.system("rm '%s'" % p)


def imagelist2folder(path='path/images.txt'):  # from utils.datasets import *; imagelist2folder()
    # Copies all the images in a text file (list of images) into a folder
    create_folder(path[:-4])
    with open(path, 'r') as f:
        for line in f.read().splitlines():
            os.system('cp "%s" %s' % (line, path[:-4]))
            print(line)


def create_folder(path='./new'):
    # Create folder (destructively: an existing folder at `path` is removed first)
    if os.path.exists(path):
        shutil.rmtree(path)  # delete output folder
    os.makedirs(path)  # make new output folder


def flatten_recursive(path='../coco128'):
    # Flatten a recursive directory by bringing all files to top level
    new_path = Path(path + '_flat')
    create_folder(new_path)
    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(file, new_path / Path(file).name)
main.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Top-level driver for MLPerf Inference submission runs: builds TensorRT
engines, runs harnesses, checks accuracy, and handles compliance (audit)
tests, dispatched by the ``--action`` flag parsed in ``__main__``."""

import json
import re
import traceback
import os
import sys
sys.path.insert(0, os.getcwd())

from code.common import logging, get_system_id, is_xavier
from code.common.scopedMPS import ScopedMPS, turn_off_mps
from code.common import args_to_string, find_config_files, load_configs, run_command
from code.common import BENCHMARKS, SCENARIOS
from code.common import auditing
import code.common.arguments as common_args
from importlib import import_module
import multiprocessing as mp
from multiprocessing import Process
import time
import shutil
import math


def get_benchmark(conf):
    """Instantiate and return the builder object for the benchmark named in
    ``conf["benchmark"]``, raising ValueError for unknown names."""
    benchmark_name = conf["benchmark"]

    # Do not use a map. We want to import benchmarks as we need them, because some take
    # time to load due to plugins.
    if benchmark_name == BENCHMARKS.ResNet50:
        ResNet50 = import_module("code.resnet50.tensorrt.ResNet50").ResNet50
        return ResNet50(conf)
    elif benchmark_name == BENCHMARKS.SSDResNet34:
        SSDResNet34 = import_module("code.ssd-resnet34.tensorrt.SSDResNet34").SSDResNet34
        return SSDResNet34(conf)
    elif benchmark_name == BENCHMARKS.SSDMobileNet:
        SSDMobileNet = import_module("code.ssd-mobilenet.tensorrt.SSDMobileNet").SSDMobileNet
        return SSDMobileNet(conf)
    elif benchmark_name == BENCHMARKS.BERT:
        # TODO now only BERT uses gpu_inference_streams to generate engines
        conf = apply_overrides(conf, ['gpu_inference_streams'])
        BERTBuilder = import_module("code.bert.tensorrt.bert_var_seqlen").BERTBuilder
        return BERTBuilder(conf)
    elif benchmark_name == BENCHMARKS.RNNT:
        RNNTBuilder = import_module("code.rnnt.tensorrt.rnn-t_builder").RNNTBuilder
        return RNNTBuilder(conf)
    elif benchmark_name == BENCHMARKS.DLRM:
        DLRMBuilder = import_module("code.dlrm.tensorrt.dlrm").DLRMBuilder
        return DLRMBuilder(conf)
    elif benchmark_name == BENCHMARKS.UNET:
        UNETBuilder = import_module("code.3d-unet.tensorrt.3d-unet").UnetBuilder
        return UNETBuilder(conf)
    else:
        raise ValueError("Unknown benchmark: {:}".format(benchmark_name))


def apply_overrides(config, keys):
    """Return a copy of ``config`` with any of ``keys`` that were supplied on
    the command line (per common_args.parse_args) overriding the file values.
    Falsy parsed values (None / False) are treated as "not supplied"."""
    # Make a copy so we don't modify original dict
    config = dict(config)
    override_args = common_args.parse_args(keys)
    for key in override_args:
        # Unset values (None) and unset store_true values (False) are both false-y
        if override_args[key]:
            config[key] = override_args[key]
    return config


def flatten_config(config, system_id):
    """Extract the per-system sub-config for ``system_id`` and copy the
    top-level scenario/benchmark values into it; returns None when the config
    file has no section for this system."""
    benchmark_conf = config.get(system_id, None)
    if benchmark_conf is not None:
        # Passthrough for top level values
        benchmark_conf["system_id"] = system_id
        benchmark_conf["scenario"] = config["scenario"]
        benchmark_conf["benchmark"] = config["benchmark"]
    return benchmark_conf


def launch_handle_generate_engine(*args, **kwargs):
    """Run handle_generate_engine in a child process (with retries/timeout) so
    the CUDA context is fully torn down afterwards; raises RuntimeError if the
    child never exits successfully."""
    retries = 1
    timeout = 7200  # seconds to wait for one engine-build attempt
    success = False
    for i in range(retries):
        # Build engines in another process to make sure we exit with clean cuda
        # context so that MPS can be turned off.
        from code.main import handle_generate_engine
        p = Process(target=handle_generate_engine, args=args, kwargs=kwargs)
        p.start()
        try:
            p.join(timeout)
        except KeyboardInterrupt:
            p.terminate()
            p.join(timeout)
            raise KeyboardInterrupt
        if p.exitcode == 0:
            success = True
            break

    if not success:
        raise RuntimeError("Building engines failed!")


def copy_default_engine(benchmark):
    """Copy the already-built 'default' config_ver engine file to the engine
    path of ``benchmark``'s current config_ver instead of rebuilding.

    NOTE(review): mutates benchmark.config_ver to "default" as a side effect
    of computing the source path and does not restore it.
    """
    new_path = benchmark._get_engine_name(None, None)  # Use default values
    benchmark.config_ver = "default"
    default_path = benchmark._get_engine_name(None, None)

    logging.info("Copying {:} to {:}".format(default_path, new_path))
    shutil.copyfile(default_path, new_path)


def handle_generate_engine(config, gpu=True, dla=True, copy_from_default=False):
    """Build (or copy from the default config_ver) the DLA and/or GPU TensorRT
    engines described by ``config``; logs and prints total build time."""
    benchmark_name = config["benchmark"]

    logging.info("Building engines for {:} benchmark in {:} scenario...".format(
        benchmark_name, config["scenario"]))

    start_time = time.time()

    arglist = common_args.GENERATE_ENGINE_ARGS
    config = apply_overrides(config, arglist)

    # DLA build happens only when this system's config declares a dla_batch_size.
    if dla and "dla_batch_size" in config:
        config["batch_size"] = config["dla_batch_size"]
        logging.info("Building DLA engine for {:}_{:}_{:}".format(
            config["system_id"], benchmark_name, config["scenario"]))
        b = get_benchmark(config)

        if copy_from_default:
            copy_default_engine(b)
        else:
            b.build_engines()

    if gpu and "gpu_batch_size" in config:
        config["batch_size"] = config["gpu_batch_size"]
        config["dla_core"] = None  # build for the GPU, not a DLA core
        logging.info("Building GPU engine for {:}_{:}_{:}".format(
            config["system_id"], benchmark_name, config["scenario"]))
        b = get_benchmark(config)

        if copy_from_default:
            copy_default_engine(b)
        else:
            b.build_engines()

    end_time = time.time()

    logging.info("Finished building engines for {:} benchmark in {:} scenario.".format(
        benchmark_name, config["scenario"]))

    print("Time taken to generate engines: {:} seconds".format(end_time - start_time))


def handle_audit_verification(audit_test_name, config):
    """Run the verification step for a compliance (audit) test that was
    previously executed, returning the verifier's result (or None).

    For TEST01 a 'TEST01 FALLBACK' result triggers the accuracy-baseline
    fallback flow: regenerate a baseline accuracy file, compare baseline vs
    compliance accuracy within the tolerance implied by the benchmark's
    accuracy level, and lay the logs out in the submission tree.
    """
    # Decouples the verification step from any auditing runs for better maintenance and testing
    logging.info('AUDIT HARNESS: Running verification script...')

    # Prepare log_dir
    config['log_dir'] = os.path.join('build/compliance_logs', audit_test_name)

    # Get a harness object
    harness, config = _generate_harness_object(config=config, profile=None)

    result = None
    if audit_test_name == 'TEST01':
        result = auditing.verify_test01(harness)
        if result == 'TEST01 FALLBACK':  # Signals a fallback for failed test
            # Process description:
            # 1. Generate baseline_accuracy file
            # 2. Calculate the accuracy of baseline, using the benchmark's accuracy script
            # 3. Use same script to calculate accuracy of compliance run
            # 4. Depending on accuracy level, declare success if two values are within defined tolerance.
            logging.info('main.py notified for fallback handling on TEST01')

            # Run compliance script to generate baseline file
            full_log_dir = harness.get_full_log_dir()
            results_path = os.path.join('results', harness.get_system_name(),
                                        harness._get_submission_benchmark_name(),
                                        harness.scenario)
            harness_accuracy_log = os.path.join(results_path, 'accuracy/mlperf_log_accuracy.json')
            compliance_accuracy_log = os.path.join(full_log_dir, 'mlperf_log_accuracy.json')
            fallback_command = 'bash build/inference/compliance/nvidia/TEST01/create_accuracy_baseline.sh {} {}'.format(
                harness_accuracy_log,
                compliance_accuracy_log
            )
            # generates new file called mlperf_log_accuracy_baseline.json
            run_command(fallback_command, get_output=True)

            def move_file(src, dst):
                # Log-and-move helper so every relocation is traceable in the audit log.
                logging.info('Moving file: {} --> {}'.format(src, dst))
                shutil.move(src, dst)

            def copy_file(src, dst):
                # Log-and-copy helper, mirroring move_file.
                logging.info('Copying file: {} --> {}'.format(src, dst))
                shutil.copy(src, dst)

            # Create accuracy and performance directories
            accuracy_dir = os.path.join(full_log_dir, 'TEST01', 'accuracy')
            performance_dir = os.path.join(full_log_dir, 'TEST01', 'performance', 'run_1')
            os.makedirs(accuracy_dir, exist_ok=True)
            os.makedirs(performance_dir, exist_ok=True)

            # Get the accuracy of baseline file
            # (the baseline json is written into the CWD by the fallback script above)
            fallback_result_baseline = check_accuracy('mlperf_log_accuracy_baseline.json', config, is_compliance=True)
            # Move it to the submission dir
            dest_path = os.path.join(accuracy_dir, 'baseline_accuracy.txt')
            move_file('accuracy.txt', dest_path)

            # Get the accuracy of compliance file
            fallback_result_compliance = check_accuracy('{}/mlperf_log_accuracy.json'.format(full_log_dir), config, is_compliance=True)
            # Move it to the submission dir - check_accuracy stores accuracy.txt in the directory
            # name provided in its first argument. So this file will already be located inside get_full_log_dir()
            src_path = os.path.join(full_log_dir, 'accuracy.txt')
            dest_path = os.path.join(accuracy_dir, 'compliance_accuracy.txt')
            move_file(src_path, dest_path)

            # Move the required logs to their correct locations since run_verification.py has failed.
            move_file('verify_accuracy.txt', os.path.join(full_log_dir, 'TEST01', 'verify_accuracy.txt'))
            copy_file(os.path.join(full_log_dir, 'mlperf_log_accuracy.json'),
                      os.path.join(accuracy_dir, 'mlperf_log_accuracy.json'))
            copy_file(os.path.join(full_log_dir, 'mlperf_log_detail.txt'),
                      os.path.join(performance_dir, 'mlperf_log_detail.txt'))
            copy_file(os.path.join(full_log_dir, 'mlperf_log_summary.txt'),
                      os.path.join(performance_dir, 'mlperf_log_summary.txt'))

            # Need to run verify_performance.py script to get verify_performance.txt file.
            verify_performance_command = ("python3 build/inference/compliance/nvidia/TEST01/verify_performance.py -r "
                                          + results_path + "/performance/run_1/mlperf_log_summary.txt" + " -t "
                                          + performance_dir + "/mlperf_log_summary.txt | tee " + full_log_dir
                                          + "/TEST01/verify_performance.txt")
            run_command(verify_performance_command, get_output=True)

            # Check level of accuracy - this test's tolerance depends on it
            accuracy_level = config["accuracy_level"][:-1]  # strip the trailing '%'
            if accuracy_level == '99.9':
                logging.info('High Accuracy benchmark detected. Tolerance set to 0.1%')
                if not math.isclose(fallback_result_baseline, fallback_result_compliance, rel_tol=0.001):
                    raise ValueError('TEST01 + Fallback failure: BASELINE ACCURACY: {}, COMPLIANCE_ACCURACY: {}'.format(
                        fallback_result_baseline, fallback_result_compliance))
                else:
                    logging.info('AUDIT HARNESS: Success: TEST01 failure redeemed via fallback approach.')
                    print('TEST PASS')
            elif accuracy_level == '99':
                logging.info('Low Accuracy benchmark detected. Tolerance set to 1%')
                if not math.isclose(fallback_result_baseline, fallback_result_compliance, rel_tol=0.01):
                    raise ValueError('TEST01 + Fallback failure: BASELINE ACCURACY: {}, COMPLIANCE_ACCURACY: {}'.format(
                        fallback_result_baseline, fallback_result_compliance))
                else:
                    logging.info('AUDIT HARNESS: Success: TEST01 failure redeemed via fallback approach.')
                    print('TEST PASS')
            else:
                raise ValueError('Accuracy level not supported: {}'.format(accuracy_level))
    elif audit_test_name == 'TEST04-A' or audit_test_name == 'TEST04-B':
        exclude_list = [BENCHMARKS.BERT, BENCHMARKS.DLRM, BENCHMARKS.RNNT]
        if BENCHMARKS.alias(config['benchmark']) in exclude_list:
            logging.info('TEST04 is not supported for benchmark {}. Ignoring request...'.format(config['benchmark']))
            return None
        result = auditing.verify_test04(harness)
    elif audit_test_name == 'TEST05':
        result = auditing.verify_test05(harness)
    return result


def _generate_harness_object(config, profile):
    """Build and return ``(harness, config)`` for the configured benchmark,
    choosing Triton, a benchmark-specific custom harness, or LWIS, and
    optionally wrapping it in the internal ProfilerHarness."""
    # Refactors harness generation for use by functions other than handle_run_harness
    benchmark_name = config['benchmark']
    if config.get("use_triton"):
        from code.common.server_harness import TritonHarness
        harness = TritonHarness(config, name=benchmark_name)
        config["inference_server"] = "triton"
    elif benchmark_name == BENCHMARKS.BERT:
        from code.bert.tensorrt.harness import BertHarness
        harness = BertHarness(config, name=benchmark_name)
        config["inference_server"] = "custom"
    elif benchmark_name == BENCHMARKS.DLRM:
        from code.dlrm.tensorrt.harness import DLRMHarness
        harness = DLRMHarness(config, name=benchmark_name)
        config["inference_server"] = "custom"
    elif benchmark_name == BENCHMARKS.RNNT:
        from code.rnnt.tensorrt.harness import RNNTHarness
        harness = RNNTHarness(config, name=benchmark_name)
        config["inference_server"] = "custom"
    else:
        from code.common.lwis_harness import LWISHarness
        harness = LWISHarness(config, name=benchmark_name)

    # Attempt to run profiler. Note that this is only available internally.
    if profile is not None:
        try:
            from code.internal.profiler import ProfilerHarness
            harness = ProfilerHarness(harness, profile)
        except BaseException:
            logging.info("Could not load profiler: Are you an internal user?")

    return harness, config


def handle_run_harness(config, gpu=True, dla=True, profile=None, power=False,
                       generate_conf_files_only=False, compliance=False):
    """Run the inference harness for one benchmark/scenario, optionally with
    power measurement, then append the perf result (and, for non-compliance
    runs, the accuracy result) to the JSON summary logs in ``log_dir``.

    Raises RuntimeError if the harness itself fails.
    """
    benchmark_name = config["benchmark"]

    logging.info("Running harness for {:} benchmark in {:} scenario...".format(
        benchmark_name, config["scenario"]))

    arglist = common_args.getScenarioBasedHarnessArgs(config["scenario"], benchmark_name)

    config = apply_overrides(config, arglist)

    # Validate arguments
    if not dla:
        config["dla_batch_size"] = None
    if not gpu:
        config["gpu_batch_size"] = None

    # If we only want to generate conf_files, then set flag to true
    if generate_conf_files_only:
        config["generate_conf_files_only"] = True
        profile = None
        power = False

    harness, config = _generate_harness_object(config, profile)

    if power:
        try:
            # Internal-only module; silently fall back to no power measurement.
            from code.internal.power_measurements import PowerMeasurements
            power_measurements = PowerMeasurements("{}/{}/{}".format(
                os.getcwd(),
                "power_measurements",
                config.get("config_name"))
            )
            power_measurements.start()
        except BaseException:
            power_measurements = None

    for key, value in config.items():
        print("{} : {}".format(key, value))

    result = ""

    if compliance:
        # AP: We need to keep the compliance logs separated from accuracy and perf
        # otherwise it messes up the update_results process
        config['log_dir'] = os.path.join('build/compliance_logs', config['audit_test_name'])
        logging.info('AUDIT HARNESS: Overriding log_dir for compliance run. Set to ' + config['log_dir'])

    # Launch the harness
    passed = True
    try:
        result = harness.run_harness()
        logging.info("Result: {:}".format(result))
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        passed = False
    finally:
        if power and power_measurements is not None:
            power_measurements.stop()
    if not passed:
        raise RuntimeError("Run harness failed!")

    if generate_conf_files_only and result == "Generated conf files":
        return

    # Append result to perf result summary log.
    log_dir = config["log_dir"]
    summary_file = os.path.join(log_dir, "perf_harness_summary.json")
    results = {}
    if os.path.exists(summary_file):
        with open(summary_file) as f:
            results = json.load(f)

    config_name = "{:}-{:}-{:}".format(harness.get_system_name(), config["config_ver"], config["scenario"])
    if config_name not in results:
        results[config_name] = {}
    results[config_name][benchmark_name] = result

    with open(summary_file, "w") as f:
        json.dump(results, f)

    # Check accuracy from loadgen logs.
    if not compliance:
        # TEST01 fails the accuracy test because it produces fewer predictions than expected
        accuracy = check_accuracy(os.path.join(harness.get_full_log_dir(), "mlperf_log_accuracy.json"), config)
        summary_file = os.path.join(log_dir, "accuracy_summary.json")
        results = {}
        if os.path.exists(summary_file):
            with open(summary_file) as f:
                results = json.load(f)

        if config_name not in results:
            results[config_name] = {}
        results[config_name][benchmark_name] = accuracy

        with open(summary_file, "w") as f:
            json.dump(results, f)


def check_accuracy(log_file, config, is_compliance=False):
    """Run the per-benchmark reference accuracy script on a loadgen accuracy
    log, write its output to ``accuracy.txt`` next to the log, and compare the
    parsed accuracy against the benchmark's target scaled by accuracy_level.

    Returns the float accuracy when ``is_compliance`` (for numerical
    comparison by the caller), otherwise a human-readable pass/fail string.
    Raises RuntimeError on a failed (non-compliance) accuracy check.
    """
    benchmark_name = config["benchmark"]

    accuracy_targets = {
        BENCHMARKS.ResNet50: 76.46,
        BENCHMARKS.SSDResNet34: 20.0,
        BENCHMARKS.SSDMobileNet: 22.0,
        BENCHMARKS.BERT: 90.874,
        BENCHMARKS.DLRM: 80.25,
        BENCHMARKS.RNNT: 100.0 - 7.45225,
        BENCHMARKS.UNET: 0.853,
    }
    threshold_ratio = float(config["accuracy_level"][:-1]) / 100

    if not os.path.exists(log_file):
        return "Cannot find accuracy JSON file."

    # checking if log_file is empty by just reading first several bytes
    # indeed, first 4B~6B is likely all we need to check: '', '[]', '[]\r', '[\n]\n', '[\r\n]\r\n', ...
    # but checking 8B for safety
    with open(log_file, 'r') as lf:
        first_8B = lf.read(8)
        if not first_8B or ('[' in first_8B and ']' in first_8B):
            return "No accuracy results in PerformanceOnly mode."

    dtype_expand_map = {"fp16": "float16", "fp32": "float32", "int8": "float16"}  # Use FP16 output for INT8 mode

    # Regex patterns for parsing each benchmark's accuracy metric come from the
    # official submission checker.
    accuracy_regex_map = import_module("build.inference.tools.submission.submission-checker").ACC_PATTERN

    threshold = accuracy_targets[benchmark_name] * threshold_ratio

    if benchmark_name in [BENCHMARKS.ResNet50]:
        cmd = "python3 build/inference/vision/classification_and_detection/tools/accuracy-imagenet.py --mlperf-accuracy-file {:} \
            --imagenet-val-file data_maps/imagenet/val_map.txt --dtype int32 ".format(log_file)
        regex = accuracy_regex_map["acc"]
    elif benchmark_name == BENCHMARKS.SSDResNet34:
        cmd = "python3 build/inference/vision/classification_and_detection/tools/accuracy-coco.py --mlperf-accuracy-file {:} \
            --coco-dir {:} --output-file build/ssd-resnet34-results.json --use-inv-map".format(
            log_file, os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "coco"))
        regex = accuracy_regex_map["mAP"]
    elif benchmark_name == BENCHMARKS.SSDMobileNet:
        cmd = "python3 build/inference/vision/classification_and_detection/tools/accuracy-coco.py --mlperf-accuracy-file {:} \
            --coco-dir {:} --output-file build/ssd-mobilenet-results.json".format(
            log_file, os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "coco"))
        regex = accuracy_regex_map["mAP"]
    elif benchmark_name == BENCHMARKS.BERT:
        # Having issue installing tokenizers on Xavier...
        if is_xavier():
            cmd = "python3 code/bert/tensorrt/accuracy-bert.py --mlperf-accuracy-file {:} --squad-val-file {:}".format(
                log_file, os.path.join(os.environ.get("DATA_DIR", "build/data"), "squad", "dev-v1.1.json"))
        else:
            dtype = config["precision"].lower()
            if dtype in dtype_expand_map:
                dtype = dtype_expand_map[dtype]
            val_data_path = os.path.join(
                os.environ.get("DATA_DIR", "build/data"), "squad", "dev-v1.1.json")
            vocab_file_path = "build/models/bert/vocab.txt"
            output_prediction_path = os.path.join(os.path.dirname(log_file), "predictions.json")
            cmd = "python3 build/inference/language/bert/accuracy-squad.py " \
                "--log_file {:} --vocab_file {:} --val_data {:} --out_file {:} " \
                "--output_dtype {:}".format(log_file, vocab_file_path, val_data_path,
                                            output_prediction_path, dtype)
        regex = accuracy_regex_map["F1"]
    elif benchmark_name == BENCHMARKS.DLRM:
        cmd = "python3 build/inference/recommendation/dlrm/pytorch/tools/accuracy-dlrm.py --mlperf-accuracy-file {:} " \
            "--day-23-file build/data/criteo/day_23 --aggregation-trace-file " \
            "build/preprocessed_data/criteo/full_recalib/sample_partition_trace.txt".format(log_file)
        regex = accuracy_regex_map["AUC"]
    elif benchmark_name == BENCHMARKS.RNNT:
        # Having issue installing librosa on Xavier...
        if is_xavier():
            cmd = "python3 code/rnnt/tensorrt/accuracy.py --loadgen_log {:}".format(log_file)
        else:
            # RNNT output indices are in INT8
            cmd = "python3 build/inference/speech_recognition/rnnt/accuracy_eval.py " \
                "--log_dir {:} --dataset_dir build/preprocessed_data/LibriSpeech/dev-clean-wav " \
                "--manifest build/preprocessed_data/LibriSpeech/dev-clean-wav.json " \
                "--output_dtype int8".format(os.path.dirname(log_file))
        regex = accuracy_regex_map["WER"]
    elif benchmark_name == BENCHMARKS.UNET:
        postprocess_dir = "build/brats_postprocessed_data"
        if not os.path.exists(postprocess_dir):
            os.makedirs(postprocess_dir)
        dtype = config["precision"].lower()
        if dtype in dtype_expand_map:
            dtype = dtype_expand_map[dtype]
        cmd = "python3 build/inference/vision/medical_imaging/3d-unet/accuracy-brats.py --log_file {:} " \
            "--output_dtype {:} --preprocessed_data_dir build/preprocessed_data/brats/brats_reference_preprocessed " \
            "--postprocessed_data_dir {:} " \
            "--label_data_dir build/preprocessed_data/brats/brats_reference_raw/Task043_BraTS2019/labelsTr".format(
                log_file, dtype, postprocess_dir)
        regex = accuracy_regex_map["DICE"]
        # Having issue installing nnUnet on Xavier...
        if is_xavier():
            logging.warning(
                "Accuracy checking for 3DUnet is not supported on Xavier. Please run the following command on desktop:\n{:}".format(cmd))
            cmd = 'echo "Accuracy: mean = 1.0000, whole tumor = 1.0000, tumor core = 1.0000, enhancing tumor = 1.0000"'
    else:
        raise ValueError("Unknown benchmark: {:}".format(benchmark_name))

    output = run_command(cmd, get_output=True)

    result_regex = re.compile(regex)
    accuracy = None
    # Persist the raw accuracy-script output next to the loadgen log.
    with open(os.path.join(os.path.dirname(log_file), "accuracy.txt"), "w") as f:
        for line in output:
            print(line, file=f)
    for line in output:
        result_match = result_regex.match(line)
        if not result_match is None:
            accuracy = float(result_match.group(1))
            break

    accuracy_result = "PASSED" if accuracy is not None and accuracy >= threshold else "FAILED"

    if accuracy_result == "FAILED" and not is_compliance:
        raise RuntimeError(
            "Accuracy = {:.3f}, Threshold = {:.3f}. Accuracy test {:}!".format(
                accuracy, threshold, accuracy_result))

    if is_compliance:
        return accuracy  # Needed for numerical comparison

    return "Accuracy = {:.3f}, Threshold = {:.3f}. Accuracy test {:}.".format(
        accuracy, threshold, accuracy_result)


def handle_calibrate(config):
    """Force-run INT8 calibration for the configured benchmark on the GPU
    (dla_core cleared) to regenerate its calibration cache."""
    benchmark_name = config["benchmark"]

    logging.info("Generating calibration cache for Benchmark \"{:}\"".format(benchmark_name))
    config = apply_overrides(config, common_args.CALIBRATION_ARGS)
    config["dla_core"] = None
    config["force_calibration"] = True
    b = get_benchmark(config)
    b.calibrate()


def main(main_args, system_id):
    """Top-level dispatcher: resolve benchmarks/scenarios/config files for
    ``system_id``, then for every (config, config_ver) pair perform the action
    requested by ``main_args["action"]`` (generate_engines, run_harness,
    run_audit_harness, run_audit_verification, calibrate, or
    generate_conf_files)."""
    # Turn off MPS in case it's turned on.
    turn_off_mps()

    benchmarks = BENCHMARKS.ALL
    if main_args["benchmarks"] is not None:
        benchmarks = main_args["benchmarks"].split(",")
        for i, benchmark in enumerate(benchmarks):
            benchmarks[i] = BENCHMARKS.alias(benchmark)
    scenarios = SCENARIOS.ALL
    if main_args["scenarios"] is not None:
        scenarios = main_args["scenarios"].split(",")
        for i, scenario in enumerate(scenarios):
            scenarios[i] = SCENARIOS.alias(scenario)

    profile = main_args.get("profile", None)
    power = main_args.get("power", False)

    # Automatically detect architecture and scenarios and load configs
    config_files = main_args["configs"]
    if config_files == "" or config_files is None:
        config_files = find_config_files(benchmarks, scenarios)
        if config_files == "":
            logging.warn("Cannot find any valid configs for the specified benchmark-scenario pairs.")
            return

    logging.info("Using config files: {:}".format(str(config_files)))
    configs = load_configs(config_files)

    for config in configs:
        base_benchmark_conf = flatten_config(config, system_id)
        if base_benchmark_conf is None:
            continue

        base_benchmark_conf["config_name"] = "{:}_{:}_{:}".format(
            system_id,
            base_benchmark_conf["benchmark"],
            base_benchmark_conf["scenario"]
        )
        logging.info("Processing config \"{:}\"".format(base_benchmark_conf["config_name"]))

        # Load config_ver / apply overrides
        conf_vers = main_args.get("config_ver", "default").split(",")

        # Build default first. This is because some config_vers only modify harness args, and the engine is the same as
        # default. In this case, we build default first, and copy it instead of rebuilding it.
        if "default" in conf_vers:
            conf_vers = ["default"] + list(set(conf_vers) - {"default"})
        elif "all" in conf_vers:
            conf_vers = ["default"] + list(base_benchmark_conf.get("config_ver", {}).keys())

        for conf_ver in conf_vers:
            benchmark_conf = dict(base_benchmark_conf)  # Copy the config so we don't modify it

            # These fields are canonical names that refer to certain config versions
            benchmark_conf["accuracy_level"] = "99%"
            benchmark_conf["optimization_level"] = "plugin-enabled"
            benchmark_conf["inference_server"] = "lwis"

            """@etcheng
            NOTE: The original plan was to use a syntax like high_accuracy+triton to be able to combine already defined
            config_vers. However, since high_accuracy, triton, and high_accuracy+triton are likely to all have different
            expected QPS values, it makes more sense to keep high_accuracy_triton as a separate, individual config_ver.

            In the future, perhaps we can make an "extends": [ list of strings ] or { dict of config_ver name ->
            config_key } field in config_vers, so that we can define new config_vers that extend or combine previous
            config_vers.
            """

            equiv_to_default = False

            if conf_ver != "default":
                if "config_ver" not in benchmark_conf or conf_ver not in benchmark_conf["config_ver"]:
                    logging.warn(
                        "--config_ver={:} does not exist in config file '{:}'".format(
                            conf_ver, benchmark_conf["config_name"]))
                    continue
                else:
                    if "high_accuracy" in conf_ver:
                        benchmark_conf["accuracy_level"] = "99.9%"
                    if "ootb" in conf_ver:
                        benchmark_conf["optimization_level"] = "ootb"
                    # "inference_server" is set when we run the harness

                    overrides = benchmark_conf["config_ver"][conf_ver]

                    # Check if this config_ver is equivalent to the default engine
                    gen_eng_argset = set(common_args.GENERATE_ENGINE_ARGS)
                    override_argset = set(overrides.keys())
                    equiv_to_default = (len(gen_eng_argset & override_argset) == 0)

                    benchmark_conf.update(overrides)

            # Update the config_ver key to be the actual string name, not the overrides
            benchmark_conf["config_ver"] = conf_ver

            need_gpu = not main_args["no_gpu"]
            need_dla = not main_args["gpu_only"]

            # Override the system_name if it exists
            if "system_name" in main_args:
                benchmark_conf["system_name"] = main_args["system_name"]

            if main_args["action"] == "generate_engines":
                # Turn on MPS if server scenario and if active_sms is specified.
                benchmark_conf = apply_overrides(benchmark_conf, ["active_sms"])
                active_sms = benchmark_conf.get("active_sms", None)

                copy_from_default = ("default" in conf_vers) and equiv_to_default
                if copy_from_default:
                    logging.info(
                        "config_ver={:} only modifies harness args. Re-using default engine.".format(conf_ver))

                _gen_args = [benchmark_conf]
                _gen_kwargs = {
                    "gpu": need_gpu,
                    "dla": need_dla,
                    "copy_from_default": copy_from_default
                }

                if not main_args["no_child_process"]:
                    if config["scenario"] == SCENARIOS.Server and active_sms is not None and active_sms < 100:
                        with ScopedMPS(active_sms):
                            launch_handle_generate_engine(*_gen_args, **_gen_kwargs)
                    else:
                        launch_handle_generate_engine(*_gen_args, **_gen_kwargs)
                else:
                    handle_generate_engine(*_gen_args, **_gen_kwargs)
            elif main_args["action"] == "run_harness":
                # In case there's a leftover audit.config file from a prior compliance run or other reason
                # we need to delete it or we risk silent failure.
                auditing.cleanup()

                handle_run_harness(benchmark_conf, need_gpu, need_dla, profile, power)
            elif main_args["action"] == "run_audit_harness":
                logging.info('\n\n\nRunning compliance harness for test ' + main_args['audit_test'] + '\n\n\n')

                # Find the correct audit.config file and move it in current directory
                dest_config = auditing.load(main_args['audit_test'], benchmark_conf['benchmark'])

                # Make sure the log_file override is valid
                os.makedirs("build/compliance_logs", exist_ok=True)

                # Pass audit test name to handle_run_harness via benchmark_conf
                benchmark_conf['audit_test_name'] = main_args['audit_test']

                # Run harness
                handle_run_harness(benchmark_conf, need_gpu, need_dla, profile, power, compliance=True)

                # Cleanup audit.config
                logging.info("AUDIT HARNESS: Cleaning Up audit.config...")
                auditing.cleanup()
            elif main_args["action"] == "run_audit_verification":
                logging.info("Running compliance verification for test " + main_args['audit_test'])
                handle_audit_verification(audit_test_name=main_args['audit_test'], config=benchmark_conf)
                auditing.cleanup()
            elif main_args["action"] == "calibrate":
                # To generate calibration cache, we only need to run each benchmark once.
                # Use offline config.
                if benchmark_conf["scenario"] == SCENARIOS.Offline:
                    handle_calibrate(benchmark_conf)
            elif main_args["action"] == "generate_conf_files":
                handle_run_harness(benchmark_conf, need_gpu, need_dla, generate_conf_files_only=True)


if __name__ == "__main__":
    # Spawn (not fork) so child processes start with a clean CUDA state.
    mp.set_start_method("spawn")

    # Check any invalid/misspelling flags.
    common_args.check_args()
    main_args = common_args.parse_args(common_args.MAIN_ARGS)

    # Load System ID
    system_id = get_system_id()
    logging.info("Detected System ID: " + system_id)

    main(main_args, system_id)
test_dialect_detection.py
# -*- coding: utf-8 -*-

"""
Integration tests for dialect detection.

Runs the CleverCSV detector over a corpus of gzipped CSV files with known
(annotated) dialects, with a per-file timeout, and appends each outcome to
success/failure/error/method/runtime log files.

Author: G.J.J. van den Burg

"""

import argparse
import chardet
import clevercsv
import gzip
import json
import multiprocessing
import os
import termcolor
import time
import warnings

THIS_DIR = os.path.abspath(os.path.dirname(__file__))
SOURCE_DIR = os.path.join(THIS_DIR, "data")
TEST_FILES = os.path.join(SOURCE_DIR, "files")
TEST_DIALECTS = os.path.join(SOURCE_DIR, "dialects")

# Full-file run logs.
LOG_SUCCESS = os.path.join(THIS_DIR, "success.log")
LOG_ERROR = os.path.join(THIS_DIR, "error.log")
LOG_FAILED = os.path.join(THIS_DIR, "failed.log")
LOG_METHOD = os.path.join(THIS_DIR, "method.log")
LOG_RUNTIME = os.path.join(THIS_DIR, "runtime.log")

# Partial-data (first N_BYTES_PARTIAL chars) run logs.
LOG_SUCCESS_PARTIAL = os.path.join(THIS_DIR, "success_partial.log")
LOG_ERROR_PARTIAL = os.path.join(THIS_DIR, "error_partial.log")
LOG_FAILED_PARTIAL = os.path.join(THIS_DIR, "failed_partial.log")
LOG_METHOD_PARTIAL = os.path.join(THIS_DIR, "method_partial.log")
LOG_RUNTIME_PARTIAL = os.path.join(THIS_DIR, "runtime_partial.log")

TIMEOUT = 5 * 60  # seconds allowed per file before the worker is terminated
N_BYTES_PARTIAL = 10000  # characters read in --partial mode


def log_result(name, kind, verbose, partial):
    """Append ``name`` to the log for outcome ``kind`` ('error'/'success'/
    'failure'), choosing the partial-run log when ``partial``; optionally
    echo the name in the outcome's color."""
    table = {
        "error": (LOG_ERROR, LOG_ERROR_PARTIAL, "yellow"),
        "success": (LOG_SUCCESS, LOG_SUCCESS_PARTIAL, "green"),
        "failure": (LOG_FAILED, LOG_FAILED_PARTIAL, "red"),
    }
    outfull, outpartial, color = table.get(kind)
    fname = outpartial if partial else outfull
    with open(fname, "a") as fp:
        fp.write(name + "\n")
    if verbose:
        termcolor.cprint(name, color=color)


def log_method(name, method, partial):
    """Record which detection method the detector used for this file."""
    fname = LOG_METHOD_PARTIAL if partial else LOG_METHOD
    with open(fname, "a") as fp:
        fp.write(f"{name},{method}\n")


def log_runtime(name, runtime, partial):
    """Record the detection runtime (seconds) for this file."""
    fname = LOG_RUNTIME_PARTIAL if partial else LOG_RUNTIME
    with open(fname, "a") as fp:
        fp.write(f"{name},{runtime}\n")


def worker(args, return_dict, **kwargs):
    """Child-process entry point: run dialect detection on one gzipped file
    and report dialect/method/runtime/error through the shared ``return_dict``.

    ``args`` is a (filename, encoding, partial) tuple. All keys are
    pre-populated so the parent never hits a missing key after a clean run.
    """
    det = clevercsv.Detector()
    filename, encoding, partial = args
    return_dict["error"] = False
    return_dict["dialect"] = None
    return_dict["method"] = None
    return_dict["runtime"] = float("nan")
    with gzip.open(filename, "rt", newline="", encoding=encoding) as fp:
        data = fp.read(N_BYTES_PARTIAL) if partial else fp.read()
    try:
        t = time.time()
        return_dict["dialect"] = det.detect(data, **kwargs)
        return_dict["runtime"] = time.time() - t
        return_dict["method"] = det.method_
    except clevercsv.Error:
        # Detector failure is reported as an 'error' outcome, not a crash.
        return_dict["error"] = True


def run_with_timeout(args, kwargs, limit):
    """Run worker() in a separate process with a ``limit``-second timeout.

    Returns (dialect, error, method, runtime); a timeout is reported as
    (None, True, None, nan) after terminating the child.
    """
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    p = multiprocessing.Process(
        target=worker, args=(args, return_dict), kwargs=kwargs
    )
    p.start()
    p.join(limit)
    if p.is_alive():
        p.terminate()
        return None, True, None, float('nan')
    return (
        return_dict["dialect"],
        return_dict["error"],
        return_dict["method"],
        return_dict["runtime"],
    )


def run_test(name, gz_filename, annotation, verbose=1, partial=False):
    """Detect the dialect of one annotated file and log the outcome.

    The file's encoding comes from the annotation when present, otherwise
    from chardet. Success requires delimiter, quotechar and escapechar to all
    match the annotated dialect.
    """
    if "encoding" in annotation:
        enc = annotation["encoding"]
    else:
        with gzip.open(gz_filename, "rb") as fid:
            enc = chardet.detect(fid.read())["encoding"]

    true_dialect = annotation["dialect"]
    dialect, error, method, runtime = run_with_timeout(
        (gz_filename, enc, partial), {"verbose": verbose > 1}, TIMEOUT
    )

    if error:
        return log_result(name, "error", verbose, partial)

    if dialect is None:
        log_result(name, "failure", verbose, partial)
    elif dialect.delimiter != true_dialect["delimiter"]:
        log_result(name, "failure", verbose, partial)
    elif dialect.quotechar != true_dialect["quotechar"]:
        log_result(name, "failure", verbose, partial)
    elif dialect.escapechar != true_dialect["escapechar"]:
        log_result(name, "failure", verbose, partial)
    else:
        log_result(name, "success", verbose, partial)
    # NOTE(review): reconstructed indentation — method/runtime appear to be
    # logged for every completed (non-error) run; confirm against upstream.
    log_method(name, method, partial)
    log_runtime(name, runtime, partial)


def load_test_cases():
    """Collect (name, filename, annotation) triples for every test file that
    has a matching, non-skipped dialect annotation."""
    cases = []
    for f in sorted(os.listdir(TEST_FILES)):
        base = f[: -len(".csv.gz")]
        dialect_file = os.path.join(TEST_DIALECTS, base + ".json")
        if not os.path.exists(dialect_file):
            continue
        filename = os.path.join(TEST_FILES, f)
        with open(dialect_file, "r") as fid:
            annotation = json.load(fid)
        if not annotation["filename"] == f[: -len(".gz")]:
            warnings.warn(
                "filename doesn't match! Input file: %s\nDialect file: %s"
                % (filename, dialect_file)
            )
            continue
        if annotation["status"] == "skip":
            continue
        cases.append((base, filename, annotation))

    return cases


def clear_output_files(partial):
    """Delete the log files of the selected (partial or full) run mode so a
    new run starts from empty logs."""
    files = {
        True: [
            LOG_SUCCESS_PARTIAL,
            LOG_FAILED_PARTIAL,
            LOG_ERROR_PARTIAL,
            LOG_METHOD_PARTIAL,
            LOG_RUNTIME_PARTIAL,
        ],
        False: [LOG_SUCCESS, LOG_FAILED, LOG_ERROR, LOG_METHOD, LOG_RUNTIME],
    }
    # map() is consumed via any() purely for the unlink side effect.
    delete = lambda f: os.unlink(f) if os.path.exists(f) else None
    any(map(delete, files[partial]))


def parse_args():
    """Parse the command-line flags (--partial, -v/--verbose)."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--partial",
        help="Run test with partial file data",
        action="store_true",
    )
    parser.add_argument("-v", "--verbose", help="Be verbose", action="count")
    return parser.parse_args()


def main():
    """Entry point: reset logs, then run detection over every test case."""
    args = parse_args()
    clear_output_files(args.partial)
    cases = load_test_cases()
    for name, gz_filename, annotation in cases:
        run_test(
            name,
            gz_filename,
            annotation,
            verbose=args.verbose,
            partial=args.partial,
        )


if __name__ == "__main__":
    main()
tieba_sync.py
#!/usr/bin/env python3 import sys import socket import threading from time import sleep from fetch import fetch_kw from tieba_transport import move from models import TiebaTopic from config import ( TIEBA_SYNC_ON, TIEBA_SYNC_KW, TIEBA_SYNC_INTERVAL, TIEBA_SYNC_DELAY, TIEBA_SYNC_PENDING_MAX, TIEBA_SYNC_OFFSET ) waiting = 0 def sync(): if not TIEBA_SYNC_ON: return # Update recent updated tieba topics, # which is necessary to avoid missing new replies recent_tieba_topics = ( TiebaTopic .select() .order_by(TiebaTopic.last_update_date.desc()) .paginate(1, TIEBA_SYNC_OFFSET) ) kz_updated = {} for recent_topic in recent_tieba_topics: if not kz_updated.get(recent_topic.kz): move(recent_topic.kz) kz_updated[recent_topic.kz] = True # Fetch new topics topics = fetch_kw(TIEBA_SYNC_KW, 1, 1)[:TIEBA_SYNC_OFFSET] topics.reverse() for topic in topics: if not topic['pin'] and not kz_updated.get(topic['kz']): move(topic['kz']) def force_sync(): global waiting if waiting <= TIEBA_SYNC_PENDING_MAX: waiting += 1 def start_sync(): def sync_thread(): global waiting while True: if waiting > 0: waiting -= 1 try: sync() except socket.timeout: pass sleep(TIEBA_SYNC_DELAY) else: sleep(1) def timer_thread(): global waiting while True: waiting += 1 sleep(TIEBA_SYNC_INTERVAL) threading.Thread(target=sync_thread, args=()).start() threading.Thread(target=timer_thread, args=()).start()
statusprocess.py
# BUG FIX: the original did `try: import thread / except ImportError: import
# threading`. On Python 2 `import thread` succeeds, so `threading` was never
# imported and `threading.Thread(...)` below raised NameError. The code only
# ever uses `threading`, which exists on both Python 2 and 3, so import it
# unconditionally.
import threading

import functools
import time

import sublime


class StatusProcess(object):
    """Animate a growing '...' progress indicator in the status bar.

    Spawns a background thread that, while ``listener.is_running`` is true,
    repeatedly calls ``listener.update_status(msg, progress)`` on the Sublime
    main thread (via ``sublime.set_timeout``) with a dot string that grows to
    ten characters and then resets.
    """

    def __init__(self, msg, listener):
        # msg -- static message text passed through to the listener.
        # listener -- object exposing `is_running` (bool) and
        #             `update_status(msg, progress)`.
        self.msg = msg
        self.listener = listener
        myThread = threading.Thread(target=self.run_thread)
        myThread.start()

    def run_thread(self):
        """Worker loop: update the indicator every 0.1s until stopped."""
        progress = ""
        while self.listener.is_running:
            # Reset the dots once the bar reaches ten characters.
            if len(progress) >= 10:
                progress = ""
            progress += "."
            # Marshal the UI update onto Sublime's main thread.
            sublime.set_timeout(
                functools.partial(self.listener.update_status, self.msg,
                                  progress), 0)
            time.sleep(0.1)
test_utils.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic helper functions useful in tests."""

import atexit
import datetime
import io
import os
import requests
import shutil
import six
import socket
import subprocess
import tempfile
import threading
import unittest

from config import local_config
from datastore import data_types
from datastore import ndb_init
from google_cloud_utils import pubsub
from system import environment
from system import process_handler

# Captured once at import so all testcases created in a run share a clock.
CURRENT_TIME = datetime.datetime.utcnow()
# Seconds to wait for a cloud emulator to report readiness.
EMULATOR_TIMEOUT = 20

# Per-process emulator instances.
_emulators = {}


def create_generic_testcase(created_days_ago=28):
  """Create a simple test case.

  created_days_ago backdates the timestamp so age-based logic can be tested.
  The entity is put() to the (emulated) datastore before being returned.
  """
  testcase = data_types.Testcase()

  # Add more values here as needed. Intended to be the bare minimum for what we
  # need to simulate a test case.
  testcase.absolute_path = '/a/b/c/test.html'
  testcase.crash_address = '0xdeadbeef'
  testcase.crash_revision = 1
  testcase.crash_state = 'crashy_function()'
  testcase.crash_stacktrace = testcase.crash_state
  testcase.crash_type = 'fake type'
  testcase.comments = 'Fuzzer: test'
  testcase.fuzzed_keys = 'abcd'
  testcase.minimized_keys = 'efgh'
  testcase.fuzzer_name = 'fuzzer1'
  testcase.open = True
  testcase.one_time_crasher_flag = False
  testcase.job_type = 'test_content_shell_drt'
  testcase.status = 'Processed'
  testcase.timestamp = CURRENT_TIME - datetime.timedelta(days=created_days_ago)
  testcase.project_name = 'project'
  testcase.platform = 'linux'
  testcase.put()

  return testcase


def entities_equal(entity_1, entity_2, check_key=True):
  """Return a bool on whether two input entities are the same."""
  if check_key:
    return entity_1.key == entity_2.key

  return entity_1.to_dict() == entity_2.to_dict()


def entity_exists(entity):
  """Return a bool on where the entity exists in datastore."""
  return entity.get_by_id(entity.key.id())


def adhoc(func):
  """Mark the testcase as an adhoc. Adhoc tests are NOT expected to run before
  merging and are NOT counted toward test coverage; they are used to test
  tricky situations.

  Another way to think about it is that, if there was no adhoc test, we would
  write a Python script (which is not checked in) to test what we want anyway...
  so, it's better to check in the script.

  For example, downloading a chrome revision (10GB) and unpacking it. It can be
  enabled using the env ADHOC=1."""
  return unittest.skipIf(not environment.get_value('ADHOC', False),
                         'Adhoc tests are not enabled.')(
                             func)


def integration(func):
  """Mark the testcase as integration because it depends on network resources
  and/or is slow. The integration tests should, at least, be run before
  merging and are counted toward test coverage.

  It can be enabled using the env INTEGRATION=1."""
  return unittest.skipIf(not environment.get_value('INTEGRATION', False),
                         'Integration tests are not enabled.')(
                             func)


def slow(func):
  """Slow tests which are skipped during presubmit."""
  # Note: default True — slow tests run unless SLOW_TESTS is explicitly unset.
  return unittest.skipIf(not environment.get_value('SLOW_TESTS', True),
                         'Skipping slow tests.')(
                             func)


def reproduce_tool(func):
  """Tests for the test case reproduction script."""
  return unittest.skipIf(
      not environment.get_value('REPRODUCE_TOOL_TESTS', False),
      'Skipping reproduce tool tests.')(
          func)


def android_device_required(func):
  """Skip Android-specific tests if we cannot run them."""
  reason = None
  if not environment.get_value('ANDROID_SERIAL'):
    reason = 'Android device tests require that ANDROID_SERIAL is set.'
  elif not environment.get_value('INTEGRATION'):
    reason = 'Integration tests are not enabled.'
  elif environment.platform() != 'LINUX':
    reason = 'Android device tests can only run on a Linux host.'

  return unittest.skipIf(reason is not None, reason)(func)


class EmulatorInstance(object):
  """Emulator instance.

  Wraps a running `gcloud beta emulators ...` subprocess together with its
  stdout reader thread and (optionally owned) data directory.
  """

  def __init__(self, proc, port, read_thread, data_dir):
    self._proc = proc
    self._port = port
    self._read_thread = read_thread
    # Directory to delete on cleanup; None when the caller owns the data dir.
    self._data_dir = data_dir

  def cleanup(self):
    """Stop and clean up the emulator."""
    process_handler.terminate_root_and_child_processes(self._proc.pid)
    self._read_thread.join()
    if self._data_dir:
      shutil.rmtree(self._data_dir, ignore_errors=True)

  def reset(self):
    """Reset emulator state."""
    # Both supported emulators expose a /reset endpoint on localhost.
    req = requests.post('http://localhost:{}/reset'.format(self._port))
    req.raise_for_status()


def _find_free_port():
  """Find a free port."""
  # NOTE(review): the port is released before the emulator binds it, so a
  # rare race with another process grabbing it is possible.
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  sock.bind(('localhost', 0))
  _, port = sock.getsockname()
  sock.close()

  return port


def wait_for_emulator_ready(proc,
                            emulator,
                            indicator,
                            timeout=EMULATOR_TIMEOUT,
                            output_lines=None):
  """Wait for emulator to be ready.

  proc -- emulator subprocess with stdout piped.
  indicator -- byte string that, when seen in stdout, signals readiness.
  output_lines -- optional list collecting every stdout line read.
  Returns the (daemon) reader thread, which keeps draining stdout.
  """

  def _read_thread(proc, ready_event):
    """Thread to continuously read from the process stdout."""
    ready = False
    while True:
      line = proc.stdout.readline()
      if not line:
        break

      if output_lines is not None:
        output_lines.append(line)

      if not ready and indicator in line:
        ready = True
        ready_event.set()

  # Wait for process to become ready.
  ready_event = threading.Event()
  thread = threading.Thread(target=_read_thread, args=(proc, ready_event))
  thread.daemon = True
  thread.start()
  if not ready_event.wait(timeout):
    raise RuntimeError(
        '{} emulator did not get ready in time.'.format(emulator))

  return thread


def start_cloud_emulator(emulator,
                         args=None,
                         data_dir=None,
                         store_on_disk=False):
  """Start a cloud emulator.

  emulator -- 'datastore' or 'pubsub'.
  data_dir -- if given, reused and NOT deleted on cleanup; otherwise a temp
  dir is created and owned by the returned EmulatorInstance.
  """
  ready_indicators = {
      'datastore': b'is now running',
      'pubsub': b'Server started',
  }
  store_on_disk_flag = ('--store-on-disk'
                        if store_on_disk else '--no-store-on-disk')
  default_flags = {
      'datastore': [store_on_disk_flag, '--consistency=1'],
      'pubsub': [],
  }
  if emulator not in ready_indicators:
    raise RuntimeError('Unsupported emulator')

  if data_dir:
    cleanup_dir = None
  else:
    temp_dir = tempfile.mkdtemp()
    data_dir = temp_dir
    cleanup_dir = temp_dir

  port = _find_free_port()

  command = [
      'gcloud', 'beta', 'emulators', emulator, 'start',
      '--data-dir=' + data_dir, '--host-port=localhost:' + str(port),
      '--project=' + local_config.GAEConfig().get('application_id')
  ]
  if args:
    command.extend(args)
  command.extend(default_flags[emulator])

  # Start emulator.
  proc = subprocess.Popen(
      command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  thread = wait_for_emulator_ready(proc, emulator, ready_indicators[emulator])

  # Set env vars.
  # `env-init` prints lines like "export KEY=VALUE"; split out KEY and VALUE.
  env_vars = subprocess.check_output([
      'gcloud', 'beta', 'emulators', emulator, 'env-init',
      '--data-dir=' + data_dir
  ])
  for line in env_vars.splitlines():
    key, value = line.split()[1].split(b'=')
    os.environ[key.strip().decode('utf-8')] = value.strip().decode('utf-8')

  return EmulatorInstance(proc, port, thread, cleanup_dir)


def create_pubsub_topic(client, project, name):
  """Create topic if it doesn't exist."""
  full_name = pubsub.topic_name(project, name)
  if client.get_topic(full_name):
    return

  client.create_topic(full_name)


def create_pubsub_subscription(client, project, topic, name):
  """Create subscription if it doesn't exist."""
  topic_name = pubsub.topic_name(project, topic)
  full_name = pubsub.subscription_name(project, name)
  if client.get_subscription(full_name):
    return

  client.create_subscription(full_name, topic_name)


def setup_pubsub(project):
  """Set up pubsub topics and subscriptions."""
  config = local_config.Config('pubsub.queues')
  client = pubsub.PubSubClient()
  queues = config.get('resources')

  for queue in queues:
    create_pubsub_topic(client, project, queue['name'])
    create_pubsub_subscription(client, project, queue['name'], queue['name'])


def with_cloud_emulators(*emulator_names):
  """Decorator for starting cloud emulators from a unittest.TestCase.

  Emulators are started once per process (cached in _emulators) and reset
  before each test via setUp.
  """

  def decorator(cls):
    """Decorator."""

    class Wrapped(cls):
      """Wrapped class."""

      @classmethod
      def setUpClass(cls):
        """Class setup."""
        for emulator_name in emulator_names:
          if emulator_name not in _emulators:
            _emulators[emulator_name] = start_cloud_emulator(emulator_name)
            atexit.register(_emulators[emulator_name].cleanup)

          if emulator_name == 'datastore':
            cls._context_generator = ndb_init.context()
            cls._context_generator.__enter__()

        super(Wrapped, cls).setUpClass()

      @classmethod
      def tearDownClass(cls):
        """Class teardown."""
        for emulator_name in emulator_names:
          if emulator_name == 'datastore':
            cls._context_generator.__exit__(None, None, None)

        super(Wrapped, cls).tearDownClass()

      def setUp(self):
        # Wipe emulator state between tests for isolation.
        for emulator in six.itervalues(_emulators):
          emulator.reset()

        super().setUp()

    # Preserve the wrapped class's identity for test discovery/reporting.
    Wrapped.__module__ = cls.__module__
    Wrapped.__name__ = cls.__name__
    return Wrapped

  return decorator


def set_up_pyfakefs(test_self, allow_root_user=True):
  """Helper to set up Pyfakefs."""
  # Resolve real paths before the fake fs takes over, then re-expose the
  # config directory inside the fake fs and restore the working directory.
  real_cwd = os.path.realpath(os.getcwd())
  config_dir = os.path.realpath(environment.get_config_directory())
  test_self.setUpPyfakefs(allow_root_user=allow_root_user)
  test_self.fs.add_real_directory(config_dir, lazy_read=False)
  os.chdir(real_cwd)


def supported_platforms(*platforms):
  """Decorator for enabling tests only on certain platforms."""

  def decorator(func):  # pylint: disable=unused-argument
    """Decorator."""
    return unittest.skipIf(environment.platform() not in platforms,
                           'Unsupported platform.')(
                               func)

  return decorator


MockStdout = io.StringIO  # pylint: disable=invalid-name
stop_gce_instances.py
#!/usr/bin/env python import argparse parser = argparse.ArgumentParser() parser.add_argument("--pat") parser.add_argument("--instances",nargs="+") args = parser.parse_args() from control4.cloud.cloud_interface import load_cloud_config,create_cloud from threading import Thread cloud_config = load_cloud_config(provider='gce') gce_config = cloud_config['gce'] cloud = create_cloud(cloud_config) if args.pat is not None: infos = cloud.list_instances_glob(args.pat) elif args.instances is not None: infos = cloud.list_instances() infos = [info for info in infos if cloud.instance_name(info) in args.instances] else: raise RuntimeError("specify either --pat or --instances") def stop(info): cloud.run_shell_command(info, "killall -s INT python") threads = [Thread(target=stop, args=(info,)) for info in infos] for thread in threads: thread.start() for thread in threads: thread.join()
minion.py
# -*- coding: utf-8 -*- ''' Routines to set up a minion ''' # Import python libs from __future__ import absolute_import from __future__ import print_function import copy import errno import fnmatch import hashlib import logging import multiprocessing import os import re import salt import signal import sys import threading import time import traceback import types from random import randint, shuffle from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin from stat import S_IMODE # Import third party libs try: import zmq HAS_ZMQ = True except ImportError: # Running in local, zmq not needed HAS_ZMQ = False HAS_RANGE = False try: import seco.range HAS_RANGE = True except ImportError: pass HAS_PSUTIL = False try: import psutil HAS_PSUTIL = True except ImportError: pass HAS_RESOURCE = False try: import resource HAS_RESOURCE = True except ImportError: pass try: import zmq.utils.monitor HAS_ZMQ_MONITOR = True except ImportError: HAS_ZMQ_MONITOR = False # pylint: enable=import-error # Import salt libs from salt.exceptions import ( AuthenticationError, CommandExecutionError, CommandNotFoundError, SaltInvocationError, SaltReqTimeoutError, SaltClientError, SaltSystemExit, SaltSyndicMasterError ) import salt.client import salt.crypt import salt.loader import salt.payload import salt.beacons import salt.utils import salt.utils.jid import salt.pillar import salt.utils.args import salt.utils.event import salt.utils.minion import salt.utils.schedule import salt.utils.error import salt.utils.zeromq import salt.defaults.exitcodes from salt.defaults import DEFAULT_TARGET_DELIM from salt.ext.six import string_types from salt.utils.debug import enable_sigusr1_handler from salt.utils.event import tagify import salt.syspaths log = logging.getLogger(__name__) # To set up a minion: # 1. Read in the configuration # 2. Generate the function mapping dict # 3. Authenticate with the master # 4. Store the AES key # 5. Connect to the publisher # 6. 
Handle publications


def resolve_dns(opts):
    '''
    Resolves the master_ip and master_uri options

    Returns a dict with 'master_ip' and 'master_uri'. Resolution is skipped
    (127.0.0.1 is used) when running with a local file_client and
    use_master_when_local is off. When retry_dns is set, failed lookups are
    retried forever; otherwise the lookup falls back to 127.0.0.1.
    Raises SaltSystemExit (code 42) for an unresolvable master address.
    '''
    ret = {}
    check_dns = True
    if (opts.get('file_client', 'remote') == 'local' and
            not opts.get('use_master_when_local', False)):
        check_dns = False

    if check_dns is True:
        # Because I import salt.log below I need to re-import salt.utils here
        import salt.utils
        try:
            ret['master_ip'] = \
                salt.utils.dns_check(opts['master'], True, opts['ipv6'])
        except SaltClientError:
            if opts['retry_dns']:
                # Retry indefinitely, logging (or printing, if the console
                # logger is not yet configured) between attempts.
                while True:
                    import salt.log
                    msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
                           'seconds').format(opts['master'], opts['retry_dns'])
                    if salt.log.is_console_configured():
                        log.error(msg)
                    else:
                        print('WARNING: {0}'.format(msg))
                    time.sleep(opts['retry_dns'])
                    try:
                        ret['master_ip'] = salt.utils.dns_check(
                            opts['master'], True, opts['ipv6']
                        )
                        break
                    except SaltClientError:
                        pass
            else:
                ret['master_ip'] = '127.0.0.1'
        except SaltSystemExit:
            err = 'Master address: {0} could not be resolved. Invalid or unresolveable address.'.format(
                opts.get('master', 'Unknown'))
            log.error(err)
            raise SaltSystemExit(code=42, msg=err)
    else:
        ret['master_ip'] = '127.0.0.1'

    # Warn when the freshly-resolved IP differs from a previously cached one.
    if 'master_ip' in ret and 'master_ip' in opts:
        if ret['master_ip'] != opts['master_ip']:
            log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
                                                                           ret['master_ip'])
            )
    ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
                                                   port=opts['master_port'])
    return ret


def get_proc_dir(cachedir, **kwargs):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.
    The following optional Keyword Arguments are handled:
    mode: which is anything os.makedir would accept as mode.
    uid: the uid to set, if not set, or it is None or -1 no changes are
    made. Same applies if the directory is already owned by this uid.
    Must be int. Works only on unix/unix like systems.
    gid: the gid to set, if not set, or it is None or -1 no changes are made.
Same applies if the directory is already owned by this gid. Must be int. Works only on unix/unix like systems. ''' fn_ = os.path.join(cachedir, 'proc') mode = kwargs.pop('mode', None) if mode is None: mode = {} else: mode = {'mode': mode} if not os.path.isdir(fn_): # proc_dir is not present, create it with mode settings os.makedirs(fn_, **mode) d_stat = os.stat(fn_) # if mode is not an empty dict then we have an explicit # dir mode. So lets check if mode needs to be changed. if mode: mode_part = S_IMODE(d_stat.st_mode) if mode_part != mode['mode']: os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode']) if hasattr(os, 'chown'): # only on unix/unix like systems uid = kwargs.pop('uid', -1) gid = kwargs.pop('gid', -1) # if uid and gid are both -1 then go ahead with # no changes at all if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \ [i for i in (uid, gid) if i != -1]: os.chown(fn_, uid, gid) return fn_ def parse_args_and_kwargs(func, args, data=None): ''' Wrap load_args_and_kwargs ''' salt.utils.warn_until( 'Boron', 'salt.minion.parse_args_and_kwargs() has been renamed to ' 'salt.minion.load_args_and_kwargs(). Please change this function call ' 'before the Boron release of Salt.' ) return load_args_and_kwargs(func, args, data=data) def load_args_and_kwargs(func, args, data=None): ''' Detect the args and kwargs that need to be passed to a function call, and check them against what was passed. ''' argspec = salt.utils.args.get_function_argspec(func) _args = [] _kwargs = {} invalid_kwargs = [] for arg in args: if isinstance(arg, string_types): string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632 if string_arg: # Don't append the version that was just derived from parse_cli # above, that would result in a 2nd call to # salt.utils.cli.yamlify_arg(), which could mangle the input. 
_args.append(arg) elif string_kwarg: salt.utils.warn_until( 'Boron', 'The list of function args and kwargs should be parsed ' 'by salt.utils.args.parse_input() before calling ' 'salt.minion.load_args_and_kwargs().' ) if argspec.keywords or next(iter(string_kwarg.keys())) in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs.update(string_kwarg) else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. for key, val in string_kwarg.iteritems(): invalid_kwargs.append('{0}={1}'.format(key, val)) continue # if the arg is a dict with __kwarg__ == True, then its a kwarg elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True: for key, val in arg.items(): if argspec.keywords or key in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs[key] = val else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. invalid_kwargs.append('{0}={1}'.format(key, val)) continue else: _args.append(arg) if invalid_kwargs: raise SaltInvocationError( 'The following keyword arguments are not valid: {0}' .format(', '.join(invalid_kwargs)) ) if argspec.keywords and isinstance(data, dict): # this function accepts **kwargs, pack in the publish data for key, val in data.items(): _kwargs['__pub_{0}'.format(key)] = val return _args, _kwargs class SMinion(object): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. 
''' def __init__(self, opts): # Late setup of the opts grains, so we can log from the grains module opts['grains'] = salt.loader.grains(opts) self.opts = opts # Clean out the proc directory (default /var/cache/salt/minion/proc) if (self.opts.get('file_client', 'remote') == 'remote' or self.opts.get('use_master_when_local', False)): if isinstance(self.opts['master'], list): masters = self.opts['master'] if self.opts['random_master'] is True: shuffle(masters) for master in masters: self.opts['master'] = master self.opts.update(resolve_dns(opts)) try: self.gen_modules() break except SaltClientError: log.warning(('Attempted to authenticate with master ' '{0} and failed'.format(master))) continue else: if self.opts['random_master'] is True: log.warning('random_master is True but there is only one master specified. Ignoring.') self.opts.update(resolve_dns(opts)) self.gen_modules(initial_load=True) else: self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Load all of the modules for the minion ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['environment'] ).compile_pillar() self.functions = salt.loader.minion_mods(self.opts, include_errors=True) # TODO: remove self.function_errors = {} # Keep the funcs clean self.returners = salt.loader.returners(self.opts, self.functions) self.states = salt.loader.states(self.opts, self.functions) self.rend = salt.loader.render(self.opts, self.functions) self.matcher = Matcher(self.opts, self.functions) self.functions['sys.reload_modules'] = self.gen_modules class MinionBase(object): def __init__(self, opts): self.opts = opts def _init_context_and_poller(self): self.context = zmq.Context() self.poller = zmq.Poller() def _prepare_minion_event_system(self): # Prepare the minion event system # # Start with the publish socket self._init_context_and_poller() hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5')) # Only use the first 10 chars 
to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(self.opts['id']).hexdigest()[:10] epub_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash) ) if os.path.exists(epub_sock_path): os.unlink(epub_sock_path) epull_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash) ) if os.path.exists(epull_sock_path): os.unlink(epull_sock_path) self.epub_sock = self.context.socket(zmq.PUB) if self.opts.get('ipc_mode', '') == 'tcp': epub_uri = 'tcp://127.0.0.1:{0}'.format( self.opts['tcp_pub_port'] ) epull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts['tcp_pull_port'] ) else: epub_uri = 'ipc://{0}'.format(epub_sock_path) salt.utils.zeromq.check_ipc_path_max_len(epub_uri) epull_uri = 'ipc://{0}'.format(epull_sock_path) salt.utils.zeromq.check_ipc_path_max_len(epull_uri) log.debug( '{0} PUB socket URI: {1}'.format( self.__class__.__name__, epub_uri ) ) log.debug( '{0} PULL socket URI: {1}'.format( self.__class__.__name__, epull_uri ) ) # Check to make sure the sock_dir is available, create if not default_minion_sock_dir = os.path.join( salt.syspaths.SOCK_DIR, 'minion' ) minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir) if not os.path.isdir(minion_sock_dir): # Let's try to create the directory defined on the configuration # file try: os.makedirs(minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: {0}'.format(exc)) # Let's not fail yet and try using the default path if minion_sock_dir == default_minion_sock_dir: # We're already trying the default system path, stop now! 
raise if not os.path.isdir(default_minion_sock_dir): try: os.makedirs(default_minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: {0}'.format(exc)) # Let's stop at this stage raise # Create the pull socket self.epull_sock = self.context.socket(zmq.PULL) # Securely bind the event sockets if self.opts.get('ipc_mode', '') != 'tcp': old_umask = os.umask(0o177) try: log.info('Starting pub socket on {0}'.format(epub_uri)) self.epub_sock.bind(epub_uri) log.info('Starting pull socket on {0}'.format(epull_uri)) self.epull_sock.bind(epull_uri) finally: if self.opts.get('ipc_mode', '') != 'tcp': os.umask(old_umask) @staticmethod def process_schedule(minion, loop_interval): try: minion.schedule.eval() # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error( 'Exception {0} occurred in scheduled job'.format(exc) ) return loop_interval def process_beacons(self, functions): ''' Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' if 'config.merge' in functions: b_conf = functions['config.merge']('beacons') if b_conf: return self.beacons.process(b_conf) return [] class MasterMinion(object): ''' Create a fully loaded minion function object for generic use on the master. What makes this class different is that the pillar is omitted, otherwise everything else is loaded cleanly. 
''' def __init__( self, opts, returners=True, states=True, rend=True, matcher=True, whitelist=None): self.opts = salt.config.minion_config(opts['conf_file']) self.opts.update(opts) self.whitelist = whitelist self.opts['grains'] = salt.loader.grains(opts) self.opts['pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend self.mk_matcher = matcher self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Load all of the modules for the minion ''' self.functions = salt.loader.minion_mods( self.opts, whitelist=self.whitelist, initial_load=initial_load) if self.mk_returners: self.returners = salt.loader.returners(self.opts, self.functions) if self.mk_states: self.states = salt.loader.states(self.opts, self.functions) if self.mk_rend: self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matcher = Matcher(self.opts, self.functions) self.functions['sys.reload_modules'] = self.gen_modules class MultiMinion(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. 
''' # timeout for one of the minions to auth with a master MINION_CONNECT_TIMEOUT = 5 def __init__(self, opts): super(MultiMinion, self).__init__(opts) def minions(self): ''' Return a dict of minion generators bound to the tune_in method dict of master -> minion_mapping, the mapping contains: opts: options used to create the minion last: last auth attempt time auth_wait: time to wait for next auth attempt minion: minion object generator: generator function (non-blocking tune_in) ''' if not isinstance(self.opts['master'], list): log.error( 'Attempting to start a multimaster system with one master') sys.exit(salt.defaults.exitcodes.EX_GENERIC) ret = {} for master in set(self.opts['master']): s_opts = copy.deepcopy(self.opts) s_opts['master'] = master s_opts['multimaster'] = True ret[master] = {'opts': s_opts, 'last': time.time(), 'auth_wait': s_opts['acceptance_wait_time']} try: minion = Minion( s_opts, self.MINION_CONNECT_TIMEOUT, False, 'salt.loader.{0}'.format(master)) ret[master]['minion'] = minion ret[master]['generator'] = minion.tune_in_no_block() except SaltClientError as exc: log.error('Error while bringing up minion for multi-master. 
Is master at {0} responding?'.format(master)) return ret # Multi Master Tune In def tune_in(self): ''' Bind to the masters This loop will attempt to create connections to masters it hasn't connected to yet, but once the initial connection is made it is up to ZMQ to do the reconnect (don't know of an API to get the state here in salt) ''' self._prepare_minion_event_system() self.poller.register(self.epull_sock, zmq.POLLIN) # Prepare the minion generators minions = self.minions() loop_interval = int(self.opts['loop_interval']) auth_wait = self.opts['acceptance_wait_time'] max_wait = self.opts['acceptance_wait_time_max'] while True: package = None for minion in minions.values(): if isinstance(minion, dict): if 'minion' in minion: minion = minion['minion'] else: continue if not hasattr(minion, 'schedule'): continue loop_interval = self.process_schedule(minion, loop_interval) socks = dict(self.poller.poll(1)) if socks.get(self.epull_sock) == zmq.POLLIN: try: package = self.epull_sock.recv(zmq.NOBLOCK) except Exception: pass masters = list(minions.keys()) shuffle(masters) # Do stuff per minion that we have for master in masters: minion = minions[master] # if we haven't connected yet, lets attempt some more. # make sure to keep separate auth_wait times, since these # are separate masters if 'generator' not in minion: if time.time() - minion['auth_wait'] > minion['last']: minion['last'] = time.time() if minion['auth_wait'] < max_wait: minion['auth_wait'] += auth_wait try: t_minion = Minion(minion['opts'], self.MINION_CONNECT_TIMEOUT, False) minions[master]['minion'] = t_minion minions[master]['generator'] = t_minion.tune_in_no_block() minions[master]['auth_wait'] = self.opts['acceptance_wait_time'] except SaltClientError: log.error('Error while bring up minion for multi-master. 
Is master {0} responding?'.format(master)) continue else: continue # run scheduled jobs if you have them loop_interval = self.process_schedule(minion['minion'], loop_interval) # If a minion instance receives event, handle the event on all # instances if package: try: for master in masters: minions[master].handle_event(package) except Exception: pass finally: package = None # have the Minion class run anything it has to run next(minion['generator']) class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None): # pylint: disable=W0231 ''' Pass in the options dict ''' self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name # Warn if ZMQ < 3.2 if HAS_ZMQ: try: zmq_version_info = zmq.zmq_version_info() except AttributeError: # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to # using zmq.zmq_version() and build a version info tuple. zmq_version_info = tuple( [int(x) for x in zmq.zmq_version().split('.')] ) if zmq_version_info < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' 
) # Late setup the of the opts grains, so we can log from the grains # module opts['grains'] = salt.loader.grains(opts) # evaluate the master to connect to and authenticate with it opts['master'] = self.eval_master(opts, timeout, safe) self.opts['pillar'] = salt.pillar.get_pillar( opts, opts['grains'], opts['id'], opts['environment'] ).compile_pillar() self.functions, self.returners, self.function_errors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() self.matcher = Matcher(self.opts, self.functions) self.beacons = salt.beacons.Beacon(opts, self.functions) uid = salt.utils.get_uid(user=opts.get('user', None)) self.proc_dir = get_proc_dir(opts['cachedir'], uid=uid) self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners) # add default scheduling jobs to the minions scheduler if 'mine.update' in self.functions: log.info('Added mine.update to scheduler') self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': opts['mine_interval'], 'jid_include': True, 'maxrunning': 2 } }) # add master_alive job if enabled if self.opts['master_alive_interval'] > 0: self.schedule.add_job({ '__master_alive': { 'function': 'status.master', 'seconds': opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'kwargs': {'master': self.opts['master'], 'connected': True} } }) self.grains_cache = self.opts['grains'] # store your hexid to subscribe to zmq, hash since zmq filters are prefix # matches this way we can avoid collisions self.hexid = hashlib.sha1(self.opts['id']).hexdigest() if 'proxy' in self.opts['pillar']: log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'], self.opts['pillar']['proxy'])) for p in self.opts['pillar']['proxy']: log.debug('Starting {0} proxy.'.format(p)) pid = os.fork() if pid > 0: continue else: proxyminion = salt.ProxyMinion() proxyminion.start(self.opts['pillar']['proxy'][p]) 
self.clean_die(signal.SIGTERM, None) else: log.debug('I am {0} and I am not supposed to start any proxies. ' '(Likely not a problem)'.format(self.opts['id'])) # __init__() from MinionBase is called in Minion.eval_master() def eval_master(self, opts, timeout=60, safe=True, failed=False): ''' Evaluates and returns the current master address. In standard mode, just calls authenticate() with the given master address. With master_type=func evaluates the current master address from the given module and then calls authenticate(). With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to connect is used to authenticate() and then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. ''' # check if master_type was altered from its default if opts['master_type'] != 'str': # check for a valid keyword if opts['master_type'] == 'func': # split module and function and try loading the module mod, fun = opts['master'].split('.') try: master_mod = salt.loader.raw_mod(opts, mod, fun) if not master_mod: raise TypeError # we take whatever the module returns as master address opts['master'] = master_mod[mod + '.' 
+ fun]() except TypeError: msg = ('Failed to evaluate master address from ' 'module \'{0}\''.format(opts['master'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) log.info('Evaluated master from module: {0}'.format(master_mod)) # if failover is set, master has to be of type list elif opts['master_type'] == 'failover': if isinstance(opts['master'], list): log.info('Got list of available master addresses:' ' {0}'.format(opts['master'])) if opts['master_shuffle']: shuffle(opts['master']) elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master'])) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: log.info('Removing possibly failed master {0} from list of' ' masters'.format(opts['master'])) # create new list of master with the possibly failed one removed opts['master'] = [x for x in opts['master_list'] if opts['master'] != x] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False # shuffle the masters and then loop through them local_masters = copy.copy(opts['master']) for master in local_masters: opts['master'] = master opts.update(resolve_dns(opts)) super(Minion, self).__init__(opts) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in self.opts: self.opts['master_list'] = 
local_masters try: if self.authenticate(timeout, safe) != 'full': conn = True break except SaltClientError: msg = ('Master {0} could not be reached, trying ' 'next master (if any)'.format(opts['master'])) log.info(msg) continue if not conn: self.connected = False msg = ('No master could be reached or all masters denied ' 'the minions connection attempt.') log.error(msg) else: self.connected = True return opts['master'] # single master sign in else: opts.update(resolve_dns(opts)) super(Minion, self).__init__(opts) if self.authenticate(timeout, safe) == 'full': self.connected = False msg = ('master {0} rejected the minions connection because too ' 'many minions are already connected.'.format(opts['master'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) else: self.connected = True return opts['master'] def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' mod_opts = {} for key, val in self.opts.items(): if key == 'logger': continue mod_opts[key] = val return mod_opts def _process_beacons(self): ''' Process each beacon and send events if appropriate ''' # Process Beacons try: beacons = self.process_beacons(self.functions) except Exception as exc: log.critical('Beacon processing errored: {0}. 
No beacons will be procssed.'.format(traceback.format_exc(exc))) beacons = None if beacons: self._fire_master(events=beacons) for beacon in beacons: serialized_data = salt.utils.dicttrim.trim_dict( self.serial.dumps(beacon['data']), self.opts.get('max_event_size', 1048576), is_msgpacked=True, ) log.debug('Sending event - data = {0}'.format(beacon['data'])) event = '{0}{1}{2}'.format( beacon['tag'], salt.utils.event.TAGEND, serialized_data) self.handle_event(event) self.epub_sock.send(event) def _load_modules(self, force_refresh=False, notify=False): ''' Return the functions and the returners loaded up from the loader module ''' # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory'])) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).get_memory_info() mem_limit = rss + vms + self.opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif self.opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') self.opts['grains'] = salt.loader.grains(self.opts, force_refresh) if self.opts.get('multimaster', False): s_opts = copy.deepcopy(self.opts) functions = salt.loader.minion_mods(s_opts, loaded_base_name=self.loaded_base_name) else: functions = salt.loader.minion_mods(self.opts) returners = salt.loader.returners(self.opts, functions) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') functions.clear() 
returners.clear() # we're done, reset the limits! if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) return functions, returners, errors def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return channel = salt.transport.Channel.factory(self.opts) try: result = channel.send(load, timeout=timeout) return True except Exception: log.info('fire_master failed: {0}'.format(traceback.format_exc())) return False def _handle_payload(self, payload): ''' Takes a payload from the master publisher and does whatever the master wants done. ''' {'aes': self._handle_aes, 'pub': self._handle_pub, 'clear': self._handle_clear}[payload['enc']](payload['load'], payload['sig'] if 'sig' in payload else None) def _handle_aes(self, load, sig=None): ''' Takes the AES encrypted load, checks the signature if pub signatures are turned on, decrypts it, and runs the encapsulated instructions ''' # Verify that the signature is valid master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub') if sig and self.functions['config.get']('sign_pub_messages'): if not salt.crypt.verify_signature(master_pubkey_path, load, sig): raise AuthenticationError('Message signature failed to validate.') try: data = self.crypticle.loads(load) except AuthenticationError: # decryption of the payload failed, try to re-auth self.authenticate() data = self.crypticle.loads(load) # Verify that the publication is valid if 'tgt' not in data or 'jid' not in data or 'fun' not in data \ or 'arg' not in data: return # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to 
determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. if 'tgt_type' in data: match_func = getattr(self.matcher, '{0}_match'.format(data['tgt_type']), None) if match_func is None: return if data['tgt_type'] in ('grain', 'grain_pcre', 'pillar', 'pillar_pcre'): delimiter = data.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(data['tgt'], delimiter=delimiter): return elif not match_func(data['tgt']): return else: if not self.matcher.glob_match(data['tgt']): return # If the minion does not have the function, don't execute, # this prevents minions that could not load a minion module # from returning a predictable exception #if data['fun'] not in self.functions: # return if 'user' in data: log.info( 'User {0[user]} Executing command {0[fun]} with jid ' '{0[jid]}'.format(data) ) else: log.info( 'Executing command {0[fun]} with jid {0[jid]}'.format(data) ) log.debug('Command details {0}'.format(data)) self._handle_decoded_payload(data) def _handle_pub(self, load): ''' Handle public key payloads ''' pass def _handle_clear(self, load, sig=None): ''' Handle un-encrypted transmissions ''' pass def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' if isinstance(data['fun'], string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): target = Minion._thread_multi_return else: target = Minion._thread_return # We stash an instance references to allow for the socket # communication in Windows. 
You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self if self.opts['multiprocessing']: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None process = multiprocessing.Process( target=target, args=(instance, self.opts, data) ) else: process = threading.Thread( target=target, args=(instance, self.opts, data), name=data['jid'] ) process.start() if not sys.platform.startswith('win'): process.join() else: self.win_proc.append(process) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' # this seems awkward at first, but it's a workaround for Windows # multiprocessing communication. if not minion_instance: minion_instance = cls(opts) fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing']: salt.utils.daemonize_if(opts) salt.utils.appendproctitle(data['jid']) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID {0}'.format(sdata['pid'])) with salt.utils.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] if function_name in minion_instance.functions: try: func = minion_instance.functions[data['fun']] args, kwargs = load_args_and_kwargs( func, data['arg'], data) minion_instance.functions.pack['__context__']['retcode'] = 0 if opts.get('sudo_user', ''): sudo_runas = opts.get('sudo_user') if 'sudo.salt_call' in minion_instance.functions: return_data = minion_instance.functions['sudo.salt_call']( sudo_runas, data['fun'], *args, **kwargs) else: return_data = func(*args, **kwargs) if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] 
iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data ret['retcode'] = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) ret['success'] = True except CommandNotFoundError as exc: msg = 'Command required for {0!r} not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' except CommandExecutionError as exc: log.error( 'A command in {0!r} had a problem: {1}'.format( function_name, exc ), exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' except SaltInvocationError as exc: log.error( 'Problem executing {0!r}: {1}'.format( function_name, exc ), exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing {0!r}: {1}'.format( function_name, exc ) ret['out'] = 'nested' except TypeError as exc: msg = ('TypeError encountered executing {0}: {1}. 
See ' 'debug log for more info.').format(function_name, exc) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=logging.DEBUG) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: {0!r}'.format(minion_instance.function_errors[mod_name]) ret['success'] = False ret['retcode'] = 254 ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') minion_instance._return_pub(ret) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job {0} {1}'.format( data['jid'], exc ) ) log.error(traceback.format_exc()) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' salt.utils.appendproctitle(data['jid']) # this seems awkward at first, but it's a workaround for Windows # multiprocessing communication. 
if not minion_instance: minion_instance = cls(opts) ret = { 'return': {}, 'success': {}, } for ind in range(0, len(data['fun'])): ret['success'][data['fun'][ind]] = False try: func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) ret['return'][data['fun'][ind]] = func(*args, **kwargs) ret['success'][data['fun'][ind]] = True except Exception as exc: trb = traceback.format_exc() log.warning( 'The minion function caused an exception: {0}'.format( exc ) ) ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] minion_instance._return_pub(ret) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job {0} {1}'.format( data['jid'], exc ) ) def _return_pub(self, ret, ret_cmd='_return', timeout=60): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: {0}'.format(jid)) channel = salt.transport.Channel.factory(self.opts) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in ret.items(): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for 
key, value in list(ret.items()): load[key] = value if 'out' in ret: if isinstance(ret['out'], string_types): load['out'] = ret['out'] else: log.error('Invalid outputter {0}. This is likely a bug.' .format(ret['out'])) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled fn_ = os.path.join( self.opts['cachedir'], 'minion_jobs', load['jid'], 'return.p') jdir = os.path.dirname(fn_) if not os.path.isdir(jdir): os.makedirs(jdir) salt.utils.fopen(fn_, 'w+b').write(self.serial.dumps(ret)) try: ret_val = channel.send(load, timeout=timeout) except SaltReqTimeoutError: msg = ('The minion failed to return the job information for job ' '{0}. This is often due to the master being shut down or ' 'overloaded. If the master is running consider increasing ' 'the worker_threads value.').format(jid) log.warn(msg) return '' log.trace('ret_val = {0}'.format(ret_val)) return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 
'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _set_tcp_keepalive(self): if hasattr(zmq, 'TCP_KEEPALIVE'): self.socket.setsockopt( zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive'] ) self.socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle'] ) self.socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt'] ) self.socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl'] ) def _set_monitor_socket(self): if not HAS_ZMQ_MONITOR or not self.opts['zmq_monitor']: return self.monitor_socket = self.socket.get_monitor_socket() t = threading.Thread(target=self._socket_monitor, args=(self.monitor_socket,)) t.start() def _socket_monitor(self, monitor): event_map = {} for name in dir(zmq): if name.startswith('EVENT_'): value = getattr(zmq, name) event_map[value] = name while monitor.poll(): evt = zmq.utils.monitor.recv_monitor_message(monitor) evt.update({'description': event_map[evt['event']]}) log.debug("ZeroMQ event: {0}".format(evt)) if evt['event'] == zmq.EVENT_MONITOR_STOPPED: break monitor.close() log.trace("event monitor thread done!") def _set_reconnect_ivl(self): recon_delay = self.opts['recon_default'] if self.opts['recon_randomize']: recon_delay = randint(self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'] ) log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format( self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'], recon_delay) ) log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay)) self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay) def _set_reconnect_ivl_max(self): if hasattr(zmq, 'RECONNECT_IVL_MAX'): log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format( self.opts['recon_default'] + self.opts['recon_max']) ) self.socket.setsockopt( zmq.RECONNECT_IVL_MAX, self.opts['recon_max'] ) def _set_ipv4only(self): if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work 
for both IPv6 and IPv4 addresses self.socket.setsockopt(zmq.IPV4ONLY, 0) def _fire_master_minion_start(self): # Send an event to the master that the minion is live self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # dup name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def _setsockopts(self): if self.opts['zmq_filtering']: # TODO: constants file for "broadcast" self.socket.setsockopt(zmq.SUBSCRIBE, 'broadcast') self.socket.setsockopt(zmq.SUBSCRIBE, self.hexid) else: self.socket.setsockopt(zmq.SUBSCRIBE, '') self.socket.setsockopt(zmq.IDENTITY, self.opts['id']) self._set_ipv4only() self._set_reconnect_ivl_max() self._set_tcp_keepalive() @property def master_pub(self): ''' Return the master publish port ''' return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'], port=self.publish_port) def authenticate(self, timeout=60, safe=True): ''' Authenticate with the master, this method breaks the functional paradigm, it will update the master information from a fresh sign in, signing in can occur as often as needed to keep up with the revolving master AES key. ''' log.debug( 'Attempting to authenticate with the Salt Master at {0}'.format( self.opts['master_ip'] ) ) auth = salt.crypt.SAuth(self.opts) auth.authenticate(timeout=timeout, safe=safe) # TODO: remove these and just use a local reference to auth?? self.tok = auth.gen_token('salt') self.crypticle = auth.crypticle if self.opts.get('syndic_master_publish_port'): self.publish_port = self.opts.get('syndic_master_publish_port') else: self.publish_port = auth.creds['publish_port'] def module_refresh(self, force_refresh=False): ''' Refresh the functions and returners. 
''' self.functions, self.returners, _ = self._load_modules(force_refresh) self.schedule.functions = self.functions self.schedule.returners = self.returners def pillar_refresh(self, force_refresh=False): ''' Refresh the pillar ''' try: self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['environment'], ).compile_pillar() except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') self.module_refresh(force_refresh) def manage_schedule(self, package): ''' Refresh the functions and returners. ''' tag, data = salt.utils.event.MinionEvent.unpack(package) func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) if func == 'delete': self.schedule.delete_job(name) elif func == 'add': self.schedule.add_job(schedule) elif func == 'modify': self.schedule.modify_job(name, schedule, where) elif func == 'enable': self.schedule.enable_schedule() elif func == 'disable': self.schedule.disable_schedule() elif func == 'enable_job': self.schedule.enable_job(name, where) elif func == 'run_job': self.schedule.run_job(name, where) elif func == 'disable_job': self.schedule.disable_job(name, where) elif func == 'reload': self.schedule.reload(schedule) def environ_setenv(self, package): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' tag, data = salt.utils.event.MinionEvent.unpack(package) environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def clean_die(self, signum, frame): ''' Python does not handle the SIGTERM cleanly, if it is signaled exit the minion process cleanly ''' self._running = False exit(0) def 
_pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This {0} was scheduled to stop. Not running ' '{0}.tune_in()'.format(self.__class__.__name__) ) return elif self._running is True: log.error( 'This {0} is already running. Not running ' '{0}.tune_in()'.format(self.__class__.__name__) ) return try: log.info( '{0} is starting as user \'{1}\''.format( self.__class__.__name__, salt.utils.get_user() ) ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. log.log( salt.utils.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting {0}'.format( self.__class__.__name__ ), exc_info=err ) def _mine_send(self, package): ''' Send mine data to the master ''' channel = salt.transport.Channel.factory(self.opts) load = salt.utils.event.SaltEvent.unpack(package)[1] ret = channel.send(load) return ret def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' log.debug('Handling event {0!r}'.format(package)) if package.startswith('module_refresh'): self.module_refresh() elif package.startswith('pillar_refresh'): self.pillar_refresh() elif package.startswith('manage_schedule'): self.manage_schedule(package) elif package.startswith('grains_refresh'): if self.grains_cache != self.opts['grains']: self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] elif package.startswith('environ_setenv'): self.environ_setenv(package) elif package.startswith('_minion_mine'): self._mine_send(package) elif package.startswith('fire_master'): tag, data = salt.utils.event.MinionEvent.unpack(package) log.debug('Forwarding master event tag={tag}'.format(tag=data['tag'])) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) elif 
package.startswith('__master_disconnected'): tag, data = salt.utils.event.MinionEvent.unpack(package) # if the master disconnect event is for a different master, raise an exception if data['master'] != self.opts['master']: raise Exception() if self.connected: # we are not connected anymore self.connected = False # modify the scheduled job to fire only on reconnect schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 2, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name='__master_alive', schedule=schedule) log.info('Connection to master {0} lost'.format(self.opts['master'])) if self.opts['master_type'] == 'failover': log.info('Trying to tune in to next master from master-list') # if eval_master finds a new master for us, self.connected # will be True again on successfull master authentication self.opts['master'] = self.eval_master(opts=self.opts, failed=True) if self.connected: # re-init the subsystems to work with the new master log.info('Re-initialising subsystems for new ' 'master {0}'.format(self.opts['master'])) del self.socket del self.context del self.poller self._init_context_and_poller() self.socket = self.context.socket(zmq.SUB) self._set_reconnect_ivl() self._setsockopts() self.socket.connect(self.master_pub) self.poller.register(self.socket, zmq.POLLIN) self.poller.register(self.epull_sock, zmq.POLLIN) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 2, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name='__master_alive', schedule=schedule) elif package.startswith('__master_connected'): # handle this event only once. 
otherwise it will pollute the log if not self.connected: log.info('Connection to master {0} re-established'.format(self.opts['master'])) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 2, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name='__master_alive', schedule=schedule) elif package.startswith('_salt_error'): tag, data = salt.utils.event.MinionEvent.unpack(package) log.debug('Forwarding salt error event tag={tag}'.format(tag=tag)) self._fire_master(data, tag) def _windows_thread_cleanup(self): ''' Cleanup Windows threads ''' if not salt.utils.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass # Main Minion Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the minion :rtype : None ''' self._pre_tune() # Properly exit if a SIGTERM is signalled signal.signal(signal.SIGTERM, self.clean_die) log.debug('Minion {0!r} trying to tune in'.format(self.opts['id'])) self._prepare_minion_event_system() self.socket = self.context.socket(zmq.SUB) self._set_reconnect_ivl() self._setsockopts() self._set_monitor_socket() self.socket.connect(self.master_pub) self.poller.register(self.socket, zmq.POLLIN) self.poller.register(self.epull_sock, zmq.POLLIN) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT salt.utils.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() loop_interval = int(self.opts['loop_interval']) try: if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds! 
if self.opts['grains_refresh_every'] > 1: log.debug( 'Enabling the grains refresher. Will run every {0} minutes.'.format( self.opts['grains_refresh_every']) ) else: # Clean up minute vs. minutes in log message log.debug( 'Enabling the grains refresher. Will run every {0} minute.'.format( self.opts['grains_refresh_every']) ) self._refresh_grains_watcher( abs(self.opts['grains_refresh_every']) ) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format( exc) ) ping_interval = self.opts.get('ping_interval', 0) * 60 ping_at = None while self._running is True: loop_interval = self.process_schedule(self, loop_interval) self._windows_thread_cleanup() try: socks = self._do_poll(loop_interval) if ping_interval > 0: if socks or not ping_at: ping_at = time.time() + ping_interval if ping_at < time.time(): log.debug('Ping master') self._fire_master('ping', 'minion_ping') ping_at = time.time() + ping_interval self._do_socket_recv(socks) self._do_event_poll(socks) self._process_beacons() except zmq.ZMQError as exc: # The interrupt caused by python handling the # SIGCHLD. Throws this error with errno == EINTR. # Nothing to receive on the zmq socket throws this error # with EAGAIN. 
def _do_socket_recv(self, socks):
    '''
    If the master pub socket is readable, receive the multipart message,
    deserialize the payload and hand it to self._handle_payload.

    :param socks: dict of socket -> poll-event as returned by poller.poll
    '''
    if socks.get(self.socket) == zmq.POLLIN:
        # topic filtering is done at the zmq level, so we just strip it
        messages = self.socket.recv_multipart(zmq.NOBLOCK)
        messages_len = len(messages)
        # if it was one message, then its old style
        if messages_len == 1:
            payload = self.serial.loads(messages[0])
        # 2 includes a header which says who should do it
        elif messages_len == 2:
            payload = self.serial.loads(messages[1])
        else:
            # BUG FIX: messages_len is already an int; the original called
            # len(messages_len), which raised TypeError and masked the
            # intended error message.
            raise Exception(('Invalid number of messages ({0}) in zeromq pub'
                             'message from master').format(messages_len))
        log.trace('Handling payload')
        self._handle_payload(payload)
''' def __init__(self, opts, **kwargs): self._syndic_interface = opts.get('interface') self._syndic = True # force auth_safemode True because Syndic don't support autorestart opts['auth_safemode'] = True opts['loop_interval'] = 1 super(Syndic, self).__init__(opts, **kwargs) self.mminion = salt.minion.MasterMinion(opts) self.jid_forward_cache = set() def _handle_aes(self, load, sig=None): ''' Takes the AES encrypted load, decrypts it, and runs the encapsulated instructions ''' # If the AES authentication has changed, re-authenticate try: data = self.crypticle.loads(load) except AuthenticationError: self.authenticate() data = self.crypticle.loads(load) # Verify that the publication is valid if 'tgt' not in data or 'jid' not in data or 'fun' not in data \ or 'arg' not in data: return data['to'] = int(data.get('to', self.opts['timeout'])) - 1 if 'user' in data: log.debug( 'User {0[user]} Executing syndic command {0[fun]} with ' 'jid {0[jid]}'.format( data ) ) else: log.debug( 'Executing syndic command {0[fun]} with jid {0[jid]}'.format( data ) ) log.debug('Command details: {0}'.format(data)) self._handle_decoded_payload(data) def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. 
''' # Only forward the command if it didn't originate from ourselves if data.get('master_id', 0) != self.opts.get('master_id', 1): self.syndic_cmd(data) def syndic_cmd(self, data): ''' Take the now clear load and forward it on to the client cmd ''' # Set up default tgt_type if 'tgt_type' not in data: data['tgt_type'] = 'glob' kwargs = {} # optionally add a few fields to the publish data for field in ('master_id', # which master the job came from 'user', # which user ran the job ): if field in data: kwargs[field] = data[field] try: # Send out the publication self.local.pub(data['tgt'], data['fun'], data['arg'], data['tgt_type'], data['ret'], data['jid'], data['to'], **kwargs) except Exception as exc: log.warning('Unable to forward pub data: {0}'.format(exc)) def _setsockopts(self): # no filters for syndication masters, unless we want to maintain a # list of all connected minions and update the filter self.socket.setsockopt(zmq.SUBSCRIBE, '') self.socket.setsockopt(zmq.IDENTITY, self.opts['id']) self._set_reconnect_ivl_max() self._set_tcp_keepalive() self._set_ipv4only() def _fire_master_syndic_start(self): # Send an event to the master that the minion is live self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'syndic_start' ) self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'syndic'), ) def tune_in_no_block(self): ''' Executes the tune_in sequence but omits extra logging and the management of the event bus assuming that these are handled outside the tune_in sequence ''' # Instantiate the local client self.local = salt.client.get_local_client(self.opts['_minion_conf_file']) self.local.event.subscribe('') self._init_context_and_poller() self.socket = self.context.socket(zmq.SUB) self._setsockopts() self.socket.connect(self.master_pub) self.poller.register(self.socket, zmq.POLLIN) loop_interval = int(self.opts['loop_interval']) 
self._fire_master_syndic_start() while True: try: socks = dict(self.poller.poll(loop_interval * 1000)) if socks.get(self.socket) == zmq.POLLIN: self._process_cmd_socket() except zmq.ZMQError: yield True except Exception: log.critical( 'An exception occurred while polling the minion', exc_info=True ) yield True # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the syndic ''' signal.signal(signal.SIGTERM, self.clean_die) log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id'])) self._init_context_and_poller() # Instantiate the local client self.local = salt.client.get_local_client(self.opts['_minion_conf_file']) self.local.event.subscribe('') self.local.opts['interface'] = self._syndic_interface # register the event sub to the poller self.poller.register(self.local.event.sub) # Start with the publish socket # Share the poller with the event object self.socket = self.context.socket(zmq.SUB) self._setsockopts() self.socket.connect(self.master_pub) self.poller.register(self.socket, zmq.POLLIN) # Send an event to the master that the minion is live self._fire_master_syndic_start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() loop_interval = int(self.opts['loop_interval']) self._reset_event_aggregation() while True: try: # Do all the maths in seconds timeout = loop_interval if self.event_forward_timeout is not None: timeout = min(timeout, self.event_forward_timeout - time.time()) if timeout >= 0: log.trace('Polling timeout: %f', timeout) socks = dict(self.poller.poll(timeout * 1000)) else: # This shouldn't really happen. 
# But there's no harm being defensive log.warning('Negative timeout in syndic main loop') socks = {} if socks.get(self.socket) == zmq.POLLIN: self._process_cmd_socket() if socks.get(self.local.event.sub) == zmq.POLLIN: self._process_event_socket() if self.event_forward_timeout is not None and \ self.event_forward_timeout < time.time(): self._forward_events() # We don't handle ZMQErrors like the other minions # I've put explicit handling around the receive calls # in the process_*_socket methods. If we see any other # errors they may need some kind of handling so log them # for now. except Exception: log.critical( 'An exception occurred while polling the syndic', exc_info=True ) def _process_cmd_socket(self): try: messages = self.socket.recv_multipart(zmq.NOBLOCK) messages_len = len(messages) idx = None if messages_len == 1: idx = 0 elif messages_len == 2: idx = 1 else: raise SaltSyndicMasterError('Syndication master received message of invalid len ({0}/2)'.format(messages_len)) payload = self.serial.loads(messages[idx]) except zmq.ZMQError as e: # Swallow errors for bad wakeups or signals needing processing if e.errno != errno.EAGAIN and e.errno != errno.EINTR: raise log.trace('Handling payload') self._handle_payload(payload) def _reset_event_aggregation(self): self.jids = {} self.raw_events = [] self.event_forward_timeout = None def _process_event_socket(self): tout = time.time() + self.opts['syndic_max_event_process_time'] while tout > time.time(): try: event = self.local.event.get_event_noblock() except zmq.ZMQError as e: # EAGAIN indicates no more events at the moment # EINTR some kind of signal maybe someone trying # to get us to quit so escape our timeout if e.errno == errno.EAGAIN or e.errno == errno.EINTR: break raise log.trace('Got event {0}'.format(event['tag'])) if self.event_forward_timeout is None: self.event_forward_timeout = ( time.time() + self.opts['syndic_event_forward_timeout'] ) tag_parts = event['tag'].split('/') if len(tag_parts) >= 4 and 
tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in event['data']: if 'jid' not in event['data']: # Not a job return continue jdict = self.jids.setdefault(event['tag'], {}) if not jdict: jdict['__fun__'] = event['data'].get('fun') jdict['__jid__'] = event['data']['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! if event['data']['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](event['data']['jid']) ) self.jid_forward_cache.add(event['data']['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if 'master_id' in event['data']: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = event['data']['master_id'] jdict[event['data']['id']] = event['data']['return'] else: # Add generic event aggregation here if 'retcode' not in event['data']: self.raw_events.append(event) def _forward_events(self): log.trace('Forwarding events') if self.raw_events: self._fire_master(events=self.raw_events, pretag=tagify(self.opts['id'], base='syndic'), ) for jid in self.jids: self._return_pub(self.jids[jid], '_syndic_return') self._reset_event_aggregation() def destroy(self): ''' Tear down the syndic minion ''' # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. self.poller = None super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local class MultiSyndic(MinionBase): ''' Make a MultiSyndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. 
Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts): opts['loop_interval'] = 1 super(MultiSyndic, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self._has_master = threading.Event() self.jid_forward_cache = set() # create all of the syndics you need self.master_syndics = {} for master in set(self.opts['master']): self._init_master_conn(master) log.info('Syndic waiting on any master to connect...') # threading events are un-interruptible in python 2 :/ while not self._has_master.is_set(): self._has_master.wait(1) def _init_master_conn(self, master): ''' Start a thread to connect to the master `master` ''' # if we are re-creating one, lets make sure its not still in use if master in self.master_syndics: if 'sign_in_thread' in self.master_syndics[master]: self.master_syndics[master]['sign_in_thread'].join(0) if self.master_syndics[master]['sign_in_thread'].is_alive(): return # otherwise we make one! 
s_opts = copy.copy(self.opts) s_opts['master'] = master t = threading.Thread(target=self._connect_to_master_thread, args=(master,)) t.daemon = True self.master_syndics[master] = {'opts': s_opts, 'auth_wait': s_opts['acceptance_wait_time'], 'dead_until': 0, 'sign_in_thread': t, } t.start() def _connect_to_master_thread(self, master): ''' Thread target to connect to a master ''' connected = False master_dict = self.master_syndics[master] while connected is False: # if we marked it as dead, wait a while if master_dict['dead_until'] > time.time(): time.sleep(master_dict['dead_until'] - time.time()) if master_dict['dead_until'] > time.time(): time.sleep(master_dict['dead_until'] - time.time()) connected = self._connect_to_master(master) if not connected: time.sleep(1) self._has_master.set() # TODO: do we need all of this? def _connect_to_master(self, master): ''' Attempt to connect to master, including back-off for each one return boolean of whether you connected or not ''' log.debug('Syndic attempting to connect to {0}'.format(master)) if master not in self.master_syndics: log.error('Unable to connect to {0}, not in the list of masters'.format(master)) return False minion = self.master_syndics[master] try: t_minion = Syndic(minion['opts'], timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, ) self.master_syndics[master]['syndic'] = t_minion self.master_syndics[master]['generator'] = t_minion.tune_in_no_block() self.master_syndics[master]['auth_wait'] = self.opts['acceptance_wait_time'] self.master_syndics[master]['dead_until'] = 0 log.info('Syndic successfully connected to {0}'.format(master)) return True except SaltClientError: log.error('Error while bring up minion for multi-syndic. 
Is master {0} responding?'.format(master)) # re-use auth-wait as backoff for syndic minion['dead_until'] = time.time() + minion['auth_wait'] if minion['auth_wait'] < self.opts['acceptance_wait_time_max']: minion['auth_wait'] += self.opts['acceptance_wait_time'] return False # TODO: Move to an async framework of some type-- channel (the event thing # underneath) doesn't handle failures well, and will retry 3 times at 60s # timeouts-- which all block the main thread's execution. For now we just # cause failures to kick off threads to look for the master to come back up def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} for master, syndic_dict in self.iter_master_options(master_id): if 'syndic' not in syndic_dict: continue if syndic_dict['dead_until'] > time.time(): log.error('Unable to call {0} on {1}, that syndic is dead for now'.format(func, master)) continue try: ret = getattr(syndic_dict['syndic'], func)(*args, **kwargs) if ret is not False: log.debug('{0} called on {1}'.format(func, master)) return except (SaltClientError, SaltReqTimeoutError): pass log.error('Unable to call {0} on {1}, trying another...'.format(func, master)) # If the connection is dead, lets have another thread wait for it to come back self._init_master_conn(master) continue log.critical('Unable to call {0} on any masters!'.format(func)) def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self.master_syndics.keys()) shuffle(masters) if master_id not in self.master_syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self.master_syndics[master_id] if len(masters) == 0: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.jids = {} self.raw_events = [] self.event_forward_timeout = None # Syndic Tune In def tune_in(self): ''' Lock 
onto the publisher. This is the main event loop for the syndic ''' # Instantiate the local client self.local = salt.client.get_local_client(self.opts['_minion_conf_file']) self.local.event.subscribe('') log.debug('MultiSyndic {0!r} trying to tune in'.format(self.opts['id'])) # Share the poller with the event object self.poller = self.local.event.poller # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() loop_interval = int(self.opts['loop_interval']) self._reset_event_aggregation() while True: try: # Do all the maths in seconds timeout = loop_interval if self.event_forward_timeout is not None: timeout = min(timeout, self.event_forward_timeout - time.time()) if timeout >= 0: log.trace('Polling timeout: %f', timeout) socks = dict(self.poller.poll(timeout * 1000)) else: # This shouldn't really happen. # But there's no harm being defensive log.warning('Negative timeout in syndic main loop') socks = {} # check all of your master_syndics, have them do their thing for master_id, syndic_dict in self.master_syndics.items(): # if not connected, lets try if 'generator' not in syndic_dict: log.info('Syndic still not connected to {0}'.format(master_id)) # if we couldn't connect, lets try later continue next(syndic_dict['generator']) # events if socks.get(self.local.event.sub) == zmq.POLLIN: self._process_event_socket() if self.event_forward_timeout is not None \ and self.event_forward_timeout < time.time(): self._forward_events() # We don't handle ZMQErrors like the other minions # I've put explicit handling around the receive calls # in the process_*_socket methods. If we see any other # errors they may need some kind of handling so log them # for now. 
def _process_event_socket(self):
    '''
    Drain the local event bus (bounded by syndic_max_event_process_time
    seconds) and aggregate job returns into self.jids and other events
    into self.raw_events for later forwarding.
    '''
    tout = time.time() + self.opts['syndic_max_event_process_time']
    while tout > time.time():
        try:
            event = self.local.event.get_event_noblock()
        except zmq.ZMQError as e:
            # EAGAIN indicates no more events at the moment
            # EINTR some kind of signal maybe someone trying
            # to get us to quit so escape our timeout
            if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
                break
            raise
        log.trace('Got event {0}'.format(event['tag']))
        if self.event_forward_timeout is None:
            self.event_forward_timeout = (
                time.time() + self.opts['syndic_event_forward_timeout']
            )
        tag_parts = event['tag'].split('/')
        if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
                salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
                'return' in event['data']:
            if 'jid' not in event['data']:
                # Not a job return
                continue
            if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
                # FIX: corrected typo 'recieved' -> 'received' in log message
                log.debug('Return received with matching master_id, not forwarding')
                continue
            jdict = self.jids.setdefault(event['tag'], {})
            if not jdict:
                jdict['__fun__'] = event['data'].get('fun')
                jdict['__jid__'] = event['data']['jid']
                jdict['__load__'] = {}
                fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
                # Only need to forward each load once. Don't hit the disk
                # for every minion return!
                if event['data']['jid'] not in self.jid_forward_cache:
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                    )
                    self.jid_forward_cache.add(event['data']['jid'])
                    if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
                        # Pop the oldest jid from the cache
                        tmp = sorted(list(self.jid_forward_cache))
                        tmp.pop(0)
                        self.jid_forward_cache = set(tmp)
            if 'master_id' in event['data']:
                # __'s to make sure it doesn't print out on the master cli
                jdict['__master_id__'] = event['data']['master_id']
            jdict[event['data']['id']] = event['data']['return']
        else:
            # TODO: config to forward these? If so we'll have to keep track of who
            # has seen them
            # if we are the top level masters-- don't forward all the minion events
            if self.syndic_mode == 'sync':
                # Add generic event aggregation here
                if 'retcode' not in event['data']:
                    self.raw_events.append(event)
getattr(self, funcname)(match, nodegroups) return getattr(self, funcname)(match) else: log.error('Attempting to match with unknown matcher: {0}'.format( matcher )) return False def glob_match(self, tgt): ''' Returns true if the passed glob matches the id ''' if not isinstance(tgt, str): return False return fnmatch.fnmatch(self.opts['id'], tgt) def pcre_match(self, tgt): ''' Returns true if the passed pcre regex matches ''' return bool(re.match(tgt, self.opts['id'])) def list_match(self, tgt): ''' Determines if this host is on the list ''' if isinstance(tgt, string_types): tgt = tgt.split(',') return bool(self.opts['id'] in tgt) def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM): ''' Reads in the grains glob match ''' log.debug('grains target: {0}'.format(tgt)) if delimiter not in tgt: log.error('Got insufficient arguments for grains match ' 'statement from master') return False return salt.utils.subdict_match( self.opts['grains'], tgt, delimiter=delimiter ) def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM): ''' Matches a grain based on regex ''' log.debug('grains pcre target: {0}'.format(tgt)) if delimiter not in tgt: log.error('Got insufficient arguments for grains pcre match ' 'statement from master') return False return salt.utils.subdict_match(self.opts['grains'], tgt, delimiter=delimiter, regex_match=True) def data_match(self, tgt): ''' Match based on the local data store on the minion ''' if self.functions is None: self.functions = salt.loader.minion_mods(self.opts) comps = tgt.split(':') if len(comps) < 2: return False val = self.functions['data.getval'](comps[0]) if val is None: # The value is not defined return False if isinstance(val, list): # We are matching a single component to a single list member for member in val: if fnmatch.fnmatch(str(member).lower(), comps[1].lower()): return True return False if isinstance(val, dict): if comps[1] in val: return True return False return bool(fnmatch.fnmatch( val, comps[1], )) def 
pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM): ''' Reads in the pillar glob match ''' log.debug('pillar target: {0}'.format(tgt)) if delimiter not in tgt: log.error('Got insufficient arguments for pillar match ' 'statement from master') return False return salt.utils.subdict_match( self.opts['pillar'], tgt, delimiter=delimiter ) def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM): ''' Reads in the pillar pcre match ''' log.debug('pillar PCRE target: {0}'.format(tgt)) if delimiter not in tgt: log.error('Got insufficient arguments for pillar PCRE match ' 'statement from master') return False return salt.utils.subdict_match( self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True ) def pillar_exact_match(self, tgt, delimiter=':'): ''' Reads in the pillar match, no globbing, no PCRE ''' log.debug('pillar target: {0}'.format(tgt)) if delimiter not in tgt: log.error('Got insufficient arguments for pillar match ' 'statement from master') return False return salt.utils.subdict_match(self.opts['pillar'], tgt, delimiter=delimiter, exact_match=True) def ipcidr_match(self, tgt): ''' Matches based on ip address or CIDR notation ''' num_parts = len(tgt.split('/')) if num_parts > 2: # Target is not valid CIDR return False elif num_parts == 2: # Target is CIDR return salt.utils.network.in_subnet( tgt, addrs=self.opts['grains'].get('ipv4', []) ) else: # Target is an IPv4 address import socket try: socket.inet_aton(tgt) except socket.error: # Not a valid IPv4 address return False else: return tgt in self.opts['grains'].get('ipv4', []) def range_match(self, tgt): ''' Matches based on range cluster ''' if HAS_RANGE: range_ = seco.range.Range(self.opts['range_server']) try: return self.opts['grains']['fqdn'] in range_.expand(tgt) except seco.range.RangeException as exc: log.debug('Range exception in compound match: {0}'.format(exc)) return False return False def compound_match(self, tgt): ''' Runs the compound target check ''' if not isinstance(tgt, 
string_types): log.debug('Compound target received that is not a string') return False ref = {'G': 'grain', 'P': 'grain_pcre', 'I': 'pillar', 'J': 'pillar_pcre', 'L': 'list', 'S': 'ipcidr', 'E': 'pcre'} if HAS_RANGE: ref['R'] = 'range' results = [] opers = ['and', 'or', 'not', '(', ')'] tokens = tgt.split() for match in tokens: # Try to match tokens from the compound target, first by using # the 'G, X, I, L, S, E' matcher types, then by hostname glob. if '@' in match and match[1] == '@': comps = match.split('@') matcher = ref.get(comps[0]) if not matcher: # If an unknown matcher is called at any time, fail out return False results.append( str( getattr(self, '{0}_match'.format(matcher))( '@'.join(comps[1:]) ) ) ) elif match in opers: # We didn't match a target, so append a boolean operator or # subexpression if results or match in ['(', ')']: if match == 'not': if results[-1] == 'and': pass elif results[-1] == 'or': pass else: results.append('and') results.append(match) else: # seq start with oper, fail if match not in ['(', ')']: return False else: # The match is not explicitly defined, evaluate it as a glob results.append(str(self.glob_match(match))) results = ' '.join(results) try: return eval(results) # pylint: disable=W0123 except Exception: log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results)) return False return False def nodegroup_match(self, tgt, nodegroups): ''' This is a compatibility matcher and is NOT called when using nodegroups for remote execution, but is called when the nodegroups matcher is used in states ''' if tgt in nodegroups: return self.compound_match( salt.utils.minions.nodegroup_comp(tgt, nodegroups) ) return False class ProxyMinion(Minion): ''' This class instantiates a 'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. 
def _load_modules(self, force_refresh=False, notify=False):
    '''
    Return the functions and the returners loaded up from the loader
    module
    '''
    # Delegates straight to Minion._load_modules; kept as an explicit
    # override point for proxy-specific module loading.
    return super(ProxyMinion,
                 self)._load_modules(force_refresh=force_refresh, notify=notify)
# --- websocketconnection.py (separate module appended below) ---
import threading
import websocket
import gzip
import ssl
import logging
from urllib import parse
import urllib.parse

from binance_d.base.printtime import PrintDate
from binance_d.impl.utils.timeservice import get_current_timestamp
from binance_d.impl.utils.urlparamsbuilder import UrlParamsBuilder
from binance_d.impl.utils.apisignature import create_signature
from binance_d.exception.binanceapiexception import BinanceApiException
from binance_d.impl.utils import *
from binance_d.base.printobject import *
from binance_d.model.constant import *

# Key: ws, Value: connection
# Maps each underlying websocket object to its owning WebsocketConnection so
# the module-level callbacks below can dispatch to the right instance.
websocket_connection_handler = dict()


def on_message(ws, message):
    """Module-level websocket-client callback: forward a frame to its owner."""
    websocket_connection = websocket_connection_handler[ws]
    websocket_connection.on_message(message)
    return


def on_error(ws, error):
    """Module-level websocket-client callback: forward an error to its owner."""
    websocket_connection = websocket_connection_handler[ws]
    websocket_connection.on_failure(error)


def on_close(ws, *args):
    """Module-level websocket-client callback: forward close to its owner.

    websocket-client >= 1.0 invokes this as on_close(ws, close_status_code,
    close_msg); accepting *args keeps compatibility with both old (1-arg) and
    new (3-arg) library versions instead of raising TypeError on close.
    """
    websocket_connection = websocket_connection_handler[ws]
    websocket_connection.on_close()


def on_open(ws):
    """Module-level websocket-client callback: forward open to its owner."""
    websocket_connection = websocket_connection_handler[ws]
    websocket_connection.on_open(ws)


# Monotonically increasing id used to label connections in log output.
connection_id = 0


class ConnectionState:
    """Lifecycle states of a WebsocketConnection."""
    IDLE = 0
    CONNECTED = 1
    CLOSED_ON_ERROR = 2


def websocket_func(*args):
    """Thread entry point: run one websocket event loop to completion.

    args[0] is the owning WebsocketConnection. Blocks in run_forever() until
    the socket closes, then resets the connection state to IDLE.
    """
    connection_instance = args[0]
    connection_instance.ws = websocket.WebSocketApp(connection_instance.url,
                                                    on_message=on_message,
                                                    on_error=on_error,
                                                    on_close=on_close)
    global websocket_connection_handler
    websocket_connection_handler[connection_instance.ws] = connection_instance
    connection_instance.logger.info("[Sub][" + str(connection_instance.id) + "] Connecting...")
    connection_instance.delay_in_second = -1
    connection_instance.ws.on_open = on_open
    # NOTE(review): certificate verification is disabled here (CERT_NONE) --
    # confirm this is intentional for the target deployment.
    connection_instance.ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
    connection_instance.logger.info("[Sub][" + str(connection_instance.id) + "] Connection event loop down")
    if connection_instance.state == ConnectionState.CONNECTED:
        connection_instance.state = ConnectionState.IDLE


class WebsocketConnection:
    """One subscription connection to the websocket endpoint.

    Owns the websocket-client WebSocketApp (run in a dedicated thread),
    tracks reconnect/retry bookkeeping, and translates raw frames into the
    request's json_parser / update_callback / error_handler hooks.
    """

    def __init__(self, api_key, secret_key, uri, watch_dog, request):
        self.__thread = None
        self.url = uri
        self.__api_key = api_key
        self.__secret_key = secret_key
        self.request = request
        self.__watch_dog = watch_dog
        # -1 means "no reconnect delay pending"; re_connect_in_delay() sets a
        # positive countdown that re_connect() decrements once per tick.
        self.delay_in_second = -1
        self.ws = None
        self.last_receive_time = 0
        self.logger = logging.getLogger("binance-futures")
        self.state = ConnectionState.IDLE
        self.retry_count = 5
        global connection_id
        connection_id += 1
        self.id = connection_id

    def in_delay_connection(self):
        # NOTE(review): this expression is a tautology -- every number is
        # either > 0 or < 5, so it always returns True. The intent was
        # presumably `self.delay_in_second != -1` (a delay countdown is
        # pending). Left unchanged because the watchdog's reliance on the
        # current behavior cannot be verified from here -- TODO confirm.
        return self.delay_in_second > 0 or self.delay_in_second < 5

    def re_connect_in_delay(self, delay_in_second):
        """Schedule a reconnect after delay_in_second ticks, or give up."""
        if self.ws is not None:
            self.ws.close()
            self.ws = None
        if self.retry_count <= 0:
            # Retry budget exhausted: close permanently.
            self.close_on_error()
            return

        self.delay_in_second = delay_in_second
        self.logger.warning("[Sub][" + str(self.id) + "] Reconnecting after "
                            + str(self.delay_in_second) + " seconds later" + " - " + self.request.channel + "(" + str(self.retry_count) + ")")

    def re_connect(self):
        """Tick the reconnect countdown; reconnect when it reaches zero."""
        self.delay_in_second -= 1
        if self.delay_in_second > 0:
            self.logger.warning("In delay connection: " + str(self.delay_in_second) + " - " + self.request.channel)
        else:
            self.retry_count -= 1
            self.connect()

    def connect(self):
        """Start the websocket event-loop thread if not already connected."""
        self.delay_in_second -= 1
        self.logger.warning("[Sub][" + str(self.id) + "] Connecting " + " - " + self.request.channel)
        if self.state == ConnectionState.CONNECTED:
            self.logger.info("[Sub][" + str(self.id) + "] Already connected")
        else:
            self.__thread = threading.Thread(target=websocket_func, args=[self])
            self.__thread.start()

    def send(self, data):
        """Send a raw text frame on the underlying websocket."""
        self.ws.send(data)

    def close(self):
        """Close the connection normally and deregister from the watchdog."""
        self.ws.close()
        del websocket_connection_handler[self.ws]
        self.__watch_dog.on_connection_closed(self)
        # A normal close is not an error condition; log at INFO (was ERROR).
        self.logger.info("[Sub][" + str(self.id) + "] Closing normally")

    def on_open(self, ws):
        """Handle a successful open: reset retry budget, notify watchdog."""
        self.logger.info("[Sub][" + str(self.id) + "] Connected to server" + " - " + self.request.channel)
        self.ws = ws
        self.retry_count = 5
        self.last_receive_time = get_current_timestamp()
        self.state = ConnectionState.CONNECTED
        self.__watch_dog.on_connection_created(self)
        if self.request.subscription_handler is not None:
            self.request.subscription_handler(self)
        return

    def on_error(self, error_message):
        """Report a subscription error through the request's error handler."""
        if self.request.error_handler is not None:
            print('error')
            exception = BinanceApiException(BinanceApiException.SUBSCRIPTION_ERROR, error_message)
            self.request.error_handler(exception)
        self.logger.error("[Sub][" + str(self.id) + "] Error: " + str(error_message))

    def on_failure(self, error):
        """Handle an unexpected transport-level failure: report and close."""
        print('on_failure')
        self.on_error("Unexpected error: " + str(error))
        self.close_on_error()

    def on_close(self):
        """Handle socket close; surface an error once retries are exhausted."""
        if self.request.error_handler is not None and self.retry_count <= 0:
            exception = BinanceApiException(BinanceApiException.SUBSCRIPTION_ERROR, "close_socket")
            self.request.error_handler(exception)
            self.logger.error("[Sub][" + str(self.id) + "] close_socket")

    def on_message(self, message):
        """Decode, classify, and dispatch one inbound frame."""
        self.retry_count = 5
        self.last_receive_time = get_current_timestamp()

        # Binary frames are gzip-compressed JSON text.
        if not isinstance(message, str):
            message = gzip.decompress(message).decode('utf-8')

        json_wrapper = parse_json_from_string(message)

        if json_wrapper.contain_key("method") and json_wrapper.get_string("method") == "PING":
            self.__process_ping_on_new_spec(json_wrapper.get_int("E"))
        elif json_wrapper.contain_key("status") and json_wrapper.get_string("status") != "ok":
            error_code = json_wrapper.get_string_or_default("err-code", "Unknown error")
            error_msg = json_wrapper.get_string_or_default("err-msg", "Unknown error")
            self.on_error(error_code + ": " + error_msg)
        elif json_wrapper.contain_key("err-code") and json_wrapper.get_int("err-code") != 0:
            error_code = json_wrapper.get_string_or_default("err-code", "Unknown error")
            error_msg = json_wrapper.get_string_or_default("err-msg", "Unknown error")
            self.on_error(error_code + ": " + error_msg)
        elif json_wrapper.contain_key("result") and json_wrapper.contain_key("id"):
            self.__on_receive_response(json_wrapper)
        else:
            self.__on_receive_payload(json_wrapper)

    def __on_receive_response(self, json_wrapper):
        """Deliver a request/response ack (frame with "result" and "id")."""
        res = None
        try:
            res = json_wrapper.get_int("id")
        except Exception as e:
            self.on_error("Failed to parse server's response: " + str(e))

        try:
            if self.request.update_callback is not None:
                self.request.update_callback(SubscribeMessageType.RESPONSE, res)
        except Exception as e:
            self.on_error("Process error: " + str(e)
                          + " You should capture the exception in your error handler")

    def __on_receive_payload(self, json_wrapper):
        """Parse a data payload and deliver it; auto-close if requested."""
        res = None
        try:
            if self.request.json_parser is not None:
                res = self.request.json_parser(json_wrapper)
        except Exception as e:
            self.on_error("Failed to parse server's response: " + str(e))

        try:
            if self.request.update_callback is not None:
                self.request.update_callback(SubscribeMessageType.PAYLOAD, res)
        except Exception as e:
            self.on_error("Process error: " + str(e)
                          + " You should capture the exception in your error handler")

        if self.request.auto_close:
            self.close()

    def __process_ping_on_new_spec(self, ping_ts):
        """Respond on explicit ping frame
        """
        #print("Responding to explicit PING...")
        respond_pong_msg = "{\"method\":\"PONG\",\"E\":" + str(ping_ts) + "}"
        self.send(respond_pong_msg)
        # print(respond_pong_msg)
        return

    def __process_ping_on_trading_line(self, ping_ts):
        # Pong format for the trading line.
        self.send("{\"op\":\"pong\",\"ts\":" + str(ping_ts) + "}")
        return

    def __process_ping_on_market_line(self, ping_ts):
        # Pong format for the market-data line.
        self.send("{\"pong\":" + str(ping_ts) + "}")
        return

    def close_on_error(self):
        """Tear down the connection after an unrecoverable error."""
        if self.ws is not None:
            self.ws.close()
            self.state = ConnectionState.CLOSED_ON_ERROR
            self.logger.error("[Sub][" + str(self.id) + "] Connection is closing due to error")
        self.on_close()
test_eap_proto.py
# EAP protocol tests # Copyright (c) 2014-2015, Jouni Malinen <j@w1.fi> # # This software may be distributed under the terms of the BSD license. # See README for more details. import binascii import hashlib import hmac import logging logger = logging.getLogger() import os import select import struct import threading import time import hostapd from utils import HwsimSkip, alloc_fail, fail_test, wait_fail_trigger from test_ap_eap import check_eap_capa, check_hlr_auc_gw_support, int_eap_server_params from test_erp import check_erp_capa try: import OpenSSL openssl_imported = True except ImportError: openssl_imported = False EAP_CODE_REQUEST = 1 EAP_CODE_RESPONSE = 2 EAP_CODE_SUCCESS = 3 EAP_CODE_FAILURE = 4 EAP_CODE_INITIATE = 5 EAP_CODE_FINISH = 6 EAP_TYPE_IDENTITY = 1 EAP_TYPE_NOTIFICATION = 2 EAP_TYPE_NAK = 3 EAP_TYPE_MD5 = 4 EAP_TYPE_OTP = 5 EAP_TYPE_GTC = 6 EAP_TYPE_TLS = 13 EAP_TYPE_LEAP = 17 EAP_TYPE_SIM = 18 EAP_TYPE_TTLS = 21 EAP_TYPE_AKA = 23 EAP_TYPE_PEAP = 25 EAP_TYPE_MSCHAPV2 = 26 EAP_TYPE_TLV = 33 EAP_TYPE_TNC = 38 EAP_TYPE_FAST = 43 EAP_TYPE_PAX = 46 EAP_TYPE_PSK = 47 EAP_TYPE_SAKE = 48 EAP_TYPE_IKEV2 = 49 EAP_TYPE_AKA_PRIME = 50 EAP_TYPE_GPSK = 51 EAP_TYPE_PWD = 52 EAP_TYPE_EKE = 53 EAP_TYPE_EXPANDED = 254 # Type field in EAP-Initiate and EAP-Finish messages EAP_ERP_TYPE_REAUTH_START = 1 EAP_ERP_TYPE_REAUTH = 2 EAP_ERP_TLV_KEYNAME_NAI = 1 EAP_ERP_TV_RRK_LIFETIME = 2 EAP_ERP_TV_RMSK_LIFETIME = 3 EAP_ERP_TLV_DOMAIN_NAME = 4 EAP_ERP_TLV_CRYPTOSUITES = 5 EAP_ERP_TLV_AUTHORIZATION_INDICATION = 6 EAP_ERP_TLV_CALLED_STATION_ID = 128 EAP_ERP_TLV_CALLING_STATION_ID = 129 EAP_ERP_TLV_NAS_IDENTIFIER = 130 EAP_ERP_TLV_NAS_IP_ADDRESS = 131 EAP_ERP_TLV_NAS_IPV6_ADDRESS = 132 def run_pyrad_server(srv, t_stop, eap_handler): srv.RunWithStop(t_stop, eap_handler) def start_radius_server(eap_handler): try: import pyrad.server import pyrad.packet import pyrad.dictionary except ImportError: raise HwsimSkip("No pyrad modules available") class TestServer(pyrad.server.Server): 
def _HandleAuthPacket(self, pkt): pyrad.server.Server._HandleAuthPacket(self, pkt) eap = b'' for p in pkt[79]: eap += p eap_req = self.eap_handler(self.ctx, eap) reply = self.CreateReplyPacket(pkt) if eap_req: while True: if len(eap_req) > 253: reply.AddAttribute("EAP-Message", eap_req[0:253]) eap_req = eap_req[253:] else: reply.AddAttribute("EAP-Message", eap_req) break else: logger.info("No EAP request available") reply.code = pyrad.packet.AccessChallenge hmac_obj = hmac.new(reply.secret) hmac_obj.update(struct.pack("B", reply.code)) hmac_obj.update(struct.pack("B", reply.id)) # reply attributes reply.AddAttribute("Message-Authenticator", 16*b'\x00') attrs = reply._PktEncodeAttributes() # Length flen = 4 + 16 + len(attrs) hmac_obj.update(struct.pack(">H", flen)) hmac_obj.update(pkt.authenticator) hmac_obj.update(attrs) del reply[80] reply.AddAttribute("Message-Authenticator", hmac_obj.digest()) self.SendReplyPacket(pkt.fd, reply) def RunWithStop(self, t_stop, eap_handler): self._poll = select.poll() self._fdmap = {} self._PrepareSockets() self.t_stop = t_stop self.eap_handler = eap_handler self.ctx = {} while not t_stop.is_set(): for (fd, event) in self._poll.poll(200): if event == select.POLLIN: try: fdo = self._fdmap[fd] self._ProcessInput(fdo) except pyrad.server.ServerPacketError as err: logger.info("pyrad server dropping packet: " + str(err)) except pyrad.packet.PacketError as err: logger.info("pyrad server received invalid packet: " + str(err)) else: logger.error("Unexpected event in pyrad server main loop") srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"), authport=18138, acctport=18139) srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1", b"radius", "localhost") srv.BindToAddress("") t_stop = threading.Event() t = threading.Thread(target=run_pyrad_server, args=(srv, t_stop, eap_handler)) t.start() return {'srv': srv, 'stop': t_stop, 'thread': t} def stop_radius_server(srv): srv['stop'].set() srv['thread'].join() def 
start_ap(ap): params = hostapd.wpa2_eap_params(ssid="eap-test") params['auth_server_port'] = "18138" hapd = hostapd.add_ap(ap, params) return hapd def test_eap_proto(dev, apdev): """EAP protocol tests""" check_eap_capa(dev[0], "MD5") def eap_handler(ctx, req): logger.info("eap_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: MD5 challenge") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_MD5, 1, 0xaa, ord('n')) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Success - id off by 2") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] + 1, 4) idx += 1 if ctx['num'] == idx: logger.info("Test: MD5 challenge") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_MD5, 1, 0xaa, ord('n')) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Success - id off by 3") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] + 2, 4) idx += 1 if ctx['num'] == idx: logger.info("Test: MD5 challenge") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_MD5, 1, 0xaa, ord('n')) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Notification/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_NOTIFICATION, ord('A')) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Notification/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_NOTIFICATION, ord('B')) idx += 1 if ctx['num'] == idx: logger.info("Test: MD5 challenge") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_MD5, 1, 0xaa, ord('n')) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Success") return 
struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Notification/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_NOTIFICATION, ord('C')) idx += 1 if ctx['num'] == idx: logger.info("Test: MD5 challenge") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_MD5, 1, 0xaa, ord('n')) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Notification/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_NOTIFICATION, ord('D')) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Notification/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_NOTIFICATION, ord('E')) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Notification/Request (same id)") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'] - 1, 4 + 1 + 1, EAP_TYPE_NOTIFICATION, ord('F')) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected EAP-Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 2, 4) return None srv = start_radius_server(eap_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15) if ev is None: raise Exception("Timeout on EAP success") dev[0].request("REMOVE_NETWORK all") dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise 
Exception("Timeout on EAP start") ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=1) if ev is not None: raise Exception("Unexpected EAP success") dev[0].request("REMOVE_NETWORK all") dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10) if ev is None: raise Exception("Timeout on EAP notification") if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION A": raise Exception("Unexpected notification contents: " + ev) ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15) if ev is None: raise Exception("Timeout on EAP success") dev[0].request("REMOVE_NETWORK all") dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10) if ev is None: raise Exception("Timeout on EAP notification") if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION B": raise Exception("Unexpected notification contents: " + ev) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15) if ev is None: raise Exception("Timeout on EAP success") dev[0].request("REMOVE_NETWORK all") dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10) if ev is None: raise Exception("Timeout on EAP notification") if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION C": raise Exception("Unexpected notification contents: " + ev) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") ev = 
dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10) if ev is None: raise Exception("Timeout on EAP notification") if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION D": raise Exception("Unexpected notification contents: " + ev) ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15) if ev is None: raise Exception("Timeout on EAP success") dev[0].request("REMOVE_NETWORK all") dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10) if ev is None: raise Exception("Timeout on EAP notification") if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION E": raise Exception("Unexpected notification contents: " + ev) ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10) if ev is None: raise Exception("Timeout on EAP notification") if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION F": raise Exception("Unexpected notification contents: " + ev) ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=15) if ev is None: raise Exception("Timeout on EAP failure") dev[0].request("REMOVE_NETWORK all") finally: stop_radius_server(srv) def test_eap_proto_notification_errors(dev, apdev): """EAP Notification errors""" def eap_handler(ctx, req): logger.info("eap_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: MD5 challenge") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_MD5, 1, 0xaa, ord('n')) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Notification/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_NOTIFICATION, ord('A')) idx += 1 if ctx['num'] == idx: logger.info("Test: MD5 challenge") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_MD5, 1, 
0xaa, ord('n')) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Notification/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_NOTIFICATION, ord('A')) return None srv = start_radius_server(eap_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) with alloc_fail(dev[0], 1, "eap_sm_processNotify"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with alloc_fail(dev[0], 1, "eap_msg_alloc;sm_EAP_NOTIFICATION_Enter"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() finally: stop_radius_server(srv) EAP_SAKE_VERSION = 2 EAP_SAKE_SUBTYPE_CHALLENGE = 1 EAP_SAKE_SUBTYPE_CONFIRM = 2 EAP_SAKE_SUBTYPE_AUTH_REJECT = 3 EAP_SAKE_SUBTYPE_IDENTITY = 4 EAP_SAKE_AT_RAND_S = 1 EAP_SAKE_AT_RAND_P = 2 EAP_SAKE_AT_MIC_S = 3 EAP_SAKE_AT_MIC_P = 4 EAP_SAKE_AT_SERVERID = 5 EAP_SAKE_AT_PEERID = 6 EAP_SAKE_AT_SPI_S = 7 EAP_SAKE_AT_SPI_P = 8 EAP_SAKE_AT_ANY_ID_REQ = 9 EAP_SAKE_AT_PERM_ID_REQ = 10 EAP_SAKE_AT_ENCR_DATA = 128 EAP_SAKE_AT_IV = 129 EAP_SAKE_AT_PADDING = 130 EAP_SAKE_AT_NEXT_TMPID = 131 EAP_SAKE_AT_MSK_LIFE = 132 def test_eap_proto_sake(dev, apdev): """EAP-SAKE protocol tests""" global eap_proto_sake_test_done eap_proto_sake_test_done = False def sake_challenge(ctx): logger.info("Test: Challenge subtype") return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 18, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE, EAP_SAKE_AT_RAND_S, 18, 0, 0, 0, 0) def sake_handler(ctx, req): logger.info("sake_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] 
+= 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: Missing payload") return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1, EAP_TYPE_SAKE) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype without any attributes") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype") return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY, EAP_SAKE_AT_ANY_ID_REQ, 4, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype (different session id)") return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 1, EAP_SAKE_SUBTYPE_IDENTITY, EAP_SAKE_AT_PERM_ID_REQ, 4, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype with too short attribute") return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 2, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY, EAP_SAKE_AT_ANY_ID_REQ, 2) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype with truncated attribute") return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 2, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY, EAP_SAKE_AT_ANY_ID_REQ, 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype with too short attribute header") payload = struct.pack("B", EAP_SAKE_AT_ANY_ID_REQ) return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + len(payload), EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype with AT_IV but not AT_ENCR_DATA") payload = struct.pack("BB", EAP_SAKE_AT_IV, 2) return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 
4 + 1 + 3 + len(payload), EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype with skippable and non-skippable unknown attribute") payload = struct.pack("BBBB", 255, 2, 127, 2) return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + len(payload), EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype: AT_RAND_P with invalid payload length") payload = struct.pack("BB", EAP_SAKE_AT_RAND_P, 2) return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + len(payload), EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype: AT_MIC_P with invalid payload length") payload = struct.pack("BB", EAP_SAKE_AT_MIC_P, 2) return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + len(payload), EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype: AT_PERM_ID_REQ with invalid payload length") payload = struct.pack("BBBBBBBBBBBBBB", EAP_SAKE_AT_SPI_S, 2, EAP_SAKE_AT_SPI_P, 2, EAP_SAKE_AT_ENCR_DATA, 2, EAP_SAKE_AT_NEXT_TMPID, 2, EAP_SAKE_AT_PERM_ID_REQ, 4, 0, 0, EAP_SAKE_AT_PERM_ID_REQ, 2) return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + len(payload), EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype: AT_PADDING") payload = struct.pack("BBBBBB", EAP_SAKE_AT_PADDING, 3, 0, EAP_SAKE_AT_PADDING, 3, 1) return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + len(payload), EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype: AT_MSK_LIFE") payload = struct.pack(">BBLBBH", EAP_SAKE_AT_MSK_LIFE, 6, 0, 
EAP_SAKE_AT_MSK_LIFE, 4, 0) return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + len(payload), EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype with invalid attribute length") payload = struct.pack("BB", EAP_SAKE_AT_ANY_ID_REQ, 0) return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + len(payload), EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unknown subtype") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, 123) idx += 1 if ctx['num'] == idx: logger.info("Test: Challenge subtype without any attributes") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE) idx += 1 if ctx['num'] == idx: logger.info("Test: Challenge subtype with too short AT_RAND_S") return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 2, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE, EAP_SAKE_AT_RAND_S, 2) idx += 1 if ctx['num'] == idx: return sake_challenge(ctx) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Identity subtype") return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY, EAP_SAKE_AT_ANY_ID_REQ, 4, 0) idx += 1 if ctx['num'] == idx: return sake_challenge(ctx) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Challenge subtype") return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 18, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE, EAP_SAKE_AT_RAND_S, 18, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: return sake_challenge(ctx) idx += 1 if ctx['num'] == idx: logger.info("Test: Confirm subtype without any attributes") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, 
ctx['id'], 4 + 1 + 3, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM) idx += 1 if ctx['num'] == idx: return sake_challenge(ctx) idx += 1 if ctx['num'] == idx: logger.info("Test: Confirm subtype with too short AT_MIC_S") return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 2, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM, EAP_SAKE_AT_MIC_S, 2) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Confirm subtype") return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 18, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM, EAP_SAKE_AT_MIC_S, 18, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: return sake_challenge(ctx) idx += 1 if ctx['num'] == idx: logger.info("Test: Confirm subtype with incorrect AT_MIC_S") return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 18, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM, EAP_SAKE_AT_MIC_S, 18, 0, 0, 0, 0) global eap_proto_sake_test_done if eap_proto_sake_test_done: return sake_challenge(ctx) logger.info("No more test responses available - test case completed") eap_proto_sake_test_done = True return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) srv = start_radius_server(sake_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) while not eap_proto_sake_test_done: dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SAKE", identity="sake user", password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") time.sleep(0.1) dev[0].request("REMOVE_NETWORK all") logger.info("Too short password") dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SAKE", identity="sake user", password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcd", wait_connect=False) ev 
= dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") time.sleep(0.1) finally: stop_radius_server(srv) def test_eap_proto_sake_errors(dev, apdev): """EAP-SAKE local error cases""" check_eap_capa(dev[0], "SAKE") params = hostapd.wpa2_eap_params(ssid="eap-test") hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(1, 3): with alloc_fail(dev[0], i, "eap_sake_init"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SAKE", identity="sake user", password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", wait_connect=False) ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].dump_monitor() tests = [(1, "eap_msg_alloc;eap_sake_build_msg;eap_sake_process_challenge"), (1, "=eap_sake_process_challenge"), (1, "eap_sake_compute_mic;eap_sake_process_challenge"), (1, "eap_sake_build_msg;eap_sake_process_confirm"), (1, "eap_sake_compute_mic;eap_sake_process_confirm"), (2, "eap_sake_compute_mic;=eap_sake_process_confirm"), (1, "eap_sake_getKey"), (1, "eap_sake_get_emsk"), (1, "eap_sake_get_session_id")] for count, func in tests: with alloc_fail(dev[0], count, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SAKE", identity="sake user@domain", password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", erp="1", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].dump_monitor() tests = [(1, "os_get_random;eap_sake_process_challenge"), (1, "eap_sake_derive_keys;eap_sake_process_challenge")] for count, func in tests: with 
fail_test(dev[0], count, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SAKE", identity="sake user", password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].dump_monitor() def test_eap_proto_sake_errors2(dev, apdev): """EAP-SAKE protocol tests (2)""" def sake_handler(ctx, req): logger.info("sake_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] += 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: Identity subtype") return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY, EAP_SAKE_AT_ANY_ID_REQ, 4, 0) srv = start_radius_server(sake_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_sake_build_msg;eap_sake_process_identity"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SAKE", identity="sake user", password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() finally: stop_radius_server(srv) def run_eap_sake_connect(dev): dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="SAKE", identity="sake user", password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", wait_connect=False) ev = dev.wait_event(["CTRL-EVENT-EAP-SUCCESS", "CTRL-EVENT-EAP-FAILURE", "CTRL-EVENT-DISCONNECTED"], 
timeout=1) dev.request("REMOVE_NETWORK all") if not ev or "CTRL-EVENT-DISCONNECTED" not in ev: dev.wait_disconnected() dev.dump_monitor() def test_eap_proto_sake_errors_server(dev, apdev): """EAP-SAKE local error cases on server""" check_eap_capa(dev[0], "SAKE") params = int_eap_server_params() params['erp_domain'] = 'example.com' params['eap_server_erp'] = '1' hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) tests = [(1, "eap_sake_init"), (1, "eap_sake_build_msg;eap_sake_build_challenge"), (1, "eap_sake_build_msg;eap_sake_build_confirm"), (1, "eap_sake_compute_mic;eap_sake_build_confirm"), (1, "eap_sake_process_challenge"), (1, "eap_sake_getKey"), (1, "eap_sake_get_emsk"), (1, "eap_sake_get_session_id")] for count, func in tests: with alloc_fail(hapd, count, func): run_eap_sake_connect(dev[0]) tests = [(1, "eap_sake_init"), (1, "eap_sake_build_challenge"), (1, "eap_sake_build_confirm"), (1, "eap_sake_derive_keys;eap_sake_process_challenge"), (1, "eap_sake_compute_mic;eap_sake_process_challenge"), (1, "eap_sake_compute_mic;eap_sake_process_confirm"), (1, "eap_sake_compute_mic;eap_sake_build_confirm"), (1, "eap_sake_process_confirm")] for count, func in tests: with fail_test(hapd, count, func): run_eap_sake_connect(dev[0]) def start_sake_assoc(dev, hapd): dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="SAKE", identity="sake user", password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", wait_connect=False) proxy_msg(hapd, dev) # EAP-Identity/Request proxy_msg(dev, hapd) # EAP-Identity/Response proxy_msg(hapd, dev) # SAKE/Challenge/Request def stop_sake_assoc(dev, hapd): dev.request("REMOVE_NETWORK all") dev.wait_disconnected() dev.dump_monitor() hapd.dump_monitor() def test_eap_proto_sake_server(dev, apdev): """EAP-SAKE protocol testing for the server""" check_eap_capa(dev[0], "SAKE") params = int_eap_server_params() params['erp_domain'] = 'example.com' params['eap_server_erp'] = 
'1' hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) hapd.request("SET ext_eapol_frame_io 1") dev[0].request("SET ext_eapol_frame_io 1") # Successful exchange to verify proxying mechanism start_sake_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # SAKE/Challenge/Response proxy_msg(hapd, dev[0]) # SAKE/Confirm/Request proxy_msg(dev[0], hapd) # SAKE/Confirm/Response proxy_msg(hapd, dev[0]) # EAP-Success proxy_msg(hapd, dev[0]) # EAPOL-Key msg 1/4 proxy_msg(dev[0], hapd) # EAPOL-Key msg 2/4 proxy_msg(hapd, dev[0]) # EAPOL-Key msg 3/4 proxy_msg(dev[0], hapd) # EAPOL-Key msg 4/4 dev[0].wait_connected() stop_sake_assoc(dev[0], hapd) start_sake_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short EAP-SAKE header # --> EAP-SAKE: Invalid frame msg = resp[0:4] + "0007" + resp[8:12] + "0007" + "300200" tx_msg(dev[0], hapd, msg) # Unknown version # --> EAP-SAKE: Unknown version 1 msg = resp[0:4] + "0008" + resp[8:12] + "0008" + "30010000" tx_msg(dev[0], hapd, msg) # Unknown session # --> EAP-SAKE: Session ID mismatch sess, = struct.unpack('B', binascii.unhexlify(resp[20:22])) sess = binascii.hexlify(struct.pack('B', sess + 1)).decode() msg = resp[0:4] + "0008" + resp[8:12] + "0008" + "3002" + sess + "00" tx_msg(dev[0], hapd, msg) # Unknown subtype # --> EAP-SAKE: Unexpected subtype=5 in state=1 msg = resp[0:22] + "05" + resp[24:] tx_msg(dev[0], hapd, msg) # Empty challenge # --> EAP-SAKE: Response/Challenge did not include AT_RAND_P or AT_MIC_P msg = resp[0:4] + "0008" + resp[8:12] + "0008" + resp[16:24] tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_sake_assoc(dev[0], hapd) start_sake_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Invalid attribute in challenge # --> EAP-SAKE: Too short attribute msg = resp[0:4] + "0009" + resp[8:12] + "0009" + resp[16:26] tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_sake_assoc(dev[0], hapd) start_sake_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # SAKE/Challenge/Response proxy_msg(hapd, dev[0]) # 
SAKE/Confirm/Request resp = rx_msg(dev[0]) # Empty confirm # --> EAP-SAKE: Response/Confirm did not include AT_MIC_P msg = resp[0:4] + "0008" + resp[8:12] + "0008" + resp[16:26] tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_sake_assoc(dev[0], hapd) start_sake_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # SAKE/Challenge/Response proxy_msg(hapd, dev[0]) # SAKE/Confirm/Request resp = rx_msg(dev[0]) # Invalid attribute in confirm # --> EAP-SAKE: Too short attribute msg = resp[0:4] + "0009" + resp[8:12] + "0009" + resp[16:26] tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_sake_assoc(dev[0], hapd) start_sake_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # SAKE/Challenge/Response proxy_msg(hapd, dev[0]) # SAKE/Confirm/Request resp = rx_msg(dev[0]) # Corrupted AT_MIC_P value # --> EAP-SAKE: Incorrect AT_MIC_P msg = resp[0:30] + "000000000000" + resp[42:] tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_sake_assoc(dev[0], hapd) def test_eap_proto_leap(dev, apdev): """EAP-LEAP protocol tests""" check_eap_capa(dev[0], "LEAP") def leap_handler(ctx, req): logger.info("leap_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 if ctx['num'] == 1: logger.info("Test: Missing payload") return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1, EAP_TYPE_LEAP) if ctx['num'] == 2: logger.info("Test: Unexpected version") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_LEAP, 0, 0, 0) if ctx['num'] == 3: logger.info("Test: Invalid challenge length") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_LEAP, 1, 0, 0) if ctx['num'] == 4: logger.info("Test: Truncated challenge") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_LEAP, 1, 0, 8) if ctx['num'] == 5: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, 
EAP_TYPE_LEAP, 1, 0, 8, 0, 0) if ctx['num'] == 6: logger.info("Test: Missing payload in Response") return struct.pack(">BBHB", EAP_CODE_RESPONSE, ctx['id'], 4 + 1, EAP_TYPE_LEAP) if ctx['num'] == 7: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) if ctx['num'] == 8: logger.info("Test: Unexpected version in Response") return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3, EAP_TYPE_LEAP, 0, 0, 8) if ctx['num'] == 9: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) if ctx['num'] == 10: logger.info("Test: Invalid challenge length in Response") return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3, EAP_TYPE_LEAP, 1, 0, 0) if ctx['num'] == 11: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) if ctx['num'] == 12: logger.info("Test: Truncated challenge in Response") return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3, EAP_TYPE_LEAP, 1, 0, 24) if ctx['num'] == 13: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) if ctx['num'] == 14: logger.info("Test: Invalid challange value in Response") return struct.pack(">BBHBBBB6L", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_LEAP, 1, 0, 24, 0, 0, 0, 0, 0, 0) if ctx['num'] == 15: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) if ctx['num'] == 16: logger.info("Test: Valid challange value in Response") return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_LEAP, 1, 0, 24, 0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd, 0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04, 0x93, 
0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66) if ctx['num'] == 17: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) if ctx['num'] == 18: logger.info("Test: Success") return struct.pack(">BBHB", EAP_CODE_SUCCESS, ctx['id'], 4 + 1, EAP_TYPE_LEAP) # hostapd will drop the next frame in the sequence if ctx['num'] == 19: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) if ctx['num'] == 20: logger.info("Test: Failure") return struct.pack(">BBHB", EAP_CODE_FAILURE, ctx['id'], 4 + 1, EAP_TYPE_LEAP) return None srv = start_radius_server(leap_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(0, 12): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") time.sleep(0.1) if i == 10: logger.info("Wait for additional roundtrip") time.sleep(1) dev[0].request("REMOVE_NETWORK all") finally: stop_radius_server(srv) def test_eap_proto_leap_errors(dev, apdev): """EAP-LEAP protocol tests (error paths)""" check_eap_capa(dev[0], "LEAP") def leap_handler2(ctx, req): logger.info("leap_handler2 - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, 
ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challange value in Response") return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_LEAP, 1, 0, 24, 0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd, 0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04, 0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challange value in Response") return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_LEAP, 1, 0, 24, 0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd, 0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04, 0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challange value in Response") return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3 
+ 24, EAP_TYPE_LEAP, 1, 0, 24, 0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd, 0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04, 0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challange value in Response") return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_LEAP, 1, 0, 24, 0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd, 0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04, 0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challange value in Response") return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_LEAP, 1, 0, 24, 0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd, 0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04, 0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challange value in Response") return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_LEAP, 1, 0, 24, 0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd, 0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04, 0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challange value in Response") return struct.pack(">BBHBBBB24B", 
EAP_CODE_RESPONSE, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_LEAP, 1, 0, 24, 0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd, 0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04, 0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid challenge") return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_LEAP, 1, 0, 8, 0, 0) return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) srv = start_radius_server(leap_handler2) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) with alloc_fail(dev[0], 1, "eap_leap_init"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_leap_process_request"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with alloc_fail(dev[0], 1, "eap_leap_process_success"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with fail_test(dev[0], 1, "os_get_random;eap_leap_process_success"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with fail_test(dev[0], 1, "eap_leap_process_response"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", 
password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c", wait_connect=False) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with fail_test(dev[0], 1, "nt_password_hash;eap_leap_process_response"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with fail_test(dev[0], 1, "hash_nt_password_hash;eap_leap_process_response"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with alloc_fail(dev[0], 1, "eap_leap_getKey"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with fail_test(dev[0], 1, "eap_leap_getKey"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c", wait_connect=False) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with fail_test(dev[0], 1, "nt_password_hash;eap_leap_getKey"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with fail_test(dev[0], 1, "hash_nt_password_hash;eap_leap_getKey"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_FAIL") 
dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with fail_test(dev[0], 1, "nt_challenge_response;eap_leap_process_request"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="LEAP", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() finally: stop_radius_server(srv) def test_eap_proto_md5(dev, apdev): """EAP-MD5 protocol tests""" check_eap_capa(dev[0], "MD5") def md5_handler(ctx, req): logger.info("md5_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 if ctx['num'] == 1: logger.info("Test: Missing payload") return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1, EAP_TYPE_MD5) if ctx['num'] == 2: logger.info("Test: Zero-length challenge") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_MD5, 0) if ctx['num'] == 3: logger.info("Test: Truncated challenge") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_MD5, 1) if ctx['num'] == 4: logger.info("Test: Shortest possible challenge and name") return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_MD5, 1, 0xaa, ord('n')) return None srv = start_radius_server(md5_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(0, 4): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") time.sleep(0.1) dev[0].request("REMOVE_NETWORK all") finally: stop_radius_server(srv) def test_eap_proto_md5_errors(dev, apdev): """EAP-MD5 local error cases""" check_eap_capa(dev[0], "MD5") params = hostapd.wpa2_eap_params(ssid="eap-test") 
hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) with fail_test(dev[0], 1, "chap_md5"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="phase1-user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_md5_process"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="phase1-user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") time.sleep(0.1) dev[0].request("REMOVE_NETWORK all") def run_eap_md5_connect(dev): dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="phase1-user", password="password", wait_connect=False) ev = dev.wait_event(["CTRL-EVENT-EAP-SUCCESS", "CTRL-EVENT-EAP-FAILURE", "CTRL-EVENT-DISCONNECTED"], timeout=1) dev.request("REMOVE_NETWORK all") if not ev or "CTRL-EVENT-DISCONNECTED" not in ev: dev.wait_disconnected() dev.dump_monitor() def test_eap_proto_md5_errors_server(dev, apdev): """EAP-MD5 local error cases on server""" check_eap_capa(dev[0], "MD5") params = int_eap_server_params() params['erp_domain'] = 'example.com' params['eap_server_erp'] = '1' hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) tests = [(1, "eap_md5_init")] for count, func in tests: with alloc_fail(hapd, count, func): run_eap_md5_connect(dev[0]) tests = [(1, "os_get_random;eap_md5_buildReq"), (1, "chap_md5;eap_md5_process")] for count, func in tests: with fail_test(hapd, count, func): run_eap_md5_connect(dev[0]) def start_md5_assoc(dev, hapd): dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="phase1-user", password="password", 
wait_connect=False) proxy_msg(hapd, dev) # EAP-Identity/Request proxy_msg(dev, hapd) # EAP-Identity/Response proxy_msg(hapd, dev) # MSCHAPV2/Request proxy_msg(dev, hapd) # NAK proxy_msg(hapd, dev) # MD5 Request def stop_md5_assoc(dev, hapd): dev.request("REMOVE_NETWORK all") dev.wait_disconnected() dev.dump_monitor() hapd.dump_monitor() def test_eap_proto_md5_server(dev, apdev): """EAP-MD5 protocol testing for the server""" check_eap_capa(dev[0], "MD5") params = int_eap_server_params() params['erp_domain'] = 'example.com' params['eap_server_erp'] = '1' hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) hapd.request("SET ext_eapol_frame_io 1") dev[0].request("SET ext_eapol_frame_io 1") # Successful exchange to verify proxying mechanism start_md5_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # MD5 Response proxy_msg(hapd, dev[0]) # EAP-Success ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=5) if ev is None: raise Exception("No EAP-Success reported") stop_md5_assoc(dev[0], hapd) start_md5_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short EAP-MD5 header (no length field) hapd.note("EAP-MD5: Invalid frame") msg = resp[0:4] + "0005" + resp[8:12] + "0005" + "04" tx_msg(dev[0], hapd, msg) # Too short EAP-MD5 header (no length field) hapd.note("EAP-MD5: Invalid response (response_len=0 payload_len=1") msg = resp[0:4] + "0006" + resp[8:12] + "0006" + "0400" tx_msg(dev[0], hapd, msg) stop_md5_assoc(dev[0], hapd) def test_eap_proto_otp(dev, apdev): """EAP-OTP protocol tests""" def otp_handler(ctx, req): logger.info("otp_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 if ctx['num'] == 1: logger.info("Test: Empty payload") return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1, EAP_TYPE_OTP) if ctx['num'] == 2: logger.info("Test: Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, 
ctx['id'], 4) if ctx['num'] == 3: logger.info("Test: Challenge included") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_OTP, ord('A')) if ctx['num'] == 4: logger.info("Test: Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4) return None srv = start_radius_server(otp_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(0, 1): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="OTP", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") time.sleep(0.1) dev[0].request("REMOVE_NETWORK all") dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="OTP", identity="user", wait_connect=False) ev = dev[0].wait_event(["CTRL-REQ-OTP"]) if ev is None: raise Exception("Request for password timed out") id = ev.split(':')[0].split('-')[-1] dev[0].request("CTRL-RSP-OTP-" + id + ":password") ev = dev[0].wait_event("CTRL-EVENT-EAP-SUCCESS") if ev is None: raise Exception("Success not reported") finally: stop_radius_server(srv) def test_eap_proto_otp_errors(dev, apdev): """EAP-OTP local error cases""" def otp_handler2(ctx, req): logger.info("otp_handler2 - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: Challenge included") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_OTP, ord('A')) return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) srv = start_radius_server(otp_handler2) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_otp_process"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="OTP", identity="user", 
password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() finally: stop_radius_server(srv) EAP_GPSK_OPCODE_GPSK_1 = 1 EAP_GPSK_OPCODE_GPSK_2 = 2 EAP_GPSK_OPCODE_GPSK_3 = 3 EAP_GPSK_OPCODE_GPSK_4 = 4 EAP_GPSK_OPCODE_FAIL = 5 EAP_GPSK_OPCODE_PROTECTED_FAIL = 6 def test_eap_proto_gpsk(dev, apdev): """EAP-GPSK protocol tests""" def gpsk_handler(ctx, req): logger.info("gpsk_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: Missing payload") return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1, EAP_TYPE_GPSK) idx += 1 if ctx['num'] == idx: logger.info("Test: Unknown opcode") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_GPSK, 255) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected GPSK-3") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Too short GPSK-1") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Truncated ID_Server") return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Missing RAND_Server") return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Missing CSuite_List") return struct.pack(">BBHBBH8L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Truncated 
CSuite_List") return struct.pack(">BBHBBH8LH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Empty CSuite_List") return struct.pack(">BBHBBH8LH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Invalid CSuite_List") return struct.pack(">BBHBBH8LHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 1, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 No supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected GPSK-1") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite but too short key") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short GPSK-3") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 
4 + 1 + 1, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Mismatch in RAND_Peer") return struct.pack(">BBHBB8L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3, 0, 0, 0, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Missing RAND_Server") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Mismatch in RAND_Server") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] msg += struct.pack(">8L", 1, 1, 1, 1, 1, 1, 1, 1) return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Missing ID_Server") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] msg += struct.pack(">8L", 0, 0, 0, 0, 0, 0, 0, 
0) return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Truncated ID_Server") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32 + 2, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] msg += struct.pack(">8LH", 0, 0, 0, 0, 0, 0, 0, 0, 1) return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Mismatch in ID_Server") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32 + 3, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] msg += struct.pack(">8LHB", 0, 0, 0, 0, 0, 0, 0, 0, 1, ord('B')) return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBHB8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 3 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 1, ord('A'), 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Mismatch in ID_Server (same length)") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32 + 3, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[15:47] msg += struct.pack(">8LHB", 0, 0, 0, 0, 0, 0, 0, 0, 1, ord('B')) return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Missing CSuite_Sel") msg = struct.pack(">BBHBB", 
EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32 + 2, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] msg += struct.pack(">8LH", 0, 0, 0, 0, 0, 0, 0, 0, 0) return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Mismatch in CSuite_Sel") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] msg += struct.pack(">8LHLH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2) return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Missing len(PD_Payload_Block)") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] msg += struct.pack(">8LHLH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Truncated PD_Payload_Block") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32 + 2 + 6 + 2, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] msg += struct.pack(">8LHLHH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, 
EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Missing MAC") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32 + 2 + 6 + 3, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] msg += struct.pack(">8LHLHHB", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 123) return msg idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-1 Supported CSuite") return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 32 + 2 + 6, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: GPSK-3 Incorrect MAC") msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 32 + 32 + 2 + 6 + 3 + 16, EAP_TYPE_GPSK, EAP_GPSK_OPCODE_GPSK_3) msg += req[14:46] msg += struct.pack(">8LHLHHB4L", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 123, 0, 0, 0, 0) return msg return None srv = start_radius_server(gpsk_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(0, 27): if i == 12: pw = "short" else: pw = "abcdefghijklmnop0123456789abcdef" dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="GPSK", identity="user", password=pw, wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") time.sleep(0.05) dev[0].request("REMOVE_NETWORK all") finally: stop_radius_server(srv) def run_eap_gpsk_connect(dev): dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="GPSK", identity="gpsk user", password="abcdefghijklmnop0123456789abcdef", wait_connect=False) ev = dev.wait_event(["CTRL-EVENT-EAP-SUCCESS", "CTRL-EVENT-EAP-FAILURE", "CTRL-EVENT-DISCONNECTED"], timeout=1) dev.request("REMOVE_NETWORK all") if not ev or "CTRL-EVENT-DISCONNECTED" not in ev: dev.wait_disconnected() dev.dump_monitor() def test_eap_proto_gpsk_errors_server(dev, apdev): 
"""EAP-GPSK local error cases on server""" check_eap_capa(dev[0], "GPSK") params = int_eap_server_params() params['erp_domain'] = 'example.com' params['eap_server_erp'] = '1' hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) tests = [(1, "eap_gpsk_init"), (1, "eap_msg_alloc;eap_gpsk_build_gpsk_1"), (1, "eap_msg_alloc;eap_gpsk_build_gpsk_3"), (1, "eap_gpsk_process_gpsk_2"), (1, "eap_gpsk_derive_keys;eap_gpsk_process_gpsk_2"), (1, "eap_gpsk_derive_session_id;eap_gpsk_process_gpsk_2"), (1, "eap_gpsk_getKey"), (1, "eap_gpsk_get_emsk"), (1, "eap_gpsk_get_session_id")] for count, func in tests: with alloc_fail(hapd, count, func): run_eap_gpsk_connect(dev[0]) tests = [(1, "os_get_random;eap_gpsk_build_gpsk_1"), (1, "eap_gpsk_compute_mic;eap_gpsk_build_gpsk_3"), (1, "eap_gpsk_derive_keys;eap_gpsk_process_gpsk_2"), (1, "eap_gpsk_derive_session_id;eap_gpsk_process_gpsk_2"), (1, "eap_gpsk_compute_mic;eap_gpsk_process_gpsk_2"), (1, "eap_gpsk_compute_mic;eap_gpsk_process_gpsk_4")] for count, func in tests: with fail_test(hapd, count, func): run_eap_gpsk_connect(dev[0]) def start_gpsk_assoc(dev, hapd): dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="GPSK", identity="gpsk user", password="abcdefghijklmnop0123456789abcdef", wait_connect=False) proxy_msg(hapd, dev) # EAP-Identity/Request proxy_msg(dev, hapd) # EAP-Identity/Response proxy_msg(hapd, dev) # GPSK-1 def stop_gpsk_assoc(dev, hapd): dev.request("REMOVE_NETWORK all") dev.wait_disconnected() dev.dump_monitor() hapd.dump_monitor() def test_eap_proto_gpsk_server(dev, apdev): """EAP-GPSK protocol testing for the server""" check_eap_capa(dev[0], "GPSK") params = int_eap_server_params() params['erp_domain'] = 'example.com' params['eap_server_erp'] = '1' hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) hapd.request("SET ext_eapol_frame_io 1") dev[0].request("SET ext_eapol_frame_io 1") # Successful exchange to verify proxying 
mechanism start_gpsk_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # GPSK-2 proxy_msg(hapd, dev[0]) # GPSK-3 proxy_msg(dev[0], hapd) # GPSK-4 proxy_msg(hapd, dev[0]) # EAP-Success proxy_msg(hapd, dev[0]) # EAPOL-Key msg 1/4 proxy_msg(dev[0], hapd) # EAPOL-Key msg 2/4 proxy_msg(hapd, dev[0]) # EAPOL-Key msg 3/4 proxy_msg(dev[0], hapd) # EAPOL-Key msg 4/4 dev[0].wait_connected() stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short EAP-GPSK header (no OP-Code) # --> EAP-GPSK: Invalid frame msg = resp[0:4] + "0005" + resp[8:12] + "0005" + "33" tx_msg(dev[0], hapd, msg) # Unknown OP-Code # --> EAP-GPSK: Unexpected opcode=7 in state=0 msg = resp[0:4] + "0006" + resp[8:12] + "0006" + "3307" tx_msg(dev[0], hapd, msg) # Too short GPSK-2 # --> EAP-GPSK: Too short message for ID_Peer length msg = resp[0:4] + "0006" + resp[8:12] + "0006" + "3302" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Too short message for ID_Peer msg = resp[0:4] + "0008" + resp[8:12] + "0008" + "33020001" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Too short message for ID_Server length msg = resp[0:4] + "0008" + resp[8:12] + "0008" + "33020000" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Too short message for ID_Server msg = resp[0:4] + "000a" + resp[8:12] + "000a" + "330200000001" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # ID_Server mismatch # --> EAP-GPSK: ID_Server in GPSK-1 and GPSK-2 did not match msg = resp[0:4] + "000a" + resp[8:12] + "000a" + "330200000000" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) 
start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Too short message for RAND_Peer msg = resp[0:4] + "0011" + resp[8:12] + "0011" + "330200000007" + binascii.hexlify(b"hostapd").decode() tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Too short message for RAND_Server msg = resp[0:4] + "0031" + resp[8:12] + "0031" + "330200000007" + binascii.hexlify(b"hostapd").decode() + 32*"00" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # RAND_Server mismatch # --> EAP-GPSK: RAND_Server in GPSK-1 and GPSK-2 did not match msg = resp[0:4] + "0051" + resp[8:12] + "0051" + "330200000007" + binascii.hexlify(b"hostapd").decode() + 32*"00" + 32*"00" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Too short message for CSuite_List length msg = resp[0:4] + "005a" + resp[8:12] + "005a" + resp[16:188] tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Too short message for CSuite_List msg = resp[0:4] + "005c" + resp[8:12] + "005c" + resp[16:192] tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: CSuite_List in GPSK-1 and GPSK-2 did not match msg = resp[0:4] + "005c" + resp[8:12] + "005c" + resp[16:188] + "0000" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Too short message for CSuite_Sel msg = resp[0:4] + "0068" + resp[8:12] + "0068" + resp[16:216] tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], 
hapd) resp = rx_msg(dev[0]) # Unsupported CSuite_Sel # --> EAP-GPSK: Peer selected unsupported ciphersuite 0:255 msg = resp[0:4] + "006e" + resp[8:12] + "006e" + resp[16:226] + "ff" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Too short message for PD_Payload_1 length msg = resp[0:4] + "006e" + resp[8:12] + "006e" + resp[16:228] tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Too short message for PD_Payload_1 msg = resp[0:4] + "0070" + resp[8:12] + "0070" + resp[16:230] + "ff" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short GPSK-2 # --> EAP-GPSK: Message too short for MIC (left=0 miclen=16) msg = resp[0:4] + "0070" + resp[8:12] + "0070" + resp[16:232] tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Extra data in the end of GPSK-2 # --> EAP-GPSK: Ignored 1 bytes of extra data in the end of GPSK-2 msg = resp[0:4] + "0081" + resp[8:12] + "0081" + resp[16:264] + "00" tx_msg(dev[0], hapd, msg) proxy_msg(hapd, dev[0]) # GPSK-3 resp = rx_msg(dev[0]) # Too short GPSK-4 # --> EAP-GPSK: Too short message for PD_Payload_1 length msg = resp[0:4] + "0006" + resp[8:12] + "0006" + "3304" tx_msg(dev[0], hapd, msg) rx_msg(hapd) # EAP-Failure stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # GPSK-2 proxy_msg(hapd, dev[0]) # GPSK-3 resp = rx_msg(dev[0]) # Too short GPSK-4 # --> EAP-GPSK: Too short message for PD_Payload_1 msg = resp[0:4] + "0008" + resp[8:12] + "0008" + "33040001" tx_msg(dev[0], hapd, msg) rx_msg(hapd) # EAP-Failure stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # GPSK-2 proxy_msg(hapd, dev[0]) # GPSK-3 resp = 
rx_msg(dev[0]) # Too short GPSK-4 # --> EAP-GPSK: Message too short for MIC (left=0 miclen=16) msg = resp[0:4] + "0008" + resp[8:12] + "0008" + "33040000" tx_msg(dev[0], hapd, msg) rx_msg(hapd) # EAP-Failure stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # GPSK-2 proxy_msg(hapd, dev[0]) # GPSK-3 resp = rx_msg(dev[0]) # Incorrect MIC in GPSK-4 # --> EAP-GPSK: Incorrect MIC in GPSK-4 msg = resp[0:4] + "0018" + resp[8:12] + "0018" + "33040000" + 16*"00" tx_msg(dev[0], hapd, msg) rx_msg(hapd) # EAP-Failure stop_gpsk_assoc(dev[0], hapd) start_gpsk_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # GPSK-2 proxy_msg(hapd, dev[0]) # GPSK-3 resp = rx_msg(dev[0]) # Incorrect MIC in GPSK-4 # --> EAP-GPSK: Ignored 1 bytes of extra data in the end of GPSK-4 msg = resp[0:4] + "0019" + resp[8:12] + "0019" + resp[16:] + "00" tx_msg(dev[0], hapd, msg) rx_msg(hapd) # EAP-Success stop_gpsk_assoc(dev[0], hapd) EAP_EKE_ID = 1 EAP_EKE_COMMIT = 2 EAP_EKE_CONFIRM = 3 EAP_EKE_FAILURE = 4 def test_eap_proto_eke(dev, apdev): """EAP-EKE protocol tests""" def eke_handler(ctx, req): logger.info("eke_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: Missing payload") return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1, EAP_TYPE_EKE) idx += 1 if ctx['num'] == idx: logger.info("Test: Unknown exchange") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_EKE, 255) idx += 1 if ctx['num'] == idx: logger.info("Test: No NumProposals in EAP-EKE-ID/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_EKE, EAP_EKE_ID) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: NumProposals=0 in 
EAP-EKE-ID/Request") return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 1, EAP_TYPE_EKE, EAP_EKE_ID, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Truncated Proposals list in EAP-EKE-ID/Request") return struct.pack(">BBHBBBB4B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 4, EAP_TYPE_EKE, EAP_EKE_ID, 2, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unsupported proposals in EAP-EKE-ID/Request") return struct.pack(">BBHBBBB4B4B4B4B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 4 * 4, EAP_TYPE_EKE, EAP_EKE_ID, 4, 0, 0, 0, 0, 0, 3, 0, 0, 0, 3, 1, 0, 0, 3, 1, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Missing IDType/Identity in EAP-EKE-ID/Request") return struct.pack(">BBHBBBB4B4B4B4B4B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 5 * 4, EAP_TYPE_EKE, EAP_EKE_ID, 5, 0, 0, 0, 0, 0, 3, 0, 0, 0, 3, 1, 0, 0, 3, 1, 1, 0, 3, 1, 1, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid EAP-EKE-ID/Request") return struct.pack(">BBHBBBB4BB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 4 + 1, EAP_TYPE_EKE, EAP_EKE_ID, 1, 0, 3, 1, 1, 1, 255) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected EAP-EKE-ID/Request") return struct.pack(">BBHBBBB4BB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 4 + 1, EAP_TYPE_EKE, EAP_EKE_ID, 1, 0, 3, 1, 1, 1, 255) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid 
EAP-EKE-ID/Request") return struct.pack(">BBHBBBB4BB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 4 + 1, EAP_TYPE_EKE, EAP_EKE_ID, 1, 0, 3, 1, 1, 1, 255) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected EAP-EKE-Confirm/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_EKE, EAP_EKE_CONFIRM) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short EAP-EKE-Failure/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_EKE, EAP_EKE_FAILURE) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected EAP-EKE-Commit/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_EKE, EAP_EKE_COMMIT) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid EAP-EKE-ID/Request") return struct.pack(">BBHBBBB4BB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 4 + 1, EAP_TYPE_EKE, EAP_EKE_ID, 1, 0, 3, 1, 1, 1, 255) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short EAP-EKE-Commit/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_EKE, EAP_EKE_COMMIT) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid EAP-EKE-ID/Request") return struct.pack(">BBHBBBB4BB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 4 + 1, EAP_TYPE_EKE, EAP_EKE_ID, 1, 0, 1, 1, 1, 1, 255) idx += 1 if ctx['num'] == idx: logger.info("Test: All zeroes DHComponent_S and empty CBvalue in EAP-EKE-Commit/Request") return struct.pack(">BBHBB4L32L", EAP_CODE_REQUEST, ctx['id'], 4 
+ 1 + 1 + 16 + 128, EAP_TYPE_EKE, EAP_EKE_COMMIT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short EAP-EKE-Confirm/Request") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_EKE, EAP_EKE_CONFIRM) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid EAP-EKE-ID/Request") return struct.pack(">BBHBBBB4BB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2 + 4 + 1, EAP_TYPE_EKE, EAP_EKE_ID, 1, 0, 1, 1, 1, 1, 255) idx += 1 if ctx['num'] == idx: logger.info("Test: All zeroes DHComponent_S and empty CBvalue in EAP-EKE-Commit/Request") return struct.pack(">BBHBB4L32L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 16 + 128, EAP_TYPE_EKE, EAP_EKE_COMMIT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid PNonce_PS and Auth_S values in EAP-EKE-Confirm/Request") return struct.pack(">BBHBB4L8L5L5L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 16 + 2 * 16 + 20 + 20, EAP_TYPE_EKE, EAP_EKE_CONFIRM, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) return None srv = start_radius_server(eke_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(0, 14): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="EKE", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") if i in [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]: ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10) if ev is None: 
raise Exception("Timeout on EAP failure") else: time.sleep(0.05) dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() finally: stop_radius_server(srv) def eap_eke_test_fail(dev, phase1=None, success=False): dev.connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="EKE", identity="eke user@domain", password="hello", phase1=phase1, erp="1", wait_connect=False) ev = dev.wait_event(["CTRL-EVENT-EAP-FAILURE", "CTRL-EVENT-EAP-SUCCESS"], timeout=5) if ev is None: raise Exception("Timeout on EAP failure") if not success and "CTRL-EVENT-EAP-FAILURE" not in ev: raise Exception("EAP did not fail during failure test") dev.request("REMOVE_NETWORK all") dev.wait_disconnected() def test_eap_proto_eke_errors(dev, apdev): """EAP-EKE local error cases""" check_eap_capa(dev[0], "EKE") params = hostapd.wpa2_eap_params(ssid="eap-test") hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(1, 3): with alloc_fail(dev[0], i, "eap_eke_init"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="EKE", identity="eke user", password="hello", wait_connect=False) ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() tests = [(1, "eap_eke_dh_init", None), (1, "eap_eke_prf_hmac_sha1", "dhgroup=3 encr=1 prf=1 mac=1"), (1, "eap_eke_prf_hmac_sha256", "dhgroup=5 encr=1 prf=2 mac=2"), (1, "eap_eke_prf", None), (1, "os_get_random;eap_eke_dhcomp", None), (1, "aes_128_cbc_encrypt;eap_eke_dhcomp", None), (1, "aes_128_cbc_decrypt;eap_eke_shared_secret", None), (1, "eap_eke_prf;eap_eke_shared_secret", None), (1, "eap_eke_prfplus;eap_eke_derive_ke_ki", None), (1, "eap_eke_prfplus;eap_eke_derive_ka", None), (1, "eap_eke_prfplus;eap_eke_derive_msk", None), (1, "os_get_random;eap_eke_prot", None), (1, "aes_128_cbc_decrypt;eap_eke_decrypt_prot", None), (1, 
"eap_eke_derive_key;eap_eke_process_commit", None), (1, "eap_eke_dh_init;eap_eke_process_commit", None), (1, "eap_eke_shared_secret;eap_eke_process_commit", None), (1, "eap_eke_derive_ke_ki;eap_eke_process_commit", None), (1, "eap_eke_dhcomp;eap_eke_process_commit", None), (1, "os_get_random;eap_eke_process_commit", None), (1, "os_get_random;=eap_eke_process_commit", None), (1, "eap_eke_prot;eap_eke_process_commit", None), (1, "eap_eke_decrypt_prot;eap_eke_process_confirm", None), (1, "eap_eke_derive_ka;eap_eke_process_confirm", None), (1, "eap_eke_auth;eap_eke_process_confirm", None), (2, "eap_eke_auth;eap_eke_process_confirm", None), (1, "eap_eke_prot;eap_eke_process_confirm", None), (1, "eap_eke_derive_msk;eap_eke_process_confirm", None)] for count, func, phase1 in tests: with fail_test(dev[0], count, func): eap_eke_test_fail(dev[0], phase1) tests = [(1, "=eap_eke_derive_ke_ki", None), (1, "=eap_eke_derive_ka", None), (1, "=eap_eke_derive_msk", None), (1, "eap_eke_build_msg;eap_eke_process_id", None), (1, "wpabuf_alloc;eap_eke_process_id", None), (1, "=eap_eke_process_id", None), (1, "wpabuf_alloc;=eap_eke_process_id", None), (1, "wpabuf_alloc;eap_eke_process_id", None), (1, "eap_eke_build_msg;eap_eke_process_commit", None), (1, "wpabuf_resize;eap_eke_process_commit", None), (1, "eap_eke_build_msg;eap_eke_process_confirm", None)] for count, func, phase1 in tests: with alloc_fail(dev[0], count, func): eap_eke_test_fail(dev[0], phase1) tests = [(1, "eap_eke_getKey", None), (1, "eap_eke_get_emsk", None), (1, "eap_eke_get_session_id", None)] for count, func, phase1 in tests: with alloc_fail(dev[0], count, func): eap_eke_test_fail(dev[0], phase1, success=True) EAP_PAX_OP_STD_1 = 0x01 EAP_PAX_OP_STD_2 = 0x02 EAP_PAX_OP_STD_3 = 0x03 EAP_PAX_OP_SEC_1 = 0x11 EAP_PAX_OP_SEC_2 = 0x12 EAP_PAX_OP_SEC_3 = 0x13 EAP_PAX_OP_SEC_4 = 0x14 EAP_PAX_OP_SEC_5 = 0x15 EAP_PAX_OP_ACK = 0x21 EAP_PAX_FLAGS_MF = 0x01 EAP_PAX_FLAGS_CE = 0x02 EAP_PAX_FLAGS_AI = 0x04 EAP_PAX_MAC_HMAC_SHA1_128 
= 0x01 EAP_PAX_HMAC_SHA256_128 = 0x02 EAP_PAX_DH_GROUP_NONE = 0x00 EAP_PAX_DH_GROUP_2048_MODP = 0x01 EAP_PAX_DH_GROUP_3072_MODP = 0x02 EAP_PAX_DH_GROUP_NIST_ECC_P_256 = 0x03 EAP_PAX_PUBLIC_KEY_NONE = 0x00 EAP_PAX_PUBLIC_KEY_RSAES_OAEP = 0x01 EAP_PAX_PUBLIC_KEY_RSA_PKCS1_V1_5 = 0x02 EAP_PAX_PUBLIC_KEY_EL_GAMAL_NIST_ECC = 0x03 EAP_PAX_ADE_VENDOR_SPECIFIC = 0x01 EAP_PAX_ADE_CLIENT_CHANNEL_BINDING = 0x02 EAP_PAX_ADE_SERVER_CHANNEL_BINDING = 0x03 def test_eap_proto_pax(dev, apdev): """EAP-PAX protocol tests""" def pax_std_1(ctx): logger.info("Test: STD-1") ctx['id'] = 10 return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 2 + 32 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0x16, 0xc9, 0x08, 0x9d, 0x98, 0xa5, 0x6e, 0x1f, 0xf0, 0xac, 0xcf, 0xc4, 0x66, 0xcd, 0x2d, 0xbf) def pax_handler(ctx, req): logger.info("pax_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: Missing payload") return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1, EAP_TYPE_PAX) idx += 1 if ctx['num'] == idx: logger.info("Test: Minimum length payload") return struct.pack(">BBHB4L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 16, EAP_TYPE_PAX, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Unsupported MAC ID") return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, 255, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Unsupported DH Group ID") return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, 255, EAP_PAX_PUBLIC_KEY_NONE, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: 
logger.info("Test: Unsupported Public Key ID") return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, 255, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: More fragments") return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, EAP_PAX_FLAGS_MF, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid ICV") return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid ICV in short frame") return struct.pack(">BBHBBBBBB3L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 12, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Correct ICV - unsupported op_code") ctx['id'] = 10 return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 16, EAP_TYPE_PAX, 255, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 0x90, 0x78, 0x97, 0x38, 0x29, 0x94, 0x32, 0xd4, 0x81, 0x27, 0xe0, 0xf6, 0x3b, 0x0d, 0xb2, 0xb2) idx += 1 if ctx['num'] == idx: logger.info("Test: Correct ICV - CE flag in STD-1") ctx['id'] = 10 return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, EAP_PAX_FLAGS_CE, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 0x9c, 0x98, 0xb4, 0x0b, 0x94, 0x90, 0xde, 0x88, 0xb7, 0x72, 0x63, 0x44, 0x1d, 0xe3, 0x7c, 0x5c) idx += 1 if ctx['num'] == idx: logger.info("Test: Correct ICV - too short STD-1 payload") ctx['id'] = 10 return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, 
ctx['id'], 4 + 1 + 5 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 0xda, 0xab, 0x2c, 0xe7, 0x84, 0x41, 0xb5, 0x5c, 0xee, 0xcf, 0x62, 0x03, 0xc5, 0x69, 0xcb, 0xf4) idx += 1 if ctx['num'] == idx: logger.info("Test: Correct ICV - incorrect A length in STD-1") ctx['id'] = 10 return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 2 + 32 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xc4, 0xb0, 0x81, 0xe4, 0x6c, 0x8c, 0x20, 0x23, 0x60, 0x46, 0x89, 0xea, 0x94, 0x60, 0xf3, 0x2a) idx += 1 if ctx['num'] == idx: logger.info("Test: Correct ICV - extra data in STD-1") ctx['id'] = 10 return struct.pack(">BBHBBBBBBH8LB16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 2 + 32 + 1 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 32, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0x61, 0x49, 0x65, 0x37, 0x21, 0xe8, 0xd8, 0xbf, 0xf3, 0x02, 0x01, 0xe5, 0x42, 0x51, 0xd3, 0x34) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected STD-1") return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 2 + 32 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0xe5, 0x1d, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba, 0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d) idx += 1 if ctx['num'] == idx: return pax_std_1(ctx) idx += 1 if ctx['num'] == idx: logger.info("Test: MAC ID changed during session") return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 2 + 32 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_HMAC_SHA256_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0xee, 0x00, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba, 0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d) idx += 1 if ctx['num'] == idx: return 
pax_std_1(ctx) idx += 1 if ctx['num'] == idx: logger.info("Test: DH Group ID changed during session") return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 2 + 32 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_2048_MODP, EAP_PAX_PUBLIC_KEY_NONE, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0xee, 0x01, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba, 0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d) idx += 1 if ctx['num'] == idx: return pax_std_1(ctx) idx += 1 if ctx['num'] == idx: logger.info("Test: Public Key ID changed during session") return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 2 + 32 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_RSAES_OAEP, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0xee, 0x02, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba, 0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected STD-3") ctx['id'] = 10 return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 2 + 32 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_3, 0, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0x47, 0xbb, 0xc0, 0xf9, 0xb9, 0x69, 0xf5, 0xcb, 0x3a, 0xe8, 0xe7, 0xd6, 0x80, 0x28, 0xf2, 0x59) idx += 1 if ctx['num'] == idx: return pax_std_1(ctx) idx += 1 if ctx['num'] == idx: # TODO: MAC calculation; for now, this gets dropped due to incorrect # ICV logger.info("Test: STD-3 with CE flag") return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 5 + 2 + 32 + 16, EAP_TYPE_PAX, EAP_PAX_OP_STD_3, EAP_PAX_FLAGS_CE, EAP_PAX_MAC_HMAC_SHA1_128, EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0x8a, 0xc2, 0xf9, 0xf4, 0x8b, 0x75, 0x72, 0xa2, 0x4d, 0xd3, 0x1e, 0x54, 0x77, 0x04, 0x05, 0xe2) idx += 1 if ctx['num'] & 0x1 == idx & 0x1: logger.info("Test: Default request") return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 
                               + 1, EAP_TYPE_PAX)
        else:
            logger.info("Test: Default EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(pax_handler)

    try:
        hapd = start_ap(apdev[0])
        dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

        # Walk through each scripted pax_handler exchange above; each
        # attempt is expected to end in failure, so only wait for the EAP
        # method to start before tearing the network down again.
        for i in range(0, 18):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="user",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            logger.info("Waiting for EAP method to start")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()

        # Local validation error: PAX PSK must be exactly 16 octets
        logger.info("Too short password")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PAX", identity="user",
                       password_hex="0123456789abcdef0123456789abcd",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()

        # Local validation error: no PSK configured at all
        logger.info("No password")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PAX", identity="user",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)

def test_eap_proto_pax_errors(dev, apdev):
    """EAP-PAX local error cases"""
    check_eap_capa(dev[0], "PAX")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

    # Memory allocation failures during EAP-PAX method initialization
    for i in range(1, 3):
        with alloc_fail(dev[0], i, "eap_pax_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="pax.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Allocation failures at specific points within message processing
    tests = ["eap_msg_alloc;eap_pax_alloc_resp;eap_pax_process_std_1",
             "eap_msg_alloc;eap_pax_alloc_resp;eap_pax_process_std_3",
             "eap_pax_getKey",
             "eap_pax_get_emsk",
             "eap_pax_get_session_id"]
    for func in tests:
        with alloc_fail(dev[0], 1, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="pax.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           erp="1", wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Forced failures in RNG/KDF/MAC operations
    tests = [(1, "os_get_random;eap_pax_process_std_1"),
             (1, "eap_pax_initial_key_derivation"),
             (1, "eap_pax_mac;eap_pax_process_std_3"),
             (2, "eap_pax_mac;eap_pax_process_std_3"),
             (1, "eap_pax_kdf;eap_pax_getKey"),
             (1, "eap_pax_kdf;eap_pax_get_emsk")]
    for count, func in tests:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="pax.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           erp="1", wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

def run_eap_pax_connect(dev):
    # Single EAP-PAX connection attempt; does not require success - just
    # waits briefly for a terminal EAP event and cleans up.
    dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
                eap="PAX", identity="pax.user@example.com",
                password_hex="0123456789abcdef0123456789abcdef",
                wait_connect=False)
    ev = dev.wait_event(["CTRL-EVENT-EAP-SUCCESS",
                         "CTRL-EVENT-EAP-FAILURE",
                         "CTRL-EVENT-DISCONNECTED"], timeout=1)
    dev.request("REMOVE_NETWORK all")
    if not ev or "CTRL-EVENT-DISCONNECTED" not in ev:
        dev.wait_disconnected()
    dev.dump_monitor()

def test_eap_proto_pax_errors_server(dev, apdev):
    """EAP-PAX local error cases on server"""
    check_eap_capa(dev[0], "PAX")
    params = int_eap_server_params()
    params['erp_domain'] = 'example.com'
    params['eap_server_erp'] = '1'
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

    # Allocation failures on the server (hostapd internal EAP server) side
    tests = [(1, "eap_pax_init"),
             (1, "eap_msg_alloc;eap_pax_build_std_1"),
             (1, "eap_msg_alloc;eap_pax_build_std_3"),
             (1, "=eap_pax_process_std_2"),
             (1, "eap_pax_getKey"),
             (1, "eap_pax_get_emsk"),
             (1, "eap_pax_get_session_id")]
    for count, func in tests:
        with alloc_fail(hapd, count, func):
            run_eap_pax_connect(dev[0])

    # Forced crypto/RNG failures on the server side
    tests = [(1, "os_get_random;eap_pax_build_std_1"),
             (1, "eap_pax_mac;eap_pax_build_std_1"),
             (1, "eap_pax_mac;eap_pax_build_std_3"),
             (2, "eap_pax_mac;=eap_pax_build_std_3"),
             (1, "eap_pax_initial_key_derivation;eap_pax_process_std_2"),
             (1, "eap_pax_mac;eap_pax_process_std_2"),
             (2, "eap_pax_mac;=eap_pax_process_std_2"),
             (1, "eap_pax_mac;eap_pax_check")]
    for count, func in tests:
        with fail_test(hapd, count, func):
            run_eap_pax_connect(dev[0])

def start_pax_assoc(dev, hapd):
    # Start an EAP-PAX association with external EAPOL frame I/O and
    # proxy the exchange up to (and including) PAX_STD-1 from the server.
    dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
                eap="PAX", identity="pax.user@example.com",
                password_hex="0123456789abcdef0123456789abcdef",
                wait_connect=False)
    proxy_msg(hapd, dev)  # EAP-Identity/Request
    proxy_msg(dev, hapd)  # EAP-Identity/Response
    proxy_msg(hapd, dev)  # PAX_STD-1

def stop_pax_assoc(dev, hapd):
    # Tear down the association and clear pending monitor events
    dev.request("REMOVE_NETWORK all")
    dev.wait_disconnected()
    dev.dump_monitor()
    hapd.dump_monitor()

def test_eap_proto_pax_server(dev, apdev):
    """EAP-PAX protocol testing for the server"""
    check_eap_capa(dev[0], "PAX")
    params = int_eap_server_params()
    params['erp_domain'] = 'example.com'
    params['eap_server_erp'] = '1'
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)
    hapd.request("SET ext_eapol_frame_io 1")
    dev[0].request("SET ext_eapol_frame_io 1")

    # Successful exchange to verify proxying mechanism
    start_pax_assoc(dev[0], hapd)
    proxy_msg(dev[0], hapd)  # PAX_STD-2
    proxy_msg(hapd, dev[0])  # PAX_STD-3
    proxy_msg(dev[0], hapd)  # PAX-ACK
    proxy_msg(hapd, dev[0])  # EAP-Success
    proxy_msg(hapd, dev[0])  # EAPOL-Key msg 1/4
    proxy_msg(dev[0], hapd)  # EAPOL-Key msg 2/4
    proxy_msg(hapd, dev[0])  # EAPOL-Key msg 3/4
    # EAPOL-Key msg 3/4
    proxy_msg(dev[0], hapd)  # EAPOL-Key msg 4/4
    dev[0].wait_connected()
    stop_pax_assoc(dev[0], hapd)

    # From here on, inject malformed PAX_STD-2 responses and verify that the
    # server logs the expected rejection reason (hapd.note) for each case.
    start_pax_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Too short EAP-PAX header (no OP-Code)
    hapd.note("EAP-PAX: Invalid frame")
    msg = resp[0:4] + "0005" + resp[8:12] + "0005" + "2e"
    tx_msg(dev[0], hapd, msg)
    # Too short EAP-PAX message (no payload)
    hapd.note("EAP-PAX: Invalid frame")
    msg = resp[0:4] + "000a" + resp[8:12] + "000a" + "2e1100000000"
    tx_msg(dev[0], hapd, msg)
    # Unexpected PAX_SEC-2
    hapd.note("EAP-PAX: Expected PAX_STD-2 - ignore op 17")
    msg = resp[0:4] + "001a" + resp[8:12] + "001a" + "2e1100000000" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    # Unexpected MAC ID
    hapd.note("EAP-PAX: Expected MAC ID 0x1, received 0xff")
    msg = resp[0:4] + "001a" + resp[8:12] + "001a" + "2e0200ff0000" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    # Unexpected DH Group ID
    hapd.note("EAP-PAX: Expected DH Group ID 0x0, received 0xff")
    msg = resp[0:4] + "001a" + resp[8:12] + "001a" + "2e020001ff00" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    # Unexpected Public Key ID
    hapd.note("EAP-PAX: Expected Public Key ID 0x0, received 0xff")
    msg = resp[0:4] + "001a" + resp[8:12] + "001a" + "2e02000100ff" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    # Unsupported Flags - MF
    hapd.note("EAP-PAX: fragmentation not supported")
    msg = resp[0:4] + "001a" + resp[8:12] + "001a" + "2e0201010000" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    # Unsupported Flags - CE
    hapd.note("EAP-PAX: Unexpected CE flag")
    msg = resp[0:4] + "001a" + resp[8:12] + "001a" + "2e0202010000" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    # Too short Payload in PAX_STD-2
    hapd.note("EAP-PAX: Too short PAX_STD-2 (B)")
    msg = resp[0:4] + "001a" + resp[8:12] + "001a" + "2e0200010000" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)
    stop_pax_assoc(dev[0], hapd)

    start_pax_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Too short Payload in PAX_STD-2
    hapd.note("EAP-PAX: Too short PAX_STD-2 (CID)")
    msg = resp[0:4] + "002c" + resp[8:12] + "002c" + "2e0200010000" + \
        "0020" + 32*"00"
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)
    stop_pax_assoc(dev[0], hapd)

    start_pax_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Too short Payload in PAX_STD-2
    hapd.note("EAP-PAX: Too short PAX_STD-2 (CID)")
    msg = resp[0:4] + "002e" + resp[8:12] + "002e" + "2e0200010000" + \
        "0020" + 32*"00" + "ffff"
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)
    stop_pax_assoc(dev[0], hapd)

    start_pax_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Too long CID in PAX_STD-2
    hapd.note("EAP-PAX: Too long CID")
    msg = resp[0:4] + "062e" + resp[8:12] + "062e" + "2e0200010000" + \
        "0020" + 32*"00" + "0600" + 1536*"00"
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)
    stop_pax_assoc(dev[0], hapd)

    start_pax_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Too short Payload in PAX_STD-2
    hapd.note("EAP-PAX: Too short PAX_STD-2 (MAC_CK)")
    msg = resp[0:4] + "003c" + resp[8:12] + "003c" + "2e0200010000" + \
        "0020" + 32*"00" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)
    stop_pax_assoc(dev[0], hapd)

    start_pax_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Unknown CID for PAX
    hapd.note("EAP-PAX: EAP-PAX not enabled for CID")
    msg = resp[0:4] + "0041" + resp[8:12] + "0041" + "2e0200010000" + \
        "0020" + 32*"00" + "0001" + "00" + "0010" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)
    stop_pax_assoc(dev[0], hapd)

    start_pax_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Too short ICV
    hapd.note("EAP-PAX: Too short ICV (15) in PAX_STD-2")
    msg = resp[0:4] + "0063" + resp[8:12] + "0063" + resp[16:206]
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)
    stop_pax_assoc(dev[0], hapd)

    start_pax_assoc(dev[0], hapd)
    proxy_msg(dev[0], hapd)  # PAX_STD-2
    proxy_msg(hapd, dev[0])  # PAX_STD-3
    resp = rx_msg(dev[0])
    # Unexpected PAX_STD-2
    hapd.note("EAP-PAX: Expected PAX-ACK - ignore op 1")
    msg = resp[0:4] + "001a" + resp[8:12] + "001a" + "2e0100000000" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    stop_pax_assoc(dev[0], hapd)

def test_eap_proto_psk(dev, apdev):
    """EAP-PSK protocol tests"""
    def psk_handler(ctx, req):
        # Scripted RADIUS-side responder: returns one crafted EAP-PSK
        # message per received request, keyed by a running counter.
        logger.info("psk_handler - RX " + binascii.hexlify(req).decode())
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PSK)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Non-zero T in first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0xc0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short third message")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PSK)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Incorrect T in third message")
            return struct.pack(">BBHBB4L4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16,
                               EAP_TYPE_PSK, 0,
                               0, 0, 0, 0,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing PCHANNEL in third message")
            return struct.pack(">BBHBB4L4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16,
                               EAP_TYPE_PSK, 0x80,
                               0, 0, 0, 0,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalic MAC_S in third message")
            return struct.pack(">BBHBB4L4L5LB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16 + 21,
                               EAP_TYPE_PSK, 0x80,
                               0, 0, 0, 0,
                               0, 0,
                               0, 0,
                               0, 0, 0, 0, 0,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None

    srv = start_radius_server(psk_handler)

    try:
        hapd = start_ap(apdev[0])
        dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

        # Walk through each scripted psk_handler exchange above
        for i in range(0, 6):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="user",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")

        # Local validation error: EAP-PSK PSK must be exactly 16 octets
        logger.info("Test: Invalid PSK length")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PSK", identity="user",
                       password_hex="0123456789abcdef0123456789abcd",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)

def test_eap_proto_psk_errors(dev, apdev):
    """EAP-PSK local error cases"""
    check_eap_capa(dev[0], "PSK")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

    # Memory allocation failures during EAP-PSK method initialization
    for i in range(1, 3):
        with alloc_fail(dev[0], i, "eap_psk_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="psk.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Forced failures in AK/KDK derivation during initialization
    for i in range(1, 4):
        with fail_test(dev[0], i, "eap_psk_key_setup;eap_psk_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="psk.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Allocation failures at specific points within message processing
    tests = [(1, "=eap_psk_process_1"),
             (2, "=eap_psk_process_1"),
             (1, "eap_msg_alloc;eap_psk_process_1"),
             (1, "=eap_psk_process_3"),
             (2, "=eap_psk_process_3"),
             (1, "eap_msg_alloc;eap_psk_process_3"),
             (1, "eap_psk_getKey"),
             (1, "eap_psk_get_session_id"),
             (1, "eap_psk_get_emsk")]
    for count, func in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="psk.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL",
                              note="No allocation failure seen for %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Forced crypto failures (OMAC1/EAX/AES primitives) during processing
    tests = [(1, "os_get_random;eap_psk_process_1"),
             (1, "omac1_aes_128;eap_psk_process_3"),
             (1, "=omac1_aes_vector;omac1_aes_128;aes_128_eax_encrypt"),
             (2, "=omac1_aes_vector;omac1_aes_128;aes_128_eax_encrypt"),
             (3, "=omac1_aes_vector;omac1_aes_128;aes_128_eax_encrypt"),
             (1, "=omac1_aes_vector;omac1_aes_128;aes_128_eax_decrypt"),
             (2, "=omac1_aes_vector;omac1_aes_128;aes_128_eax_decrypt"),
             (3, "=omac1_aes_vector;omac1_aes_128;aes_128_eax_decrypt"),
             (1, "aes_128_eax_decrypt;eap_psk_process_3"),
             (2, "aes_128_eax_decrypt;eap_psk_process_3"),
             (3, "aes_128_eax_decrypt;eap_psk_process_3"),
             (1, "aes_128_eax_encrypt;eap_psk_process_3"),
             (2, "aes_128_eax_encrypt;eap_psk_process_3"),
             (3, "aes_128_eax_encrypt;eap_psk_process_3"),
             (1, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
             (2, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
             (3, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
             (4, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
             (5, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
             (6, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
             (7, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
             (8, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
             (9, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
             (10, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
             (1, "aes_ctr_encrypt;aes_128_eax_decrypt;eap_psk_process_3"),
             (1, "aes_ctr_encrypt;aes_128_eax_encrypt;eap_psk_process_3")]
    for count, func in tests:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="psk.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_FAIL",
                              note="No failure seen for %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
            dev[0].dump_monitor()

def run_eap_psk_connect(dev):
    # Single EAP-PSK connection attempt; does not require success - just
    # waits briefly for a terminal EAP event and cleans up.
    dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
                eap="PSK", identity="psk.user@example.com",
                password_hex="0123456789abcdef0123456789abcdef",
                wait_connect=False)
    ev = dev.wait_event(["CTRL-EVENT-EAP-SUCCESS",
                         "CTRL-EVENT-EAP-FAILURE",
                         "CTRL-EVENT-DISCONNECTED"], timeout=1)
    dev.request("REMOVE_NETWORK all")
    if not ev or "CTRL-EVENT-DISCONNECTED" not in ev:
        dev.wait_disconnected()
    dev.dump_monitor()

def test_eap_proto_psk_errors_server(dev, apdev):
    """EAP-PSK local error cases on server"""
    check_eap_capa(dev[0], "PSK")
    params = int_eap_server_params()
    params['erp_domain'] = 'example.com'
    params['eap_server_erp'] = '1'
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

    # Allocation failures on the server (hostapd internal EAP server) side
    tests = [(1, "eap_psk_init"),
             (1, "eap_msg_alloc;eap_psk_build_1"),
             (1, "eap_msg_alloc;eap_psk_build_3"),
             (1, "=eap_psk_build_3"),
             (1, "=eap_psk_process_2"),
             (2, "=eap_psk_process_2"),
             (1, "=eap_psk_process_4"),
             (1, "aes_128_eax_decrypt;eap_psk_process_4"),
             (1, "eap_psk_getKey"),
             (1, "eap_psk_get_emsk"),
             (1, "eap_psk_get_session_id")]
    for count, func in tests:
        with alloc_fail(hapd, count, func):
            run_eap_psk_connect(dev[0])

    # Forced crypto/RNG failures on the server side
    tests = [(1, "os_get_random;eap_psk_build_1"),
             (1, "omac1_aes_128;eap_psk_build_3"),
             (1, "eap_psk_derive_keys;eap_psk_build_3"),
             (1, "aes_128_eax_encrypt;eap_psk_build_3"),
             (1, "eap_psk_key_setup;eap_psk_process_2"),
             (1, "omac1_aes_128;eap_psk_process_2"),
             (1, "aes_128_eax_decrypt;eap_psk_process_4")]
    for count, func in tests:
        with fail_test(hapd, count, func):
            run_eap_psk_connect(dev[0])

def start_psk_assoc(dev, hapd):
    # Start an EAP-PSK association with external EAPOL frame I/O and
    # proxy the exchange up to (and including) PSK-1 from the server.
    dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
                eap="PSK", identity="psk.user@example.com",
                password_hex="0123456789abcdef0123456789abcdef",
                wait_connect=False)
    proxy_msg(hapd, dev)  # EAP-Identity/Request
    proxy_msg(dev, hapd)  # EAP-Identity/Response
    proxy_msg(hapd, dev)  # PSK-1

def stop_psk_assoc(dev, hapd):
    # Tear down the association and clear pending monitor events
    dev.request("REMOVE_NETWORK all")
    dev.wait_disconnected()
    dev.dump_monitor()
    hapd.dump_monitor()

def test_eap_proto_psk_server(dev, apdev):
    """EAP-PSK protocol testing for the server"""
    check_eap_capa(dev[0], "PSK")
    params = int_eap_server_params()
    params['erp_domain'] = 'example.com'
    params['eap_server_erp'] = '1'
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)
    hapd.request("SET ext_eapol_frame_io 1")
    dev[0].request("SET ext_eapol_frame_io 1")

    # Successful exchange to verify proxying mechanism
    start_psk_assoc(dev[0], hapd)
    proxy_msg(dev[0], hapd)  # PSK-2
    proxy_msg(hapd, dev[0])  # PSK-3
    proxy_msg(dev[0], hapd)  # PSK-4
    proxy_msg(hapd, dev[0])  # EAP-Success
    proxy_msg(hapd, dev[0])  # EAPOL-Key msg 1/4
    proxy_msg(dev[0], hapd)  # EAPOL-Key msg 2/4
    proxy_msg(hapd, dev[0])  # EAPOL-Key msg 3/4
    proxy_msg(dev[0], hapd)  # EAPOL-Key msg 4/4
    dev[0].wait_connected()
    stop_psk_assoc(dev[0], hapd)

    # Inject malformed PSK-2/PSK-4 responses and verify the server logs
    # the expected rejection reason (hapd.note) for each case.
    start_psk_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Too short EAP-PSK header (no Flags)
    hapd.note("EAP-PSK: Invalid frame")
    msg = resp[0:4] + "0005" + resp[8:12] + "0005" + "2f"
    tx_msg(dev[0], hapd, msg)
    # Unexpected PSK-1
    hapd.note("EAP-PSK: Expected PSK-2 - ignore T=0")
    msg = resp[0:4] + "0006" + resp[8:12] + "0006" + "2f00"
    tx_msg(dev[0], hapd, msg)
    # Too short PSK-2
    hapd.note("EAP-PSK: Too short frame")
    msg = resp[0:4] + "0006" + resp[8:12] + "0006" + "2f40"
    tx_msg(dev[0], hapd, msg)
    # PSK-2 with unknown ID_P
    hapd.note("EAP-PSK: EAP-PSK not enabled for ID_P")
    msg = resp[0:4] + "004a" + resp[8:12] + "004a" + "2f40" + 3*16*"00" + \
        20*"00"
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)  # EAP-Failure
    stop_psk_assoc(dev[0], hapd)

    start_psk_assoc(dev[0], hapd)
    proxy_msg(dev[0], hapd)  # PSK-2
    proxy_msg(hapd, dev[0])  # PSK-3
    resp = rx_msg(dev[0])
    # Unexpected PSK-2
    hapd.note("EAP-PSK: Expected PSK-4 - ignore T=1")
    msg = resp[0:4] + "0016" + resp[8:12] + "0016" + "2f40" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    # Too short PSK-4 (no PCHANNEL)
    hapd.note("EAP-PSK: Too short PCHANNEL data in PSK-4 (len=0, expected 21)")
    msg = resp[0:4] + "0016" + resp[8:12] + "0016" + "2fc0" + 16*"00"
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)  # PSK-3 retry
    stop_psk_assoc(dev[0], hapd)

    start_psk_assoc(dev[0], hapd)
    proxy_msg(dev[0], hapd)  # PSK-2
    proxy_msg(hapd, dev[0])  # PSK-3
    resp = rx_msg(dev[0])
    # PCHANNEL Nonce did not increase
    hapd.note("EAP-PSK: Nonce did not increase")
    msg = resp[0:4] + "002b" + resp[8:12] + "002b" + "2fc0" + 16*"00" + \
        21*"00"
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)  # PSK-3 retry
    stop_psk_assoc(dev[0], hapd)

    start_psk_assoc(dev[0], hapd)
    proxy_msg(dev[0], hapd)  # PSK-2
    proxy_msg(hapd, dev[0])  # PSK-3
    resp = rx_msg(dev[0])
    # Invalid PCHANNEL encryption
    hapd.note("EAP-PSK: PCHANNEL decryption failed")
    msg = resp[0:4] + "002b" + resp[8:12] + "002b" + "2fc0" + 16*"00" + \
        21*"11"
    tx_msg(dev[0], hapd, msg)
    rx_msg(hapd)  # PSK-3 retry
    stop_psk_assoc(dev[0], hapd)

# EAP-SIM/AKA subtype and attribute identifiers (RFC 4186 / RFC 4187)
EAP_SIM_SUBTYPE_START = 10
EAP_SIM_SUBTYPE_CHALLENGE = 11
EAP_SIM_SUBTYPE_NOTIFICATION = 12
EAP_SIM_SUBTYPE_REAUTHENTICATION = 13
EAP_SIM_SUBTYPE_CLIENT_ERROR = 14

EAP_AKA_SUBTYPE_CHALLENGE = 1
EAP_AKA_SUBTYPE_AUTHENTICATION_REJECT = 2
EAP_AKA_SUBTYPE_SYNCHRONIZATION_FAILURE = 4
EAP_AKA_SUBTYPE_IDENTITY = 5
EAP_AKA_SUBTYPE_NOTIFICATION = 12
EAP_AKA_SUBTYPE_REAUTHENTICATION = 13
EAP_AKA_SUBTYPE_CLIENT_ERROR = 14

EAP_SIM_AT_RAND = 1
EAP_SIM_AT_AUTN = 2
EAP_SIM_AT_RES = 3
EAP_SIM_AT_AUTS = 4
EAP_SIM_AT_PADDING = 6
EAP_SIM_AT_NONCE_MT = 7
EAP_SIM_AT_PERMANENT_ID_REQ = 10
EAP_SIM_AT_MAC = 11
EAP_SIM_AT_NOTIFICATION = 12
EAP_SIM_AT_ANY_ID_REQ = 13
EAP_SIM_AT_IDENTITY = 14
EAP_SIM_AT_VERSION_LIST = 15
EAP_SIM_AT_SELECTED_VERSION = 16
EAP_SIM_AT_FULLAUTH_ID_REQ = 17
EAP_SIM_AT_COUNTER = 19
EAP_SIM_AT_COUNTER_TOO_SMALL = 20
EAP_SIM_AT_NONCE_S = 21
EAP_SIM_AT_CLIENT_ERROR_CODE = 22
EAP_SIM_AT_KDF_INPUT = 23
EAP_SIM_AT_KDF = 24
EAP_SIM_AT_IV = 129
EAP_SIM_AT_ENCR_DATA = 130
EAP_SIM_AT_NEXT_PSEUDONYM = 132
EAP_SIM_AT_NEXT_REAUTH_ID = 133
EAP_SIM_AT_CHECKCODE = 134
EAP_SIM_AT_RESULT_IND = 135
EAP_SIM_AT_BIDDING = 136

def test_eap_proto_aka(dev, apdev):
    """EAP-AKA protocol tests"""
    def aka_handler(ctx, req):
        # Scripted RADIUS-side responder: returns one crafted EAP-AKA
        # message per received request, keyed by a running counter.
        logger.info("aka_handler - RX " + binascii.hexlify(req).decode())
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_AKA)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown subtype")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, 255, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if
ctx['num'] == idx: logger.info("Test: Client Error") return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CLIENT_ERROR, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short attribute header") return struct.pack(">BBHBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 3, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Truncated attribute") return struct.pack(">BBHBBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255, 255) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short attribute data") return struct.pack(">BBHBBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Skippable/non-skippable unrecognzized attribute") return struct.pack(">BBHBBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 10, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255, 1, 0, 127, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request without ID type") return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request ANY_ID") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, 
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_ANY_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request ANY_ID (duplicate)") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_ANY_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request ANY_ID") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_ANY_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request FULLAUTH_ID") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request FULLAUTH_ID (duplicate)") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request ANY_ID") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_ANY_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request FULLAUTH_ID") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request PERMANENT_ID") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: 
logger.info("Test: Identity request PERMANENT_ID (duplicate)") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Challenge with no attributes") return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: AKA Challenge with BIDDING") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0, EAP_SIM_AT_BIDDING, 1, 0x8000) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Notification with no attributes") return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Notification indicating success, but no MAC") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0, EAP_SIM_AT_NOTIFICATION, 1, 32768) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Notification indicating success, but invalid MAC value") return struct.pack(">BBHBBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 20, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0, EAP_SIM_AT_NOTIFICATION, 1, 32768, 
EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Notification indicating success with zero-key MAC") return struct.pack(">BBHBBHBBHBBH16B", EAP_CODE_REQUEST, ctx['id'] - 2, 4 + 1 + 3 + 4 + 20, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0, EAP_SIM_AT_NOTIFICATION, 1, 32768, EAP_SIM_AT_MAC, 5, 0, 0xbe, 0x2e, 0xbb, 0xa9, 0xfa, 0x2e, 0x82, 0x36, 0x37, 0x8c, 0x32, 0x41, 0xb7, 0xc7, 0x58, 0xa3) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Notification before auth") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0, EAP_SIM_AT_NOTIFICATION, 1, 16384) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Notification before auth") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0, EAP_SIM_AT_NOTIFICATION, 1, 16385) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Notification with unrecognized non-failure") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0, EAP_SIM_AT_NOTIFICATION, 1, 0xc000) idx += 1 if ctx['num'] == idx: logger.info("Test: Notification before auth (duplicate)") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0, EAP_SIM_AT_NOTIFICATION, 1, 0xc000) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", 
EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Re-authentication (unexpected) with no attributes") return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_REAUTHENTICATION, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: AKA Challenge with Checkcode claiming identity round was used") return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0, EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request ANY_ID") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_ANY_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: AKA Challenge with Checkcode claiming no identity round was used") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0, EAP_SIM_AT_CHECKCODE, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Identity request ANY_ID") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_ANY_ID_REQ, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: AKA Challenge with mismatching Checkcode value") return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0, EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, 
ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Re-authentication (unexpected) with Checkcode claimin identity round was used") return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_REAUTHENTICATION, 0, EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_RAND length") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_RAND, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_AUTN length") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_AUTN, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unencrypted AT_PADDING") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_PADDING, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_NONCE_MT length") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_NONCE_MT, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_MAC length") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, 
EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_MAC, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_NOTIFICATION length") return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_NOTIFICATION, 2, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: AT_IDENTITY overflow") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_IDENTITY, 1, 0xffff) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected AT_VERSION_LIST") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_VERSION_LIST, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_SELECTED_VERSION length") return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_SELECTED_VERSION, 2, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unencrypted AT_COUNTER") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_COUNTER, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: 
Unencrypted AT_COUNTER_TOO_SMALL") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_COUNTER_TOO_SMALL, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unencrypted AT_NONCE_S") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_NONCE_S, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_CLIENT_ERROR_CODE length") return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_CLIENT_ERROR_CODE, 2, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_IV length") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_IV, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_ENCR_DATA length") return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_ENCR_DATA, 2, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unencrypted AT_NEXT_PSEUDONYM") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_NEXT_PSEUDONYM, 1, 0) idx += 1 if ctx['num'] == idx: 
logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unencrypted AT_NEXT_REAUTH_ID") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_NEXT_REAUTH_ID, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_RES length") return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_RES, 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_RES length") return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 24, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_RES, 6, 0xffff, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_AUTS length") return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_AUTS, 2, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_CHECKCODE length") return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_CHECKCODE, 2, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_RESULT_IND length") return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'], 4 + 
1 + 3 + 8, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_RESULT_IND, 2, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected AT_KDF_INPUT") return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_KDF_INPUT, 2, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected AT_KDF") return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_KDF, 2, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid AT_BIDDING length") return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 8, EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, EAP_SIM_AT_BIDDING, 2, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) return None srv = start_radius_server(aka_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(0, 49): eap = "AKA AKA'" if i == 11 else "AKA" dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap=eap, identity="0232010000000000", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") if i in [0, 15]: time.sleep(0.1) else: ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10) if ev is None: raise Exception("Timeout on EAP failure") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() 
def test_eap_proto_aka_prime(dev, apdev):
    """EAP-AKA' protocol tests"""
    def aka_prime_handler(ctx, req):
        # Stateful handler run by the test RADIUS server: ctx['num'] counts
        # the EAP exchanges seen so far and selects which (mostly malformed)
        # EAP-AKA' request to send next; ctx['id'] tracks the EAP identifier.
        logger.info("aka_prime_handler - RX " + binascii.hexlify(req).decode())
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            dev[0].note("Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_AKA_PRIME)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with no attributes")
            dev[0].note("Challenge with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with empty AT_KDF_INPUT")
            dev[0].note("Challenge with empty AT_KDF_INPUT")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with AT_KDF_INPUT")
            dev[0].note("Test: Challenge with AT_KDF_INPUT")
            return struct.pack(">BBHBBHBBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with duplicated KDF")
            dev[0].note("Challenge with duplicated KDF")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 2,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            dev[0].note("Challenge with multiple KDF proposals (preparation)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with incorrect KDF selected")
            dev[0].note("Challenge with incorrect KDF selected")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            dev[0].note("Challenge with multiple KDF proposals (preparation)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with selected KDF not duplicated")
            dev[0].note("Challenge with selected KDF not duplicated")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            dev[0].note("Challenge with multiple KDF proposals (preparation)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with selected KDF duplicated (missing MAC, RAND, AUTN)")
            dev[0].note("Challenge with selected KDF duplicated (missing MAC, RAND, AUTN)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple unsupported KDF proposals")
            dev[0].note("Challenge with multiple unsupported KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 2 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            dev[0].note("Challenge with multiple KDF proposals (preparation)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with invalid MAC, RAND, AUTN values)")
            dev[0].note("Challenge with invalid MAC, RAND, AUTN values)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_RAND, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_AUTN, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - AMF separation bit not set)")
            dev[0].note("Challenge - AMF separation bit not set)")
            return struct.pack(">BBHBBHBBHBBBBBBH4LBBH4LBBH4L".replace("BBH4L", "BBH4L", 1) if False else ">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 1, 2, 3, 4,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 9, 10,
                               0x2fda8ef7, 0xbba518cc)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - Invalid MAC")
            dev[0].note("Challenge - Invalid MAC")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 1, 2, 3, 4,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 0xffffffff, 0xffffffff,
                               0xd1f90322, 0x40514cb4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - Valid MAC")
            dev[0].note("Challenge - Valid MAC")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0,
                               0xf4a3c1d3, 0x7c901401, 0x34bd8b01, 0x6f7fa32f,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 0xffffffff, 0xffffffff,
                               0xd1f90322, 0x40514cb4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_KDF_INPUT length")
            dev[0].note("Invalid AT_KDF_INPUT length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_IDENTITY,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 0xffff, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_KDF length")
            dev[0].note("Invalid AT_KDF length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_IDENTITY,
                               0,
                               EAP_SIM_AT_KDF, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with large number of KDF proposals")
            dev[0].note("Challenge with large number of KDF proposals")
            return struct.pack(">BBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 12 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 253,
                               EAP_SIM_AT_KDF, 1, 252,
                               EAP_SIM_AT_KDF, 1, 251,
                               EAP_SIM_AT_KDF, 1, 250,
                               EAP_SIM_AT_KDF, 1, 249,
                               EAP_SIM_AT_KDF, 1, 248,
                               EAP_SIM_AT_KDF, 1, 247,
                               EAP_SIM_AT_KDF, 1, 246,
                               EAP_SIM_AT_KDF, 1, 245,
                               EAP_SIM_AT_KDF, 1, 244)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            dev[0].note("Challenge with multiple KDF proposals (preparation)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 2 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 2,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with an extra KDF appended")
            dev[0].note("Challenge with an extra KDF appended")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 2,
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            dev[0].note("Challenge with multiple KDF proposals (preparation)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 2 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 2,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with a modified KDF")
            dev[0].note("Challenge with a modified KDF")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE,
                               0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1,
                               ord('a'), ord('b'), ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 0,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None

    srv = start_radius_server(aka_prime_handler)

    try:
        hapd = start_ap(apdev[0])
        dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

        # One connection attempt per scripted handler exchange above; most
        # are expected to end in EAP failure.
        for i in range(0, 18):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="AKA'", identity="6555444333222111",
                           password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            if i in [0]:
                time.sleep(0.1)
            else:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_sim(dev, apdev):
    """EAP-SIM protocol tests"""
    def sim_handler(ctx, req):
        # Stateful handler run by the test RADIUS server: ctx['num'] counts
        # the EAP exchanges seen so far and selects which (mostly malformed)
        # EAP-SIM request to send next; ctx['id'] tracks the EAP identifier.
        logger.info("sim_handler - RX " + binascii.hexlify(req).decode())
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_SIM)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_AUTN")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_AUTN, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short AT_VERSION_LIST")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AT_VERSION_LIST overflow")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 1, 0xffff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_AUTS")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_AUTS, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_CHECKCODE")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_CHECKCODE, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_VERSION_LIST in Start")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No support version in AT_VERSION_LIST")
            return struct.pack(">BBHBBHBBH4B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 3, 2, 3, 4, 5)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request without ID type")
            return struct.pack(">BBHBBHBBH2H", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_MAC and AT_RAND in Challenge")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_RAND in Challenge")
            return struct.pack(">BBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Insufficient number of challenges in Challenge")
            return struct.pack(">BBHBBHBBH4LBBH4L", EAP_CODE_REQUEST,
                               ctx['id'],
                               4 + 1 + 3 + 20 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too many challenges in Challenge")
            return struct.pack(">BBHBBHBBH4L4L4L4LBBH4L", EAP_CODE_REQUEST,
                               ctx['id'],
                               4 + 1 + 3 + 4 + 4 * 16 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 17, 0,
                               0, 0, 0, 0,
                               0, 0, 0, 0,
                               0, 0, 0, 0,
                               0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Same RAND multiple times in Challenge")
            return struct.pack(">BBHBBHBBH4L4L4LBBH4L", EAP_CODE_REQUEST,
                               ctx['id'],
                               4 + 1 + 3 + 4 + 3 * 16 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 13, 0,
                               0, 0, 0, 0,
                               0, 0, 0, 0,
                               1, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but no MAC")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but invalid MAC value")
            return struct.pack(">BBHBBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16384)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16385)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with unrecognized non-failure")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Re-authentication (unexpected) with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_REAUTHENTICATION,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Client Error")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CLIENT_ERROR, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown subtype")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, 255, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None

    srv = start_radius_server(sim_handler)

    try:
        hapd = start_ap(apdev[0])
        dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

        # One connection attempt per scripted handler exchange above; most
        # are expected to end in EAP failure.
        for i in range(0, 25):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            if i in [0]:
                time.sleep(0.1)
            else:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
3, EAP_TYPE_SIM, EAP_SIM_SUBTYPE_REAUTHENTICATION, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Client Error") return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CLIENT_ERROR, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Unknown subtype") return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3, EAP_TYPE_SIM, 255, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) return None srv = start_radius_server(sim_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(0, 25): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") if i in [0]: time.sleep(0.1) else: ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10) if ev is None: raise Exception("Timeout on EAP failure") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() finally: stop_radius_server(srv) def test_eap_proto_sim_errors(dev, apdev): """EAP-SIM protocol tests (error paths)""" check_hlr_auc_gw_support() params = hostapd.wpa2_eap_params(ssid="eap-test") hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) with alloc_fail(dev[0], 1, "eap_sim_init"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581", wait_connect=False) ev = 
dev[0].wait_event(["EAP: Failed to initialize EAP method"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() with fail_test(dev[0], 1, "os_get_random;eap_sim_init"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581", wait_connect=False) ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581") with fail_test(dev[0], 1, "aes_128_cbc_encrypt;eap_sim_response_reauth"): hapd.request("EAPOL_REAUTH " + dev[0].own_addr()) ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5) if ev is None: raise Exception("EAP re-authentication did not start") wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581") with fail_test(dev[0], 1, "os_get_random;eap_sim_msg_add_encr_start"): hapd.request("EAPOL_REAUTH " + dev[0].own_addr()) ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5) if ev is None: raise Exception("EAP re-authentication did not start") wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581") with fail_test(dev[0], 1, "os_get_random;eap_sim_init_for_reauth"): hapd.request("EAPOL_REAUTH " + 
dev[0].own_addr()) ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5) if ev is None: raise Exception("EAP re-authentication did not start") wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581") with alloc_fail(dev[0], 1, "eap_sim_parse_encr;eap_sim_process_reauthentication"): hapd.request("EAPOL_REAUTH " + dev[0].own_addr()) ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5) if ev is None: raise Exception("EAP re-authentication did not start") wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() tests = [(1, "eap_sim_verify_mac;eap_sim_process_challenge"), (1, "eap_sim_parse_encr;eap_sim_process_challenge"), (1, "eap_sim_msg_init;eap_sim_response_start"), (1, "wpabuf_alloc;eap_sim_msg_init;eap_sim_response_start"), (1, "=eap_sim_learn_ids"), (2, "=eap_sim_learn_ids"), (2, "eap_sim_learn_ids"), (3, "eap_sim_learn_ids"), (1, "eap_sim_process_start"), (1, "eap_sim_getKey"), (1, "eap_sim_get_emsk"), (1, "eap_sim_get_session_id")] for count, func in tests: with alloc_fail(dev[0], count, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000@domain", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581", erp="1", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() tests = [(1, "aes_128_cbc_decrypt;eap_sim_parse_encr")] for count, func in tests: with fail_test(dev[0], count, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581", wait_connect=False) wait_fail_trigger(dev[0], "GET_FAIL") 
dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() params = int_eap_server_params() params['eap_sim_db'] = "unix:/tmp/hlr_auc_gw.sock" params['eap_sim_aka_result_ind'] = "1" hapd2 = hostapd.add_ap(apdev[1], params) dev[0].scan_for_bss(hapd2.own_addr(), freq=2412) with alloc_fail(dev[0], 1, "eap_sim_msg_init;eap_sim_response_notification"): dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", phase1="result_ind=1", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() tests = ["eap_sim_msg_add_encr_start;eap_sim_response_notification", "aes_128_cbc_encrypt;eap_sim_response_notification"] for func in tests: with fail_test(dev[0], 1, func): dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", phase1="result_ind=1", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581") dev[0].request("REAUTHENTICATE") ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5) if ev is None: raise Exception("EAP method not started on reauthentication") time.sleep(0.1) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() tests = ["eap_sim_parse_encr;eap_sim_process_notification_reauth"] for func in tests: with alloc_fail(dev[0], 1, func): dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="SIM", identity="1232010000000000", phase1="result_ind=1", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581") dev[0].request("REAUTHENTICATE") ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5) if ev is None: raise Exception("EAP method not started on reauthentication") time.sleep(0.1) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() def 
test_eap_proto_aka_errors(dev, apdev): """EAP-AKA protocol tests (error paths)""" check_hlr_auc_gw_support() params = hostapd.wpa2_eap_params(ssid="eap-test") hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) with alloc_fail(dev[0], 1, "eap_aka_init"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="AKA", identity="0232010000000000", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123", wait_connect=False) ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() tests = [(1, "=eap_aka_learn_ids"), (2, "=eap_aka_learn_ids"), (1, "eap_sim_parse_encr;eap_aka_process_challenge"), (1, "wpabuf_dup;eap_aka_add_id_msg"), (1, "wpabuf_resize;eap_aka_add_id_msg"), (1, "eap_aka_getKey"), (1, "eap_aka_get_emsk"), (1, "eap_aka_get_session_id")] for count, func in tests: with alloc_fail(dev[0], count, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="AKA", identity="0232010000000000@domain", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123", erp="1", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() params = int_eap_server_params() params['eap_sim_db'] = "unix:/tmp/hlr_auc_gw.sock" params['eap_sim_aka_result_ind'] = "1" hapd2 = hostapd.add_ap(apdev[1], params) dev[0].scan_for_bss(hapd2.own_addr(), freq=2412) with alloc_fail(dev[0], 1, "eap_sim_msg_init;eap_aka_response_notification"): dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="AKA", identity="0232010000000000", phase1="result_ind=1", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") 
dev[0].dump_monitor() tests = [(1, "aes_128_encrypt_block;milenage_f1;milenage_check", None), (2, "aes_128_encrypt_block;milenage_f1;milenage_check", None), (1, "milenage_f2345;milenage_check", None), (7, "aes_128_encrypt_block;milenage_f2345;milenage_check", "ff0000000123"), (1, "aes_128_encrypt_block;milenage_f1;milenage_check", "fff000000123")] for count, func, seq in tests: if not seq: seq = "000000000123" with fail_test(dev[0], count, func): dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="AKA", identity="0232010000000000", phase1="result_ind=1", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:" + seq, wait_connect=False) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].dump_monitor() tests = ["eap_sim_msg_add_encr_start;eap_aka_response_notification", "aes_128_cbc_encrypt;eap_aka_response_notification"] for func in tests: with fail_test(dev[0], 1, func): dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="AKA", identity="0232010000000000", phase1="result_ind=1", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123") dev[0].request("REAUTHENTICATE") ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5) if ev is None: raise Exception("EAP method not started on reauthentication") time.sleep(0.1) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() tests = ["eap_sim_parse_encr;eap_aka_process_notification_reauth"] for func in tests: with alloc_fail(dev[0], 1, func): dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="AKA", identity="0232010000000000", phase1="result_ind=1", password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123") dev[0].request("REAUTHENTICATE") ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5) if ev is None: raise Exception("EAP method not started on 
reauthentication") time.sleep(0.1) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() def test_eap_proto_aka_prime_errors(dev, apdev): """EAP-AKA' protocol tests (error paths)""" check_hlr_auc_gw_support() params = hostapd.wpa2_eap_params(ssid="eap-test") hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) with alloc_fail(dev[0], 1, "eap_aka_init"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="AKA'", identity="6555444333222111", password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123", wait_connect=False) ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="AKA'", identity="6555444333222111", password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123") with fail_test(dev[0], 1, "aes_128_cbc_encrypt;eap_aka_response_reauth"): hapd.request("EAPOL_REAUTH " + dev[0].own_addr()) ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5) if ev is None: raise Exception("EAP re-authentication did not start") wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="AKA'", identity="6555444333222111", password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123") with alloc_fail(dev[0], 1, "eap_sim_parse_encr;eap_aka_process_reauthentication"): hapd.request("EAPOL_REAUTH " + dev[0].own_addr()) ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5) if ev is None: raise Exception("EAP re-authentication did not start") wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() tests = [(1, 
"eap_sim_verify_mac_sha256"), (1, "=eap_aka_process_challenge")] for count, func in tests: with alloc_fail(dev[0], count, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="AKA'", identity="6555444333222111", password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123", erp="1", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].dump_monitor() def test_eap_proto_ikev2(dev, apdev): """EAP-IKEv2 protocol tests""" check_eap_capa(dev[0], "IKEV2") global eap_proto_ikev2_test_done eap_proto_ikev2_test_done = False def ikev2_handler(ctx, req): logger.info("ikev2_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: Missing payload") return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1, EAP_TYPE_IKEV2) idx += 1 if ctx['num'] == idx: logger.info("Test: Truncated Message Length field") return struct.pack(">BBHBB3B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 3, EAP_TYPE_IKEV2, 0x80, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short Message Length value") return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 4 + 1, EAP_TYPE_IKEV2, 0x80, 0, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: Truncated message") return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 4, EAP_TYPE_IKEV2, 0x80, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: Truncated message(2)") return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 4, EAP_TYPE_IKEV2, 0x80, 0xffffffff) idx += 1 if ctx['num'] == idx: logger.info("Test: Truncated message(3)") return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 4, EAP_TYPE_IKEV2, 0xc0, 0xffffffff) idx += 1 if ctx['num'] == idx: logger.info("Test: Truncated message(4)") 
return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 4, EAP_TYPE_IKEV2, 0xc0, 10000000) idx += 1 if ctx['num'] == idx: logger.info("Test: Too long fragments (first fragment)") return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 4 + 1, EAP_TYPE_IKEV2, 0xc0, 2, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: Too long fragments (second fragment)") return struct.pack(">BBHBB2B", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 2, EAP_TYPE_IKEV2, 0x00, 2, 3) idx += 1 if ctx['num'] == idx: logger.info("Test: No Message Length field in first fragment") return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 1, EAP_TYPE_IKEV2, 0x40, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: ICV before keys") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_IKEV2, 0x20) idx += 1 if ctx['num'] == idx: logger.info("Test: Unsupported IKEv2 header version") return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 28, EAP_TYPE_IKEV2, 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Incorrect IKEv2 header Length") return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 28, EAP_TYPE_IKEV2, 0x00, 0, 0, 0, 0, 0, 0x20, 0, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected IKEv2 Exchange Type in SA_INIT state") return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 28, EAP_TYPE_IKEV2, 0x00, 0, 0, 0, 0, 0, 0x20, 0, 0, 0, 28) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected IKEv2 Message ID in SA_INIT state") return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 28, EAP_TYPE_IKEV2, 0x00, 0, 0, 0, 0, 0, 0x20, 34, 0, 1, 28) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected IKEv2 Flags value") return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 28, EAP_TYPE_IKEV2, 0x00, 0, 0, 0, 0, 0, 0x20, 34, 0, 0, 28) idx += 1 if 
ctx['num'] == idx: logger.info("Test: Unexpected IKEv2 Flags value(2)") return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 28, EAP_TYPE_IKEV2, 0x00, 0, 0, 0, 0, 0, 0x20, 34, 0x20, 0, 28) idx += 1 if ctx['num'] == idx: logger.info("Test: No SAi1 in SA_INIT") return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 28, EAP_TYPE_IKEV2, 0x00, 0, 0, 0, 0, 0, 0x20, 34, 0x08, 0, 28) def build_ike(id, next=0, exch_type=34, flags=0x00, ike=b''): return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, id, 4 + 1 + 1 + 28 + len(ike), EAP_TYPE_IKEV2, flags, 0, 0, 0, 0, next, 0x20, exch_type, 0x08, 0, 28 + len(ike)) + ike idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected extra data after payloads") return build_ike(ctx['id'], ike=struct.pack(">B", 1)) idx += 1 if ctx['num'] == idx: logger.info("Test: Truncated payload header") return build_ike(ctx['id'], next=128, ike=struct.pack(">B", 1)) idx += 1 if ctx['num'] == idx: logger.info("Test: Too small payload header length") ike = struct.pack(">BBH", 0, 0, 3) return build_ike(ctx['id'], next=128, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Too large payload header length") ike = struct.pack(">BBH", 0, 0, 5) return build_ike(ctx['id'], next=128, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Unsupported payload (non-critical and critical)") ike = struct.pack(">BBHBBH", 129, 0, 4, 0, 0x01, 4) return build_ike(ctx['id'], next=128, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Certificate and empty SAi1") ike = struct.pack(">BBHBBH", 33, 0, 4, 0, 0, 4) return build_ike(ctx['id'], next=37, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short proposal") ike = struct.pack(">BBHBBHBBB", 0, 0, 4 + 7, 0, 0, 7, 0, 0, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Too small proposal length in SAi1") ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8, 0, 0, 7, 0, 0, 0, 0) return 
build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Too large proposal length in SAi1") ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8, 0, 0, 9, 0, 0, 0, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected proposal type in SAi1") ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8, 1, 0, 8, 0, 0, 0, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Protocol ID in SAi1") ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8, 0, 0, 8, 0, 0, 0, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected proposal number in SAi1") ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8, 0, 0, 8, 0, 1, 0, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Not enough room for SPI in SAi1") ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8, 0, 0, 8, 1, 1, 1, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected SPI in SAi1") ike = struct.pack(">BBHBBHBBBBB", 0, 0, 4 + 9, 0, 0, 9, 1, 1, 1, 0, 1) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: No transforms in SAi1") ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8, 0, 0, 8, 1, 1, 0, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short transform in SAi1") ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8, 0, 0, 8, 1, 1, 0, 1) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Too small transform length in SAi1") ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8, 0, 0, 8 + 8, 1, 1, 0, 1, 0, 0, 7, 0, 0, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Too large transform length in SAi1") ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8, 0, 0, 8 + 8, 1, 1, 0, 
1, 0, 0, 9, 0, 0, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Transform type in SAi1") ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8, 0, 0, 8 + 8, 1, 1, 0, 1, 1, 0, 8, 0, 0, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: No transform attributes in SAi1") ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8, 0, 0, 8 + 8, 1, 1, 0, 1, 0, 0, 8, 0, 0, 0) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: No transform attr for AES and unexpected data after transforms in SAi1") tlen1 = 8 + 3 tlen2 = 8 + 4 tlen3 = 8 + 4 tlen = tlen1 + tlen2 + tlen3 ike = struct.pack(">BBHBBHBBBBBBHBBH3BBBHBBHHHBBHBBHHHB", 0, 0, 4 + 8 + tlen + 1, 0, 0, 8 + tlen + 1, 1, 1, 0, 3, 3, 0, tlen1, 1, 0, 12, 1, 2, 3, 3, 0, tlen2, 1, 0, 12, 0, 128, 0, 0, tlen3, 1, 0, 12, 0x8000 | 14, 127, 1) return build_ike(ctx['id'], next=33, ike=ike) def build_sa(next=0): tlen = 5 * 8 return struct.pack(">BBHBBHBBBBBBHBBHBBHBBHBBHBBHBBHBBHBBHBBH", next, 0, 4 + 8 + tlen, 0, 0, 8 + tlen, 1, 1, 0, 5, 3, 0, 8, 1, 0, 3, 3, 0, 8, 2, 0, 1, 3, 0, 8, 3, 0, 1, 3, 0, 8, 4, 0, 5, 0, 0, 8, 241, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid proposal, but no KEi in SAi1") ike = build_sa() return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Empty KEi in SAi1") ike = build_sa(next=34) + struct.pack(">BBH", 0, 0, 4) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Mismatch in DH Group in SAi1") ike = build_sa(next=34) ike += struct.pack(">BBHHH", 0, 0, 4 + 4 + 96, 12345, 0) ike += 96*b'\x00' return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid DH public value length in SAi1") ike = 
build_sa(next=34) ike += struct.pack(">BBHHH", 0, 0, 4 + 4 + 96, 5, 0) ike += 96*b'\x00' return build_ike(ctx['id'], next=33, ike=ike) def build_ke(next=0): ke = struct.pack(">BBHHH", next, 0, 4 + 4 + 192, 5, 0) ke += 191*b'\x00'+b'\x02' return ke idx += 1 if ctx['num'] == idx: logger.info("Test: Valid proposal and KEi, but no Ni in SAi1") ike = build_sa(next=34) ike += build_ke() return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short Ni in SAi1") ike = build_sa(next=34) ike += build_ke(next=40) ike += struct.pack(">BBH", 0, 0, 4) return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Too long Ni in SAi1") ike = build_sa(next=34) ike += build_ke(next=40) ike += struct.pack(">BBH", 0, 0, 4 + 257) + 257*b'\x00' return build_ike(ctx['id'], next=33, ike=ike) def build_ni(next=0): return struct.pack(">BBH", next, 0, 4 + 256) + 256*b'\x00' def build_sai1(id): ike = build_sa(next=34) ike += build_ke(next=40) ike += build_ni() return build_ike(ctx['id'], next=33, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid proposal, KEi, and Ni in SAi1") return build_sai1(ctx['id']) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid proposal, KEi, and Ni in SAi1") return build_sai1(ctx['id']) idx += 1 if ctx['num'] == idx: logger.info("Test: No integrity checksum") ike = b'' return build_ike(ctx['id'], next=37, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid proposal, KEi, and Ni in SAi1") return build_sai1(ctx['id']) idx += 1 if ctx['num'] == idx: logger.info("Test: Truncated integrity checksum") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_IKEV2, 0x20) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid proposal, KEi, and Ni in SAi1") return build_sai1(ctx['id']) idx += 1 if ctx['num'] == idx: 
logger.info("Test: Invalid integrity checksum") ike = b'' return build_ike(ctx['id'], next=37, flags=0x20, ike=ike) idx += 1 if ctx['num'] == idx: logger.info("No more test responses available - test case completed") global eap_proto_ikev2_test_done eap_proto_ikev2_test_done = True return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1, EAP_TYPE_IKEV2) return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) srv = start_radius_server(ikev2_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) i = 0 while not eap_proto_ikev2_test_done: i += 1 logger.info("Running connection iteration %d" % i) dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="IKEV2", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP method start") if i in [41, 46]: ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10) if ev is None: raise Exception("Timeout on EAP failure") else: time.sleep(0.05) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].dump_monitor() dev[1].dump_monitor() dev[2].dump_monitor() finally: stop_radius_server(srv) def NtPasswordHash(password): pw = password.encode('utf_16_le') return hashlib.new('md4', pw).digest() def HashNtPasswordHash(password_hash): return hashlib.new('md4', password_hash).digest() def ChallengeHash(peer_challenge, auth_challenge, username): data = peer_challenge + auth_challenge + username return hashlib.sha1(data).digest()[0:8] def GenerateAuthenticatorResponse(password, nt_response, peer_challenge, auth_challenge, username): magic1 = binascii.unhexlify("4D616769632073657276657220746F20636C69656E74207369676E696E6720636F6E7374616E74") magic2 = 
binascii.unhexlify("50616420746F206D616B6520697420646F206D6F7265207468616E206F6E6520697465726174696F6E") password_hash = NtPasswordHash(password) password_hash_hash = HashNtPasswordHash(password_hash) data = password_hash_hash + nt_response + magic1 digest = hashlib.sha1(data).digest() challenge = ChallengeHash(peer_challenge, auth_challenge, username.encode()) data = digest + challenge + magic2 resp = hashlib.sha1(data).digest() return resp def test_eap_proto_ikev2_errors(dev, apdev): """EAP-IKEv2 local error cases""" check_eap_capa(dev[0], "IKEV2") params = hostapd.wpa2_eap_params(ssid="eap-test") hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(1, 5): with alloc_fail(dev[0], i, "eap_ikev2_init"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="IKEV2", identity="ikev2 user", password="ike password", wait_connect=False) ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() tests = [(1, "ikev2_encr_encrypt"), (1, "ikev2_encr_decrypt"), (1, "ikev2_derive_auth_data"), (2, "ikev2_derive_auth_data"), (1, "=ikev2_decrypt_payload"), (1, "ikev2_encr_decrypt;ikev2_decrypt_payload"), (1, "ikev2_encr_encrypt;ikev2_build_encrypted"), (1, "ikev2_derive_sk_keys"), (2, "ikev2_derive_sk_keys"), (3, "ikev2_derive_sk_keys"), (4, "ikev2_derive_sk_keys"), (5, "ikev2_derive_sk_keys"), (6, "ikev2_derive_sk_keys"), (7, "ikev2_derive_sk_keys"), (8, "ikev2_derive_sk_keys"), (1, "eap_ikev2_derive_keymat;eap_ikev2_peer_keymat"), (1, "eap_msg_alloc;eap_ikev2_build_msg"), (1, "eap_ikev2_getKey"), (1, "eap_ikev2_get_emsk"), (1, "eap_ikev2_get_session_id"), (1, "=ikev2_derive_keys"), (2, "=ikev2_derive_keys"), (1, "wpabuf_alloc;ikev2_process_kei"), (1, "=ikev2_process_idi"), (1, "ikev2_derive_auth_data;ikev2_build_auth"), (1, "wpabuf_alloc;ikev2_build_sa_init"), (2, 
"wpabuf_alloc;ikev2_build_sa_init"), (3, "wpabuf_alloc;ikev2_build_sa_init"), (4, "wpabuf_alloc;ikev2_build_sa_init"), (5, "wpabuf_alloc;ikev2_build_sa_init"), (6, "wpabuf_alloc;ikev2_build_sa_init"), (1, "wpabuf_alloc;ikev2_build_sa_auth"), (2, "wpabuf_alloc;ikev2_build_sa_auth"), (1, "ikev2_build_auth;ikev2_build_sa_auth")] for count, func in tests: with alloc_fail(dev[0], count, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="IKEV2", identity="ikev2 user@domain", password="ike password", erp="1", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") ok = False for j in range(10): state = dev[0].request('GET_ALLOC_FAIL') if state.startswith('0:'): ok = True break time.sleep(0.1) if not ok: raise Exception("No allocation failure seen for %d:%s" % (count, func)) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() tests = [(1, "wpabuf_alloc;ikev2_build_notify"), (2, "wpabuf_alloc;ikev2_build_notify"), (1, "ikev2_build_encrypted;ikev2_build_notify")] for count, func in tests: with alloc_fail(dev[0], count, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="IKEV2", identity="ikev2 user", password="wrong password", erp="1", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") ok = False for j in range(10): state = dev[0].request('GET_ALLOC_FAIL') if state.startswith('0:'): ok = True break time.sleep(0.1) if not ok: raise Exception("No allocation failure seen for %d:%s" % (count, func)) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() tests = [(1, "ikev2_integ_hash"), (1, "ikev2_integ_hash;ikev2_decrypt_payload"), (1, "os_get_random;ikev2_build_encrypted"), (1, "ikev2_prf_plus;ikev2_derive_sk_keys"), (1, "eap_ikev2_derive_keymat;eap_ikev2_peer_keymat"), (1, "os_get_random;ikev2_build_sa_init"), (2, 
"os_get_random;ikev2_build_sa_init"), (1, "ikev2_integ_hash;eap_ikev2_validate_icv"), (1, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_keys"), (1, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_auth_data"), (2, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_auth_data"), (3, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_auth_data")] for count, func in tests: with fail_test(dev[0], count, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="IKEV2", identity="ikev2 user", password="ike password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") ok = False for j in range(10): state = dev[0].request('GET_FAIL') if state.startswith('0:'): ok = True break time.sleep(0.1) if not ok: raise Exception("No failure seen for %d:%s" % (count, func)) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() params = {"ssid": "eap-test2", "wpa": "2", "wpa_key_mgmt": "WPA-EAP", "rsn_pairwise": "CCMP", "ieee8021x": "1", "eap_server": "1", "eap_user_file": "auth_serv/eap_user.conf", "fragment_size": "50"} hapd2 = hostapd.add_ap(apdev[1], params) dev[0].scan_for_bss(hapd2.own_addr(), freq=2412) tests = [(1, "eap_ikev2_build_frag_ack"), (1, "wpabuf_alloc;eap_ikev2_process_fragment")] for count, func in tests: with alloc_fail(dev[0], count, func): dev[0].connect("eap-test2", key_mgmt="WPA-EAP", scan_freq="2412", eap="IKEV2", identity="ikev2 user", password="ike password", erp="1", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") ok = False for j in range(10): state = dev[0].request('GET_ALLOC_FAIL') if state.startswith('0:'): ok = True break time.sleep(0.1) if not ok: raise Exception("No allocation failure seen for %d:%s" % (count, func)) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() def run_eap_ikev2_connect(dev): dev.connect("test-wpa2-eap", 
key_mgmt="WPA-EAP", scan_freq="2412", eap="IKEV2", identity="ikev2 user", password="ike password", fragment_size="30", wait_connect=False) ev = dev.wait_event(["CTRL-EVENT-EAP-SUCCESS", "CTRL-EVENT-EAP-FAILURE", "CTRL-EVENT-DISCONNECTED"], timeout=1) dev.request("REMOVE_NETWORK all") if not ev or "CTRL-EVENT-DISCONNECTED" not in ev: dev.wait_disconnected() dev.dump_monitor() def test_eap_proto_ikev2_errors_server(dev, apdev): """EAP-IKEV2 local error cases on server""" check_eap_capa(dev[0], "IKEV2") params = int_eap_server_params() params['erp_domain'] = 'example.com' params['eap_server_erp'] = '1' hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) tests = [(1, "eap_ikev2_init"), (2, "=eap_ikev2_init"), (3, "=eap_ikev2_init"), (1, "eap_msg_alloc;eap_ikev2_build_msg"), (1, "ikev2_initiator_build;eap_ikev2_buildReq"), (1, "eap_ikev2_process_fragment"), (1, "wpabuf_alloc_copy;ikev2_process_ker"), (1, "ikev2_process_idr"), (1, "ikev2_derive_auth_data;ikev2_process_auth_secret"), (1, "ikev2_decrypt_payload;ikev2_process_sa_auth"), (1, "ikev2_process_sa_auth_decrypted;ikev2_process_sa_auth"), (1, "dh_init;ikev2_build_kei"), (1, "ikev2_build_auth"), (1, "wpabuf_alloc;ikev2_build_sa_init"), (1, "ikev2_build_sa_auth"), (1, "=ikev2_build_sa_auth"), (2, "=ikev2_derive_auth_data"), (1, "wpabuf_alloc;ikev2_build_sa_auth"), (2, "wpabuf_alloc;=ikev2_build_sa_auth"), (1, "ikev2_decrypt_payload;ikev2_process_sa_init_encr"), (1, "dh_derive_shared;ikev2_derive_keys"), (1, "=ikev2_derive_keys"), (2, "=ikev2_derive_keys"), (1, "eap_ikev2_getKey"), (1, "eap_ikev2_get_emsk"), (1, "eap_ikev2_get_session_id")] for count, func in tests: with alloc_fail(hapd, count, func): run_eap_ikev2_connect(dev[0]) tests = [(1, "eap_ikev2_validate_icv;eap_ikev2_process_icv"), (1, "eap_ikev2_server_keymat"), (1, "ikev2_build_auth"), (1, "os_get_random;ikev2_build_sa_init"), (2, "os_get_random;ikev2_build_sa_init"), (1, "ikev2_derive_keys"), (2, "ikev2_derive_keys"), 
(3, "ikev2_derive_keys"), (4, "ikev2_derive_keys"), (5, "ikev2_derive_keys"), (6, "ikev2_derive_keys"), (7, "ikev2_derive_keys"), (8, "ikev2_derive_keys"), (1, "ikev2_decrypt_payload;ikev2_process_sa_auth"), (1, "eap_ikev2_process_icv;eap_ikev2_process")] for count, func in tests: with fail_test(hapd, count, func): run_eap_ikev2_connect(dev[0]) def start_ikev2_assoc(dev, hapd): dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", eap="IKEV2", identity="ikev2 user", password="ike password", wait_connect=False) proxy_msg(hapd, dev) # EAP-Identity/Request proxy_msg(dev, hapd) # EAP-Identity/Response proxy_msg(hapd, dev) # IKEV2 1 def stop_ikev2_assoc(dev, hapd): dev.request("REMOVE_NETWORK all") dev.wait_disconnected() dev.dump_monitor() hapd.dump_monitor() def test_eap_proto_ikev2_server(dev, apdev): """EAP-IKEV2 protocol testing for the server""" check_eap_capa(dev[0], "IKEV2") params = int_eap_server_params() params['erp_domain'] = 'example.com' params['eap_server_erp'] = '1' hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) hapd.request("SET ext_eapol_frame_io 1") dev[0].request("SET ext_eapol_frame_io 1") # Successful exchange to verify proxying mechanism start_ikev2_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # IKEV2 2 proxy_msg(hapd, dev[0]) # IKEV2 3 proxy_msg(dev[0], hapd) # IKEV2 4 proxy_msg(hapd, dev[0]) # EAP-Success proxy_msg(hapd, dev[0]) # EAPOL-Key msg 1/4 proxy_msg(dev[0], hapd) # EAPOL-Key msg 2/4 proxy_msg(hapd, dev[0]) # EAPOL-Key msg 3/4 proxy_msg(dev[0], hapd) # EAPOL-Key msg 4/4 dev[0].wait_connected() stop_ikev2_assoc(dev[0], hapd) start_ikev2_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short EAP-IKEV2 header hapd.note("IKEV2: Too short frame to include HDR") msg = resp[0:4] + "0005" + resp[8:12] + "0005" + "31" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_ikev2_assoc(dev[0], hapd) start_ikev2_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short EAP-IKEV2 header - missing Message Length 
field hapd.note("EAP-IKEV2: Message underflow") msg = resp[0:4] + "0006" + resp[8:12] + "0006" + "3180" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_ikev2_assoc(dev[0], hapd) start_ikev2_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short EAP-IKEV2 header - too small Message Length hapd.note("EAP-IKEV2: Invalid Message Length (0; 1 remaining in this msg)") msg = resp[0:4] + "000b" + resp[8:12] + "000b" + "318000000000ff" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_ikev2_assoc(dev[0], hapd) start_ikev2_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # Too short EAP-IKEV2 header - too large Message Length hapd.note("EAP-IKEV2: Ignore too long message") msg = resp[0:4] + "000b" + resp[8:12] + "000b" + "31c0bbccddeeff" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_ikev2_assoc(dev[0], hapd) start_ikev2_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # No Message Length in first fragment hapd.note("EAP-IKEV2: No Message Length field in a fragmented packet") msg = resp[0:4] + "0007" + resp[8:12] + "0007" + "3140ff" tx_msg(dev[0], hapd, msg) rx_msg(hapd) stop_ikev2_assoc(dev[0], hapd) start_ikev2_assoc(dev[0], hapd) resp = rx_msg(dev[0]) # First fragment (valid) hapd.note("EAP-IKEV2: Received 1 bytes in first fragment, waiting for 255 bytes more") msg = resp[0:4] + "000b" + resp[8:12] + "000b" + "31c000000100ff" tx_msg(dev[0], hapd, msg) req = rx_msg(hapd) id, = struct.unpack('B', binascii.unhexlify(req)[5:6]) hapd.note("EAP-IKEV2: Received 1 bytes in first fragment, waiting for 254 bytes more") payload = struct.pack('BBB', 49, 0x40, 0) msg = struct.pack('>BBHBBH', 1, 0, 4 + len(payload), 2, id, 4 + len(payload)) + payload tx_msg(dev[0], hapd, binascii.hexlify(msg).decode()) req = rx_msg(hapd) id, = struct.unpack('B', binascii.unhexlify(req)[5:6]) hapd.note("EAP-IKEV2: Fragment overflow") payload = struct.pack('BB', 49, 0x40) + 255*b'\x00' msg = struct.pack('>BBHBBH', 1, 0, 4 + len(payload), 2, id, 4 + len(payload)) + payload tx_msg(dev[0], hapd, binascii.hexlify(msg).decode()) 
rx_msg(hapd) stop_ikev2_assoc(dev[0], hapd) start_ikev2_assoc(dev[0], hapd) proxy_msg(dev[0], hapd) # IKEV2 2 req = proxy_msg(hapd, dev[0]) # IKEV2 3 id, = struct.unpack('B', binascii.unhexlify(req)[5:6]) # Missing ICV hapd.note("EAP-IKEV2: The message should have included integrity checksum") payload = struct.pack('BB', 49, 0) + b'\x00' msg = struct.pack('>BBHBBH', 1, 0, 4 + len(payload), 2, id, 4 + len(payload)) + payload tx_msg(dev[0], hapd, binascii.hexlify(msg).decode()) rx_msg(hapd) stop_ikev2_assoc(dev[0], hapd) tests = [("Unsupported HDR version 0x0 (expected 0x20)", struct.pack('BB', 49, 0) + 16*b'\x00' + struct.pack('>BBBBLL', 0, 0, 0, 0, 0, 0)), ("IKEV2: Invalid length (HDR: 0 != RX: 28)", struct.pack('BB', 49, 0) + 16*b'\x00' + struct.pack('>BBBBLL', 0, 0x20, 0, 0, 0, 0)), ("IKEV2: Unexpected Exchange Type 0 in SA_INIT state", struct.pack('BB', 49, 0) + 16*b'\x00' + struct.pack('>BBBBLL', 0, 0x20, 0, 0, 0, 28)), ("IKEV2: Unexpected Flags value 0x0", struct.pack('BB', 49, 0) + 16*b'\x00' + struct.pack('>BBBBLL', 0, 0x20, 34, 0, 0, 28)), ("IKEV2: SAr1 not received", struct.pack('BB', 49, 0) + 16*b'\x00' + struct.pack('>BBBBLL', 0, 0x20, 34, 0x20, 0, 28))] for txt, payload in tests: start_ikev2_assoc(dev[0], hapd) resp = rx_msg(dev[0]) id, = struct.unpack('B', binascii.unhexlify(resp)[5:6]) hapd.note(txt) msg = struct.pack('>BBHBBH', 1, 0, 4 + len(payload), 2, id, 4 + len(payload)) + payload tx_msg(dev[0], hapd, binascii.hexlify(msg).decode()) rx_msg(hapd) stop_ikev2_assoc(dev[0], hapd) def test_eap_proto_mschapv2(dev, apdev): """EAP-MSCHAPv2 protocol tests""" check_eap_capa(dev[0], "MSCHAPV2") def mschapv2_handler(ctx, req): logger.info("mschapv2_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: Missing payload") return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 
4 + 1, EAP_TYPE_MSCHAPV2) idx += 1 if ctx['num'] == idx: logger.info("Test: Unknown MSCHAPv2 op_code") return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + 1, EAP_TYPE_MSCHAPV2, 0, 0, 5, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid ms_len and unknown MSCHAPv2 op_code") return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + 1, EAP_TYPE_MSCHAPV2, 255, 0, 0, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Success before challenge") return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + 1, EAP_TYPE_MSCHAPV2, 3, 0, 5, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Failure before challenge - required challenge field not present") return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + 1, EAP_TYPE_MSCHAPV2, 4, 0, 5, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Failure before challenge - invalid failure challenge len") payload = b'C=12' return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 4, 0, 4 + len(payload)) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Failure before challenge - invalid failure challenge len") payload = b'C=12 V=3' return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 4, 0, 4 + len(payload)) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Failure before challenge - invalid failure challenge") payload = b'C=00112233445566778899aabbccddeefQ ' return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 4, 0, 4 + len(payload)) + 
payload idx += 1 if ctx['num'] == idx: logger.info("Test: Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Failure before challenge - password expired") payload = b'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired' return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 4, 0, 4 + len(payload)) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Success after password change") payload = b"S=1122334455667788990011223344556677889900" return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 3, 0, 4 + len(payload)) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid challenge length") return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + 1, EAP_TYPE_MSCHAPV2, 1, 0, 4 + 1, 0) idx += 1 if ctx['num'] == idx: logger.info("Test: Too short challenge packet") return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + 1, EAP_TYPE_MSCHAPV2, 1, 0, 4 + 1, 16) idx += 1 if ctx['num'] == idx: logger.info("Test: Challenge") return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + 1 + 16 + 6, EAP_TYPE_MSCHAPV2, 1, 0, 4 + 1 + 16 + 6, 16) + 16*b'A' + b'foobar' idx += 1 if ctx['num'] == idx: logger.info("Test: Failure - password expired") payload = b'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired' return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 4, 0, 4 + len(payload)) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Success after password change") if len(req) != 591: logger.info("Unexpected Change-Password packet length: %s" % len(req)) return None data = req[9:] enc_pw = data[0:516] data = data[516:] enc_hash = data[0:16] data = data[16:] peer_challenge = data[0:16] data = data[16:] # Reserved data = data[8:] nt_response = data[0:24] data = 
data[24:] flags = data logger.info("enc_hash: " + binascii.hexlify(enc_hash).decode()) logger.info("peer_challenge: " + binascii.hexlify(peer_challenge).decode()) logger.info("nt_response: " + binascii.hexlify(nt_response).decode()) logger.info("flags: " + binascii.hexlify(flags).decode()) auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff") logger.info("auth_challenge: " + binascii.hexlify(auth_challenge).decode()) auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response, peer_challenge, auth_challenge, "user") payload = b"S=" + binascii.hexlify(auth_resp).decode().upper().encode() logger.info("Success message payload: " + payload.decode()) return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 3, 0, 4 + len(payload)) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Failure - password expired") payload = b'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired' return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 4, 0, 4 + len(payload)) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Success after password change") if len(req) != 591: logger.info("Unexpected Change-Password packet length: %s" % len(req)) return None data = req[9:] enc_pw = data[0:516] data = data[516:] enc_hash = data[0:16] data = data[16:] peer_challenge = data[0:16] data = data[16:] # Reserved data = data[8:] nt_response = data[0:24] data = data[24:] flags = data logger.info("enc_hash: " + binascii.hexlify(enc_hash).decode()) logger.info("peer_challenge: " + binascii.hexlify(peer_challenge).decode()) logger.info("nt_response: " + binascii.hexlify(nt_response).decode()) logger.info("flags: " + binascii.hexlify(flags).decode()) auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff") 
logger.info("auth_challenge: " + binascii.hexlify(auth_challenge).decode()) auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response, peer_challenge, auth_challenge, "user") payload = b"S=" + binascii.hexlify(auth_resp).decode().upper().encode() logger.info("Success message payload: " + payload.decode()) return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 3, 0, 4 + len(payload)) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Challenge") return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + 1 + 16 + 6, EAP_TYPE_MSCHAPV2, 1, 0, 4 + 1 + 16 + 6, 16) + 16*b'A' + b'foobar' idx += 1 if ctx['num'] == idx: logger.info("Test: Failure - authentication failure") payload = b'E=691 R=1 C=00112233445566778899aabbccddeeff V=3 M=Authentication failed' return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 4, 0, 4 + len(payload)) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Challenge") return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + 1 + 16 + 6, EAP_TYPE_MSCHAPV2, 1, 0, 4 + 1 + 16 + 6, 16) + 16*b'A' + b'foobar' idx += 1 if ctx['num'] == idx: logger.info("Test: Failure - authentication failure") payload = b'E=691 R=1 C=00112233445566778899aabbccddeeff V=3 M=Authentication failed (2)' return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 4, 0, 4 + len(payload)) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Failure") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: Challenge - invalid ms_len and workaround disabled") return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + 1 + 16 + 6, EAP_TYPE_MSCHAPV2, 1, 0, 4 + 1 + 16 + 6 + 1, 16) + 16*b'A' + 
b'foobar' return None srv = start_radius_server(mschapv2_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) for i in range(0, 16): logger.info("RUN: %d" % i) if i == 12: dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MSCHAPV2", identity="user", password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c", wait_connect=False) elif i == 14: dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MSCHAPV2", identity="user", phase2="mschapv2_retry=0", password="password", wait_connect=False) elif i == 15: dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MSCHAPV2", identity="user", eap_workaround="0", password="password", wait_connect=False) else: dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MSCHAPV2", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15) if ev is None: raise Exception("Timeout on EAP start") if i in [8, 11, 12]: ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"], timeout=10) if ev is None: raise Exception("Timeout on new password request") id = ev.split(':')[0].split('-')[-1] dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw") if i in [11, 12]: ev = dev[0].wait_event(["CTRL-EVENT-PASSWORD-CHANGED"], timeout=10) if ev is None: raise Exception("Timeout on password change") ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=10) if ev is None: raise Exception("Timeout on EAP success") else: ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10) if ev is None: raise Exception("Timeout on EAP failure") if i in [13]: ev = dev[0].wait_event(["CTRL-REQ-IDENTITY"], timeout=10) if ev is None: raise Exception("Timeout on identity request") id = ev.split(':')[0].split('-')[-1] dev[0].request("CTRL-RSP-IDENTITY-" + id + ":user") ev = dev[0].wait_event(["CTRL-REQ-PASSWORD"], timeout=10) if ev is None: raise Exception("Timeout on password request") id = 
ev.split(':')[0].split('-')[-1] dev[0].request("CTRL-RSP-PASSWORD-" + id + ":password") # TODO: Does this work correctly? ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10) if ev is None: raise Exception("Timeout on EAP failure") if i in [4, 5, 6, 7, 14]: ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10) if ev is None: raise Exception("Timeout on EAP failure") else: time.sleep(0.05) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected(timeout=1) finally: stop_radius_server(srv) def test_eap_proto_mschapv2_errors(dev, apdev): """EAP-MSCHAPv2 protocol tests (error paths)""" check_eap_capa(dev[0], "MSCHAPV2") def mschapv2_fail_password_expired(ctx): logger.info("Test: Failure before challenge - password expired") payload = b'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired' return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 4, 0, 4 + len(payload)) + payload def mschapv2_success_after_password_change(ctx, req=None): logger.info("Test: Success after password change") if req is None or len(req) != 591: payload = b"S=1122334455667788990011223344556677889900" else: data = req[9:] enc_pw = data[0:516] data = data[516:] enc_hash = data[0:16] data = data[16:] peer_challenge = data[0:16] data = data[16:] # Reserved data = data[8:] nt_response = data[0:24] data = data[24:] flags = data logger.info("enc_hash: " + binascii.hexlify(enc_hash).decode()) logger.info("peer_challenge: " + binascii.hexlify(peer_challenge).decode()) logger.info("nt_response: " + binascii.hexlify(nt_response).decode()) logger.info("flags: " + binascii.hexlify(flags).decode()) auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff") logger.info("auth_challenge: " + binascii.hexlify(auth_challenge).decode()) auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response, peer_challenge, auth_challenge, "user") payload = b"S=" + binascii.hexlify(auth_resp).decode().upper().encode() 
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 4 + len(payload), EAP_TYPE_MSCHAPV2, 3, 0, 4 + len(payload)) + payload def mschapv2_handler(ctx, req): logger.info("mschapv2_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: return mschapv2_fail_password_expired(ctx) idx += 1 if ctx['num'] == idx: return mschapv2_success_after_password_change(ctx, req) idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: return mschapv2_fail_password_expired(ctx) idx += 1 if ctx['num'] == idx: return mschapv2_success_after_password_change(ctx, req) idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: return mschapv2_fail_password_expired(ctx) idx += 1 if ctx['num'] == idx: return mschapv2_success_after_password_change(ctx, req) idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: return mschapv2_fail_password_expired(ctx) idx += 1 if ctx['num'] == idx: return mschapv2_success_after_password_change(ctx, req) idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: return mschapv2_fail_password_expired(ctx) idx += 1 if ctx['num'] == idx: return mschapv2_success_after_password_change(ctx, req) idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: return mschapv2_fail_password_expired(ctx) idx += 1 if ctx['num'] == idx: return mschapv2_success_after_password_change(ctx, req) idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: return mschapv2_fail_password_expired(ctx) idx += 1 if ctx['num'] == idx: return 
mschapv2_success_after_password_change(ctx, req) idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: return mschapv2_fail_password_expired(ctx) idx += 1 if ctx['num'] == idx: return mschapv2_success_after_password_change(ctx, req) idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) idx += 1 if ctx['num'] == idx: return mschapv2_fail_password_expired(ctx) idx += 1 if ctx['num'] == idx: return mschapv2_success_after_password_change(ctx, req) idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) return None srv = start_radius_server(mschapv2_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) tests = ["os_get_random;eap_mschapv2_change_password", "generate_nt_response;eap_mschapv2_change_password", "get_master_key;eap_mschapv2_change_password", "nt_password_hash;eap_mschapv2_change_password", "old_nt_password_hash_encrypted_with_new_nt_password_hash"] for func in tests: with fail_test(dev[0], 1, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MSCHAPV2", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"], timeout=10) if ev is None: raise Exception("Timeout on new password request") id = ev.split(':')[0].split('-')[-1] dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw") time.sleep(0.1) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected(timeout=1) tests = ["encrypt_pw_block_with_password_hash;eap_mschapv2_change_password", "nt_password_hash;eap_mschapv2_change_password", "nt_password_hash;eap_mschapv2_success"] for func in tests: with fail_test(dev[0], 1, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MSCHAPV2", identity="user", password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c", wait_connect=False) ev = 
dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"], timeout=10) if ev is None: raise Exception("Timeout on new password request") id = ev.split(':')[0].split('-')[-1] dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw") time.sleep(0.1) wait_fail_trigger(dev[0], "GET_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected(timeout=1) tests = ["eap_msg_alloc;eap_mschapv2_change_password"] for func in tests: with alloc_fail(dev[0], 1, func): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MSCHAPV2", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"], timeout=10) if ev is None: raise Exception("Timeout on new password request") id = ev.split(':')[0].split('-')[-1] dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw") time.sleep(0.1) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected(timeout=1) finally: stop_radius_server(srv) def test_eap_proto_pwd(dev, apdev): """EAP-pwd protocol tests""" check_eap_capa(dev[0], "PWD") global eap_proto_pwd_test_done, eap_proto_pwd_test_wait eap_proto_pwd_test_done = False eap_proto_pwd_test_wait = False def pwd_handler(ctx, req): logger.info("pwd_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 global eap_proto_pwd_test_wait eap_proto_pwd_test_wait = False idx += 1 if ctx['num'] == idx: logger.info("Test: Missing payload") # EAP-pwd: Got a frame but pos is not NULL and len is 0 return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1, EAP_TYPE_PWD) idx += 1 if ctx['num'] == idx: logger.info("Test: Missing Total-Length field") # EAP-pwd: Frame too short to contain Total-Length field payload = struct.pack("B", 0x80) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: 
logger.info("Test: Too large Total-Length") # EAP-pwd: Incoming fragments whose total length = 65535 payload = struct.pack(">BH", 0x80, 65535) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: First fragment") # EAP-pwd: Incoming fragments whose total length = 10 # EAP-pwd: ACKing a 0 byte fragment payload = struct.pack(">BH", 0xc0, 10) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Total-Length value in the second fragment") # EAP-pwd: Incoming fragments whose total length = 0 # EAP-pwd: Unexpected new fragment start when previous fragment is still in use payload = struct.pack(">BH", 0x80, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: First and only fragment") # EAP-pwd: Incoming fragments whose total length = 0 # EAP-pwd: processing frame: exch 0, len 0 # EAP-pwd: Ignoring message with unknown opcode 128 payload = struct.pack(">BH", 0x80, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: First and only fragment with extra data") # EAP-pwd: Incoming fragments whose total length = 0 # EAP-pwd: processing frame: exch 0, len 1 # EAP-pwd: Ignoring message with unknown opcode 128 payload = struct.pack(">BHB", 0x80, 0, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: First fragment") # EAP-pwd: Incoming fragments whose total length = 2 # EAP-pwd: ACKing a 1 byte fragment payload = struct.pack(">BHB", 0xc0, 2, 1) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 
len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Extra data in the second fragment") # EAP-pwd: Buffer overflow attack detected (3 vs. 1)! payload = struct.pack(">BBB", 0x0, 2, 3) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Too short id exchange") # EAP-pwd: processing frame: exch 1, len 0 # EAP-PWD: PWD-ID-Req -> FAILURE payload = struct.pack(">B", 0x01) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unsupported rand func in id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=0 random=0 prf=0 prep=0 # EAP-PWD: PWD-ID-Req -> FAILURE payload = struct.pack(">BHBBLB", 0x01, 0, 0, 0, 0, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unsupported prf in id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=0 prep=0 # EAP-PWD: PWD-ID-Req -> FAILURE payload = struct.pack(">BHBBLB", 0x01, 19, 1, 0, 0, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unsupported password pre-processing technique in id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=255 # EAP-PWD: Unsupported password pre-processing technique (Prep=255) # EAP-PWD: PWD-ID-Req -> FAILURE payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 255) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=0 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0) return 
struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected id exchange") # EAP-pwd: processing frame: exch 1, len 9 # EAP-PWD: PWD-Commit-Req -> FAILURE payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected commit exchange") # EAP-pwd: processing frame: exch 2, len 0 # EAP-PWD: PWD-ID-Req -> FAILURE payload = struct.pack(">B", 0x02) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=0 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=None)") # EAP-pwd commit request, password prep is NONE # EAP-pwd: Unexpected Commit payload length 0 (expected 96) payload = struct.pack(">B", 0x02) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=0 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Commit payload with all zeros values --> Shared key at infinity") # EAP-pwd: Invalid coordinate in element payload = struct.pack(">B", 0x02) + 96*b'\0' return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + 
payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=0 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Commit payload with valid values") # EAP-pwd commit request, password prep is NONE element = binascii.unhexlify("8dcab2862c5396839a6bac0c689ff03d962863108e7c275bbf1d6eedf634ee832a214db99f0d0a1a6317733eecdd97f0fc4cda19f57e1bb9bb9c8dcf8c60ba6f") scalar = binascii.unhexlify("450f31e058cf2ac2636a5d6e2b3c70b1fcc301957f0716e77f13aa69f9a2e5bd") payload = struct.pack(">B", 0x02) + element + scalar return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Confirm payload length 0") # EAP-pwd: Unexpected Confirm payload length 0 (expected 32) payload = struct.pack(">B", 0x03) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=0 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Commit payload with valid values") # EAP-pwd commit request, password prep is NONE element = binascii.unhexlify("8dcab2862c5396839a6bac0c689ff03d962863108e7c275bbf1d6eedf634ee832a214db99f0d0a1a6317733eecdd97f0fc4cda19f57e1bb9bb9c8dcf8c60ba6f") scalar = binascii.unhexlify("450f31e058cf2ac2636a5d6e2b3c70b1fcc301957f0716e77f13aa69f9a2e5bd") payload = struct.pack(">B", 0x02) + element + 
scalar return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Confirm payload with incorrect value") # EAP-PWD (peer): confirm did not verify payload = struct.pack(">B", 0x03) + 32*b'\0' return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected confirm exchange") # EAP-pwd: processing frame: exch 3, len 0 # EAP-PWD: PWD-ID-Req -> FAILURE payload = struct.pack(">B", 0x03) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unsupported password pre-processing technique SASLprep in id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=2 # EAP-PWD: Unsupported password pre-processing technique (Prep=2) # EAP-PWD: PWD-ID-Req -> FAILURE payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 2) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=1 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 1) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=MS)") # EAP-pwd commit request, password prep is MS # EAP-pwd: Unexpected Commit payload length 0 (expected 96) payload = struct.pack(">B", 0x02) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=3 payload = 
struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 3) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=ssha1)") # EAP-pwd commit request, password prep is salted sha1 # EAP-pwd: Invalid Salt-len payload = struct.pack(">B", 0x02) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=3 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 3) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=ssha1)") # EAP-pwd commit request, password prep is salted sha1 # EAP-pwd: Invalid Salt-len payload = struct.pack(">BB", 0x02, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=3 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 3) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=ssha1)") # EAP-pwd commit request, password prep is salted sha1 # EAP-pwd: Unexpected Commit payload length 1 (expected 98) payload = struct.pack(">BB", 0x02, 1) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=4 payload = 
struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 4) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=ssha256)") # EAP-pwd commit request, password prep is salted sha256 # EAP-pwd: Invalid Salt-len payload = struct.pack(">B", 0x02) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=4 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 4) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=ssha256)") # EAP-pwd commit request, password prep is salted sha256 # EAP-pwd: Invalid Salt-len payload = struct.pack(">BB", 0x02, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=4 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 4) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=ssha256)") # EAP-pwd commit request, password prep is salted sha256 # EAP-pwd: Unexpected Commit payload length 1 (expected 98) payload = struct.pack(">BB", 0x02, 1) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=5 payload = 
struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 5) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=ssha512)") # EAP-pwd commit request, password prep is salted sha512 # EAP-pwd: Invalid Salt-len payload = struct.pack(">B", 0x02) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=5 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 5) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=ssha512)") # EAP-pwd commit request, password prep is salted sha512 # EAP-pwd: Invalid Salt-len payload = struct.pack(">BB", 0x02, 0) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: eap_proto_pwd_test_wait = True logger.info("Test: Valid id exchange") # EAP-PWD: Server EAP-pwd-ID proposal: group=19 random=1 prf=1 prep=5 payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 5) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Commit payload length (prep=ssha512)") # EAP-pwd commit request, password prep is salted sha512 # EAP-pwd: Unexpected Commit payload length 1 (expected 98) payload = struct.pack(">BB", 0x02, 1) return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + len(payload), EAP_TYPE_PWD) + payload logger.info("No more test responses available - test case completed") global eap_proto_pwd_test_done eap_proto_pwd_test_done = True return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 
                           4)

    srv = start_radius_server(pwd_handler)

    try:
        hapd = start_ap(apdev[0])
        dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

        i = 0
        while not eap_proto_pwd_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd user",
                           password="secret password",
                           wait_connect=False)
            ok = False
            for j in range(5):
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-STATUS",
                                        "CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                       timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP start")
                if "CTRL-EVENT-EAP-PROPOSED-METHOD" in ev:
                    ok = True
                    break
                if "CTRL-EVENT-EAP-STATUS" in ev and \
                   "status='completion' parameter='failure'" in ev:
                    ok = True
                    break
            if not ok:
                raise Exception("Expected EAP event not seen")
            if eap_proto_pwd_test_wait:
                # Wait for the handler to clear the flag once the peer has
                # responded to the server message for this step.
                for k in range(20):
                    time.sleep(0.1)
                    if not eap_proto_pwd_test_wait:
                        break
                if eap_proto_pwd_test_wait:
                    raise Exception("eap_proto_pwd_test_wait not cleared")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)


def test_eap_proto_pwd_invalid_scalar(dev, apdev):
    """EAP-pwd protocol tests - invalid server scalar"""
    check_eap_capa(dev[0], "PWD")
    run_eap_proto_pwd_invalid_scalar(dev, apdev, 32*b'\0')
    run_eap_proto_pwd_invalid_scalar(dev, apdev, 31*b'\0' + b'\x01')
    # Group Order
    val = binascii.unhexlify("FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551")
    run_eap_proto_pwd_invalid_scalar(dev, apdev, val)
    # Group Order - 1
    val = binascii.unhexlify("FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632550")
    run_eap_proto_pwd_invalid_scalar(dev, apdev, val, valid_scalar=True)


def run_eap_proto_pwd_invalid_scalar(dev, apdev, scalar, valid_scalar=False):
    """Run one EAP-pwd exchange where the server sends the given Commit scalar.

    The peer is expected to reject an invalid scalar (i.e., never reach the
    Confirm step); with valid_scalar=True it must proceed to Confirm.
    """
    global eap_proto_pwd_invalid_scalar_fail
    eap_proto_pwd_invalid_scalar_fail = False

    def pwd_handler(ctx, req):
        logger.info("pwd_handler - RX " + binascii.hexlify(req).decode())
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload),
                               EAP_TYPE_PWD) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Commit payload with invalid scalar")
            payload = struct.pack(">B", 0x02) + binascii.unhexlify("67feb2b46d59e6dd3af3a429ec9c04a949337564615d3a2c19bdf6826eb6f5efa303aed86af3a072ed819d518d620adb2659f0e84c4f8b739629db8c93088cfc") + scalar
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload),
                               EAP_TYPE_PWD) + payload

        idx += 1
        if ctx['num'] == idx:
            # Reaching this step means the peer accepted the scalar above.
            logger.info("Confirm message next - should not get here")
            global eap_proto_pwd_invalid_scalar_fail
            eap_proto_pwd_invalid_scalar_fail = True
            payload = struct.pack(">B", 0x03) + 32*b'\0'
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload),
                               EAP_TYPE_PWD) + payload

        logger.info("No more test responses available - test case completed")
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(pwd_handler)

    try:
        hapd = start_ap(apdev[0])
        dev[0].scan_for_bss(hapd.own_addr(), freq=2412)
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PWD", identity="pwd user",
                       password="secret password", wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
        if ev is None:
            raise Exception("EAP failure not reported")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected(timeout=1)
        dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)

    if valid_scalar and not eap_proto_pwd_invalid_scalar_fail:
        raise Exception("Peer did not accept valid EAP-pwd-Commit scalar")
    if not valid_scalar and eap_proto_pwd_invalid_scalar_fail:
        raise Exception("Peer did not stop after invalid EAP-pwd-Commit scalar")


def test_eap_proto_pwd_invalid_element(dev, apdev):
    """EAP-pwd protocol tests - invalid server element"""
    check_eap_capa(dev[0], "PWD")
    # Invalid x,y coordinates
    run_eap_proto_pwd_invalid_element(dev, apdev, 64*b'\x00')
    run_eap_proto_pwd_invalid_element(dev, apdev, 32*b'\x00' + 32*b'\x01')
    run_eap_proto_pwd_invalid_element(dev, apdev, 32*b'\x01' + 32*b'\x00')
    run_eap_proto_pwd_invalid_element(dev, apdev, 32*b'\xff' + 32*b'\x01')
    run_eap_proto_pwd_invalid_element(dev, apdev, 32*b'\x01' + 32*b'\xff')
    run_eap_proto_pwd_invalid_element(dev, apdev, 64*b'\xff')
    # Not on curve
    run_eap_proto_pwd_invalid_element(dev, apdev, 64*b'\x01')


def run_eap_proto_pwd_invalid_element(dev, apdev, element):
    """Run one EAP-pwd exchange where the server sends the given Commit element.

    The peer must reject the invalid element and never reach Confirm.
    """
    global eap_proto_pwd_invalid_element_fail
    eap_proto_pwd_invalid_element_fail = False

    def pwd_handler(ctx, req):
        logger.info("pwd_handler - RX " + binascii.hexlify(req).decode())
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload),
                               EAP_TYPE_PWD) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Commit payload with invalid element")
            payload = struct.pack(">B", 0x02) + element + 31*b'\0' + b'\x02'
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload),
                               EAP_TYPE_PWD) + payload

        idx += 1
        if ctx['num'] == idx:
            # Reaching this step means the peer accepted the element above.
            logger.info("Confirm message next - should not get here")
            global eap_proto_pwd_invalid_element_fail
            eap_proto_pwd_invalid_element_fail = True
            payload = struct.pack(">B", 0x03) + 32*b'\0'
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload),
                               EAP_TYPE_PWD) + payload

        logger.info("No more test responses available - test case completed")
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(pwd_handler)

    try:
        hapd = start_ap(apdev[0])
        dev[0].scan_for_bss(hapd.own_addr(), freq=2412)
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PWD", identity="pwd user",
                       password="secret password", wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
        if ev is None:
            raise Exception("EAP failure not reported")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected(timeout=1)
        dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)

    if eap_proto_pwd_invalid_element_fail:
        raise Exception("Peer did not stop after invalid EAP-pwd-Commit element")


def rx_msg(src):
    """Wait for an EAPOL-TX event from src and return the hex-encoded frame."""
    ev = src.wait_event(["EAPOL-TX"], timeout=5)
    if ev is None:
        raise Exception("No EAPOL-TX")
    # Third space-separated field of the event is the frame contents
    return ev.split(' ')[2]


def tx_msg(src, dst, msg):
    """Inject the hex-encoded frame msg into dst as if sent by src."""
    dst.request("EAPOL_RX " + src.own_addr() + " " + msg)


def proxy_msg(src, dst):
    """Forward one EAPOL frame from src to dst and return the frame."""
    msg = rx_msg(src)
    tx_msg(src, dst, msg)
    return msg


def start_pwd_exchange(dev, ap):
    """Associate dev with a fresh AP and proxy frames through EAP-pwd-ID.

    External EAPOL frame I/O is enabled on both ends so the test case can
    inspect and modify individual frames. Returns the hostapd instance.
    """
    check_eap_capa(dev, "PWD")
    params = hostapd.wpa2_eap_params(ssid="test-wpa2-eap")
    hapd = hostapd.add_ap(ap, params)
    hapd.request("SET ext_eapol_frame_io 1")
    dev.request("SET ext_eapol_frame_io 1")
    dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", eap="PWD",
                identity="pwd user", password="secret password",
                wait_connect=False, scan_freq="2412")
    proxy_msg(hapd, dev)  # EAP-Identity/Request
    proxy_msg(dev, hapd)  # EAP-Identity/Response
    proxy_msg(hapd, dev)  # EAP-pwd-ID/Request
    proxy_msg(dev, hapd)  # EAP-pwd-ID/Response
    return hapd


def test_eap_proto_pwd_unexpected_fragment(dev, apdev):
    """EAP-pwd protocol tests - unexpected more-fragment frame"""
    hapd = start_pwd_exchange(dev[0], apdev[0])

    # EAP-pwd-Commit/Request
    req = rx_msg(hapd)
    if req[18:20] != "02":
        raise Exception("Unexpected EAP-pwd-Commit/Request flag")
    msg = req[0:18] + "42" + req[20:]
    tx_msg(hapd, dev[0], msg)


def test_eap_proto_pwd_reflection_attack(dev, apdev):
    """EAP-pwd protocol tests - reflection attack on the server"""
    hapd = start_pwd_exchange(dev[0], apdev[0])

    # EAP-pwd-Commit/Request
    req = proxy_msg(hapd, dev[0])
    if len(req) != 212:
        raise Exception("Unexpected EAP-pwd-Commit/Response length")

    # EAP-pwd-Commit/Response
    resp = rx_msg(dev[0])
    # Reflect same Element/Scalar back to the server
    msg = resp[0:20] + req[20:]
    tx_msg(dev[0], hapd, msg)

    # EAP-pwd-Commit/Response or EAP-Failure
    req = rx_msg(hapd)
    if req[8:10] != "04":
        # reflect EAP-pwd-Confirm/Request
        msg = req[0:8] + "02" + req[10:]
        tx_msg(dev[0], hapd, msg)
        req = rx_msg(hapd)
        if req[8:10] == "03":
            raise Exception("EAP-Success after reflected Element/Scalar")
        raise Exception("No EAP-Failure to reject invalid EAP-pwd-Commit/Response")


def test_eap_proto_pwd_invalid_scalar_peer(dev, apdev):
    """EAP-pwd protocol tests - invalid peer scalar"""
    run_eap_proto_pwd_invalid_scalar_peer(dev, apdev, 32*"00")
    run_eap_proto_pwd_invalid_scalar_peer(dev, apdev, 31*"00" + "01")
    # Group Order
    run_eap_proto_pwd_invalid_scalar_peer(dev, apdev,
                                          "FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551")
    # Group Order - 1
    run_eap_proto_pwd_invalid_scalar_peer(dev, apdev,
                                          "FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632550",
                                          valid_scalar=True)


def run_eap_proto_pwd_invalid_scalar_peer(dev, apdev, scalar, valid_scalar=False):
    """Replace the peer's Commit scalar with the given hex value.

    The server must reject an invalid scalar with EAP-Failure; with
    valid_scalar=True it must not fail at this point.
    """
    hapd = start_pwd_exchange(dev[0], apdev[0])
    proxy_msg(hapd, dev[0])  # EAP-pwd-Commit/Request

    # EAP-pwd-Commit/Response
    resp = rx_msg(dev[0])
    # Replace scalar with an invalid value
    msg = resp[0:20] + resp[20:148] + scalar
    tx_msg(dev[0], hapd, msg)

    # EAP-pwd-Commit/Response or EAP-Failure
    req = rx_msg(hapd)
    if valid_scalar and req[8:10] == "04":
        raise Exception("Unexpected EAP-Failure with valid scalar")
    if not valid_scalar and req[8:10] != "04":
        raise Exception("No EAP-Failure to reject invalid scalar")
    dev[0].request("REMOVE_NETWORK all")
    dev[0].wait_disconnected(timeout=1)
    hapd.disable()


def test_eap_proto_pwd_invalid_element_peer(dev, apdev):
    """EAP-pwd protocol tests - invalid peer element"""
    # Invalid x,y coordinates
    run_eap_proto_pwd_invalid_element_peer(dev, apdev, 64*'00')
    run_eap_proto_pwd_invalid_element_peer(dev, apdev, 32*'00' + 32*'01')
    run_eap_proto_pwd_invalid_element_peer(dev, apdev, 32*'01' + 32*'00')
    run_eap_proto_pwd_invalid_element_peer(dev, apdev, 32*'ff' + 32*'01')
    run_eap_proto_pwd_invalid_element_peer(dev, apdev, 32*'01' + 32*'ff')
    run_eap_proto_pwd_invalid_element_peer(dev, apdev, 64*'ff')
    # Not on curve
    run_eap_proto_pwd_invalid_element_peer(dev, apdev, 64*'01')


def run_eap_proto_pwd_invalid_element_peer(dev, apdev, element):
    """Replace the peer's Commit element with the given hex value.

    The server must reject the invalid element with EAP-Failure.
    """
    hapd = start_pwd_exchange(dev[0], apdev[0])
    proxy_msg(hapd, dev[0])  # EAP-pwd-Commit/Request

    # EAP-pwd-Commit/Response
    resp = rx_msg(dev[0])
    # Replace element with an invalid value
    msg = resp[0:20] + element + resp[148:]
    tx_msg(dev[0], hapd, msg)

    # EAP-pwd-Commit/Response or EAP-Failure
    req = rx_msg(hapd)
    if req[8:10] != "04":
        raise Exception("No EAP-Failure to reject invalid element")
    dev[0].request("REMOVE_NETWORK all")
    dev[0].wait_disconnected(timeout=1)
    hapd.disable()


def test_eap_proto_pwd_errors(dev, apdev):
    """EAP-pwd local error cases"""
    check_eap_capa(dev[0], "PWD")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

    for i in range(1, 4):
        with alloc_fail(dev[0], i, "eap_pwd_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd user",
                           password="secret password",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    with alloc_fail(dev[0], 1, "eap_pwd_get_session_id"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PWD", identity="pwd user",
                       fragment_size="0",
                       password="secret password")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    funcs = ["eap_pwd_getkey", "eap_pwd_get_emsk",
             "=wpabuf_alloc;eap_pwd_perform_commit_exchange",
             "=wpabuf_alloc;eap_pwd_perform_confirm_exchange"]
    for func in funcs:
        with alloc_fail(dev[0], 1, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd user@domain",
                           password="secret password", erp="1",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    for i in range(1, 5):
        with alloc_fail(dev[0], i, "eap_pwd_perform_id_exchange"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd user",
                           password="secret password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            ok = False
            for j in range(10):
                state = dev[0].request('GET_ALLOC_FAIL')
                if state.startswith('0:'):
                    ok = True
                    break
                time.sleep(0.1)
            if not ok:
                raise Exception("No allocation failure seen")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    with alloc_fail(dev[0], 1, "wpabuf_alloc;eap_pwd_perform_id_exchange"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PWD", identity="pwd user",
                       password="secret password",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    for i in range(1, 9):
        with alloc_fail(dev[0], i, "eap_pwd_perform_commit_exchange"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd user",
                           password="secret password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            ok = False
            for j in range(10):
                state = dev[0].request('GET_ALLOC_FAIL')
                if state.startswith('0:'):
                    ok = True
                    break
                time.sleep(0.1)
            if not ok:
                raise Exception("No allocation failure seen")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    for i in range(1, 12):
        with alloc_fail(dev[0], i, "eap_pwd_perform_confirm_exchange"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd user",
                           password="secret password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            ok = False
            for j in range(10):
                state = dev[0].request('GET_ALLOC_FAIL')
                if state.startswith('0:'):
                    ok = True
                    break
                time.sleep(0.1)
            if not ok:
                raise Exception("No allocation failure seen")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    for i in range(1, 5):
        with alloc_fail(dev[0], i, "eap_msg_alloc;=eap_pwd_process"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd user",
                           password="secret password", fragment_size="50",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # No password configured
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="PWD", identity="pwd user",
                   wait_connect=False)
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD vendor=0 method=52"],
                           timeout=15)
    if ev is None:
        raise Exception("EAP-pwd not started")
    dev[0].request("REMOVE_NETWORK all")
    dev[0].wait_disconnected()

    funcs = [(1, "hash_nt_password_hash;eap_pwd_perform_commit_exchange"),
             (1, "=crypto_bignum_init;eap_pwd_perform_commit_exchange"),
             (1, "=crypto_ec_point_init;eap_pwd_perform_commit_exchange"),
             (2, "=crypto_ec_point_init;eap_pwd_perform_commit_exchange"),
             (1, "=crypto_ec_point_mul;eap_pwd_perform_commit_exchange"),
             (2, "=crypto_ec_point_mul;eap_pwd_perform_commit_exchange"),
             (3, "=crypto_ec_point_mul;eap_pwd_perform_commit_exchange"),
             (1, "=crypto_ec_point_add;eap_pwd_perform_commit_exchange"),
             (1, "=crypto_ec_point_invert;eap_pwd_perform_commit_exchange"),
             (1, "=crypto_ec_point_to_bin;eap_pwd_perform_commit_exchange"),
             (1, "crypto_hash_finish;eap_pwd_kdf"),
             (1, "crypto_ec_point_from_bin;eap_pwd_get_element"),
             (3, "crypto_bignum_init;compute_password_element"),
             (4, "crypto_bignum_init;compute_password_element"),
             (1, "crypto_bignum_init_set;compute_password_element"),
             (2, "crypto_bignum_init_set;compute_password_element"),
             (3, "crypto_bignum_init_set;compute_password_element"),
             (1, "crypto_bignum_to_bin;compute_password_element"),
             (1, "crypto_ec_point_compute_y_sqr;compute_password_element"),
             (1, "crypto_ec_point_solve_y_coord;compute_password_element"),
             (1, "crypto_bignum_rand;compute_password_element"),
             (1, "crypto_bignum_sub;compute_password_element")]
    for count, func in funcs:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd-hash",
                           password_hex="hash:e3718ece8ab74792cbbfffd316d2d19a",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10)
            if ev is None:
                raise Exception("No EAP-Failure reported")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    params = {"ssid": "eap-test2", "wpa": "2", "wpa_key_mgmt": "WPA-EAP",
              "rsn_pairwise": "CCMP", "ieee8021x": "1",
              "eap_server": "1", "eap_user_file": "auth_serv/eap_user.conf",
              "pwd_group": "19", "fragment_size": "40"}
    hapd2 = hostapd.add_ap(apdev[1], params)
    dev[0].scan_for_bss(hapd2.own_addr(), freq=2412)

    with alloc_fail(dev[0], 1, "wpabuf_alloc;=eap_pwd_process"):
        dev[0].connect("eap-test2", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PWD", identity="pwd user",
                       password="secret password",
                       wait_connect=False)
        wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    for i in range(1, 5):
        with fail_test(dev[0], i,
                       "=crypto_ec_point_to_bin;eap_pwd_perform_confirm_exchange"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd-hash",
                           password_hex="hash:e3718ece8ab74792cbbfffd316d2d19a",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10)
            if ev is None:
                raise Exception("No EAP-Failure reported")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    dev[0].dump_monitor()


def run_eap_pwd_connect(dev, hash=True, fragment=2000):
    """Start one EAP-pwd connection attempt and clean it up afterwards."""
    if hash:
        dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                    fragment_size=str(fragment), eap="PWD",
                    identity="pwd-hash",
                    password_hex="hash:e3718ece8ab74792cbbfffd316d2d19a",
                    scan_freq="2412", wait_connect=False)
    else:
        dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                    fragment_size=str(fragment), eap="PWD",
                    identity="pwd-hash-sha1",
                    password="secret password",
                    scan_freq="2412", wait_connect=False)
    ev = dev.wait_event(["CTRL-EVENT-EAP-SUCCESS",
                         "CTRL-EVENT-EAP-FAILURE",
                         "CTRL-EVENT-DISCONNECTED"], timeout=1)
    dev.request("REMOVE_NETWORK all")
    if not ev or "CTRL-EVENT-DISCONNECTED" not in ev:
        dev.wait_disconnected()
    dev.dump_monitor()


def test_eap_proto_pwd_errors_server(dev, apdev):
    """EAP-pwd local error cases on server"""
    check_eap_capa(dev[0], "PWD")
    params = int_eap_server_params()
    params['erp_domain'] = 'example.com'
    params['eap_server_erp'] = '1'
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

    tests = [(1, "eap_pwd_init"),
             (2, "eap_pwd_init"),
             (3, "eap_pwd_init"),
             (1, "eap_pwd_build_id_req"),
             (1, "eap_pwd_build_commit_req"),
             (1, "eap_pwd_build_confirm_req"),
             (1, "eap_pwd_h_init;eap_pwd_build_confirm_req"),
             (1, "wpabuf_alloc;eap_pwd_build_confirm_req"),
             (1, "eap_msg_alloc;eap_pwd_build_req"),
             (1, "eap_pwd_process_id_resp"),
             (1, "get_eap_pwd_group;eap_pwd_process_id_resp"),
             (1, "eap_pwd_process_confirm_resp"),
             (1, "eap_pwd_h_init;eap_pwd_process_confirm_resp"),
             (1, "compute_keys;eap_pwd_process_confirm_resp"),
             (1, "eap_pwd_getkey"),
             (1, "eap_pwd_get_emsk"),
             (1, "eap_pwd_get_session_id")]
    for count, func in tests:
        with alloc_fail(hapd, count, func):
            run_eap_pwd_connect(dev[0], hash=True)

    tests = [(1, "eap_msg_alloc;eap_pwd_build_req"),
             (2, "eap_msg_alloc;eap_pwd_build_req"),
             (1, "wpabuf_alloc;eap_pwd_process")]
    for count, func in tests:
        with alloc_fail(hapd, count, func):
            run_eap_pwd_connect(dev[0], hash=True, fragment=13)

    tests = [(4, "eap_pwd_init")]
    for count, func in tests:
        with alloc_fail(hapd, count, func):
            run_eap_pwd_connect(dev[0], hash=False)

    tests = [(1, "eap_pwd_build_id_req"),
             (1, "eap_pwd_build_commit_req"),
             (1, "crypto_ec_point_mul;eap_pwd_build_commit_req"),
             (1, "crypto_ec_point_invert;eap_pwd_build_commit_req"),
             (1, "crypto_ec_point_to_bin;eap_pwd_build_commit_req"),
             (1, "crypto_ec_point_to_bin;eap_pwd_build_confirm_req"),
             (2, "=crypto_ec_point_to_bin;eap_pwd_build_confirm_req"),
             (1, "hash_nt_password_hash;eap_pwd_process_id_resp"),
             (1, "compute_password_element;eap_pwd_process_id_resp"),
             (1, "crypto_bignum_init;eap_pwd_process_commit_resp"),
             (1, "crypto_ec_point_mul;eap_pwd_process_commit_resp"),
             (2, "crypto_ec_point_mul;eap_pwd_process_commit_resp"),
             (1, "crypto_ec_point_add;eap_pwd_process_commit_resp"),
             (1, "crypto_ec_point_to_bin;eap_pwd_process_confirm_resp"),
             (2, "=crypto_ec_point_to_bin;eap_pwd_process_confirm_resp")]
    for count, func in tests:
        with fail_test(hapd, count, func):
            run_eap_pwd_connect(dev[0], hash=True)


def start_pwd_assoc(dev, hapd):
    """Start an EAP-pwd association and proxy frames through EAP-pwd-Identity/Request."""
    dev.connect("test-wpa2-eap", key_mgmt="WPA-EAP", eap="PWD",
                identity="pwd user", password="secret password",
                wait_connect=False, scan_freq="2412")
    proxy_msg(hapd, dev)  # EAP-Identity/Request
    proxy_msg(dev, hapd)  # EAP-Identity/Response
    proxy_msg(hapd, dev)  # EAP-pwd-Identity/Request


def stop_pwd_assoc(dev, hapd):
    """Tear down the association started with start_pwd_assoc()."""
    dev.request("REMOVE_NETWORK all")
    dev.wait_disconnected()
    dev.dump_monitor()
    hapd.dump_monitor()


def test_eap_proto_pwd_server(dev, apdev):
    """EAP-pwd protocol testing for the server"""
    check_eap_capa(dev[0], "PWD")
    params = int_eap_server_params()
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)
    hapd.request("SET ext_eapol_frame_io 1")
    dev[0].request("SET ext_eapol_frame_io 1")

    start_pwd_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Replace exch field with unexpected value
    # --> EAP-pwd: Unexpected opcode=4 in state=0
    msg = resp[0:18] + "04" + resp[20:]
    tx_msg(dev[0], hapd, msg)

    # Too short EAP-pwd header (no flags/exch field)
    # --> EAP-pwd: Invalid frame
    msg = resp[0:4] + "0005" + resp[8:12] + "0005" + "34"
    tx_msg(dev[0], hapd, msg)

    # Too short EAP-pwd header (L=1 but only one octet of total length field)
    # --> EAP-pwd: Frame too short to contain Total-Length field
    msg = resp[0:4] + "0007" + resp[8:12] + "0007" + "34" + "81ff"
    tx_msg(dev[0], hapd, msg)
    # server continues exchange, so start from scratch for the next step
    rx_msg(hapd)
    stop_pwd_assoc(dev[0], hapd)

    start_pwd_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Too large total length
    msg = resp[0:4] + "0008" + resp[8:12] + "0008" + "34" + "c1ffff"
    tx_msg(dev[0], hapd, msg)
    # server continues exchange, so start from scratch for the next step
    rx_msg(hapd)
    stop_pwd_assoc(dev[0], hapd)

    start_pwd_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # First fragment
    msg = resp[0:4] + "0009" + resp[8:12] + "0009" + "34" + "c100ff" + "aa"
    tx_msg(dev[0], hapd, msg)
    # Ack
    req = rx_msg(hapd)
    # Unexpected first fragment
    # --> EAP-pwd: Unexpected new fragment start when previous fragment is still in use
    msg = resp[0:4] + "0009" + resp[8:10] + req[10:12] + "0009" + "34" + "c100ee" + "bb"
    tx_msg(dev[0], hapd, msg)
    # server continues exchange, so start from scratch for the next step
    rx_msg(hapd)
    stop_pwd_assoc(dev[0], hapd)

    start_pwd_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Too much data in first fragment
    # --> EAP-pwd: Buffer overflow attack detected! (0+2 > 1)
    msg = resp[0:4] + "000a" + resp[8:12] + "000a" + "34" + "c10001" + "aabb"
    tx_msg(dev[0], hapd, msg)
    # EAP-Failure
    rx_msg(hapd)
    stop_pwd_assoc(dev[0], hapd)

    start_pwd_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Change parameters
    # --> EAP-pwd: peer changed parameters
    msg = resp[0:20] + "ff" + resp[22:]
    tx_msg(dev[0], hapd, msg)
    # EAP-Failure
    rx_msg(hapd)
    stop_pwd_assoc(dev[0], hapd)

    start_pwd_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Too short ID response
    # --> EAP-pwd: Invalid ID response
    msg = resp[0:4] + "000a" + resp[8:12] + "000a" + "34" + "01ffeeddcc"
    tx_msg(dev[0], hapd, msg)
    # server continues exchange, so start from scratch for the next step
    rx_msg(hapd)
    stop_pwd_assoc(dev[0], hapd)

    start_pwd_assoc(dev[0], hapd)
    # EAP-pwd-Identity/Response
    resp = rx_msg(dev[0])
    tx_msg(dev[0], hapd, resp)
    # EAP-pwd-Commit/Request
    req = rx_msg(hapd)
    # Unexpected EAP-pwd-Identity/Response
    # --> EAP-pwd: Unexpected opcode=1 in state=1
    msg = resp[0:10] + req[10:12] + resp[12:]
    tx_msg(dev[0], hapd, msg)
    # server continues exchange, so start from scratch for the next step
    rx_msg(hapd)
    stop_pwd_assoc(dev[0], hapd)

    start_pwd_assoc(dev[0], hapd)
    proxy_msg(dev[0], hapd)  # EAP-pwd-Identity/Response
    proxy_msg(hapd, dev[0])  # EAP-pwd-Commit/Request
    # EAP-pwd-Commit/Response
    resp = rx_msg(dev[0])
    # Too short Commit response
    # --> EAP-pwd: Unexpected Commit payload length 4 (expected 96)
    msg = resp[0:4] + "000a" + resp[8:12] + "000a" + "34" + "02ffeeddcc"
    tx_msg(dev[0], hapd, msg)
    # EAP-Failure
    rx_msg(hapd)
    stop_pwd_assoc(dev[0], hapd)

    start_pwd_assoc(dev[0], hapd)
    proxy_msg(dev[0], hapd)  # EAP-pwd-Identity/Response
    proxy_msg(hapd, dev[0])  # EAP-pwd-Commit/Request
    proxy_msg(dev[0], hapd)  # EAP-pwd-Commit/Response
    proxy_msg(hapd, dev[0])  # EAP-pwd-Confirm/Request
    # EAP-pwd-Confirm/Response
    resp = rx_msg(dev[0])
    # Too short Confirm response
    # --> EAP-pwd: Unexpected Confirm payload length 4 (expected 32)
    msg = resp[0:4] + "000a" + resp[8:12] + "000a" + "34" + "03ffeeddcc"
    tx_msg(dev[0], hapd, msg)
    # EAP-Failure
    rx_msg(hapd)
    stop_pwd_assoc(dev[0], hapd)

    start_pwd_assoc(dev[0], hapd)
    resp = rx_msg(dev[0])
    # Set M=1
    # --> EAP-pwd: No buffer for reassembly
    msg = resp[0:18] + "41" + resp[20:]
    tx_msg(dev[0], hapd, msg)
    # EAP-Failure
    rx_msg(hapd)
    stop_pwd_assoc(dev[0], hapd)


def test_eap_proto_erp(dev, apdev):
    """ERP protocol tests"""
    check_erp_capa(dev[0])

    global eap_proto_erp_test_done
    eap_proto_erp_test_done = False

    def erp_handler(ctx, req):
        logger.info("erp_handler - RX " + binascii.hexlify(req).decode())
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing type")
            return struct.pack(">BBH", EAP_CODE_INITIATE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected type")
            return struct.pack(">BBHB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1, 255)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing Reserved field")
            return struct.pack(">BBHB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1, EAP_ERP_TYPE_REAUTH_START)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Zero-length TVs/TLVs")
            payload = b""
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short TLV")
            payload = struct.pack("B", 191)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated TLV")
            payload = struct.pack("BB", 191, 1)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Ignored unknown TLV and unknown TV/TLV terminating parsing")
            payload = struct.pack("BBB", 191, 0, 192)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: More than one keyName-NAI")
            payload = struct.pack("BBBB", EAP_ERP_TLV_KEYNAME_NAI, 0,
                                  EAP_ERP_TLV_KEYNAME_NAI, 0)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short TLV keyName-NAI")
            payload = struct.pack("B", EAP_ERP_TLV_KEYNAME_NAI)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated TLV keyName-NAI")
            payload = struct.pack("BB", EAP_ERP_TLV_KEYNAME_NAI, 1)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid rRK lifetime TV followed by too short rMSK lifetime TV")
            payload = struct.pack(">BLBH", EAP_ERP_TV_RRK_LIFETIME, 0,
                                  EAP_ERP_TV_RMSK_LIFETIME, 0)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing type (Finish)")
            return struct.pack(">BBH", EAP_CODE_FINISH, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected type (Finish)")
            return struct.pack(">BBHB", EAP_CODE_FINISH, ctx['id'],
                               4 + 1, 255)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing fields (Finish)")
            return struct.pack(">BBHB", EAP_CODE_FINISH, ctx['id'],
                               4 + 1, EAP_ERP_TYPE_REAUTH)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected SEQ (Finish)")
            return struct.pack(">BBHBBHB", EAP_CODE_FINISH, ctx['id'],
                               4 + 1 + 4, EAP_ERP_TYPE_REAUTH, 0,
                               0xffff, 0)

        logger.info("No more test responses available - test case completed")
        global eap_proto_erp_test_done
        eap_proto_erp_test_done = True
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(erp_handler)

    try:
        hapd = start_ap(apdev[0])
        dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

        i = 0
        while not eap_proto_erp_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="pax.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)


def test_eap_proto_fast_errors(dev, apdev):
    """EAP-FAST local error cases"""
    check_eap_capa(dev[0], "FAST")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

    for i in range(1, 5):
        with alloc_fail(dev[0], i, "eap_fast_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="FAST", anonymous_identity="FAST",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                           phase1="fast_provisioning=2",
                           pac_file="blob://fast_pac_auth",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    tests = [(1, "wpabuf_alloc;eap_fast_tlv_eap_payload"),
             (1, "eap_fast_derive_key;eap_fast_derive_key_auth"),
             (1, "eap_msg_alloc;eap_peer_tls_phase2_nak"),
             (1, "wpabuf_alloc;eap_fast_tlv_result"),
             (1, "wpabuf_alloc;eap_fast_tlv_pac_ack"),
             (1, "=eap_peer_tls_derive_session_id;eap_fast_process_crypto_binding"),
             (1, "eap_peer_tls_decrypt;eap_fast_decrypt"),
             (1, "eap_fast_getKey"),
             (1, "eap_fast_get_session_id"),
             (1, "eap_fast_get_emsk")]
    for count, func in tests:
        dev[0].request("SET blob fast_pac_auth_errors ")
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="FAST", anonymous_identity="FAST",
                           identity="user@example.com", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                           phase1="fast_provisioning=2",
                           pac_file="blob://fast_pac_auth_errors",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Allocation failures during anonymous provisioning / PAC handling
    tests = [(1, "eap_fast_derive_key;eap_fast_derive_key_provisioning"),
             (1, "eap_mschapv2_getKey;eap_fast_get_phase2_key"),
             (1, "=eap_fast_use_pac_opaque"),
             (1, "eap_fast_copy_buf"),
             (1, "=eap_fast_add_pac"),
             (1, "=eap_fast_init_pac_data"),
             (1, "=eap_fast_write_pac"),
             (2, "=eap_fast_write_pac")]
    for count, func in tests:
        dev[0].request("SET blob fast_pac_errors ")
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="FAST", anonymous_identity="FAST",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                           phase1="fast_provisioning=1",
                           pac_file="blob://fast_pac_errors",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Forced failures (not allocations) in crypto-binding processing
    tests = [(1, "eap_fast_get_cmk;eap_fast_process_crypto_binding"),
             (1, "eap_fast_derive_eap_msk;eap_fast_process_crypto_binding"),
             (1, "eap_fast_derive_eap_emsk;eap_fast_process_crypto_binding")]
    for count, func in tests:
        dev[0].request("SET blob fast_pac_auth_errors ")
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="FAST", anonymous_identity="FAST",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                           phase1="fast_provisioning=2",
                           pac_file="blob://fast_pac_auth_errors",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    dev[0].request("SET blob fast_pac_errors ")
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="FAST", anonymous_identity="FAST",
                   identity="user", password="password",
                   ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                   phase1="fast_provisioning=1",
                   pac_file="blob://fast_pac_errors",
                   wait_connect=False)
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
    if ev is None:
        raise Exception("Timeout on EAP start")
    # EAP-FAST: Only EAP-MSCHAPv2 is allowed during unauthenticated
    # provisioning; reject phase2 type 6
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
    if ev is None:
        raise Exception("Timeout on EAP failure")
    dev[0].request("REMOVE_NETWORK all")
    dev[0].wait_disconnected()

    logger.info("Wrong password in Phase 2")
    dev[0].request("SET blob fast_pac_errors ")
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="FAST", anonymous_identity="FAST",
                   identity="user", password="wrong password",
                   ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                   phase1="fast_provisioning=1",
                   pac_file="blob://fast_pac_errors",
                   wait_connect=False)
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
    if ev is None:
        raise Exception("Timeout on EAP start")
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
    if ev is None:
        raise Exception("Timeout on EAP failure")
    dev[0].request("REMOVE_NETWORK all")
    dev[0].wait_disconnected()

    # Malformed PAC file contents; each entry must make
    # eap_fast_init fail to parse the PAC blob
    tests = ["FOOBAR\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nFOOBAR\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nSTART\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nEND\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Type=12345\nEND\n"
             # NOTE(review): no comma after the previous entry, so Python
             # implicitly concatenates it with the next string literal and
             # the two lines form a single test case; this looks like a
             # missing comma — confirm intended behavior.
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Key=12\nEND\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Key=1\nEND\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Key=1q\nEND\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Opaque=1\nEND\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nA-ID=1\nEND\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nI-ID=1\nEND\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nA-ID-Info=1\nEND\n"]
    for pac in tests:
        blob = binascii.hexlify(pac.encode()).decode()
        dev[0].request("SET blob fast_pac_errors " + blob)
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="FAST", anonymous_identity="FAST",
                       identity="user", password="password",
                       ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                       phase1="fast_provisioning=2",
                       pac_file="blob://fast_pac_errors",
                       wait_connect=False)
        ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                               timeout=5)
        if ev is None:
            raise Exception("Timeout on EAP start")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    # PAC blobs that parse but contain no usable entries; connection is
    # expected to complete via provisioning
    tests = ["wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nEND\n",
             "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nEND\nSTART\nEND\nSTART\nEND\n"]
    for pac in tests:
        blob = binascii.hexlify(pac.encode()).decode()
        dev[0].request("SET blob fast_pac_errors " + blob)
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="FAST", anonymous_identity="FAST",
                       identity="user", password="password",
                       ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                       phase1="fast_provisioning=2",
                       pac_file="blob://fast_pac_errors")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    dev[0].request("SET blob fast_pac_errors ")

def test_eap_proto_peap_errors_server(dev, apdev):
    """EAP-PEAP local error cases on server"""
    params = int_eap_server_params()
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

    # Force failures in the hostapd-side MSCHAPv2 key derivation
    tests = [(1, "get_asymetric_start_key;eap_mschapv2_getKey"),
             (1, "generate_authenticator_response_pwhash;eap_mschapv2_process_response"),
             (1, "hash_nt_password_hash;eap_mschapv2_process_response"),
             (1, "get_master_key;eap_mschapv2_process_response")]
    for count, func in tests:
        with fail_test(hapd, count, func):
            dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                           scan_freq="2412",
                           eap="PEAP", anonymous_identity="peap",
                           identity="user", password="password",
                           phase1="peapver=0 crypto_binding=2",
                           ca_cert="auth_serv/ca.pem",
                           phase2="auth=MSCHAPV2",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10)
            if ev is None:
                raise Exception("EAP-Failure not reported")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

def test_eap_proto_peap_errors(dev, apdev):
    """EAP-PEAP local error cases"""
    check_eap_capa(dev[0], "PEAP")
    check_eap_capa(dev[0], "MSCHAPV2")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

    # Allocation failures during method initialization
    for i in range(1, 5):
        with alloc_fail(dev[0], i, "eap_peap_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PEAP", anonymous_identity="peap",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem",
                           phase2="auth=MSCHAPV2",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Allocation failures at specific points during the exchange
    tests = [(1, "eap_mschapv2_getKey;eap_peap_get_isk;eap_peap_derive_cmk"),
             (1, "eap_msg_alloc;eap_tlv_build_result"),
             (1, "eap_mschapv2_init;eap_peap_phase2_request"),
             (1, "eap_peer_tls_decrypt;eap_peap_decrypt"),
             (1, "wpabuf_alloc;=eap_peap_decrypt"),
             (1, "eap_peer_tls_encrypt;eap_peap_decrypt"),
             (1, "eap_peer_tls_process_helper;eap_peap_process"),
             (1, "eap_peer_tls_derive_key;eap_peap_process"),
             (1, "eap_peer_tls_derive_session_id;eap_peap_process"),
             (1, "eap_peap_getKey"),
             (1, "eap_peap_get_session_id")]
    for count, func in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PEAP", anonymous_identity="peap",
                           identity="user", password="password",
                           phase1="peapver=0 crypto_binding=2",
                           ca_cert="auth_serv/ca.pem",
                           phase2="auth=MSCHAPV2",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Forced failures in crypto-binding/key derivation helpers
    tests = [(1, "peap_prfplus;eap_peap_derive_cmk"),
             (1, "eap_tlv_add_cryptobinding;eap_tlv_build_result"),
             (1, "peap_prfplus;eap_peap_getKey"),
             (1, "get_asymetric_start_key;eap_mschapv2_getKey")]
    for count, func in tests:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PEAP", anonymous_identity="peap",
                           identity="user", password="password",
                           phase1="peapver=0 crypto_binding=2",
                           ca_cert="auth_serv/ca.pem",
                           phase2="auth=MSCHAPV2",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Allocation failure while NAKing the offered Phase 2 method
    with alloc_fail(dev[0], 1,
                    "eap_peer_tls_phase2_nak;eap_peap_phase2_request"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PEAP", anonymous_identity="peap",
                       identity="cert user", password="password",
                       ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                       wait_connect=False)
        wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

def test_eap_proto_ttls_errors(dev, apdev):
    """EAP-TTLS local error cases"""
    check_eap_capa(dev[0], "TTLS")
    check_eap_capa(dev[0], "MSCHAPV2")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

    # Allocation failures during method initialization
    for i in range(1, 5):
        with alloc_fail(dev[0], i, "eap_ttls_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="TTLS", anonymous_identity="ttls",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem",
                           phase2="autheap=MSCHAPV2",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Allocation failures covering each Phase 2 variant; tuples are
    # (fail count, failing function, identity, phase2 config)
    tests = [(1, "eap_peer_tls_derive_key;eap_ttls_v0_derive_key",
              "DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
             (1, "eap_peer_tls_derive_session_id;eap_ttls_v0_derive_key",
              "DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
             (1, "wpabuf_alloc;eap_ttls_phase2_request_mschapv2",
              "DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
             (1, "eap_peer_tls_derive_key;eap_ttls_phase2_request_mschapv2",
              "DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
             (1, "eap_peer_tls_encrypt;eap_ttls_encrypt_response;eap_ttls_implicit_identity_request",
              "DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
             (1, "eap_peer_tls_decrypt;eap_ttls_decrypt",
              "DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
             (1, "eap_ttls_getKey",
              "DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
             (1, "eap_ttls_get_session_id",
              "DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
             (1, "eap_ttls_get_emsk",
              "mschapv2 user@domain", "auth=MSCHAPV2"),
             (1, "wpabuf_alloc;eap_ttls_phase2_request_mschap",
              "mschap user", "auth=MSCHAP"),
             (1, "eap_peer_tls_derive_key;eap_ttls_phase2_request_mschap",
              "mschap user", "auth=MSCHAP"),
             (1, "wpabuf_alloc;eap_ttls_phase2_request_chap",
              "chap user", "auth=CHAP"),
             (1, "eap_peer_tls_derive_key;eap_ttls_phase2_request_chap",
              "chap user", "auth=CHAP"),
             (1, "wpabuf_alloc;eap_ttls_phase2_request_pap",
              "pap user", "auth=PAP"),
             (1, "wpabuf_alloc;eap_ttls_avp_encapsulate",
              "user", "autheap=MSCHAPV2"),
             (1, "eap_mschapv2_init;eap_ttls_phase2_request_eap_method",
              "user", "autheap=MSCHAPV2"),
             (1, "eap_sm_buildIdentity;eap_ttls_phase2_request_eap",
              "user", "autheap=MSCHAPV2"),
             (1, "eap_ttls_avp_encapsulate;eap_ttls_phase2_request_eap",
              "user", "autheap=MSCHAPV2"),
             (1, "eap_ttls_parse_attr_eap",
              "user", "autheap=MSCHAPV2"),
             (1, "eap_peer_tls_encrypt;eap_ttls_encrypt_response;eap_ttls_process_decrypted",
              "user", "autheap=MSCHAPV2"),
             (1, "eap_ttls_fake_identity_request",
              "user", "autheap=MSCHAPV2"),
             (1, "eap_msg_alloc;eap_tls_process_output",
              "user", "autheap=MSCHAPV2"),
             (1, "eap_msg_alloc;eap_peer_tls_build_ack",
              "user", "autheap=MSCHAPV2"),
             (1, "tls_connection_decrypt;eap_peer_tls_decrypt",
              "user", "autheap=MSCHAPV2"),
             (1, "eap_peer_tls_phase2_nak;eap_ttls_phase2_request_eap_method",
              "cert user", "autheap=MSCHAPV2")]
    for count, func, identity, phase2 in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="TTLS", anonymous_identity="ttls",
                           identity=identity, password="password",
                           ca_cert="auth_serv/ca.pem", phase2=phase2,
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL",
                              note="Allocation failure not triggered for: %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Forced failures in MSCHAPv2 response generation
    tests = [(1, "os_get_random;eap_ttls_phase2_request_mschapv2"),
             (1, "mschapv2_derive_response;eap_ttls_phase2_request_mschapv2")]
    for count, func in tests:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="TTLS", anonymous_identity="ttls",
                           identity="DOMAIN\mschapv2 user",
                           password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_FAIL",
                              note="Test failure not triggered for: %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Forced failure in MSCHAP challenge-response computation
    tests = [(1, "nt_challenge_response;eap_ttls_phase2_request_mschap")]
    for count, func in tests:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="TTLS", anonymous_identity="ttls",
                           identity="mschap user", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAP",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_FAIL",
                              note="Test failure not triggered for: %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

def test_eap_proto_expanded(dev, apdev):
    """EAP protocol tests with expanded header"""
    global eap_proto_expanded_test_done
    eap_proto_expanded_test_done = False

    def expanded_handler(ctx, req):
        # Stateful RADIUS-side handler: ctx['num'] counts received EAP
        # messages and selects the next crafted expanded-header frame.
        logger.info("expanded_handler - RX " + binascii.hexlify(req).decode())
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MD5 challenge in expanded header")
            return struct.pack(">BBHB3BLBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 3,
                               EAP_TYPE_EXPANDED, 0, 0, 0, EAP_TYPE_MD5,
                               1, 0xaa, ord('n'))
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid expanded EAP length")
            return struct.pack(">BBHB3BH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 2,
                               EAP_TYPE_EXPANDED, 0, 0, 0, EAP_TYPE_MD5)
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid expanded frame type")
            return struct.pack(">BBHB3BL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_EXPANDED, 0, 0, 1, EAP_TYPE_MD5)
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MSCHAPv2 Challenge")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1 + 16 + 6,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1 + 16 + 6, 16) + 16*b'A' + b'foobar'
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid expanded frame type")
            return struct.pack(">BBHB3BL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_EXPANDED, 0, 0, 1, EAP_TYPE_MSCHAPV2)

        logger.info("No more test responses available - test case completed")
        global eap_proto_expanded_test_done
        eap_proto_expanded_test_done = True
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(expanded_handler)

    try:
        hapd = start_ap(apdev[0])
        dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

        i = 0
        while not eap_proto_expanded_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            # Iteration 4 pairs with the MSCHAPv2 Challenge case above;
            # all others use EAP-MD5
            if i == 4:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP",
                               scan_freq="2412",
                               eap="MSCHAPV2", identity="user",
                               password="password",
                               wait_connect=False)
            else:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP",
                               scan_freq="2412",
                               eap="MD5", identity="user",
                               password="password",
                               wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            if i in [1]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP method start")
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            elif i in [2, 3]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                       timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP proposed method")
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            else:
                time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)

def test_eap_proto_tls(dev, apdev):
    """EAP-TLS protocol tests"""
    check_eap_capa(dev[0], "TLS")
    global eap_proto_tls_test_done, eap_proto_tls_test_wait
    eap_proto_tls_test_done = False
    eap_proto_tls_test_wait = False

    def tls_handler(ctx, req):
        # Stateful RADIUS-side handler exercising EAP-TLS fragmentation
        # and flags-octet error paths in the peer.
        logger.info("tls_handler - RX " + binascii.hexlify(req).decode())
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        global eap_proto_tls_test_wait

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too much payload in TLS/Start: TLS Message Length (0 bytes) smaller than this fragment (1 bytes)")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_TLS, 0xa0, 0, 1)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmented TLS/Start")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_TLS, 0xe0, 2, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too long fragment of TLS/Start: Invalid reassembly state: tls_in_left=2 tls_in_len=0 in_len=0")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_TLS, 0x00, 2, 3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TLS/Start")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TLS, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmented TLS message")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_TLS, 0xc0, 2, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid TLS message: no Flags octet included + workaround")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_TLS)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too long fragment of TLS message: more data than TLS message length indicated")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_TLS, 0x00, 2, 3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmented TLS/Start and truncated Message Length field")
            return struct.pack(">BBHBB3B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 3,
                               EAP_TYPE_TLS, 0xe0, 1, 2, 3)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TLS/Start")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TLS, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmented TLS message")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_TLS, 0xc0, 2, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid TLS message: no Flags octet included + workaround disabled")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_TLS)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TLS/Start")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TLS, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmented TLS message (long; first)")
            payload = 1450*b'A'
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + len(payload),
                               EAP_TYPE_TLS, 0xc0, 65536) + payload
        # "Too long TLS fragment (size over 64 kB)" on the last one
        for i in range(44):
            idx += 1
            if ctx['num'] == idx:
                logger.info("Test: Fragmented TLS message (long; cont %d)" % i)
                eap_proto_tls_test_wait = True
                payload = 1470*b'A'
                return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                                   4 + 1 + 1 + len(payload),
                                   EAP_TYPE_TLS, 0x40) + payload
        eap_proto_tls_test_wait = False
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TLS/Start")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TLS, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Non-ACK to more-fragment message")
            return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 1,
                               EAP_TYPE_TLS, 0x00, 255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        logger.info("No more test responses available - test case completed")
        global eap_proto_tls_test_done
        eap_proto_tls_test_done = True
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(tls_handler)

    try:
        hapd = start_ap(apdev[0])
        dev[0].scan_for_bss(hapd.own_addr(), freq=2412)

        i = 0
        while not eap_proto_tls_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            # Iteration 6 matches the "workaround disabled" case;
            # iteration 8 forces small fragments for the long-message case
            workaround = "0" if i == 6 else "1"
            fragment_size = "100" if i == 8 else "1400"
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="TLS", identity="tls user",
                           ca_cert="auth_serv/ca.pem",
                           client_cert="auth_serv/user.pem",
                           private_key="auth_serv/user.key",
                           eap_workaround=workaround,
                           fragment_size=fragment_size,
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD",
                                    "CTRL-EVENT-EAP-STATUS"], timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP method start")
            time.sleep(0.1)
            # Wait (bounded to 10 s) while the handler is still feeding the
            # long fragmented-message sequence
            start = os.times()[4]
            while eap_proto_tls_test_wait:
                now = os.times()[4]
                if now - start > 10:
                    break
                time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)

def test_eap_proto_tnc(dev, apdev):
    """EAP-TNC protocol tests"""
    check_eap_capa(dev[0], "TNC")
    global eap_proto_tnc_test_done
    eap_proto_tnc_test_done = False

    def tnc_handler(ctx, req):
        # Stateful RADIUS-side handler exercising EAP-TNC fragmentation
        # and IF-TNCCS XML parsing error paths in the peer.
        logger.info("tnc_handler - RX " + binascii.hexlify(req).decode())
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TNC start with unsupported version")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x20)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TNC without Flags field")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_TNC)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Message underflow due to missing Message Length")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0xa1)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid Message Length")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_TNC, 0xa1, 0, 0)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid Message Length")
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_TNC, 0xe1, 75001)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Start with Message Length")
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_TNC, 0xa1, 1)
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Server used start flag again")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmentation and unexpected payload in ack")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x01)
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 1,
                               EAP_TYPE_TNC, 0x01, 0)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Server fragmenting and fragment overflow")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_TNC, 0xe1, 2, 1)
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_TNC, 0x01, 2, 3)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Server fragmenting and no message length in a fragment")
            return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 1,
                               EAP_TYPE_TNC, 0x61, 2)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TNC start followed by invalid TNCCS-Batch")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"FOO"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TNC start followed by invalid TNCCS-Batch (2)")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"</TNCCS-Batch><TNCCS-Batch>"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TNCCS-Batch missing BatchId attribute")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"<TNCCS-Batch foo=3></TNCCS-Batch>"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected IF-TNCCS BatchId")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"<TNCCS-Batch BatchId=123456789></TNCCS-Batch>"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing IMC-IMV-Message and TNCC-TNCS-Message end tags")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"<TNCCS-Batch BatchId=2><IMC-IMV-Message><TNCC-TNCS-Message></TNCCS-Batch>"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing IMC-IMV-Message and TNCC-TNCS-Message Type")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"<TNCCS-Batch BatchId=2><IMC-IMV-Message></IMC-IMV-Message><TNCC-TNCS-Message></TNCC-TNCS-Message></TNCCS-Batch>"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing TNCC-TNCS-Message XML end tag")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML></TNCC-TNCS-Message></TNCCS-Batch>"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing TNCC-TNCS-Message Base64 start tag")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type></TNCC-TNCS-Message></TNCCS-Batch>"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing TNCC-TNCS-Message Base64 end tag")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><Base64>abc</TNCC-TNCS-Message></TNCCS-Batch>"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TNCC-TNCS-Message Base64 message")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><Base64>aGVsbG8=</Base64></TNCC-TNCS-Message></TNCCS-Batch>"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid TNCC-TNCS-Message XML message")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b"<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML>hello</XML></TNCC-TNCS-Message></TNCCS-Batch>"
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing TNCCS-Recommendation type")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b'<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML><TNCCS-Recommendation foo=1></TNCCS-Recommendation></XML></TNCC-TNCS-Message></TNCCS-Batch>'
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TNCCS-Recommendation type=none")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b'<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML><TNCCS-Recommendation type="none"></TNCCS-Recommendation></XML></TNCC-TNCS-Message></TNCCS-Batch>'
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(resp),
                               EAP_TYPE_TNC, 0x01) + resp
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TNCCS-Recommendation type=isolate")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TNC, 0x21)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Received TNCCS-Batch: " + binascii.hexlify(req[6:]).decode())
            resp = b'<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML><TNCCS-Recommendation type="isolate"></TNCCS-Recommendation></XML></TNCC-TNCS-Message></TNCCS-Batch>'
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 
len(resp), EAP_TYPE_TNC, 0x01) + resp idx += 1 if ctx['num'] == idx: return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) logger.info("No more test responses available - test case completed") global eap_proto_tnc_test_done eap_proto_tnc_test_done = True return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) srv = start_radius_server(tnc_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) i = 0 while not eap_proto_tnc_test_done: i += 1 logger.info("Running connection iteration %d" % i) frag = 1400 if i == 8: frag = 150 dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="TNC", identity="tnc", fragment_size=str(frag), wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5) if ev is None: raise Exception("Timeout on EAP start") ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD", "CTRL-EVENT-EAP-STATUS"], timeout=5) if ev is None: raise Exception("Timeout on EAP method start") time.sleep(0.1) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected(timeout=1) dev[0].dump_monitor() finally: stop_radius_server(srv) def test_eap_canned_success_after_identity(dev, apdev): """EAP protocol tests for canned EAP-Success after identity""" check_eap_capa(dev[0], "MD5") def eap_canned_success_handler(ctx, req): logger.info("eap_canned_success_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4) idx += 1 if ctx['num'] == idx: logger.info("Test: EAP-Success") return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4) return None srv = start_radius_server(eap_canned_success_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", 
phase1="allow_canned_success=1", eap="MD5", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15) if ev is None: raise Exception("Timeout on EAP success") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5) if ev is None: raise Exception("Timeout on EAP start") ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=0.1) if ev is not None: raise Exception("Unexpected EAP success") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() finally: stop_radius_server(srv) def test_eap_proto_wsc(dev, apdev): """EAP-WSC protocol tests""" global eap_proto_wsc_test_done, eap_proto_wsc_wait_failure eap_proto_wsc_test_done = False def wsc_handler(ctx, req): logger.info("wsc_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] += 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 global eap_proto_wsc_wait_failure eap_proto_wsc_wait_failure = False idx += 1 if ctx['num'] == idx: logger.info("Test: Missing Flags field") return struct.pack(">BBHB3BLB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 1, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: Message underflow (missing Message Length field)") return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 2, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 1, 0x02) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid Message Length (> 50000)") return struct.pack(">BBHB3BLBBH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 4, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 1, 0x02, 65535) idx += 1 if ctx['num'] == idx: logger.info("Test: Invalid Message Length (< current payload)") return struct.pack(">BBHB3BLBBHB", 
EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 5, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 1, 0x02, 0, 0xff) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Op-Code 5 in WAIT_START state") return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 2, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 5, 0x00) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid WSC Start to start the sequence") return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 2, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 1, 0x00) idx += 1 if ctx['num'] == idx: logger.info("Test: No Message Length field in a fragmented packet") return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 2, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 4, 0x01) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid WSC Start to start the sequence") return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 2, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 1, 0x00) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid first fragmented packet") return struct.pack(">BBHB3BLBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 5, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 4, 0x03, 10, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Op-Code 5 in fragment (expected 4)") return struct.pack(">BBHB3BLBBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 3, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 5, 0x01, 2) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid WSC Start to start the sequence") return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 2, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 1, 0x00) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid first fragmented packet") return struct.pack(">BBHB3BLBBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 5, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 4, 0x03, 2, 1) idx += 1 if ctx['num'] == idx: logger.info("Test: Fragment overflow") return struct.pack(">BBHB3BLBBBB", 
EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 4, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 4, 0x01, 2, 3) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid WSC Start to start the sequence") return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 2, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 1, 0x00) idx += 1 if ctx['num'] == idx: logger.info("Test: Unexpected Op-Code 5 in WAIT_FRAG_ACK state") return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 2, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 5, 0x00) idx += 1 if ctx['num'] == idx: logger.info("Test: Valid WSC Start") return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 3 + 4 + 2, EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1, 1, 0x00) idx += 1 if ctx['num'] == idx: logger.info("No more test responses available - test case completed") global eap_proto_wsc_test_done eap_proto_wsc_test_done = True eap_proto_wsc_wait_failure = True return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) srv = start_radius_server(wsc_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) i = 0 while not eap_proto_wsc_test_done: i += 1 logger.info("Running connection iteration %d" % i) fragment_size = 1398 if i != 9 else 50 dev[0].connect("eap-test", key_mgmt="WPA-EAP", eap="WSC", fragment_size=str(fragment_size), identity="WFA-SimpleConfig-Enrollee-1-0", phase1="pin=12345670", scan_freq="2412", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5) if ev is None: raise Exception("Timeout on EAP method start") if eap_proto_wsc_wait_failure: ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5) if ev is None: raise Exception("Timeout on EAP failure") else: time.sleep(0.1) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected(timeout=1) dev[0].dump_monitor() finally: stop_radius_server(srv) def test_eap_canned_success_before_method(dev, apdev): """EAP 
protocol tests for canned EAP-Success before any method""" params = int_eap_server_params() hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) bssid = apdev[0]['bssid'] hapd.request("SET ext_eapol_frame_io 1") dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", phase1="allow_canned_success=1", eap="MD5", identity="user", password="password", wait_connect=False) ev = hapd.wait_event(["EAPOL-TX"], timeout=10) if ev is None: raise Exception("Timeout on EAPOL-TX from hostapd") res = dev[0].request("EAPOL_RX " + bssid + " 0200000403020004") if "OK" not in res: raise Exception("EAPOL_RX to wpa_supplicant failed") ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=5) if ev is None: raise Exception("Timeout on EAP success") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() def test_eap_canned_failure_before_method(dev, apdev): """EAP protocol tests for canned EAP-Failure before any method""" params = int_eap_server_params() hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) bssid = apdev[0]['bssid'] hapd.request("SET ext_eapol_frame_io 1") dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412", phase1="allow_canned_success=1", eap="MD5", identity="user", password="password", wait_connect=False) ev = hapd.wait_event(["EAPOL-TX"], timeout=10) if ev is None: raise Exception("Timeout on EAPOL-TX from hostapd") res = dev[0].request("EAPOL_RX " + bssid + " 0200000404020004") if "OK" not in res: raise Exception("EAPOL_RX to wpa_supplicant failed") ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5) if ev is None: raise Exception("Timeout on EAP failure") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() def test_eap_nak_oom(dev, apdev): """EAP-Nak OOM""" check_eap_capa(dev[0], "MD5") params = hostapd.wpa2_eap_params(ssid="eap-test") hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) with 
alloc_fail(dev[0], 1, "eap_msg_alloc;eap_sm_buildNak"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="MD5", identity="sake user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() def test_eap_nak_expanded(dev, apdev): """EAP-Nak with expanded method""" check_eap_capa(dev[0], "MD5") check_eap_capa(dev[0], "VENDOR-TEST") params = hostapd.wpa2_eap_params(ssid="eap-test") hapd = hostapd.add_ap(apdev[0], params) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="VENDOR-TEST WSC", identity="sake user", password="password", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=10) if ev is None or "NAK" not in ev: raise Exception("No NAK event seen") ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10) if ev is None: raise Exception("No EAP-Failure seen") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() EAP_TLV_RESULT_TLV = 3 EAP_TLV_NAK_TLV = 4 EAP_TLV_ERROR_CODE_TLV = 5 EAP_TLV_CONNECTION_BINDING_TLV = 6 EAP_TLV_VENDOR_SPECIFIC_TLV = 7 EAP_TLV_URI_TLV = 8 EAP_TLV_EAP_PAYLOAD_TLV = 9 EAP_TLV_INTERMEDIATE_RESULT_TLV = 10 EAP_TLV_PAC_TLV = 11 EAP_TLV_CRYPTO_BINDING_TLV = 12 EAP_TLV_CALLING_STATION_ID_TLV = 13 EAP_TLV_CALLED_STATION_ID_TLV = 14 EAP_TLV_NAS_PORT_TYPE_TLV = 15 EAP_TLV_SERVER_IDENTIFIER_TLV = 16 EAP_TLV_IDENTITY_TYPE_TLV = 17 EAP_TLV_SERVER_TRUSTED_ROOT_TLV = 18 EAP_TLV_REQUEST_ACTION_TLV = 19 EAP_TLV_PKCS7_TLV = 20 EAP_TLV_RESULT_SUCCESS = 1 EAP_TLV_RESULT_FAILURE = 2 EAP_TLV_TYPE_MANDATORY = 0x8000 EAP_TLV_TYPE_MASK = 0x3fff PAC_TYPE_PAC_KEY = 1 PAC_TYPE_PAC_OPAQUE = 2 PAC_TYPE_CRED_LIFETIME = 3 PAC_TYPE_A_ID = 4 PAC_TYPE_I_ID = 5 PAC_TYPE_A_ID_INFO = 7 PAC_TYPE_PAC_ACKNOWLEDGEMENT = 8 PAC_TYPE_PAC_INFO = 9 PAC_TYPE_PAC_TYPE = 10 def eap_fast_start(ctx): logger.info("Send EAP-FAST/Start") return 
struct.pack(">BBHBBHH", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + 4 + 16, EAP_TYPE_FAST, 0x21, 4, 16) + 16*b'A' def test_eap_fast_proto(dev, apdev): """EAP-FAST Phase protocol testing""" check_eap_capa(dev[0], "FAST") global eap_fast_proto_ctx eap_fast_proto_ctx = None def eap_handler(ctx, req): logger.info("eap_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 global eap_fast_proto_ctx eap_fast_proto_ctx = ctx ctx['test_done'] = False idx += 1 if ctx['num'] == idx: return eap_fast_start(ctx) idx += 1 if ctx['num'] == idx: logger.info("EAP-FAST: TLS processing failed") data = b'ABCDEFGHIK' return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + len(data), EAP_TYPE_FAST, 0x01) + data idx += 1 if ctx['num'] == idx: ctx['test_done'] = True return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) logger.info("Past last test case") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) srv = start_radius_server(eap_handler) try: hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="FAST", anonymous_identity="FAST", identity="user", password="password", ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2", phase1="fast_provisioning=1", pac_file="blob://fast_pac_proto", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5) if ev is None: raise Exception("Could not start EAP-FAST") ok = False for i in range(100): if eap_fast_proto_ctx: if eap_fast_proto_ctx['test_done']: ok = True break time.sleep(0.05) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() finally: stop_radius_server(srv) def run_eap_fast_phase2(dev, test_payload, test_failure=True): global eap_fast_proto_ctx eap_fast_proto_ctx = None def ssl_info_callback(conn, where, ret): logger.debug("SSL: info where=%d ret=%d" % (where, 
ret)) def log_conn_state(conn): try: state = conn.state_string() except AttributeError: state = conn.get_state_string() if state: logger.info("State: " + str(state)) def process_clienthello(ctx, payload): logger.info("Process ClientHello") ctx['sslctx'] = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD) ctx['sslctx'].set_info_callback(ssl_info_callback) ctx['sslctx'].load_tmp_dh("auth_serv/dh.conf") if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000: ctx['sslctx'].set_cipher_list("ADH-AES128-SHA:@SECLEVEL=0") else: ctx['sslctx'].set_cipher_list("ADH-AES128-SHA") ctx['conn'] = OpenSSL.SSL.Connection(ctx['sslctx'], None) ctx['conn'].set_accept_state() log_conn_state(ctx['conn']) ctx['conn'].bio_write(payload) try: ctx['conn'].do_handshake() except OpenSSL.SSL.WantReadError: pass log_conn_state(ctx['conn']) data = ctx['conn'].bio_read(4096) log_conn_state(ctx['conn']) return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + len(data), EAP_TYPE_FAST, 0x01) + data def process_clientkeyexchange(ctx, payload, appl_data): logger.info("Process ClientKeyExchange") log_conn_state(ctx['conn']) ctx['conn'].bio_write(payload) try: ctx['conn'].do_handshake() except OpenSSL.SSL.WantReadError: pass ctx['conn'].send(appl_data) log_conn_state(ctx['conn']) data = ctx['conn'].bio_read(4096) log_conn_state(ctx['conn']) return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1 + len(data), EAP_TYPE_FAST, 0x01) + data def eap_handler(ctx, req): logger.info("eap_handler - RX " + binascii.hexlify(req).decode()) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 global eap_fast_proto_ctx eap_fast_proto_ctx = ctx ctx['test_done'] = False logger.debug("ctx['num']=%d" % ctx['num']) idx += 1 if ctx['num'] == idx: return eap_fast_start(ctx) idx += 1 if ctx['num'] == idx: return process_clienthello(ctx, req[6:]) idx += 1 if ctx['num'] == idx: if not test_failure: ctx['test_done'] = True 
return process_clientkeyexchange(ctx, req[6:], test_payload) idx += 1 if ctx['num'] == idx: ctx['test_done'] = True return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) logger.info("Past last test case") return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) srv = start_radius_server(eap_handler) try: dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="FAST", anonymous_identity="FAST", identity="user", password="password", ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2", phase1="fast_provisioning=1", pac_file="blob://fast_pac_proto", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5) if ev is None: raise Exception("Could not start EAP-FAST") dev[0].dump_monitor() ok = False for i in range(100): if eap_fast_proto_ctx: if eap_fast_proto_ctx['test_done']: ok = True break time.sleep(0.05) time.sleep(0.1) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() if not ok: raise Exception("EAP-FAST TLS exchange did not complete") for i in range(3): dev[i].dump_monitor() finally: stop_radius_server(srv) def test_eap_fast_proto_phase2(dev, apdev): """EAP-FAST Phase 2 protocol testing""" if not openssl_imported: raise HwsimSkip("OpenSSL python method not available") check_eap_capa(dev[0], "FAST") hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) tests = [("Too short Phase 2 TLV frame (len=3)", "ABC", False), ("EAP-FAST: TLV overflow", struct.pack(">HHB", 0, 2, 0xff), False), ("EAP-FAST: Unknown TLV (optional and mandatory)", struct.pack(">HHB", 0, 1, 0xff) + struct.pack(">HHB", EAP_TLV_TYPE_MANDATORY, 1, 0xff), True), ("EAP-FAST: More than one EAP-Payload TLV in the message", struct.pack(">HHBHHB", EAP_TLV_EAP_PAYLOAD_TLV, 1, 0xff, EAP_TLV_EAP_PAYLOAD_TLV, 1, 0xff), True), ("EAP-FAST: Unknown Result 255 and More than one Result TLV in the message", struct.pack(">HHHHHH", EAP_TLV_RESULT_TLV, 2, 0xff, EAP_TLV_RESULT_TLV, 2, 0xff), True), ("EAP-FAST: Too short Result TLV", 
struct.pack(">HHB", EAP_TLV_RESULT_TLV, 1, 0xff), True), ("EAP-FAST: Unknown Intermediate Result 255 and More than one Intermediate-Result TLV in the message", struct.pack(">HHHHHH", EAP_TLV_INTERMEDIATE_RESULT_TLV, 2, 0xff, EAP_TLV_INTERMEDIATE_RESULT_TLV, 2, 0xff), True), ("EAP-FAST: Too short Intermediate-Result TLV", struct.pack(">HHB", EAP_TLV_INTERMEDIATE_RESULT_TLV, 1, 0xff), True), ("EAP-FAST: More than one Crypto-Binding TLV in the message", struct.pack(">HH", EAP_TLV_CRYPTO_BINDING_TLV, 60) + 60*b'A' + struct.pack(">HH", EAP_TLV_CRYPTO_BINDING_TLV, 60) + 60*b'A', True), ("EAP-FAST: Too short Crypto-Binding TLV", struct.pack(">HHB", EAP_TLV_CRYPTO_BINDING_TLV, 1, 0xff), True), ("EAP-FAST: More than one Request-Action TLV in the message", struct.pack(">HHBBHHBB", EAP_TLV_REQUEST_ACTION_TLV, 2, 0xff, 0xff, EAP_TLV_REQUEST_ACTION_TLV, 2, 0xff, 0xff), True), ("EAP-FAST: Too short Request-Action TLV", struct.pack(">HHB", EAP_TLV_REQUEST_ACTION_TLV, 1, 0xff), True), ("EAP-FAST: More than one PAC TLV in the message", struct.pack(">HHBHHB", EAP_TLV_PAC_TLV, 1, 0xff, EAP_TLV_PAC_TLV, 1, 0xff), True), ("EAP-FAST: Too short EAP Payload TLV (Len=3)", struct.pack(">HH3B", EAP_TLV_EAP_PAYLOAD_TLV, 3, 0, 0, 0), False), ("EAP-FAST: Too short Phase 2 request (Len=0)", struct.pack(">HHBBH", EAP_TLV_EAP_PAYLOAD_TLV, 4, EAP_CODE_REQUEST, 0, 0), False), ("EAP-FAST: EAP packet overflow in EAP Payload TLV", struct.pack(">HHBBH", EAP_TLV_EAP_PAYLOAD_TLV, 4, EAP_CODE_REQUEST, 0, 4 + 1), False), ("EAP-FAST: Unexpected code=0 in Phase 2 EAP header", struct.pack(">HHBBH", EAP_TLV_EAP_PAYLOAD_TLV, 4, 0, 0, 0), False), ("EAP-FAST: PAC TLV without Result TLV acknowledging success", struct.pack(">HHB", EAP_TLV_PAC_TLV, 1, 0xff), True), ("EAP-FAST: PAC TLV does not include all the required fields", struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2, EAP_TLV_RESULT_SUCCESS) + struct.pack(">HHB", EAP_TLV_PAC_TLV, 1, 0xff), True), ("EAP-FAST: Invalid PAC-Key length 0, Ignored unknown PAC type 0, and 
PAC TLV overrun (type=0 len=2 left=1)", struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2, EAP_TLV_RESULT_SUCCESS) + struct.pack(">HHHHHHHHB", EAP_TLV_PAC_TLV, 4 + 4 + 5, PAC_TYPE_PAC_KEY, 0, 0, 0, 0, 2, 0), True), ("EAP-FAST: PAC-Info does not include all the required fields", struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2, EAP_TLV_RESULT_SUCCESS) + struct.pack(">HHHHHHHH", EAP_TLV_PAC_TLV, 4 + 4 + 4 + 32, PAC_TYPE_PAC_OPAQUE, 0, PAC_TYPE_PAC_INFO, 0, PAC_TYPE_PAC_KEY, 32) + 32*b'A', True), ("EAP-FAST: Invalid CRED_LIFETIME length, Ignored unknown PAC-Info type 0, and Invalid PAC-Type length 1", struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2, EAP_TLV_RESULT_SUCCESS) + struct.pack(">HHHHHHHHHHHHBHH", EAP_TLV_PAC_TLV, 4 + 4 + 13 + 4 + 32, PAC_TYPE_PAC_OPAQUE, 0, PAC_TYPE_PAC_INFO, 13, PAC_TYPE_CRED_LIFETIME, 0, 0, 0, PAC_TYPE_PAC_TYPE, 1, 0, PAC_TYPE_PAC_KEY, 32) + 32*b'A', True), ("EAP-FAST: Unsupported PAC-Type 0", struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2, EAP_TLV_RESULT_SUCCESS) + struct.pack(">HHHHHHHHHHH", EAP_TLV_PAC_TLV, 4 + 4 + 6 + 4 + 32, PAC_TYPE_PAC_OPAQUE, 0, PAC_TYPE_PAC_INFO, 6, PAC_TYPE_PAC_TYPE, 2, 0, PAC_TYPE_PAC_KEY, 32) + 32*b'A', True), ("EAP-FAST: PAC-Info overrun (type=0 len=2 left=1)", struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2, EAP_TLV_RESULT_SUCCESS) + struct.pack(">HHHHHHHHBHH", EAP_TLV_PAC_TLV, 4 + 4 + 5 + 4 + 32, PAC_TYPE_PAC_OPAQUE, 0, PAC_TYPE_PAC_INFO, 5, 0, 2, 1, PAC_TYPE_PAC_KEY, 32) + 32*b'A', True), ("EAP-FAST: Valid PAC", struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2, EAP_TLV_RESULT_SUCCESS) + struct.pack(">HHHHHHHHBHHBHH", EAP_TLV_PAC_TLV, 4 + 4 + 10 + 4 + 32, PAC_TYPE_PAC_OPAQUE, 0, PAC_TYPE_PAC_INFO, 10, PAC_TYPE_A_ID, 1, 0x41, PAC_TYPE_A_ID_INFO, 1, 0x42, PAC_TYPE_PAC_KEY, 32) + 32*b'A', True), ("EAP-FAST: Invalid version/subtype in Crypto-Binding TLV", struct.pack(">HH", EAP_TLV_CRYPTO_BINDING_TLV, 60) + 60*b'A', True)] for title, payload, failure in tests: logger.info("Phase 2 test: " + title) run_eap_fast_phase2(dev, payload, failure) def 
test_eap_fast_tlv_nak_oom(dev, apdev): """EAP-FAST Phase 2 TLV NAK OOM""" if not openssl_imported: raise HwsimSkip("OpenSSL python method not available") check_eap_capa(dev[0], "FAST") hapd = start_ap(apdev[0]) dev[0].scan_for_bss(hapd.own_addr(), freq=2412) with alloc_fail(dev[0], 1, "eap_fast_tlv_nak"): run_eap_fast_phase2(dev, struct.pack(">HHB", EAP_TLV_TYPE_MANDATORY, 1, 0xff), False)
simpleSSHFuzzer.py
from pexpect import pxssh
import threading
import time
import os
import sys


def bruter(host,username,password):
    # Attempt a single SSH login with an oversized password; any failure
    # (timeout, refused connection, pxssh error) is printed and swallowed so
    # the fuzzing loop in the main script keeps running.
    s = pxssh.pxssh()
    try:
        #print username
        s.login(host,username,password, login_timeout=500)
    except Exception as e:
        print(e)
        #time.sleep()
        #os._exit(1)


# Flat driver script: spawn one login attempt per iteration, growing the
# password by one byte each time to probe for length-handling crashes in the
# target SSH service.
print '[*] Fuzzing SSH service'
h = '172.16.218.132'
for i in range(151949,300000):
    user = 'admin'
    passw = 'B'*i
    try:
        # NOTE(review): one thread per attempt with only a 0.2 s pacing delay;
        # threads are never joined, so very long runs accumulate live threads.
        t = threading.Thread(target=bruter, args=(h,user,passw))
        t.start()
        print i
        time.sleep(0.2)
    except Exception as e:
        print e
# Give outstanding login attempts (login_timeout=500) time to finish.
time.sleep(3610)
ait_bsc.py
#!/usr/bin/env python

# Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT)
# Bespoke Link to Instruments and Small Satellites (BLISS)
#
# Copyright 2016, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such
# information to foreign countries or providing access to foreign persons.
"""
Usage: ait-bsc

Start the ait BSC for capturing network traffic into PCAP files and the
manager server for RESTful manipulation of active loggers.
"""
import os
import threading
import yaml
import argparse

import ait
from ait.core import bsc

# Path to the BSC YAML configuration, resolved from the AIT config tree.
config_file = ait.config.bsc.filename  # type: ignore


def main():
    """Start the BSC capture handlers and the RESTful manager server.

    Loads handler definitions from the BSC YAML configuration when it exists;
    otherwise starts with default host/port values, an empty handler list and
    /tmp as the root log directory. Runs until the capture manager's socket
    event loop exits.
    """
    ap = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    args = ap.parse_args()  # noqa

    if not os.path.isfile(config_file):
        print("Unable to locate config. Starting up handlers with default values ...")
        host = "localhost"
        port = "8080"
        handler_configs = []
        root_log_dir = "/tmp"
        mngr_conf = {"root_log_directory": root_log_dir}
    else:
        with open(config_file) as log_conf:
            # NOTE(review): yaml.Loader resolves arbitrary Python tags. The
            # config file is operator-provided, but yaml.safe_load would be
            # preferable if custom tags are not required — confirm before
            # switching.
            conf = yaml.load(log_conf, Loader=yaml.Loader)

        mngr_conf = conf["capture_manager"]
        host = mngr_conf["manager_server"]["host"]
        port = mngr_conf["manager_server"]["port"]

        handler_configs = []
        for handler_conf in conf["handlers"]:
            # Resolve each handler's log path; relative paths are anchored at
            # the configured root log directory.
            if "path" in handler_conf:
                handler_path = handler_conf.pop("path")
                if not os.path.isabs(handler_path):
                    handler_path = os.path.join(
                        mngr_conf["root_log_directory"], handler_path
                    )
            else:
                handler_path = mngr_conf["root_log_directory"]

            handler_configs.append(
                (
                    handler_conf.pop("name"),
                    handler_conf.pop("address"),
                    handler_conf.pop("conn_type"),
                    handler_path,
                    handler_conf,
                )
            )

    lgr_mngr = bsc.StreamCaptureManager(mngr_conf, handler_configs)
    manager_server = bsc.StreamCaptureManagerServer(
        logger_manager=lgr_mngr, host=host, port=port
    )

    t = threading.Thread(target=manager_server.start)
    # Fix: Thread.setDaemon() is deprecated since Python 3.10 (removed in
    # 3.13); assign the daemon attribute directly instead.
    t.daemon = True
    t.start()

    lgr_mngr.run_socket_event_loop()


if __name__ == "__main__":
    main()
uniGridDistribution.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
import time
import logging
import getopt
from multiprocessing import Process

from util.preprocess import mergeMatrixs
from util.preprocess import mergeSmallRecords
from util.UniGridDisBasic import UniGridDisBasic


def processTask(x, city, directory, inum, onum, judDay, judHour, GRIDSNUM, subpath):
    # Worker entry point for one subprocess: build the property bag expected
    # by UniGridDisBasic and run the distribution task for partition x.
    PROP = {
        'INDEX': x,
        'CITY': city,
        'DIRECTORY': directory,
        'INUM': inum,
        'ONUM': onum,
        'DAY': judDay,
        'HOUR': judHour,
        'GRIDSNUM': GRIDSNUM,
        'SUBPATH': subpath
    }
    task = UniGridDisBasic(PROP)
    task.run()


def usage():
    """Print the command-line usage guidance for this script."""
    print '''Usage Guidance
    help -h get usage guidance
    city -c city or region name, such as beijing
    directory -d the root directory of records and results, such as /China/beijing
    inum -i number of input files
    onum -o number of output files
    e.g. python ./uniGridDistribution.py -i 3999 -o 20 -d /enigma/tao.jiang/datasets/JingJinJi/records
    '''


def main(argv):
    """
    Main entry point.

    :param argv: command-line arguments; ``city`` is the city/region name,
        ``directory`` the data root path, ``inum`` the total number of input
        files, ``onum`` the total number of output files, ``jnum`` the number
        of worker processes (usually equal to ``onum``), and ``subpath`` the
        sub-directory name for stored results.
    """
    try:
        opts, args = getopt.getopt(argv, "hc:d:i:o:j:", ["help", "city=", 'directory=', 'inum=', 'onum=', 'jnum='])
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)
    city, directory, inum, onum, jnum, subpath = 'beijing', '/home/tao.jiang/datasets/JingJinJi/records', 3999, 20, 20, 'bj-newvis'
    dayBase, judDay, judHour = 187, 0, 0
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-c", "--city"):
            city = arg
        elif opt in ("-d", "--directory"):
            directory = arg
        elif opt in ('-i', '--inum'):
            inum = int(arg)
        elif opt in ('-o', '--onum'):
            onum = int(arg)
        elif opt in ('-j', '--jnum'):
            jnum = int(arg)

    STARTTIME = time.time()
    print "Start approach at %s" % STARTTIME
    # Grid information (total count, grids with valid POIs) would normally be
    # fetched from the database; here it is fixed to the Beijing extent.
    GRIDSNUM = 2000
    for dayCount in xrange(0, 87):
        for hourCount in xrange(0, 24):
            judDay = dayCount + dayBase
            judHour = hourCount
            # Start computing from time slot 118 onwards; earlier slots are
            # skipped (presumably already processed — TODO confirm).
            if ((judDay - 187) * 24 + judHour) < 118:
                continue
            # Multiprocessing section START: one worker per output partition.
            jobs = []
            for x in xrange(0, jnum):
                jobs.append(Process(target=processTask, args=(x, city, directory, inum, onum, judDay, judHour, GRIDSNUM, subpath)))
                jobs[x].start()
            for job in jobs:
                job.join()
            # Flush the remaining records to files and merge the per-worker
            # outputs for this time slot.
            oTime = (judDay - 187) * 24 + judHour
            mergeMatrixs(city, GRIDSNUM, directory, subpath, oTime)
            mergeSmallRecords(city, directory, subpath, oTime)
            # Multiprocessing section END.
    print "END TIME: %s" % time.time()


if __name__ == '__main__':
    logging.basicConfig(filename='logger-unitGridDistribution.log', level=logging.DEBUG)
    main(sys.argv[1:])
Interface.py
import socket
import struct
import threading
import netifaces
import ipaddress
import traceback
from fcntl import ioctl
from abc import ABCMeta, abstractmethod

# ioctl request number used to query an interface's MTU (Linux sockios.h)
SIOCGIFMTU = 0x8921


class Interface(metaclass=ABCMeta):
    """
    Base class for a network interface used to send and receive multicast
    control packets.  Subclasses implement packet handling (_receive),
    address lookup (get_ip) and the address family (_get_address_family).
    """

    MCAST_GRP = '224.0.0.13'

    def __init__(self, interface_name, recv_socket, send_socket, vif_index):
        self.interface_name = interface_name

        # virtual interface index for the multicast routing table
        self.vif_index = vif_index

        # set receive socket and send socket
        self._send_socket = send_socket
        self._recv_socket = recv_socket
        self.interface_enabled = False

    def enable(self):
        """
        Enable this interface
        This will start a thread to be executed in the background
        to be used in the reception of control packets
        """
        self.interface_enabled = True
        # run receive method in background
        receive_thread = threading.Thread(target=self.receive)
        receive_thread.daemon = True
        receive_thread.start()

    def receive(self):
        """
        Method that will be executed in the background for the reception
        of control packets
        """
        while self.interface_enabled:
            try:
                # blocks until a datagram arrives or remove() shuts the
                # socket down; 500 is the ancillary-data buffer size
                (raw_bytes, ancdata, _, src_addr) = self._recv_socket.recvmsg(256 * 1024, 500)
                if raw_bytes:
                    self._receive(raw_bytes, ancdata, src_addr)
            except Exception:
                # keep the receive loop alive on transient errors; the loop
                # ends naturally once interface_enabled is cleared
                traceback.print_exc()
                continue

    @abstractmethod
    def _receive(self, raw_bytes, ancdata, src_addr):
        """
        Subclass method to be implemented
        This method will be invoked whenever a new control packet is received
        """
        raise NotImplementedError

    def send(self, data: bytes, group_ip: str):
        """
        Send a control packet through this interface
        Explicitly destined to group_ip (can be unicast or multicast IP)
        Send errors are deliberately ignored (best effort)
        """
        if self.interface_enabled and data:
            try:
                self._send_socket.sendto(data, (group_ip, 0))
            except socket.error:
                pass

    def remove(self):
        """
        This interface is no longer active....
        Clear all state regarding it
        """
        self.interface_enabled = False
        try:
            # shutdown unblocks the pending recvmsg() in the receive thread
            self._recv_socket.shutdown(socket.SHUT_RDWR)
        except Exception:
            pass
        self._recv_socket.close()
        self._send_socket.close()

    def is_enabled(self):
        """
        Verify if this interface is enabled
        """
        return self.interface_enabled

    @abstractmethod
    def get_ip(self):
        """
        Get IP of this interface
        """
        raise NotImplementedError

    def get_all_interface_networks(self):
        """
        Get all subnets associated with this interface.
        Used to verify if interface is directly connected to a multicast source
        This is extremely relevant on IPv6, where an interface can be connected
        to multiple subnets (global, link-local, unique-local)
        """
        all_networks = set()
        for if_addr in netifaces.ifaddresses(self.interface_name)[self._get_address_family()]:
            # strip the IPv6 zone id ("%eth0") and any prefix on the netmask
            ip_addr = if_addr["addr"].split("%")[0]
            netmask = if_addr["netmask"].split("/")[0]
            # prefix length == number of set bits in the netmask
            prefix_length = str(bin(int(ipaddress.ip_address(netmask).packed.hex(), 16)).count('1'))
            network = ip_addr + "/" + prefix_length
            all_networks.add(str(ipaddress.ip_interface(network).network))
        return all_networks

    @staticmethod
    @abstractmethod
    def _get_address_family():
        raise NotImplementedError

    def get_mtu(self):
        """
        Get MTU of this interface via the SIOCGIFMTU ioctl call.

        Fixes over the previous version:
        * the helper socket was never closed (fd leak) -> try/finally
        * fcntl.ioctl() requires a bytes-like argument on Python 3; the
          padded str argument would raise TypeError, so it is encoded first
        * bare ``except:`` narrowed to ``except Exception:``
        """
        s = socket.socket(type=socket.SOCK_DGRAM)
        try:
            # pad the name to the 32-byte struct ifreq layout
            ifr = (self.interface_name + '\x00' * (32 - len(self.interface_name))).encode('ascii')
            ifs = ioctl(s, SIOCGIFMTU, ifr)
            # in struct ifreq the MTU follows the 16-byte interface name
            mtu = struct.unpack('<H', ifs[16:18])[0]
        except Exception:
            traceback.print_exc()
            raise
        finally:
            s.close()
        # log.debug('get_mtu: mtu of {0} = {1}'.format(self.ifname, mtu))
        return mtu
111.py
##liuhuihui 1-5 plan ###see https://blog.csdn.net/typing_yes_no/article/details/51758938 ''' WebSocketApp是websocket的一个类 封装了websocket里面的connect、send消息等函数,更方便于调用 ''' import websocket import json import redis import threading import threadpool pool = redis.ConnectionPool(host='127.0.0.1', port=6379, decode_responses=True) ##decode_response=True,写入的键值对中的value为str类型,无则为字节类型 r = redis.Redis(connection_pool=pool) ##全局变量 币对 SYMBOL_LIST= ['ETHBTC','LTCBTC','NEOBTC','BNBBTC'] ##全局变量 交易所列表 EX_NAME_LIST = [] ##建立连接 def on_open(ws): pass ##接收到服务器返回的消息时调用 def on_message(ws, message): print('data update: %s' % message) ##撰写自定义函数 ##线程函数 def run(ws): ws.run_forever() if __name__ == "__main__": WS_LIST = [] for symbol in SYMBOL_LIST: print(symbol) apiUrl = "wss://stream.binance.com:9443/ws/"+symbol.lower()+"@depth" ##实例化websocket对象 ws = websocket.WebSocketApp(apiUrl, on_message = on_message, on_open = on_open) WS_LIST.append(ws) ''' ##threading模块手动创建线程跑symbol对 threadpool = [] t1 = threading.Thread(target=run, args=(WS_LIST[0],)) threadpool.append(t1) t2 = threading.Thread(target=run, args=(WS_LIST[1],)) threadpool.append(t2) t3 = threading.Thread(target=run, args=(WS_LIST[2],)) threadpool.append(t3) t4 = threading.Thread(target=run, args=(WS_LIST[3],)) threadpool.append(t4) for th in threadpool: th.start() ''' ##使用线程池执行每个无线循环 pool = threadpool.ThreadPool(len(WS_LIST)) requests = threadpool.makeRequests(run,WS_LIST) [pool.putRequest(req) for req in requests] pool.wait() ##阻塞直至完成 print('finish')
demoAll-prg-multi.py
"Launching GUIs as programs other ways: multiprocessing" """ 4 demo classes run as independent program processes: multiprocessing; multiprocessing allows us to launch named functions with arguments, but not lambdas, because they are not pickleable on Windows (Chapter 5); multiprocessing also has its own IPC tools like pipes for communication; """ from tkinter import * from multiprocessing import Process demoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale'] def runDemo(modname): # run in a new process module = __import__(modname) # build gui from scratch module.Demo().mainloop() if __name__ == '__main__': for modname in demoModules: # in __main__ only! Process(target=runDemo, args=(modname,)).start() root = Tk() # parent process GUI root.title('Processes') Label(root, text='Multiple program demo: multiprocessing', bg='white').pack() root.mainloop()
crestprocessor.py
# crestprocessor.py import threading from PyQt5 import QtCore from .crest.crest import Crest class CrestProcessor(QtCore.QObject): """ CREST Middle-ware """ login_response = QtCore.pyqtSignal(str) logout_response = QtCore.pyqtSignal() location_response = QtCore.pyqtSignal(str) destination_response = QtCore.pyqtSignal(bool) def __init__(self, implicit, client_id, client_secret, parent=None): super(CrestProcessor, self).__init__(parent) self.crest = Crest(implicit, client_id, client_secret, self._login_callback, self._logout_callback) def login(self): return self.crest.start_server() def logout(self): self.crest.logout() def get_location(self): server_thread = threading.Thread(target=self._get_location) server_thread.setDaemon(True) server_thread.start() def _get_location(self): location = self.crest.get_char_location() self.location_response.emit(location) def set_destination(self, sys_id): server_thread = threading.Thread(target=self._set_destination, args=(sys_id, )) server_thread.setDaemon(True) server_thread.start() def _set_destination(self, sys_id): response = self.crest.set_char_destination(sys_id) self.destination_response.emit(response) def _login_callback(self, char_name): self.login_response.emit(char_name) def _logout_callback(self): self.logout_response.emit()
_test_multiprocessing.py
# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import time import io import itertools import sys import os import gc import errno import signal import array import socket import random import logging import subprocess import struct import operator import pickle import weakref import warnings import test.support import test.support.script_helper from test import support # Skip tests if _multiprocessing wasn't built. _multiprocessing = test.support.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. support.skip_if_broken_multiprocessing_synchronize() import threading import multiprocessing.connection import multiprocessing.dummy import multiprocessing.heap import multiprocessing.managers import multiprocessing.pool import multiprocessing.queues from multiprocessing import util try: from multiprocessing import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocessing.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocessing import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None # # # # Timeout to wait until a process completes TIMEOUT = 60.0 # seconds def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused support.join_thread(process, timeout=TIMEOUT) if os.name == "posix": from multiprocessing import resource_tracker def _resource_unlink(name, rtype): resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes 
tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") from multiprocessing.connection import wait def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." 
% nsems_min) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.monotonic() try: return self.func(*args, **kwds) finally: self.elapsed = time.monotonic() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. 
proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_parent_process_attributes(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) self.assertIsNone(self.parent_process()) rconn, wconn = self.Pipe(duplex=False) p = self.Process(target=self._test_send_parent_process, args=(wconn,)) p.start() p.join() parent_pid, parent_name = rconn.recv() self.assertEqual(parent_pid, self.current_process().pid) self.assertEqual(parent_pid, os.getpid()) self.assertEqual(parent_name, self.current_process().name) @classmethod def _test_send_parent_process(cls, wconn): from multiprocessing.process import parent_process wconn.send([parent_process().pid, parent_process().name]) def test_parent_process(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # Launch a child process. Make it launch a grandchild process. Kill the # child process and make sure that the grandchild notices the death of # its parent (a.k.a the child process). 
rconn, wconn = self.Pipe(duplex=False) p = self.Process( target=self._test_create_grandchild_process, args=(wconn, )) p.start() if not rconn.poll(timeout=60): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "alive") p.terminate() p.join() if not rconn.poll(timeout=60): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "not alive") @classmethod def _test_create_grandchild_process(cls, wconn): p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) p.start() time.sleep(300) @classmethod def _test_report_parent_status(cls, wconn): from multiprocessing.process import parent_process wconn.send("alive" if parent_process().is_alive() else "not alive") parent_process().join(timeout=5) wconn.send("alive" if parent_process().is_alive() else "not alive") def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) 
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") def test_process_mainthread_native_id(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current_mainthread_native_id = threading.main_thread().native_id q = self.Queue(1) p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) p.start() child_mainthread_native_id = q.get() p.join() close_queue(q) self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) @classmethod def _test_process_mainthread_native_id(cls, q): mainthread_native_id = threading.main_thread().native_id q.put(mainthread_native_id) @classmethod def _sleep_some(cls): time.sleep(100) @classmethod def _test_sleep(cls, delay): time.sleep(delay) def _kill_process(self, meth): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._sleep_some) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) meth(p) if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. # We use alarm() to interrupt it if it blocks for too long. 
def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() return p.exitcode def test_terminate(self): exitcode = self._kill_process(multiprocessing.Process.terminate) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGTERM) def test_kill(self): exitcode = self._kill_process(multiprocessing.Process.kill) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGKILL) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) 
sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) @classmethod def _test_close(cls, rc=0, q=None): if q is not None: q.get() sys.exit(rc) def test_close(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) q = self.Queue() p = self.Process(target=self._test_close, kwargs={'q': q}) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) # Child is still alive, cannot close with self.assertRaises(ValueError): p.close() q.put(None) p.join() self.assertEqual(p.is_alive(), False) self.assertEqual(p.exitcode, 0) p.close() with self.assertRaises(ValueError): p.is_alive() with self.assertRaises(ValueError): p.join() with self.assertRaises(ValueError): p.terminate() p.close() wr = weakref.ref(p) del p gc.collect() self.assertIs(wr(), None) close_queue(q) def test_many_processes(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() N = 5 if sm == 'spawn' else 100 # Try to overwhelm the forkserver loop with events procs = [self.Process(target=self._test_sleep, args=(0.01,)) for i in range(N)] for p in procs: p.start() for p in procs: join_process(p) for p in procs: self.assertEqual(p.exitcode, 0) procs = [self.Process(target=self._sleep_some) for i in range(N)] for p in procs: p.start() time.sleep(0.001) # let the children start... for p in procs: p.terminate() for p in procs: join_process(p) if os.name != 'nt': exitcodes = [-signal.SIGTERM] if sys.platform == 'darwin': # bpo-31510: On macOS, killing a freshly started process with # SIGTERM sometimes kills the process with SIGKILL. 
exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(test.support.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) 
evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) @classmethod def _sleep_and_set_event(self, evt, delay=0.0): time.sleep(delay) evt.set() def check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). 
if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocessing.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def 
test_stderr_flush(self): # sys.stderr is flushed at process shutdown (issue #13812) if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = test.support.TESTFN self.addCleanup(test.support.unlink, testfn) proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) proc.start() proc.join() with open(testfn, 'r') as f: err = f.read() # The whole traceback was printed self.assertIn("ZeroDivisionError", err) self.assertIn("test_multiprocessing.py", err) self.assertIn("1/0 # MARKER", err) @classmethod def _test_stderr_flush(cls, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', closefd=False) 1/0 # MARKER @classmethod def _test_sys_exit(cls, reason, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', closefd=False) sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = test.support.TESTFN self.addCleanup(test.support.unlink, testfn) for reason in ( [1, 2, 3], 'ignore this', ): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, 1) with open(testfn, 'r') as f: content = f.read() self.assertEqual(content.rstrip(), str(reason)) os.unlink(testfn) for reason in (True, False, 8): p = self.Process(target=sys.exit, args=(reason,)) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, reason) # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) 
child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(pyqueue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() close_queue(queue) @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) 
self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. 
queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with test.support.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w') as f: f.write("""if 1: import multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with test.support.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = time.monotonic() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = time.monotonic() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). 
        # x86 Windows7 3.x once failed because the delta was only 135.8 ms.
        self.assertGreaterEqual(delta, 0.100)
        close_queue(q)

    def test_queue_feeder_donot_stop_onexc(self):
        # bpo-30414: verify feeder handles exceptions correctly
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            # Deliberately unpicklable: __reduce__ always raises.
            def __reduce__(self):
                raise AttributeError
        with test.support.captured_stderr():
            q = self.Queue()
            q.put(NotSerializable())
            q.put(True)
            # The feeder thread must survive the pickling failure and still
            # deliver the subsequent item.
            self.assertTrue(q.get(timeout=TIMEOUT))
            close_queue(q)

        with test.support.captured_stderr():
            # bpo-33078: verify that the queue size is correctly handled
            # on errors.
            q = self.Queue(maxsize=1)
            q.put(NotSerializable())
            q.put(True)
            try:
                self.assertEqual(q.qsize(), 1)
            except NotImplementedError:
                # qsize is not available on all platform as it
                # relies on sem_getvalue
                pass
            # bpo-30595: use a timeout of 1 second for slow buildbots
            self.assertTrue(q.get(timeout=1.0))
            # Check that the size of the queue is correct
            self.assertTrue(q.empty())
            close_queue(q)

    def test_queue_feeder_on_queue_feeder_error(self):
        # bpo-30006: verify feeder handles exceptions using the
        # _on_queue_feeder_error hook.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            """Mock unserializable object"""
            def __init__(self):
                self.reduce_was_called = False
                self.on_queue_feeder_error_was_called = False

            def __reduce__(self):
                self.reduce_was_called = True
                raise AttributeError

        class SafeQueue(multiprocessing.queues.Queue):
            """Queue with overloaded _on_queue_feeder_error hook"""
            @staticmethod
            def _on_queue_feeder_error(e, obj):
                if (isinstance(e, AttributeError) and
                        isinstance(obj, NotSerializable)):
                    obj.on_queue_feeder_error_was_called = True

        not_serializable_obj = NotSerializable()
        # The captured_stderr reduces the noise in the test report
        with test.support.captured_stderr():
            q = SafeQueue(ctx=multiprocessing.get_context())
            q.put(not_serializable_obj)

            # Verify that q is still functioning correctly
            q.put(True)
            self.assertTrue(q.get(timeout=1.0))

        # Assert that the serialization and the hook have been called correctly
        self.assertTrue(not_serializable_obj.reduce_was_called)
        self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)

    def test_closed_queue_put_get_exceptions(self):
        # put()/get() on a closed queue must raise ValueError('... is closed').
        for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
            q.close()
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.put('foo')
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.get()
#
#
#

class _TestLock(BaseTestCase):
    # Lock/RLock semantics for every start method (TYPE mixin).

    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        # non-recursive: a second non-blocking acquire fails
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        # releasing an unheld lock raises (exact type differs per backend)
        self.assertRaises((ValueError, threading.ThreadError),
                          lock.release)

    def test_rlock(self):
        lock = self.RLock()
        # recursive: may be acquired repeatedly by the same owner ...
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        # ... and must be released the same number of times
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertRaises((AssertionError, RuntimeError), lock.release)

    def test_lock_context(self):
        # Lock must be usable as a context manager.
        with self.Lock():
            pass


class _TestSemaphore(BaseTestCase):

    def _test_semaphore(self, sem):
        # Shared checks for a semaphore with an initial value of 2; uses
        # assertReturnsIfImplemented because get_value is unavailable on
        # some platforms.
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        # at zero a non-blocking acquire must fail
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)

    def test_semaphore(self):
        # A plain Semaphore may be released above its initial value.
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)

    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #    self.assertReturnsIfImplemented(2, get_value, sem)

    def test_timeout(self):
        # Timing behavior of acquire() on an exhausted semaphore.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)

        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        # non-blocking acquire ignores the timeout
        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)

        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)

        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)


class _TestCondition(BaseTestCase):

    @classmethod
    def f(cls, cond, sleeping, woken, timeout=None):
        # Worker body: signal 'sleeping', wait on the condition (possibly
        # timing out), then signal 'woken'.
        cond.acquire()
        sleeping.release()
        cond.wait(timeout)
        woken.release()
        cond.release()

    def assertReachesEventually(self, func, value):
        # Poll until func() == value (or NotImplementedError), then assert.
        for i in range(10):
            try:
                if func() == value:
                    break
            except NotImplementedError:
                break
            time.sleep(DELTA)
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(value, func)

    def check_invariant(self, cond):
        # this is only supposed to succeed when there are no sleepers
        if self.TYPE == 'processes':
            try:
                sleepers = (cond._sleeping_count.get_value() -
                            cond._woken_count.get_value())
                self.assertEqual(sleepers, 0)
                self.assertEqual(cond._wait_semaphore.get_value(), 0)
            except NotImplementedError:
                pass

    def test_notify(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # one process and one thread both wait on the same condition
        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()

        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)

        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()

        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)
        p.join()

    def test_notify_all(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # start some threads/processes which will timeout
        for i in range(3):
            p = self.Process(target=self.f,
                             args=(cond, sleeping, woken, TIMEOUT1))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f,
                                 args=(cond, sleeping, woken, TIMEOUT1))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them all to sleep
        for i in range(6):
            sleeping.acquire()

        # check they have all timed out
        for i in range(6):
            woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) 
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

    @classmethod
    def _test_waitfor_f(cls, cond, state):
        # Child body: hand off state==0 to the parent, then wait until the
        # parent has incremented state to 4; exit code 1 signals failure.
        with cond:
            state.value = 0
            cond.notify()
            result = cond.wait_for(lambda : state.value==4)
            if not result or state.value != 4:
                sys.exit(1)

    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', -1)

        p = self.Process(target=self._test_waitfor_f, args=(cond, state))
        p.daemon = True
        p.start()

        with cond:
            result = cond.wait_for(lambda : state.value==0)
            self.assertTrue(result)
            self.assertEqual(state.value, 0)

        for i in range(4):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        join_process(p)
        self.assertEqual(p.exitcode, 0)

    @classmethod
    def _test_waitfor_timeout_f(cls, cond, state, success, sem):
        # Child body: wait_for a predicate that never becomes true and
        # verify it times out within a sane window.
        sem.release()
        with cond:
            expected = 0.1
            dt = time.monotonic()
            result = cond.wait_for(lambda : state.value==4, timeout=expected)
            dt = time.monotonic() - dt
            # borrow logic in assertTimeout() from test/lock_tests.py
            if not result and expected * 0.6 < dt < expected * 10.0:
                success.value = True

    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor_timeout(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', 0)
        success = self.Value('i', False)
        sem = self.Semaphore(0)

        p = self.Process(target=self._test_waitfor_timeout_f,
                         args=(cond, state, success, sem))
        p.daemon = True
        p.start()
        self.assertTrue(sem.acquire(timeout=TIMEOUT))

        # Only increment 3 times, so state == 4 is never reached.
        for i in range(3):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        join_process(p)
        self.assertTrue(success.value)

    @classmethod
    def _test_wait_result(cls, c, pid):
        # Child body: notify the waiting parent, then (on POSIX) interrupt
        # it with SIGINT to check wait() raises KeyboardInterrupt.
        with c:
            c.notify()
        time.sleep(1)
        if pid is not None:
            os.kill(pid, signal.SIGINT)

    def test_wait_result(self):
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None

        c = self.Condition()
        with c:
            # un-notified waits time out and return False
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))

            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()

            self.assertTrue(c.wait(60))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 60)

            p.join()


class _TestEvent(BaseTestCase):

    @classmethod
    def _test_event(cls, event):
        # Child body: set the event after a delay.
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None.
        # API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        # a child process setting the event must unblock our wait()
        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()

#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#

# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value.  We use the class DummyList
# for the same purpose.

class _DummyList(object):
    # Cross-process atomic counter with a list-like API: append()
    # increments, len() reads.  Backed by a shared int and a Lock so it
    # can be pickled to child processes via __getstate__/__setstate__.

    def __init__(self):
        wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
        lock = multiprocessing.Lock()
        self.__setstate__((wrapper, lock))
        self._lengthbuf[0] = 0

    def __setstate__(self, state):
        (self._wrapper, self._lock) = state
        self._lengthbuf = self._wrapper.create_memoryview().cast('i')

    def __getstate__(self):
        return (self._wrapper, self._lock)

    def append(self, _):
        with self._lock:
            self._lengthbuf[0] += 1

    def __len__(self):
        with self._lock:
            return self._lengthbuf[0]

def _wait():
    # A crude wait/yield function not relying on synchronization primitives.
    time.sleep(0.01)

class Bunch(object):
    """
    A bunch of threads.
    """
    def __init__(self, namespace, f, args, n, wait_before_exit=False):
        """
        Construct a bunch of `n` threads running the same function `f`.
        If `wait_before_exit` is True, the threads won't terminate until
        do_finish() is called.
        """
        self.f = f
        self.args = args
        self.n = n
        self.started = namespace.DummyList()
        self.finished = namespace.DummyList()
        self._can_exit = namespace.Event()
        if not wait_before_exit:
            self._can_exit.set()

        threads = []
        for i in range(n):
            p = namespace.Process(target=self.task)
            p.daemon = True
            p.start()
            threads.append(p)

        def finalize(threads):
            for p in threads:
                p.join()

        # join all workers when the Bunch is closed or garbage-collected
        self._finalizer = weakref.finalize(self, finalize, threads)

    def task(self):
        # Worker wrapper: record start/finish and honor wait_before_exit.
        pid = os.getpid()
        self.started.append(pid)
        try:
            self.f(*self.args)
        finally:
            self.finished.append(pid)
            self._can_exit.wait(30)
            assert self._can_exit.is_set()

    def wait_for_started(self):
        while len(self.started) < self.n:
            _wait()

    def wait_for_finished(self):
        while len(self.finished) < self.n:
            _wait()

    def do_finish(self):
        self._can_exit.set()

    def close(self):
        self._finalizer()


class AppendTrue(object):
    # Picklable callable used as a Barrier 'action': appends True to obj.
    def __init__(self, obj):
        self.obj = obj
    def __call__(self):
        self.obj.append(True)


class _TestBarrier(BaseTestCase):
    """
    Tests for Barrier objects.
    """
    N = 5
    defaultTimeout = 30.0  # XXX Slow Windows buildbots need generous timeout

    def setUp(self):
        self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)

    def tearDown(self):
        self.barrier.abort()
        self.barrier = None

    def DummyList(self):
        # Pick the counter implementation that matches the test flavor.
        if self.TYPE == 'threads':
            return []
        elif self.TYPE == 'manager':
            return self.manager.list()
        else:
            return _DummyList()

    def run_threads(self, f, args):
        # Run f in N-1 workers plus the current process/thread.
        b = Bunch(self, f, args, self.N-1)
        try:
            f(*args)
            b.wait_for_finished()
        finally:
            b.close()

    @classmethod
    def multipass(cls, barrier, results, n):
        m = barrier.parties
        assert m == cls.N
        for i in range(n):
            results[0].append(True)
            assert len(results[1]) == i * m
            barrier.wait()
            results[1].append(True)
            assert len(results[0]) == (i + 1) * m
            barrier.wait()
        try:
            assert barrier.n_waiting == 0
        except NotImplementedError:
            pass
        assert not barrier.broken

    def test_barrier(self, passes=1):
        """
        Test that a barrier is passed in lockstep
        """
        results = [self.DummyList(), self.DummyList()]
        self.run_threads(self.multipass, (self.barrier, results, passes))

    def test_barrier_10(self):
        """
        Test that a barrier works for 10 consecutive runs
        """
        return self.test_barrier(10)

    @classmethod
    def _test_wait_return_f(cls, barrier, queue):
        res = barrier.wait()
        queue.put(res)

    def test_wait_return(self):
        """
        test the return value from barrier.wait
        """
        queue = self.Queue()
        self.run_threads(self._test_wait_return_f, (self.barrier, queue))
        results = [queue.get() for i in range(self.N)]
        # wait() returns each index 0..N-1 exactly once; check for 0
        self.assertEqual(results.count(0), 1)
        close_queue(queue)

    @classmethod
    def _test_action_f(cls, barrier, results):
        barrier.wait()
        if len(results) != 1:
            raise RuntimeError

    def test_action(self):
        """
        Test the 'action' callback
        """
        results = self.DummyList()
        barrier = self.Barrier(self.N, action=AppendTrue(results))
        self.run_threads(self._test_action_f, (barrier, results))
        self.assertEqual(len(results), 1)

    @classmethod
    def _test_abort_f(cls, barrier, results1, results2):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()

    def test_abort(self):
        """
        Test that an abort will put the barrier in a broken state
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        self.run_threads(self._test_abort_f,
                         (self.barrier, results1, results2))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertTrue(self.barrier.broken)

    @classmethod
    def _test_reset_f(cls, barrier, results1, results2, results3):
        i = barrier.wait()
        if i == cls.N//2:
            # Wait until the other threads are all in the barrier.
            while barrier.n_waiting < cls.N-1:
                time.sleep(0.001)
            barrier.reset()
        else:
            try:
                barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
        # Now, pass the barrier again
        barrier.wait()
        results3.append(True)

    def test_reset(self):
        """
        Test that a 'reset' on a barrier frees the waiting threads
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        self.run_threads(self._test_reset_f,
                         (self.barrier, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_abort_and_reset_f(cls, barrier, barrier2,
                                results1, results2, results3):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()
        # Synchronize and reset the barrier.  Must synchronize first so
        # that everyone has left it when we reset, and after so that no
        # one enters it before the reset.
        if barrier2.wait() == cls.N//2:
            barrier.reset()
        barrier2.wait()
        barrier.wait()
        results3.append(True)

    def test_abort_and_reset(self):
        """
        Test that a barrier can be reset after being broken.
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        barrier2 = self.Barrier(self.N)
        self.run_threads(self._test_abort_and_reset_f,
                         (self.barrier, barrier2, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_timeout_f(cls, barrier, results):
        i = barrier.wait()
        if i == cls.N//2:
            # One thread is late!
            time.sleep(1.0)
        try:
            barrier.wait(0.5)
        except threading.BrokenBarrierError:
            results.append(True)

    def test_timeout(self):
        """
        Test wait(timeout)
        """
        results = self.DummyList()
        self.run_threads(self._test_timeout_f, (self.barrier, results))
        self.assertEqual(len(results), self.barrier.parties)

    @classmethod
    def _test_default_timeout_f(cls, barrier, results):
        i = barrier.wait(cls.defaultTimeout)
        if i == cls.N//2:
            # One thread is later than the default timeout
            time.sleep(1.0)
        try:
            barrier.wait()
        except threading.BrokenBarrierError:
            results.append(True)

    def test_default_timeout(self):
        """
        Test the barrier's default timeout
        """
        barrier = self.Barrier(self.N, timeout=0.5)
        results = self.DummyList()
        self.run_threads(self._test_default_timeout_f, (barrier, results))
        self.assertEqual(len(results), barrier.parties)

    def test_single_thread(self):
        # A one-party barrier never blocks.
        b = self.Barrier(1)
        b.wait()
        b.wait()

    @classmethod
    def _test_thousand_f(cls, barrier, passes, conn, lock):
        for i in range(passes):
            barrier.wait()
            with lock:
                conn.send(i)

    def test_thousand(self):
        # Stress: N workers make 1000 lockstep passes; the pipe must see
        # all N sends for pass i before any send for pass i+1.
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                             args=(self.barrier, passes, child_conn, lock))
            p.start()
            self.addCleanup(p.join)

        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)

#
#
#

class _TestValue(BaseTestCase):

    ALLOWED_TYPES = ('processes',)

    # (typecode, initial value, value written by the child process)
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('q', 2 ** 33, 2 ** 34),
        ('c', latin('x'), latin('y'))
        ]

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _test(cls, values):
        # Child body: overwrite every shared value with its third element.
        for sv, cv in zip(values, cls.codes_values):
            sv.value = cv[2]

    def test_value(self, raw=False):
        if raw:
            values = [self.RawValue(code, value)
                      for code, value, _ in self.codes_values]
        else:
            values = [self.Value(code, value)
                      for code, value, _ in self.codes_values]

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[1])

        # writes made in the child must be visible here afterwards
        proc = self.Process(target=self._test, args=(values,))
        proc.daemon = True
        proc.start()
        proc.join()

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[2])

    def test_rawvalue(self):
        self.test_value(raw=True)

    def test_getobj_getlock(self):
        # The lock= argument controls whether get_lock/get_obj exist.
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))


class _TestArray(BaseTestCase):

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def f(cls, seq):
        # In-place prefix sum; run locally on a list and remotely on the
        # shared array so the results can be compared.
        for i in range(1, len(seq)):
            seq[i] += seq[i-1]

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array(self, raw=False):
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)

        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])

        self.assertEqual(list(arr[:]), seq)

        self.f(seq)

        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(list(arr[:]), seq)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # The repetition below strengthens the test by increasing the chances
        # of previously allocated non-zero memory being used for the new array
        # on the 2nd and 3rd loops.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), list(range(10)))
            del arr

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_rawarray(self):
        self.test_array(raw=True)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_getobj_getlock_obj(self):
        # Mirrors _TestValue.test_getobj_getlock for Array.
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))
        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))

#
#
#

class _TestContainers(BaseTestCase):
    # Manager proxy containers (list/dict/Namespace) only.

    ALLOWED_TYPES = ('manager',)

    def test_list(self):
        a = self.list(list(range(10)))
        self.assertEqual(a[:], list(range(10)))

        b = self.list()
        self.assertEqual(b[:], [])

        b.extend(list(range(5)))
        self.assertEqual(b[:], list(range(5)))

        self.assertEqual(b[2], 2)
        self.assertEqual(b[2:10], [2,3,4])

        b *= 2
        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])

        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])

        self.assertEqual(a[:], list(range(10)))

        d = [a, b]
        e = self.list(d)
        self.assertEqual(
            [element[:] for element in e],
            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
            )

        # a nested proxy reflects later mutations of the inner list
        f = self.list([a])
        a.append('hello')
        self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])

    def test_list_iter(self):
        a = self.list(list(range(10)))
        it = iter(a)
        self.assertEqual(list(it), list(range(10)))
        self.assertEqual(list(it), [])  # exhausted
        # list modified during iteration
        it = iter(a)
        a[0] = 100
        self.assertEqual(next(it), 100)

    def test_list_proxy_in_list(self):
        a = self.list([self.list(range(3)) for _i in range(3)])
        self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)

        a[0][-1] = 55
        self.assertEqual(a[0][:], [0, 1, 55])
        for i in range(1, 3):
            self.assertEqual(a[i][:], [0, 1, 2])

        self.assertEqual(a[1].pop(), 2)
        self.assertEqual(len(a[1]), 2)
        for i in range(0, 3, 2):
            self.assertEqual(len(a[i]), 3)

        del a

        # a self-referential proxy list must not break deletion
        b = self.list()
        b.append(b)
        del b

    def test_dict(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
        self.assertEqual(sorted(d.keys()), indices)
        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])

    def test_dict_iter(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        it = iter(d)
        self.assertEqual(list(it), indices)
        self.assertEqual(list(it), [])  # exhausted
        # dictionary changed size during iteration
        it = iter(d)
        d.clear()
        self.assertRaises(RuntimeError, next, it)

    def test_dict_proxy_nested(self):
        # Nested dict proxies: mutations through either the outer proxy or
        # the inner proxy must be visible through both.
        pets = self.dict(ferrets=2, hamsters=4)
        supplies = self.dict(water=10, feed=3)
        d = self.dict(pets=pets, supplies=supplies)

        self.assertEqual(supplies['water'], 10)
        self.assertEqual(d['supplies']['water'], 10)

        d['supplies']['blankets'] = 5
        self.assertEqual(supplies['blankets'], 5)
        self.assertEqual(d['supplies']['blankets'], 5)

        d['supplies']['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        # inner values survive deletion of the local proxy references
        del pets
        del supplies
        self.assertEqual(d['pets']['ferrets'], 2)
        d['supplies']['blankets'] = 11
        self.assertEqual(d['supplies']['blankets'], 11)

        pets = d['pets']
        supplies = d['supplies']
        supplies['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        # clearing the outer dict must not clear the inner proxies
        d.clear()
        self.assertEqual(len(d), 0)
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(pets['hamsters'], 4)

        l = self.list([pets, supplies])
        l[0]['marmots'] = 1
        self.assertEqual(pets['marmots'], 1)
        self.assertEqual(l[0]['marmots'], 1)

        del pets
        del supplies
        self.assertEqual(l[0]['marmots'], 1)

        outer = self.list([[88, 99], l])
        self.assertIsInstance(outer[0], list)  # Not a ListProxy
        self.assertEqual(outer[-1][-1]['feed'], 3)

    def test_namespace(self):
        n = self.Namespace()
        n.name = 'Bob'
        n.job = 'Builder'
        n._hidden = 'hidden'
        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
        del n.job
        # repr hides underscore-prefixed attributes
        self.assertEqual(str(n), "Namespace(name='Bob')")
        self.assertTrue(hasattr(n, 'name'))
        self.assertTrue(not hasattr(n, 'job'))

#
#
#

# Module-level helpers for the Pool tests (must be picklable, hence
# defined at module scope rather than inside the test class).

def sqr(x, wait=0.0):
    # Square x, optionally sleeping first to simulate slow work.
    time.sleep(wait)
    return x*x

def mul(x, y):
    return x*y

def raise_large_valuerror(wait):
    # Raise a ValueError with a ~1 MiB message after an optional delay.
    time.sleep(wait)
    raise ValueError("x" * 1024**2)

def identity(x):
    return x

class CountedObject(object):
    # Tracks how many instances are currently alive via n_instances.
    n_instances = 0

    def __new__(cls):
        cls.n_instances += 1
        return object.__new__(cls)

    def __del__(self):
        type(self).n_instances -= 1

class SayWhenError(ValueError):
    pass

def exception_throwing_generator(total, when):
    # Yield 0..total-1, raising SayWhenError at index `when`
    # (when == -1 raises before yielding anything).
    if when == -1:
        raise SayWhenError("Somebody said when")
    for i in range(total):
        if i == when:
            raise SayWhenError("Somebody said when")
        yield i


class _TestPool(BaseTestCase):

    @classmethod
    def setUpClass(cls):
        # One shared 4-worker pool for the whole class.
        super().setUpClass()
        cls.pool = cls.Pool(4)

    @classmethod
    def tearDownClass(cls):
        cls.pool.terminate()
        cls.pool.join()
        cls.pool = None
        super().tearDownClass()

    def test_apply(self):
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), sqr(5))
        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))

    def test_map(self):
        pmap = self.pool.map
        self.assertEqual(pmap(sqr, list(range(10))),
                         list(map(sqr, list(range(10)))))
        self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
                         list(map(sqr, list(range(100)))))

    def test_starmap(self):
        psmap = self.pool.starmap
        tuples = list(zip(range(10), range(9,-1, -1)))
        self.assertEqual(psmap(mul, tuples),
                         list(itertools.starmap(mul, tuples)))
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(psmap(mul, tuples, chunksize=20),
                         list(itertools.starmap(mul, tuples)))

    def test_starmap_async(self):
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
                         list(itertools.starmap(mul, tuples)))

    def test_map_async(self):
        self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
                         list(map(sqr, list(range(10)))))

    def test_map_async_callbacks(self):
        # callback receives the result list, error_callback the exception
        call_args = self.manager.list() if self.TYPE == 'manager' else []
        self.pool.map_async(int, ['1'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(1, len(call_args))
        self.assertEqual([1], call_args[0])
        self.pool.map_async(int, ['a'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(2, len(call_args))
        self.assertIsInstance(call_args[1], ValueError)

    def test_map_unplicklable(self):
        # Issue #19425 -- failure to pickle should not cause a hang
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class A(object):
            def __reduce__(self):
                raise RuntimeError('cannot pickle')
        with self.assertRaises(RuntimeError):
            self.pool.map(sqr, [A()]*10)

    def test_map_chunksize(self):
        # map_async over an empty iterable must not stall
        try:
            self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
        except multiprocessing.TimeoutError:
            self.fail("pool.map_async with chunksize stalled on null list")

    def test_map_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
        # again, make sure it's reentrant
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)

        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(10, 3), 1)

        class SpecialIterable:
            # __next__ always raises, but __len__ claims one element.
            def __iter__(self):
                return self
            def __next__(self):
                raise SayWhenError
            def __len__(self):
                return 1
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)

    def test_async(self):
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

    def test_async_timeout(self):
        # get(timeout=...) must raise TimeoutError if the task is slower
        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
        get = TimingWrapper(res.get)
        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)

    def test_imap(self):
        it = self.pool.imap(sqr, list(range(10)))
        self.assertEqual(list(it), list(map(sqr, list(range(10)))))

        it = self.pool.imap(sqr, list(range(10)))
        for i in range(10):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)

        it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
        for i in range(1000):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)

    def test_imap_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)

        it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
        for i in range(3):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

        # SayWhenError seen at start of problematic chunk's results
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
        for i in range(6):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
        for i in range(4):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

    def test_imap_unordered(self):
        # results may arrive in any order, so compare after sorting
        it = self.pool.imap_unordered(sqr, list(range(10)))
        self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))

        it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
        self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))

    def test_imap_unordered_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)

        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(10, 3),
                                      1)
        expected_values = list(map(sqr, list(range(10))))
        with self.assertRaises(SayWhenError):
            # imap_unordered makes it difficult to anticipate the SayWhenError
            for i in range(10):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(20, 7),
                                      2)
        expected_values = list(map(sqr, list(range(20))))
        with self.assertRaises(SayWhenError):
            for i in range(20):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

    def test_make_pool(self):
        # a non-positive worker count must be rejected
        expected_error = (RemoteError if self.TYPE == 'manager'
                          else ValueError)

        self.assertRaises(expected_error, self.Pool, -1)
        self.assertRaises(expected_error, self.Pool, 0)

        if self.TYPE != 'manager':
            p = self.Pool(3)
            try:
                self.assertEqual(3, len(p._pool))
            finally:
                p.close()
                p.join()

    def test_terminate(self):
        result = self.pool.map_async(
            time.sleep, [0.1 for i in range(10000)], chunksize=1
            )
        self.pool.terminate()
        join = TimingWrapper(self.pool.join)
        join()
        # Sanity check the pool didn't wait for all tasks to finish
        self.assertLess(join.elapsed, 2.0)

    def test_empty_iterable(self):
        # See Issue 12157 p
= self.Pool(1) self.assertEqual(p.map(sqr, []), []) self.assertEqual(list(p.imap(sqr, [])), []) self.assertEqual(list(p.imap_unordered(sqr, [])), []) self.assertEqual(p.map_async(sqr, []).get(), []) p.close() p.join() def test_context(self): if self.TYPE == 'processes': L = list(range(10)) expected = [sqr(i) for i in L] with self.Pool(2) as p: r = p.map_async(sqr, L) self.assertEqual(r.get(), expected) p.join() self.assertRaises(ValueError, p.map_async, sqr, L) @classmethod def _test_traceback(cls): raise RuntimeError(123) # some comment def test_traceback(self): # We want ensure that the traceback from the child process is # contained in the traceback raised in the main process. if self.TYPE == 'processes': with self.Pool(1) as p: try: p.apply(self._test_traceback) except Exception as e: exc = e else: self.fail('expected RuntimeError') p.join() self.assertIs(type(exc), RuntimeError) self.assertEqual(exc.args, (123,)) cause = exc.__cause__ self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) self.assertIn('raise RuntimeError(123) # some comment', cause.tb) with test.support.captured_stderr() as f1: try: raise exc except RuntimeError: sys.excepthook(*sys.exc_info()) self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) # _helper_reraises_exception should not make the error # a remote exception with self.Pool(1) as p: try: p.map(sqr, exception_throwing_generator(1, -1), 1) except Exception as e: exc = e else: self.fail('expected SayWhenError') self.assertIs(type(exc), SayWhenError) self.assertIs(exc.__cause__, None) p.join() @classmethod def _test_wrapped_exception(cls): raise RuntimeError('foo') def test_wrapped_exception(self): # Issue #20980: Should not wrap exception when using thread pool with self.Pool(1) as p: with self.assertRaises(RuntimeError): p.apply(self._test_wrapped_exception) p.join() def test_map_no_failfast(self): # Issue #23992: the fail-fast behaviour when an exception is raised # during map() would make Pool.join() 
deadlock, because a worker # process would fill the result queue (after the result handler thread # terminated, hence not draining it anymore). t_start = time.monotonic() with self.assertRaises(ValueError): with self.Pool(2) as p: try: p.map(raise_large_valuerror, [0, 1]) finally: time.sleep(0.5) p.close() p.join() # check that we indeed waited for all jobs self.assertGreater(time.monotonic() - t_start, 0.9) def test_release_task_refs(self): # Issue #29861: task arguments and results should not be kept # alive after we are done with them. objs = [CountedObject() for i in range(10)] refs = [weakref.ref(o) for o in objs] self.pool.map(identity, objs) del objs time.sleep(DELTA) # let threaded cleanup code run self.assertEqual(set(wr() for wr in refs), {None}) # With a process pool, copies of the objects are returned, check # they were released too. self.assertEqual(CountedObject.n_instances, 0) def test_enter(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) with pool: pass # call pool.terminate() # pool is no longer running with self.assertRaises(ValueError): # bpo-35477: pool.__enter__() fails if the pool is not running with pool: pass pool.join() def test_resource_warning(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) pool.terminate() pool.join() # force state to RUN to emit ResourceWarning in __del__() pool._state = multiprocessing.pool.RUN with support.check_warnings(('unclosed running multiprocessing pool', ResourceWarning)): pool = None support.gc_collect() def raising(): raise KeyError("key") def unpickleable_result(): return lambda: 42 class _TestPoolWorkerErrors(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_async_error_callback(self): p = multiprocessing.Pool(2) scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(raising, error_callback=errback) self.assertRaises(KeyError, res.get) self.assertTrue(scratchpad[0]) 
self.assertIsInstance(scratchpad[0], KeyError) p.close() p.join() def test_unpickleable_result(self): from multiprocessing.pool import MaybeEncodingError p = multiprocessing.Pool(2) # Make sure we don't lose pool processes because of encoding errors. for iteration in range(20): scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(unpickleable_result, error_callback=errback) self.assertRaises(MaybeEncodingError, res.get) wrapped = scratchpad[0] self.assertTrue(wrapped) self.assertIsInstance(scratchpad[0], MaybeEncodingError) self.assertIsNotNone(wrapped.exc) self.assertIsNotNone(wrapped.value) p.close() p.join() class _TestPoolWorkerLifetime(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_pool_worker_lifetime(self): p = multiprocessing.Pool(3, maxtasksperchild=10) self.assertEqual(3, len(p._pool)) origworkerpids = [w.pid for w in p._pool] # Run many tasks so each worker gets replaced (hopefully) results = [] for i in range(100): results.append(p.apply_async(sqr, (i, ))) # Fetch the results and verify we got the right answers, # also ensuring all the tasks have completed. for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive # (countdown * DELTA = 5 seconds max startup process time) countdown = 50 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [w.pid for w in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() def test_pool_worker_lifetime_early_close(self): # Issue #10332: closing a pool whose workers have limited lifetimes # before all the tasks completed would make join() hang. 
p = multiprocessing.Pool(3, maxtasksperchild=1) results = [] for i in range(6): results.append(p.apply_async(sqr, (i, 0.3))) p.close() p.join() # check the results for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) def test_worker_finalization_via_atexit_handler_of_multiprocessing(self): # tests cases against bpo-38744 and bpo-39360 cmd = '''if 1: from multiprocessing import Pool problem = None class A: def __init__(self): self.pool = Pool(processes=1) def test(): global problem problem = A() problem.pool.map(float, tuple(range(10))) if __name__ == "__main__": test() ''' rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd) self.assertEqual(rc, 0) # # Test of creating a customized manager class # from multiprocessing.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in range(10): yield i*i class IteratorProxy(BaseProxy): _exposed_ = ('__next__',) def __iter__(self): return self def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() self.common(manager) manager.shutdown() # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context(self): with MyManager() as manager: self.common(manager) # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. 
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context_prestarted(self): manager = MyManager() manager.start() with manager: self.common(manager) self.assertEqual(manager._process.exitcode, 0) def common(self, manager): foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i*i for i in range(10)]) # # Test of connecting to a remote server and using xmlrpclib for serialization # _queue = pyqueue.Queue() def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) values = ['hello world', None, True, 2.25, 'hall\xe5 v\xe4rlden', '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', b'hall\xe5 v\xe4rlden', ] result = values[:] @classmethod def _putter(cls, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple queue.put(tuple(cls.values)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=(test.support.HOST, 0), 
authkey=authkey, serializer=SERIALIZER ) manager.start() self.addCleanup(manager.shutdown) p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() self.assertEqual(queue.get(), self.result) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue class _TestManagerRestart(BaseTestCase): @classmethod def _putter(cls, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER) try: srvr = manager.get_server() addr = srvr.address # Close the connection.Listener socket which gets opened as a part # of manager.get_server(). It's not needed for the test. 
srvr.listener.close() manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() p.join() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue finally: if hasattr(manager, "shutdown"): manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) try: manager.start() self.addCleanup(manager.shutdown) except OSError as e: if e.errno != errno.EADDRINUSE: raise # Retry after some time, in case the old socket was lingering # (sporadic failure on buildbots) time.sleep(1.0) manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) if hasattr(manager, "shutdown"): self.addCleanup(manager.shutdown) # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _echo(cls, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', list(range(4))) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) 
self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except multiprocessing.BufferTooShort as e: self.assertEqual(e.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(-1), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) time.sleep(.1) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(OSError, reader.send, 2) self.assertRaises(OSError, writer.recv) self.assertRaises(OSError, writer.poll) def test_spawn_close(self): # We test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. 
conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) @classmethod def _is_fd_assigned(cls, fd): try: os.fstat(fd) except OSError as e: if e.errno == errno.EBADF: return False raise else: return True @classmethod def _writefd(cls, conn, data, create_dummy_fds=False): if create_dummy_fds: for i in range(0, 256): if not cls._is_fd_assigned(i): os.dup2(conn.fileno(), i) fd = reduction.recv_handle(conn) if msvcrt: fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) os.write(fd, data) os.close(fd) @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") def test_fd_transfer(self): if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"foo")) p.daemon = True p.start() self.addCleanup(test.support.unlink, test.support.TESTFN) with open(test.support.TESTFN, "wb") as f: fd = f.fileno() if msvcrt: fd = msvcrt.get_osfhandle(fd) 
reduction.send_handle(conn, fd, p.pid) p.join() with open(test.support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"foo") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") @unittest.skipIf(MAXFD <= 256, "largest assignable fd number is too small") @unittest.skipUnless(hasattr(os, "dup2"), "test needs os.dup2()") def test_large_fd_transfer(self): # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() self.addCleanup(test.support.unlink, test.support.TESTFN) with open(test.support.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(test.support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data. 
if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) @unittest.skipUnless(util.abstract_sockets_supported, "test needs abstract socket support") def test_abstract_socket(self): with self.connection.Listener("\0something") as listener: with self.connection.Client(listener.address) as client: with listener.accept() as d: client.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, listener.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def 
test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. 
r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocessing import resource_sharer resource_sharer.stop(timeout=TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((test.support.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, 
families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) 
conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block 
self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). # Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocessing.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() 
p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory") class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() def test_shared_memory_basics(self): sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, 'test01_tsmb') self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. also_sms = shared_memory.SharedMemory('test01_tsmb') self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. same_sms.close() if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. 
Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. with self.assertRaises(FileNotFoundError): sms_uno = shared_memory.SharedMemory( 'test01_dblunlink', create=True, size=5000 ) try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory('test01_dblunlink') sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( 'test01_tsmb', create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb') self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. 
sms.close() # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocessing tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. 
with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. 
absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. 
warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate') try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual('test03_duplicate', sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl) deserialized_sl = pickle.loads(serialized_sl) self.assertTrue( isinstance(deserialized_sl, shared_memory.ShareableList) ) self.assertTrue(deserialized_sl[-1], 9) self.assertFalse(sl is deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") # Verify data is not being put into the pickled representation. 
name = 'a' * len(sl.shm.name) larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl) self.assertTrue(len(serialized_sl) == len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. p.terminate() p.wait() deadline = time.monotonic() + 60 t = 0.1 while time.monotonic() < deadline: time.sleep(t) t = min(t*2, 5) try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break else: raise AssertionError("A SharedMemory segment was leaked after" " a process was abruptly terminated.") if os.name == 'posix': # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). 
err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) # # # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc 
= None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with test.support.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocessing.' 
+ m for m in modules] modules.remove('multiprocessing.__init__') modules.append('multiprocessing') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocessing.popen_fork') modules.remove('multiprocessing.popen_forkserver') modules.remove('multiprocessing.popen_spawn_posix') else: modules.remove('multiprocessing.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocessing.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocessing.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == 
#                       multiprocessing.current_process().name
#         self.__handled = True
#
#     def test_logging(self):
#         handler = logging.Handler()
#         handler.handle = self.handle
#         self.__handled = False
#         # Bypass getLogger() and side-effects
#         logger = logging.getLoggerClass()(
#                 'multiprocessing.test.TestLoggingProcessName')
#         logger.addHandler(handler)
#         logger.propagate = False
#
#         logger.warn('foo')
#         assert self.__handled

#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#

class _TestPollEintr(BaseTestCase):
    """Process.join() must survive an EINTR caused by signal delivery."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def _killer(cls, pid):
        # Child helper: give the parent a moment to block in join(),
        # then interrupt it with SIGUSR1.
        time.sleep(0.1)
        os.kill(pid, signal.SIGUSR1)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_poll_eintr(self):
        got_signal = [False]
        def record(*args):
            # Handler mutates a list cell so the closure can record delivery.
            got_signal[0] = True
        pid = os.getpid()
        oldhandler = signal.signal(signal.SIGUSR1, record)
        try:
            killer = self.Process(target=self._killer, args=(pid,))
            killer.start()
            try:
                # join() must retry after the handler runs rather than
                # failing early because of EINTR.
                p = self.Process(target=time.sleep, args=(2,))
                p.start()
                p.join()
            finally:
                killer.join()
            self.assertTrue(got_signal[0])
            self.assertEqual(p.exitcode, 0)
        finally:
            # Always restore the previous SIGUSR1 disposition.
            signal.signal(signal.SIGUSR1, oldhandler)

#
# Test to verify handle verification, see issue 3321
#

class TestInvalidHandle(unittest.TestCase):
    """Connection objects must cope with invalid OS-level handles."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        # 44977608 is an arbitrary bogus file descriptor number.
        conn = multiprocessing.connection.Connection(44977608)
        # check that poll() doesn't crash
        try:
            conn.poll()
        except (ValueError, OSError):
            pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            conn._handle = None
        # A negative handle must be rejected by the constructor itself.
        self.assertRaises((ValueError, OSError),
                          multiprocessing.connection.Connection, -1)


class OtherTest(unittest.TestCase):
    # TODO: add more tests for deliver/answer challenge.

    def test_deliver_challenge_auth_failure(self):
        # A peer that answers the authentication challenge with garbage
        # must trigger AuthenticationError on the delivering side.
        class _FakeConnection(object):
            def recv_bytes(self, size):
                return b'something bogus'
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.deliver_challenge,
                          _FakeConnection(), b'abc')

    def test_answer_challenge_auth_failure(self):
        # A server that sends a well-formed CHALLENGE but then a bogus
        # follow-up message must trigger AuthenticationError on the
        # answering side.
        class _FakeConnection(object):
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                # 1st call: valid challenge marker; 2nd call: garbage.
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                elif self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.answer_challenge,
                          _FakeConnection(), b'abc')

#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#

def initializer(ns):
    # Runs once in each freshly started manager/worker process; bumping a
    # value on the shared Namespace proxy lets the parent observe that it
    # actually ran.
    ns.test += 1


class TestInitializers(unittest.TestCase):
    """Exercise the ``initializer`` callback of Manager.start() and Pool()."""

    def setUp(self):
        self.mgr = multiprocessing.Manager()
        self.ns = self.mgr.Namespace()
        self.ns.test = 0

    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()

    def test_manager_initializer(self):
        m = multiprocessing.managers.SyncManager()
        # A non-callable initializer is rejected before the server starts.
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        self.assertEqual(self.ns.test, 1)
        m.shutdown()
        m.join()

    def test_pool_initializer(self):
        # Same contract for Pool: reject non-callables, run once per worker.
        self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
        p = multiprocessing.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        self.assertEqual(self.ns.test, 1)

#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs.
# sys.stdin.close() behavior
#

def _this_sub_process(q):
    # Grandchild body: a non-blocking get on an (expected-empty) queue;
    # the Empty exception is the normal path.
    try:
        item = q.get(block=False)
    except pyqueue.Empty:
        pass

def _test_process():
    # Child body: start a grandchild that touches a Queue, then wait for it.
    queue = multiprocessing.Queue()
    subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
    subProc.daemon = True
    subProc.start()
    subProc.join()

def _afunc(x):
    # Trivial picklable worker function for Pool.map.
    return x*x

def pool_in_process():
    # Child body: create and use a Pool inside an already-spawned process.
    pool = multiprocessing.Pool(processes=4)
    x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
    pool.close()
    pool.join()

class _file_like(object):
    # Minimal file-like object whose write buffer is keyed to the current
    # pid, so a forked child starts with a fresh (empty) cache.
    def __init__(self, delegate):
        self._delegate = delegate
        self._pid = None

    @property
    def cache(self):
        pid = os.getpid()
        # There are no race conditions since fork keeps only the running thread
        if pid != self._pid:
            self._pid = pid
            self._cache = []
        return self._cache

    def write(self, data):
        self.cache.append(data)

    def flush(self):
        self._delegate.write(''.join(self.cache))
        self._cache = []

class TestStdinBadfiledescriptor(unittest.TestCase):

    def test_queue_in_process(self):
        proc = multiprocessing.Process(target=_test_process)
        proc.start()
        proc.join()

    def test_pool_in_process(self):
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()

    def test_flushing(self):
        sio = io.StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): proc is created but never started — presumably only
        # the construction side effects matter here; confirm before changing.
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        assert sio.getvalue() == 'foo'


class TestWait(unittest.TestCase):
    """Tests for multiprocessing.connection.wait()."""

    @classmethod
    def _child_test_wait(cls, w, slow):
        # Child: send ten (index, pid) pairs, optionally with random delays.
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            w.send((i, os.getpid()))
        w.close()

    def test_wait(self, slow=False):
        from multiprocessing.connection import wait
        readers = []
        procs = []
        messages = []

        for i in range(4):
            r, w = multiprocessing.Pipe(duplex=False)
            p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
            p.daemon = True
            p.start()
            # Close the parent's copy of the write end so EOF is observable.
            w.close()
            readers.append(r)
            procs.append(p)
            self.addCleanup(p.join)

        # Drain all pipes; EOFError marks a reader as finished.
        while readers:
            for r in wait(readers):
                try:
                    msg = r.recv()
                except EOFError:
                    readers.remove(r)
                    r.close()
                else:
                    messages.append(msg)

        messages.sort()
        expected = sorted((i, p.pid) for i in range(10) for p in procs)
        self.assertEqual(messages, expected)

    @classmethod
    def _child_test_wait_socket(cls, address, slow):
        # Child: connect to the listening socket and send ten lines.
        s = socket.socket()
        s.connect(address)
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            s.sendall(('%s\n' % i).encode('ascii'))
        s.close()

    def test_wait_socket(self, slow=False):
        # Same as test_wait but with sockets instead of pipes.
        from multiprocessing.connection import wait
        l = socket.create_server((test.support.HOST, 0))
        addr = l.getsockname()
        readers = []
        procs = []
        dic = {}

        for i in range(4):
            p = multiprocessing.Process(target=self._child_test_wait_socket,
                                        args=(addr, slow))
            p.daemon = True
            p.start()
            procs.append(p)
            self.addCleanup(p.join)

        for i in range(4):
            r, _ = l.accept()
            readers.append(r)
            dic[r] = []
        l.close()

        # An empty recv() marks the peer's socket as closed.
        while readers:
            for r in wait(readers):
                msg = r.recv(32)
                if not msg:
                    readers.remove(r)
                    r.close()
                else:
                    dic[r].append(msg)

        expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
        for v in dic.values():
            self.assertEqual(b''.join(v), expected)

    def test_wait_slow(self):
        self.test_wait(True)

    def test_wait_socket_slow(self):
        self.test_wait_socket(True)

    def test_wait_timeout(self):
        from multiprocessing.connection import wait

        expected = 5
        a, b = multiprocessing.Pipe()

        # Nothing readable: wait() should block roughly `expected` seconds.
        start = time.monotonic()
        res = wait([a, b], expected)
        delta = time.monotonic() - start

        self.assertEqual(res, [])
        self.assertLess(delta, expected * 2)
        self.assertGreater(delta, expected * 0.5)

        b.send(None)

        # One end readable: wait() should return almost immediately.
        start = time.monotonic()
        res = wait([a, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(res, [a])
        self.assertLess(delta, 0.4)

    @classmethod
    def signal_and_sleep(cls, sem, period):
        # Child: announce startup through the semaphore, then stay alive
        # for `period` seconds.
        sem.release()
        time.sleep(period)

    def test_wait_integer(self):
        # wait() must also accept raw integer handles (Process.sentinel).
        from multiprocessing.connection import wait

        expected = 3
        sorted_ = lambda l: sorted(l, key=lambda x: id(x))
        sem = multiprocessing.Semaphore(0)
        a, b = multiprocessing.Pipe()
        p = multiprocessing.Process(target=self.signal_and_sleep,
                                    args=(sem, expected))

        p.start()
        self.assertIsInstance(p.sentinel, int)
        self.assertTrue(sem.acquire(timeout=20))

        # Only the sentinel becomes ready, once the child exits.
        start = time.monotonic()
        res = wait([a, p.sentinel, b], expected + 20)
        delta = time.monotonic() - start

        self.assertEqual(res, [p.sentinel])
        self.assertLess(delta, expected + 2)
        self.assertGreater(delta, expected - 2)

        a.send(None)

        start = time.monotonic()
        res = wait([a, p.sentinel, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
        self.assertLess(delta, 0.4)

        b.send(None)

        start = time.monotonic()
        res = wait([a, p.sentinel, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
        self.assertLess(delta, 0.4)

        p.terminate()
        p.join()

    def test_neg_timeout(self):
        # A negative timeout behaves like zero: return immediately.
        from multiprocessing.connection import wait
        a, b = multiprocessing.Pipe()
        t = time.monotonic()
        res = wait([a], timeout=-1)
        t = time.monotonic() - t
        self.assertEqual(res, [])
        self.assertLess(t, 1)
        a.close()
        b.close()

#
# Issue 14151: Test invalid family on invalid environment
#

class TestInvalidFamily(unittest.TestCase):

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_family(self):
        # Windows named-pipe addresses are invalid on POSIX.
        with self.assertRaises(ValueError):
            multiprocessing.connection.Listener(r'\\.\test')

    @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
    def test_invalid_family_win32(self):
        # POSIX filesystem addresses are invalid on Windows.
        with self.assertRaises(ValueError):
            multiprocessing.connection.Listener('/var/test.pipe')

#
# Issue 12098: check sys.flags of child matches that for parent
#

class TestFlags(unittest.TestCase):
    @classmethod
    def run_in_grandchild(cls, conn):
        conn.send(tuple(sys.flags))

    @classmethod
    def run_in_child(cls):
        import json
        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
        p.start()
        grandchild_flags = r.recv()
        p.join()
        r.close()
        w.close()
        # Print both flag tuples for the parent to compare via stdout.
        flags = (tuple(sys.flags), grandchild_flags)
        print(json.dumps(flags))

    def test_flags(self):
        import json
        # start child process using unusual flags
        prog = ('from test._test_multiprocessing import TestFlags; ' +
                'TestFlags.run_in_child()')
        data = subprocess.check_output(
            [sys.executable, '-E', '-S', '-O', '-c', prog])
        child_flags, grandchild_flags = json.loads(data.decode('ascii'))
        self.assertEqual(child_flags, grandchild_flags)

#
# Test interaction with socket timeouts - see Issue #6056
#

class TestTimeouts(unittest.TestCase):
    @classmethod
    def _test_timeout(cls, child, address):
        # Child: answer on the pipe after a delay, then connect back to
        # the listener even though a small default socket timeout is set.
        time.sleep(1)
        child.send(123)
        child.close()
        conn = multiprocessing.connection.Client(address)
        conn.send(456)
        conn.close()

    def test_timeout(self):
        old_timeout = socket.getdefaulttimeout()
        try:
            socket.setdefaulttimeout(0.1)
            parent, child = multiprocessing.Pipe(duplex=True)
            l = multiprocessing.connection.Listener(family='AF_INET')
            p = multiprocessing.Process(target=self._test_timeout,
                                        args=(child, l.address))
            p.start()
            child.close()
            # recv() must not be cut short by the 0.1s default timeout.
            self.assertEqual(parent.recv(), 123)
            parent.close()
            conn = l.accept()
            self.assertEqual(conn.recv(), 456)
            conn.close()
            l.close()
            join_process(p)
        finally:
            socket.setdefaulttimeout(old_timeout)

#
# Test what happens with no "if __name__ == '__main__'"
#

class TestNoForkBomb(unittest.TestCase):
    def test_noforkbomb(self):
        sm = multiprocessing.get_start_method()
        name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
        if sm != 'fork':
            # spawn/forkserver re-import the main module, so the script
            # must fail with RuntimeError instead of fork-bombing.
            rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
            self.assertEqual(out, b'')
            self.assertIn(b'RuntimeError', err)
        else:
            # fork does not re-import, so the script runs normally.
            rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
            self.assertEqual(out.rstrip(), b'123')
            self.assertEqual(err, b'')

#
# Issue #17555: ForkAwareThreadLock
#

class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes.  Issue #17555 meant that the
    # after fork registry would get duplicate entries for the same
    # lock.  The size of the registry at generation n was ~2**n.
    @classmethod
    def child(cls, n, conn):
        # Recurse n generations of processes, then report the registry size.
        if n > 1:
            p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
            p.start()
            conn.close()
            join_process(p)
        else:
            conn.send(len(util._afterfork_registry))
            conn.close()

    def test_lock(self):
        r, w = multiprocessing.Pipe(False)
        l = util.ForkAwareThreadLock()
        old_size = len(util._afterfork_registry)
        p = multiprocessing.Process(target=self.child, args=(5, w))
        p.start()
        w.close()
        new_size = r.recv()
        join_process(p)
        # Registry must not grow across forks (no duplicate entries).
        self.assertLessEqual(new_size, old_size)

#
# Check that non-forked child processes do not inherit unneeded fds/handles
#

class TestCloseFds(unittest.TestCase):

    def get_high_socket_fd(self):
        if WIN32:
            # The child process will not have any socket handles, so
            # calling socket.fromfd() should produce WSAENOTSOCK even
            # if there is a handle of the same number.
            return socket.socket().detach()
        else:
            # We want to produce a socket with an fd high enough that a
            # freshly created child process will not have any fds as high.
            fd = socket.socket().detach()
            to_close = []
            while fd < 50:
                to_close.append(fd)
                fd = os.dup(fd)
            for x in to_close:
                os.close(x)
            return fd

    def close(self, fd):
        if WIN32:
            socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
        else:
            os.close(fd)

    @classmethod
    def _test_closefds(cls, conn, fd):
        # Runs in the child: report whether `fd` is still a usable socket.
        try:
            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        except Exception as e:
            conn.send(e)
        else:
            s.close()
            conn.send(None)

    def test_closefd(self):
        if not HAS_REDUCTION:
            raise unittest.SkipTest('requires fd pickling')
        reader, writer = multiprocessing.Pipe()
        fd = self.get_high_socket_fd()
        try:
            p = multiprocessing.Process(target=self._test_closefds,
                                        args=(writer, fd))
            p.start()
            writer.close()
            e = reader.recv()
            join_process(p)
        finally:
            self.close(fd)
            writer.close()
            reader.close()

        if multiprocessing.get_start_method() == 'fork':
            # fork children inherit all fds, so the socket must still work.
            self.assertIs(e, None)
        else:
            WSAENOTSOCK = 10038
            self.assertIsInstance(e, OSError)
            self.assertTrue(e.errno == errno.EBADF or
                            e.winerror == WSAENOTSOCK, e)

#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#

class TestIgnoreEINTR(unittest.TestCase):

    # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
    CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)

    @classmethod
    def _test_ignore(cls, conn):
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        conn.send('ready')
        x = conn.recv()
        conn.send(x)
        # This send blocks (payload > pipe capacity) and will be hit by
        # SIGUSR1 from the parent; it must survive the EINTR.
        conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            self.assertEqual(conn.recv(), 'ready')
            time.sleep(0.1)
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            conn.send(1234)
            self.assertEqual(conn.recv(), 1234)
            time.sleep(0.1)
            os.kill(p.pid, signal.SIGUSR1)
            self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
            time.sleep(0.1)
            p.join()
        finally:
            conn.close()

    @classmethod
    def _test_ignore_listener(cls, conn):
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        with multiprocessing.connection.Listener() as l:
            conn.send(l.address)
            # accept() blocks here and is interrupted by SIGUSR1.
            a = l.accept()
            a.send('welcome')

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore_listener(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore_listener,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            address = conn.recv()
            time.sleep(0.1)
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            client = multiprocessing.connection.Client(address)
            self.assertEqual(client.recv(), 'welcome')
            p.join()
        finally:
            conn.close()

class TestStartMethod(unittest.TestCase):
    @classmethod
    def _check_context(cls, conn):
        conn.send(multiprocessing.get_start_method())

    def check_context(self, ctx):
        # A child created via `ctx` must see ctx's start method.
        r, w = ctx.Pipe(duplex=False)
        p = ctx.Process(target=self._check_context, args=(w,))
        p.start()
        w.close()
        child_method = r.recv()
        r.close()
        p.join()
        self.assertEqual(child_method, ctx.get_start_method())

    def test_context(self):
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                ctx = multiprocessing.get_context(method)
            except ValueError:
                continue
            self.assertEqual(ctx.get_start_method(), method)
            self.assertIs(ctx.get_context(), ctx)
            # A concrete context refuses set_start_method().
            self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
            self.assertRaises(ValueError, ctx.set_start_method, None)
            self.check_context(ctx)

    def test_set_get(self):
        multiprocessing.set_forkserver_preload(PRELOAD)
        count = 0
        old_method = multiprocessing.get_start_method()
        try:
            for method in ('fork', 'spawn', 'forkserver'):
                try:
                    multiprocessing.set_start_method(method, force=True)
                except ValueError:
                    continue
                self.assertEqual(multiprocessing.get_start_method(), method)
                ctx = multiprocessing.get_context()
                self.assertEqual(ctx.get_start_method(), method)
                self.assertTrue(type(ctx).__name__.lower().startswith(method))
                self.assertTrue(
                    ctx.Process.__name__.lower().startswith(method))
                self.check_context(multiprocessing)
                count += 1
        finally:
            multiprocessing.set_start_method(old_method, force=True)
        # At least one start method must be supported on every platform.
        self.assertGreaterEqual(count, 1)

    def test_get_all(self):
        methods = multiprocessing.get_all_start_methods()
        if sys.platform == 'win32':
            self.assertEqual(methods, ['spawn'])
        else:
            self.assertTrue(methods == ['fork', 'spawn'] or
                            methods == ['spawn', 'fork'] or
                            methods == ['fork', 'spawn', 'forkserver'] or
                            methods == ['spawn', 'fork', 'forkserver'])

    def test_preload_resources(self):
        if multiprocessing.get_start_method() != 'forkserver':
            self.skipTest("test only relevant for 'forkserver' method")
        name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
        rc, out, err = test.support.script_helper.assert_python_ok(name)
        out = out.decode()
        err = err.decode()
        if out.rstrip() != 'ok' or err != '':
            print(out)
            print(err)
            self.fail("failed spawning forkserver or grandchild")


@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):

    def test_resource_tracker(self):
        #
        # Check that killing process does not leak named semaphores
        #
        cmd = '''if 1:
            import time, os, tempfile
            import multiprocessing as mp
            from multiprocessing import resource_tracker
            from multiprocessing.shared_memory import SharedMemory

            mp.set_start_method("spawn")
            rand = tempfile._RandomNameSequence()

            def create_and_register_resource(rtype):
                if rtype == "semaphore":
                    lock = mp.Lock()
                    return lock, lock._semlock.name
                elif rtype == "shared_memory":
                    sm = SharedMemory(create=True, size=10)
                    return sm, sm._name
                else:
                    raise ValueError(
                        "Resource type {{}} not understood".format(rtype))

            resource1, rname1 = create_and_register_resource("{rtype}")
            resource2, rname2 = create_and_register_resource("{rtype}")

            os.write({w}, rname1.encode("ascii") + b"\\n")
            os.write({w}, rname2.encode("ascii") + b"\\n")

            time.sleep(10)
        '''
        for rtype in resource_tracker._CLEANUP_FUNCS:
            with self.subTest(rtype=rtype):
                if rtype == "noop":
                    # Artefact resource type used by the resource_tracker
                    continue
                r, w = os.pipe()
                p = subprocess.Popen([sys.executable,
                                     '-E', '-c', cmd.format(w=w, rtype=rtype)],
                                     pass_fds=[w],
                                     stderr=subprocess.PIPE)
                os.close(w)
                with open(r, 'rb', closefd=True) as f:
                    name1 = f.readline().rstrip().decode('ascii')
                    name2 = f.readline().rstrip().decode('ascii')
                # name1 is unlinked here; name2 must be cleaned up by the
                # resource tracker after the abrupt termination below.
                _resource_unlink(name1, rtype)
                p.terminate()
                p.wait()

                deadline = time.monotonic() + 60
                while time.monotonic() < deadline:
                    time.sleep(.5)
                    try:
                        _resource_unlink(name2, rtype)
                    except OSError as e:
                        # docs say it should be ENOENT, but OSX seems to give
                        # EINVAL
                        self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
                        break
                else:
                    raise AssertionError(
                        f"A {rtype} resource was leaked after a process was "
                        f"abruptly terminated.")
                err = p.stderr.read().decode('utf-8')
                p.stderr.close()
                expected = ('resource_tracker: There appear to be 2 leaked {} '
                            'objects'.format(rtype))
                self.assertRegex(err, expected)
                self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)

    def check_resource_tracker_death(self, signum, should_die):
        # bpo-31310: if the semaphore tracker process has died, it should
        # be restarted implicitly.
        from multiprocessing.resource_tracker import _resource_tracker
        pid = _resource_tracker._pid
        if pid is not None:
            os.kill(pid, signal.SIGKILL)
            os.waitpid(pid, 0)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            _resource_tracker.ensure_running()
        pid = _resource_tracker._pid

        os.kill(pid, signum)
        time.sleep(1.0)  # give it time to die

        ctx = multiprocessing.get_context("spawn")
        with warnings.catch_warnings(record=True) as all_warn:
            warnings.simplefilter("always")
            sem = ctx.Semaphore()
            sem.acquire()
            sem.release()
            wr = weakref.ref(sem)
            # ensure `sem` gets collected, which triggers communication with
            # the semaphore tracker
            del sem
            gc.collect()
            self.assertIsNone(wr())
            if should_die:
                self.assertEqual(len(all_warn), 1)
                the_warn = all_warn[0]
                self.assertTrue(issubclass(the_warn.category, UserWarning))
                self.assertTrue("resource_tracker: process died"
                                in str(the_warn.message))
            else:
                self.assertEqual(len(all_warn), 0)

    def test_resource_tracker_sigint(self):
        # Catchable signal (ignored by semaphore tracker)
        self.check_resource_tracker_death(signal.SIGINT, False)

    def test_resource_tracker_sigterm(self):
        # Catchable signal (ignored by semaphore tracker)
        self.check_resource_tracker_death(signal.SIGTERM, False)

    def test_resource_tracker_sigkill(self):
        # Uncatchable signal.
        self.check_resource_tracker_death(signal.SIGKILL, True)

    @staticmethod
    def _is_resource_tracker_reused(conn, pid):
        from multiprocessing.resource_tracker import _resource_tracker
        _resource_tracker.ensure_running()
        # The pid should be None in the child process, except for the fork
        # context. It should not be a new value.
        reused = _resource_tracker._pid in (None, pid)
        # Also require the tracker process to still be alive.
        reused &= _resource_tracker._check_alive()
        conn.send(reused)

    def test_resource_tracker_reused(self):
        from multiprocessing.resource_tracker import _resource_tracker
        _resource_tracker.ensure_running()
        pid = _resource_tracker._pid

        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=self._is_resource_tracker_reused,
                                    args=(w, pid))
        p.start()
        is_resource_tracker_reused = r.recv()

        # Clean up
        p.join()
        w.close()
        r.close()

        self.assertTrue(is_resource_tracker_reused)


class TestSimpleQueue(unittest.TestCase):

    @classmethod
    def _test_empty(cls, queue, child_can_start, parent_can_continue):
        child_can_start.wait()
        # issue 30301, could fail under spawn and forkserver
        try:
            # Queue contains one item put by the parent, so empty() is
            # False, then True after the first get() on the parent side.
            queue.put(queue.empty())
            queue.put(queue.empty())
        finally:
            parent_can_continue.set()

    def test_empty(self):
        queue = multiprocessing.SimpleQueue()
        child_can_start = multiprocessing.Event()
        parent_can_continue = multiprocessing.Event()

        proc = multiprocessing.Process(
            target=self._test_empty,
            args=(queue, child_can_start, parent_can_continue)
        )
        proc.daemon = True
        proc.start()

        self.assertTrue(queue.empty())

        child_can_start.set()
        parent_can_continue.wait()

        self.assertFalse(queue.empty())
        self.assertEqual(queue.get(), True)
        self.assertEqual(queue.get(), False)
        self.assertTrue(queue.empty())

        proc.join()


class TestPoolNotLeakOnFailure(unittest.TestCase):

    def test_release_unused_processes(self):
        # Issue #19675: During pool creation, if we can't create a process,
        # don't leak already created ones.
        will_fail_in = 3
        forked_processes = []

        class FailingForkProcess:
            # Stand-in for a Process class whose 4th start() raises OSError.
            def __init__(self, **kwargs):
                self.name = 'Fake Process'
                self.exitcode = None
                self.state = None
                forked_processes.append(self)

            def start(self):
                nonlocal will_fail_in
                if will_fail_in <= 0:
                    raise OSError("Manually induced OSError")
                will_fail_in -= 1
                self.state = 'started'

            def terminate(self):
                self.state = 'stopping'

            def join(self):
                if self.state == 'stopping':
                    self.state = 'stopped'

            def is_alive(self):
                return self.state == 'started' or self.state == 'stopping'

        with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
            p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
                Process=FailingForkProcess))
            p.close()
            p.join()
        # Every successfully started fake process must have been torn down.
        self.assertFalse(
            any(process.is_alive() for process in forked_processes))


class TestSyncManagerTypes(unittest.TestCase):
    """Test all the types which can be shared between a parent and a
    child process by using a manager which acts as an intermediary
    between them.

    In the following unit-tests the base type is created in the parent
    process, the @classmethod represents the worker process and the
    shared object is readable and editable between the two.

    # The child.
    @classmethod
    def _test_list(cls, obj):
        assert obj[0] == 5
        assert obj.append(6)

    # The parent.
    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        assert o[1] == 6
    """
    manager_class = multiprocessing.managers.SyncManager

    def setUp(self):
        self.manager = self.manager_class()
        self.manager.start()
        self.proc = None

    def tearDown(self):
        if self.proc is not None and self.proc.is_alive():
            self.proc.terminate()
            self.proc.join()
        self.manager.shutdown()
        self.manager = None
        self.proc = None

    @classmethod
    def setUpClass(cls):
        support.reap_children()

    # Reaping children is also the right class-level teardown.
    tearDownClass = setUpClass

    def wait_proc_exit(self):
        # Only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395).
join_process(self.proc) start_time = time.monotonic() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = time.monotonic() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocessing.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break def run_worker(self, worker, obj): self.proc = multiprocessing.Process(target=worker, args=(obj, )) self.proc.daemon = True self.proc.start() self.wait_proc_exit() self.assertEqual(self.proc.exitcode, 0) @classmethod def _test_event(cls, obj): assert obj.is_set() obj.wait() obj.clear() obj.wait(0.001) def test_event(self): o = self.manager.Event() o.set() self.run_worker(self._test_event, o) assert not o.is_set() o.wait(0.001) @classmethod def _test_lock(cls, obj): obj.acquire() def test_lock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_lock, o) o.release() self.assertRaises(RuntimeError, o.release) # already released @classmethod def _test_rlock(cls, obj): obj.acquire() obj.release() def test_rlock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_rlock, o) @classmethod def _test_semaphore(cls, obj): obj.acquire() def test_semaphore(self, sname="Semaphore"): o = getattr(self.manager, sname)() self.run_worker(self._test_semaphore, o) o.release() def test_bounded_semaphore(self): self.test_semaphore(sname="BoundedSemaphore") @classmethod def _test_condition(cls, obj): obj.acquire() obj.release() def test_condition(self): o = self.manager.Condition() self.run_worker(self._test_condition, o) @classmethod def _test_barrier(cls, obj): assert obj.parties == 5 obj.reset() def test_barrier(self): o = self.manager.Barrier(5) self.run_worker(self._test_barrier, o) @classmethod def _test_pool(cls, obj): # TODO: fix https://bugs.python.org/issue35919 with obj: pass def test_pool(self): o = self.manager.Pool(processes=4) 
        self.run_worker(self._test_pool, o)

    @classmethod
    def _test_queue(cls, obj):
        # Child sees the two items the parent put before it started.
        assert obj.qsize() == 2
        assert obj.full()
        assert not obj.empty()
        assert obj.get() == 5
        assert not obj.empty()
        assert obj.get() == 6
        assert obj.empty()

    def test_queue(self, qname="Queue"):
        o = getattr(self.manager, qname)(2)
        o.put(5)
        o.put(6)
        self.run_worker(self._test_queue, o)
        assert o.empty()
        assert not o.full()

    def test_joinable_queue(self):
        self.test_queue("JoinableQueue")

    @classmethod
    def _test_list(cls, obj):
        assert obj[0] == 5
        assert obj.count(5) == 1
        assert obj.index(5) == 0
        obj.sort()
        obj.reverse()
        for x in obj:
            pass
        assert len(obj) == 1
        assert obj.pop(0) == 5

    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        # The child popped the only element, so the proxy list is empty.
        assert not o
        self.assertEqual(len(o), 0)

    @classmethod
    def _test_dict(cls, obj):
        assert len(obj) == 1
        assert obj['foo'] == 5
        assert obj.get('foo') == 5
        assert list(obj.items()) == [('foo', 5)]
        assert list(obj.keys()) == ['foo']
        assert list(obj.values()) == [5]
        assert obj.copy() == {'foo': 5}
        assert obj.popitem() == ('foo', 5)

    def test_dict(self):
        o = self.manager.dict()
        o['foo'] = 5
        self.run_worker(self._test_dict, o)
        assert not o
        self.assertEqual(len(o), 0)

    @classmethod
    def _test_value(cls, obj):
        assert obj.value == 1
        assert obj.get() == 1
        obj.set(2)

    def test_value(self):
        o = self.manager.Value('i', 1)
        self.run_worker(self._test_value, o)
        # Mutation in the child is visible through the proxy.
        self.assertEqual(o.value, 2)
        self.assertEqual(o.get(), 2)

    @classmethod
    def _test_array(cls, obj):
        assert obj[0] == 0
        assert obj[1] == 1
        assert len(obj) == 2
        assert list(obj) == [0, 1]

    def test_array(self):
        o = self.manager.Array('i', [0, 1])
        self.run_worker(self._test_array, o)

    @classmethod
    def _test_namespace(cls, obj):
        assert obj.x == 0
        assert obj.y == 1

    def test_namespace(self):
        o = self.manager.Namespace()
        o.x = 0
        o.y = 1
        self.run_worker(self._test_namespace, o)


class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # Just make sure names in blacklist are excluded
        support.check__all__(self, multiprocessing,
                             extra=multiprocessing.__all__,
                             blacklist=['SUBDEBUG', 'SUBWARNING'])

#
# Mixins
#

class BaseMixin(object):
    # Snapshots dangling processes/threads at class setup and warns about
    # any new ones left behind at teardown.
    @classmethod
    def setUpClass(cls):
        cls.dangling = (multiprocessing.process._dangling.copy(),
                        threading._dangling.copy())

    @classmethod
    def tearDownClass(cls):
        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
        if processes:
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None

        threads = set(threading._dangling) - set(cls.dangling[1])
        if threads:
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None


class ProcessesMixin(BaseMixin):
    # Exposes the real multiprocessing API under the names BaseTestCase
    # subclasses expect.
    TYPE = 'processes'
    Process = multiprocessing.Process
    connection = multiprocessing.connection
    current_process = staticmethod(multiprocessing.current_process)
    parent_process = staticmethod(multiprocessing.parent_process)
    active_children = staticmethod(multiprocessing.active_children)
    Pool = staticmethod(multiprocessing.Pool)
    Pipe = staticmethod(multiprocessing.Pipe)
    Queue = staticmethod(multiprocessing.Queue)
    JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
    Lock = staticmethod(multiprocessing.Lock)
    RLock = staticmethod(multiprocessing.RLock)
    Semaphore = staticmethod(multiprocessing.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.Condition)
    Event = staticmethod(multiprocessing.Event)
    Barrier = staticmethod(multiprocessing.Barrier)
    Value = staticmethod(multiprocessing.Value)
    Array = staticmethod(multiprocessing.Array)
    RawValue = staticmethod(multiprocessing.RawValue)
    RawArray = staticmethod(multiprocessing.RawArray)


class ManagerMixin(BaseMixin):
    # Same API surface, but every type is a proxy served by cls.manager;
    # properties defer the attribute lookup until the manager exists.
    TYPE = 'manager'
    Process = multiprocessing.Process
    Queue = property(operator.attrgetter('manager.Queue'))
    JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
    Lock = property(operator.attrgetter('manager.Lock'))
    RLock = property(operator.attrgetter('manager.RLock'))
    Semaphore = property(operator.attrgetter('manager.Semaphore'))
    BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
    Condition = property(operator.attrgetter('manager.Condition'))
    Event = property(operator.attrgetter('manager.Event'))
    Barrier = property(operator.attrgetter('manager.Barrier'))
    Value = property(operator.attrgetter('manager.Value'))
    Array = property(operator.attrgetter('manager.Array'))
    list = property(operator.attrgetter('manager.list'))
    dict = property(operator.attrgetter('manager.dict'))
    Namespace = property(operator.attrgetter('manager.Namespace'))

    @classmethod
    def Pool(cls, *args, **kwds):
        return cls.manager.Pool(*args, **kwds)

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.manager = multiprocessing.Manager()

    @classmethod
    def tearDownClass(cls):
        # only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395)
        start_time = time.monotonic()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = time.monotonic() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                support.print_warning(f"multiprocessing.Manager still has "
                                      f"{multiprocessing.active_children()} "
                                      f"active children after {dt} seconds")
                break

        gc.collect()                       # do garbage collection
        if cls.manager._number_of_objects() != 0:
            # This is not really an error since some tests do not
            # ensure that all processes which hold a reference to a
            # managed object have been joined.
            test.support.environment_altered = True
            support.print_warning('Shared objects which still exist '
                                  'at manager shutdown:')
            support.print_warning(cls.manager._debug_info())
        cls.manager.shutdown()
        cls.manager.join()
        cls.manager = None

        super().tearDownClass()


class ThreadsMixin(BaseMixin):
    # multiprocessing.dummy: the same API backed by threads.
    TYPE = 'threads'
    Process = multiprocessing.dummy.Process
    connection = multiprocessing.dummy.connection
    current_process = staticmethod(multiprocessing.dummy.current_process)
    active_children = staticmethod(multiprocessing.dummy.active_children)
    Pool = staticmethod(multiprocessing.dummy.Pool)
    Pipe = staticmethod(multiprocessing.dummy.Pipe)
    Queue = staticmethod(multiprocessing.dummy.Queue)
    JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
    Lock = staticmethod(multiprocessing.dummy.Lock)
    RLock = staticmethod(multiprocessing.dummy.RLock)
    Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.dummy.Condition)
    Event = staticmethod(multiprocessing.dummy.Event)
    Barrier = staticmethod(multiprocessing.dummy.Barrier)
    Value = staticmethod(multiprocessing.dummy.Value)
    Array = staticmethod(multiprocessing.dummy.Array)

#
# Functions used to create test cases from the base ones in this module
#

def install_tests_in_module_dict(remote_globs, start_method):
    # Clone every BaseTestCase subclass in this module into the caller's
    # module namespace, once per allowed TYPE, mixed with the matching
    # *Mixin; plain TestCase classes are copied as-is.
    __module__ = remote_globs['__name__']
    local_globs = globals()
    ALL_TYPES = {'processes', 'threads', 'manager'}

    for name, base in local_globs.items():
        if not isinstance(base, type):
            continue
        if issubclass(base, BaseTestCase):
            if base is BaseTestCase:
                continue
            assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
            for type_ in base.ALLOWED_TYPES:
                # e.g. _TestFoo + 'processes' -> WithProcessesTestFoo
                newname = 'With' + type_.capitalize() + name[1:]
                Mixin = local_globs[type_.capitalize() + 'Mixin']
                class Temp(base, Mixin, unittest.TestCase):
                    pass
                Temp.__name__ = Temp.__qualname__ = newname
                Temp.__module__ = __module__
                remote_globs[newname] = Temp
        elif issubclass(base, unittest.TestCase):
            class Temp(base, object):
                pass
            Temp.__name__ = Temp.__qualname__ = name
            Temp.__module__ = __module__
            remote_globs[name] = Temp

    dangling = [None, None]
    old_start_method = [None]

    def setUpModule():
        multiprocessing.set_forkserver_preload(PRELOAD)
        multiprocessing.process._cleanup()
        dangling[0] = multiprocessing.process._dangling.copy()
        dangling[1] = threading._dangling.copy()
        old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
        try:
            multiprocessing.set_start_method(start_method, force=True)
        except ValueError:
            raise unittest.SkipTest(start_method +
                                    ' start method not supported')

        if sys.platform.startswith("linux"):
            try:
                lock = multiprocessing.RLock()
            except OSError:
                raise unittest.SkipTest("OSError raises on RLock creation, "
                                        "see issue 3111!")
        check_enough_semaphores()
        util.get_temp_dir()     # creates temp directory
        multiprocessing.get_logger().setLevel(LOG_LEVEL)

    def tearDownModule():
        need_sleep = False

        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        multiprocessing.set_start_method(old_start_method[0], force=True)
        # pause a bit so we don't get warning about dangling threads/processes
        processes = set(multiprocessing.process._dangling) - set(dangling[0])
        if processes:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None

        threads = set(threading._dangling) - set(dangling[1])
        if threads:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None

        # Sleep 500 ms to give time to child processes to complete.
        if need_sleep:
            time.sleep(0.5)

        multiprocessing.util._cleanup_tests()

    remote_globs['setUpModule'] = setUpModule
    remote_globs['tearDownModule'] = tearDownModule