ping_tests.py
import asyncio
import time

import win32com.client

# Windows WMI service handle (requires pywin32).
wmi = win32com.client.GetObject(r"winmgmts:\\.\root\cimv2")

"""
Win32_PingStatus StatusCode return values:
0='Success'  11001='Buffer Too Small'  11002='Destination Net Unreachable'
11003='Destination Host Unreachable'  11004='Destination Protocol Unreachable'
11005='Destination Port Unreachable'  11006='No Resources'  11007='Bad Option'
11008='Hardware Error'  11009='Packet Too Big'  11010='Request Timed Out'
11011='Bad Request'  11012='Bad Route'  11013='TimeToLive Expired Transit'
11014='TimeToLive Expired Reassembly'  11015='Parameter Problem'
11016='Source Quench'  11017='Option Too Big'  11018='Bad Destination'
11032='Negotiating IPSEC'  11050='General Failure'
"""


async def _ping_coroutine(ip, timeout):
    response = None
    wmi_resp = wmi.ExecQuery(
        f"Select ResponseTime, StatusCode from Win32_PingStatus "
        f"Where Address = '{ip}' and timeout = {timeout}")
    # sleep(0) yields control back to the event loop so other ping tasks can be scheduled.
    await asyncio.sleep(0)
    for item in wmi_resp:
        if item.StatusCode == 0:
            response = f"{item.ResponseTime}ms"
        elif item.StatusCode == 11002:
            response = "Network unreachable"
        elif item.StatusCode == 11003:
            response = "Host unreachable"
        elif item.StatusCode == 11005:
            response = "Port unreachable"
        elif item.StatusCode == 11010:
            response = "Timed Out"
        elif item.StatusCode is None:
            response = "Not Found"
        else:
            response = "Failed"
    return ip, response


async def _batch_it(host_list, batch_size=100, ping_timeout=4000):
    """
    Asynchronous ping using the WMI service as the ping instigator (Windows only).

    :param host_list: list of IP addresses, host names or machine IDs
    :param batch_size: default 100; how many hosts to ping at one time;
        consider CPU and network load before going crazy
    :param ping_timeout: default 4000 ms; there seems to be a practical lower limit ~300 ms
    :return: list of (host, status/response) tuples
    """
    ping_results = []
    pages = -(-len(host_list) // batch_size)  # ceiling division, no trailing empty page
    for i in range(pages):
        tasks = []
        # Slicing past the end of the list is safe; the last page is simply shorter.
        for host in host_list[i * batch_size:(i + 1) * batch_size]:
            tasks.append(asyncio.ensure_future(_ping_coroutine(host, timeout=ping_timeout)))
        await asyncio.gather(*tasks)
        for resp in tasks:
            ping_results.append(resp.result())
    return ping_results


def main_start(site_list, batch=100, timeout=4000):
    # This function may be called repeatedly, so create a fresh event loop each time
    # and close it when the batch has finished.
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()
    elapse = time.time()
    ping_result = loop.run_until_complete(_batch_it(site_list, batch, timeout))
    loop.close()
    print(time.time() - elapse)
    return ping_result


async def _batch_it_generators(host_gen, host_gen_count, batch_size=100, ping_timeout=4000):
    """
    Asynchronous ping using the WMI service as the ping instigator (Windows only).

    :param host_gen: generator yielding IP addresses, host names or machine IDs
    :param host_gen_count: number of addresses to take from the generator
    :param batch_size: default 100; how many hosts to ping at one time;
        consider CPU and network load before going crazy
    :param ping_timeout: default 4000 ms; there seems to be a practical lower limit ~300 ms
    :return: list of (host, status/response) tuples
    """
    ping_results = []
    pages = -(-host_gen_count // batch_size)                 # ceiling division
    last_page = host_gen_count % batch_size or batch_size    # hosts on the final page
    print(host_gen_count, batch_size, pages, last_page)
    for i in range(pages):
        count = last_page if i == pages - 1 else batch_size
        tasks = [asyncio.ensure_future(_ping_coroutine(next(host_gen), timeout=ping_timeout))
                 for _ in range(count)]
        await asyncio.gather(*tasks)
        for resp in tasks:
            ping_results.append(resp.result())
    return ping_results


def main_start_generators(site_gen, site_gen_count, batch=100, timeout=1000):
    # Same pattern as main_start(): a fresh event loop per call, closed afterwards.
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()
    elapse = time.time()
    ping_result = loop.run_until_complete(
        _batch_it_generators(site_gen, site_gen_count, batch, timeout))
    loop.close()
    time_taken = time.time() - elapse
    return ping_result, time_taken


if __name__ == '__main__':
    import random
    from collections import Counter

    from faker import Faker
    from faker.providers import internet

    # Batch size affects processing time (smaller = longer: 100 is low load but slow,
    # 2000 is high load but fast). Timeout also affects processing time (smaller = faster;
    # 300 ms is the lowest practical value, 1000 ms is safe, 4000 ms is the default).
    gen_size = 100

    # Generate public IP addresses with faker: https://faker.readthedocs.io/en/stable/
    fake = Faker()
    fake.add_provider(internet)
    # A small generator yielding a random batch of addresses (rather than a hard-coded list).
    sites_gen = (fake.ipv4_public() for _ in range(gen_size))

    # Or generate from a short list of addresses/URLs/names:
    # gen_size = random.randint(10, 100)  # 2000 takes about 3.5 seconds
    # address_list = ["slither.io", "uwa.edu.au", "www.uwa.edu.au", "google.com",
    #                 "its00364", "35.160.169.47"]
    # sites_gen = (random.choice(address_list) for _ in range(gen_size))

    results, duration = main_start_generators(sites_gen, gen_size, batch=100, timeout=1000)

    # for host, status in results:
    #     print(f"Host: {host} -> {status}")
    counts = Counter(x[1] for x in results)
    print(counts.most_common(10))  # drop the argument to count every distinct status
    print(f'{len(results)} of {gen_size} items returned in {duration} seconds')
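
A minimal, hedged sketch of a cross-platform alternative (not part of the original module): the same fan-out idea, but driving the system ping binary through asyncio subprocesses and capping concurrency with a semaphore instead of manual paging. The file name, flag choices and the coarse ok/failed status below are illustrative assumptions, not the author's implementation; Windows ping is assumed to take `-n 1 -w <ms>` while most Unix builds take `-c 1 -W <s>`.

# ping_subprocess_sketch.py -- hypothetical, illustrative only
import asyncio
import sys


async def ping_once(host, timeout_ms=1000):
    # Build the ping command for the local platform (flag names are assumptions
    # about the installed ping build).
    if sys.platform.startswith("win"):
        args = ["ping", "-n", "1", "-w", str(timeout_ms), host]
    else:
        args = ["ping", "-c", "1", "-W", str(max(1, timeout_ms // 1000)), host]
    proc = await asyncio.create_subprocess_exec(
        *args,
        stdout=asyncio.subprocess.DEVNULL,
        stderr=asyncio.subprocess.DEVNULL,
    )
    rc = await proc.wait()
    return host, "ok" if rc == 0 else "failed"


async def ping_many(hosts, concurrency=100, timeout_ms=1000):
    # A semaphore caps how many pings run at once, replacing the manual "pages"
    # bookkeeping in _batch_it() / _batch_it_generators().
    sem = asyncio.Semaphore(concurrency)

    async def limited(host):
        async with sem:
            return await ping_once(host, timeout_ms)

    return await asyncio.gather(*(limited(h) for h in hosts))


if __name__ == "__main__":
    print(asyncio.run(ping_many(["127.0.0.1", "192.0.2.1"])))

Checking only the exit code keeps the sketch portable but loses the per-StatusCode detail that Win32_PingStatus reports; parsing the ping output, or keeping the WMI path on Windows, would restore it.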
yb_backup.py
#!/usr/bin/env python
#
# Copyright 2022 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt

from __future__ import print_function

import argparse
import atexit
import copy
import logging
import pipes
import random
import string
import subprocess
import traceback
import time
import json
import signal
import sys

from argparse import RawDescriptionHelpFormatter
from boto.utils import get_instance_metadata
from datetime import timedelta
from multiprocessing.pool import ThreadPool
from contextlib import contextmanager

import os
import re
import threading

TABLET_UUID_LEN = 32
UUID_RE_STR = '[0-9a-f-]{32,36}'
COLOCATED_UUID_SUFFIX = '.colocated.parent.uuid'
COLOCATED_NAME_SUFFIX = '.colocated.parent.tablename'
COLOCATED_UUID_RE_STR = UUID_RE_STR + COLOCATED_UUID_SUFFIX
UUID_ONLY_RE = re.compile('^' + UUID_RE_STR + '$')
NEW_OLD_UUID_RE = re.compile(UUID_RE_STR + '[ ]*\t' + UUID_RE_STR)
COLOCATED_NEW_OLD_UUID_RE = re.compile(COLOCATED_UUID_RE_STR + '[ ]*\t' + COLOCATED_UUID_RE_STR)
LEADING_UUID_RE = re.compile('^(' + UUID_RE_STR + r')\b')
LIST_TABLET_SERVERS_RE = re.compile('.*list_tablet_servers.*(' + UUID_RE_STR + ').*')
IMPORTED_TABLE_RE = re.compile(r'(?:Colocated t|T)able being imported: ([^\.]*)\.(.*)')
RESTORATION_RE = re.compile('^Restoration id: (' + UUID_RE_STR + r')\b')
STARTED_SNAPSHOT_CREATION_RE = re.compile(r'[\S\s]*Started snapshot creation: (?P<uuid>.*)')
YSQL_CATALOG_VERSION_RE = re.compile(r'[\S\s]*Version: (?P<version>.*)')

ROCKSDB_PATH_PREFIX = '/yb-data/tserver/data/rocksdb'

SNAPSHOT_DIR_GLOB = '*' + ROCKSDB_PATH_PREFIX + '/table-*/tablet-*.snapshots/*'
SNAPSHOT_DIR_SUFFIX_RE = re.compile(
    '^.*/tablet-({})[.]snapshots/({})$'.format(UUID_RE_STR, UUID_RE_STR))

TABLE_PATH_PREFIX_TEMPLATE = ROCKSDB_PATH_PREFIX + '/table-{}'

TABLET_MASK = 'tablet-????????????????????????????????'
TABLET_DIR_GLOB = '*' + TABLE_PATH_PREFIX_TEMPLATE + '/' + TABLET_MASK

MANIFEST_FILE_NAME = 'Manifest'
METADATA_FILE_NAME = 'SnapshotInfoPB'
SQL_TBSP_DUMP_FILE_NAME = 'YSQLDump_tablespaces'
SQL_DUMP_FILE_NAME = 'YSQLDump'
SQL_DATA_DUMP_FILE_NAME = 'YSQLDump_data'
CREATE_METAFILES_MAX_RETRIES = 10
CLOUD_CFG_FILE_NAME = 'cloud_cfg'
CLOUD_CMD_MAX_RETRIES = 10
LOCAL_FILE_MAX_RETRIES = 3
RESTORE_DOWNLOAD_LOOP_MAX_RETRIES = 20
REPLICAS_SEARCHING_LOOP_MAX_RETRIES = 30
SLEEP_IN_REPLICAS_SEARCHING_ROUND_SEC = 20  # 30*20 sec = 10 minutes
LEADERS_SEARCHING_LOOP_MAX_RETRIES = 5
SLEEP_IN_LEADERS_SEARCHING_ROUND_SEC = 20  # 5*(100 + 20) sec = 10 minutes

CREATE_SNAPSHOT_TIMEOUT_SEC = 60 * 60  # hour
RESTORE_SNAPSHOT_TIMEOUT_SEC = 24 * 60 * 60  # day

SHA_TOOL_PATH = '/usr/bin/sha256sum'
SHA_FILE_EXT = 'sha256'

# Try to read home dir from environment variable, else assume it's /home/yugabyte.
YB_HOME_DIR = os.environ.get("YB_HOME_DIR", "/home/yugabyte")
DEFAULT_REMOTE_YB_ADMIN_PATH = os.path.join(YB_HOME_DIR, 'master/bin/yb-admin')
DEFAULT_REMOTE_YSQL_DUMP_PATH = os.path.join(YB_HOME_DIR, 'master/postgres/bin/ysql_dump')
DEFAULT_REMOTE_YSQL_SHELL_PATH = os.path.join(YB_HOME_DIR, 'master/bin/ysqlsh')
DEFAULT_YB_USER = 'yugabyte'

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
PLATFORM_VERSION_FILE_PATH = os.path.join(SCRIPT_DIR, '../../yugaware/conf/version_metadata.json')
YB_VERSION_RE = re.compile(r'^version (\d+\.\d+\.\d+\.\d+).*')
YB_ADMIN_HELP_RE = re.compile(r'^ \d+\. (\w+).*')

DISABLE_SPLITTING_MS = 30000
DISABLE_SPLITTING_FREQ_SEC = 10
IS_SPLITTING_DISABLED_MAX_RETRIES = 100
TEST_SLEEP_AFTER_FIND_SNAPSHOT_DIRS_SEC = 100

DEFAULT_TS_WEB_PORT = 9000


@contextmanager
def terminating(thing):
    try:
        yield thing
    finally:
        thing.terminate()


class BackupException(Exception):
    """A YugaByte backup exception."""
    pass


class CompatibilityException(BackupException):
    """Exception which can be ignored for compatibility."""
    pass


class YbAdminOpNotSupportedException(BackupException):
    """Exception raised if the attempted operation is not supported by the version of yb-admin
    we are using."""
    pass


def split_by_tab(line):
    return [item.replace(' ', '') for item in line.split("\t")]


def split_by_space(line):
    items = []
    for item in line.split(" "):
        item = item.strip()
        if item:
            items.append(item)
    return items


def quote_cmd_line_for_bash(cmd_line):
    if not isinstance(cmd_line, list) and not isinstance(cmd_line, tuple):
        raise BackupException("Expected a list/tuple, got: [[ {} ]]".format(cmd_line))
    return ' '.join([pipes.quote(str(arg)) for arg in cmd_line])


class BackupTimer:
    def __init__(self):
        # Store the start time as phase 0.
        self.logged_times = [time.time()]
        self.phases = ["START"]
        self.num_phases = 0
        self.finished = False

    def log_new_phase(self, msg="", last_phase=False):
        self.logged_times.append(time.time())
        self.phases.append(msg)
        self.num_phases += 1
        # Print completed time of last stage.
        time_taken = self.logged_times[self.num_phases] - self.logged_times[self.num_phases - 1]
        logging.info("Completed phase {}: {} [Time taken for phase: {}]".format(
            self.num_phases - 1,
            self.phases[self.num_phases - 1],
            str(timedelta(seconds=time_taken))))
        if last_phase:
            self.finished = True
        else:
            logging.info("[app] Starting phase {}: {}".format(self.num_phases, msg))

    def print_summary(self):
        if not self.finished:
            self.log_new_phase(last_phase=True)
        log_str = "Summary of run:\n"
        # Print info for each phase (ignore phase-0: START).
        for i in range(1, self.num_phases):
            t = self.logged_times[i + 1] - self.logged_times[i]
            log_str += "{} : PHASE {} : {}\n".format(str(timedelta(seconds=t)), i, self.phases[i])
        # Also print info for total runtime.
        log_str += "Total runtime: {}".format(
            str(timedelta(seconds=self.logged_times[-1] - self.logged_times[0])))
        # Add [app] for YW platform filter.
        logging.info("[app] " + log_str)

    def start_time_str(self):
        return time_to_str(self.logged_times[0])

    def end_time_str(self):
        return time_to_str(self.logged_times[self.num_phases])


class SingleArgParallelCmd:
    """
    Invokes a single-argument function on the given set of argument values in a parallel way
    using the given thread pool. Arguments are first deduplicated, so they have to be hashable.
    Example:
        SingleArgParallelCmd(fn, [a, b, c]).run(pool)
        -> run in parallel
           Thread-1: -> fn(a)
           Thread-2: -> fn(b)
           Thread-3: -> fn(c)
    """
    def __init__(self, fn, args):
        self.fn = fn
        self.args = args

    def run(self, pool):
        fn_args = sorted(set(self.args))
        return self._run_internal(self.fn, fn_args, fn_args, pool)

    def _run_internal(self, internal_fn, internal_fn_srgs, fn_args, pool):
        values = pool.map(internal_fn, internal_fn_srgs)
        # Return a map from args to the command results.
        assert len(fn_args) == len(values)
        return dict(zip(fn_args, values))


class MultiArgParallelCmd(SingleArgParallelCmd):
    """
    Invokes a function that is allowed to have any number of arguments on the given
    set of tuples of arguments in a parallel way using the given thread pool.
    Arguments are first deduplicated, so they have to be hashable.
    Example:
        MultiArgParallelCmd p(fn)
        p.add_args(a1, a2)
        p.add_args(b1, b2)
        p.run(pool)
        -> run in parallel
           Thread-1: -> fn(a1, a2)
           Thread-2: -> fn(b1, b2)
    """
    def __init__(self, fn):
        self.fn = fn
        self.args = []

    def add_args(self, *args_tuple):
        assert isinstance(args_tuple, tuple)
        self.args.append(args_tuple)

    def run(self, pool):
        def internal_fn(args_tuple):
            # One tuple - one function run.
            return self.fn(*args_tuple)

        fn_args = sorted(set(self.args))
        return self._run_internal(internal_fn, fn_args, fn_args, pool)


class SequencedParallelCmd(SingleArgParallelCmd):
    """
    Invokes commands in a parallel way using the given thread pool.
    Each command is a sequence of function calls with the provided arguments.
    Example:
        SequencedParallelCmd p(fn)
        p.start_command()  # Start sequence-1 of the function calls.
        p.add_args(a1, a2)
        p.add_args(b1, b2)
        p.start_command()  # Start sequence-2 of the function calls.
        p.add_args(c1, c2)
        p.add_args(d1, d2)
        p.run(pool)
        -> run in parallel
           Thread-1: -> fn(a1, a2); fn(b1, b2)
           Thread-2: -> fn(c1, c2); fn(d1, d2)
    """
    def __init__(self, fn, preprocess_args_fn=None, handle_errors=False):
        self.fn = fn
        self.args = []
        self.preprocess_args_fn = preprocess_args_fn
        # Whether or not we will throw an error on a cmd failure, or handle it and return a
        # tuple: ('failed-cmd', handle).
        self.handle_errors = handle_errors

    # Handle is returned on failed command if handle_errors=true.
    # Example handle is a tuple of (tablet_id, tserver_ip).
    def start_command(self, handle):
        # Start new set of argument tuples.
        # Place handle at the front.
        self.args.append([handle])

    def add_args(self, *args_tuple):
        assert isinstance(args_tuple, tuple)
        assert len(self.args) > 0, 'Call start_command() before'
        self.args[-1].append(args_tuple)

    def run(self, pool):
        def internal_fn(list_of_arg_tuples):
            assert isinstance(list_of_arg_tuples, list)
            # First entry is the handle.
            handle = list_of_arg_tuples[0]
            # Pre-process the list of arguments.
            processed_arg_tuples = (list_of_arg_tuples[1:] if self.preprocess_args_fn is None
                                    else self.preprocess_args_fn(list_of_arg_tuples[1:], handle))

            results = []
            for args_tuple in processed_arg_tuples:
                assert isinstance(args_tuple, tuple)
                try:
                    results.append(self.fn(*args_tuple))
                except Exception as ex:
                    logging.warning(
                        "Encountered error for handle '{}' while running command '{}'. Error: {}".
                        format(handle, args_tuple, ex))
                    if (self.handle_errors):
                        # If we handle errors, then return 'failed-cmd' with the handle.
return ('failed-cmd', handle) raise ex return results fn_args = [str(list_of_arg_tuples) for list_of_arg_tuples in self.args] return self._run_internal(internal_fn, self.args, fn_args, pool) def check_arg_range(min_value, max_value): """ Return a "checker" function that validates that an argument is within the given range. To be used with argparse. """ def check_fn(value): value = int(value) if value < min_value or value > max_value: raise argparse.ArgumentTypeError("Expected a value between {} and {}, got {}".format( min_value, max_value, value)) return value return check_fn def check_uuid(uuid_str): """ A UUID validator for use with argparse. """ if not UUID_ONLY_RE.match(uuid_str): raise argparse.ArgumentTypeError("Expected a UUID, got {}".format(uuid_str)) return uuid_str def random_string(length): return ''.join(random.choice(string.ascii_lowercase) for i in range(length)) def replace_last_substring(s, old, new): return new.join(s.rsplit(old, 1)) if s else None def strip_dir(dir_path): return dir_path.rstrip('/\\') def checksum_path(file_path): return file_path + '.' + SHA_FILE_EXT def checksum_path_downloaded(file_path): return checksum_path(file_path) + '.downloaded' # TODO: get rid of this sed / test program generation in favor of a more maintainable solution. def key_and_file_filter(checksum_file): return "\" $( sed 's| .*/| |' {} ) \"".format(pipes.quote(checksum_file)) # error_on_failure: If set to true, then the test command will return an error (errno != 0) if the # check fails. This is useful if we're taking advantage of larger retry mechanisms (eg retrying an # entire command chain). # TODO: get rid of this sed / test program generation in favor of a more maintainable solution. def compare_checksums_cmd(checksum_file1, checksum_file2, error_on_failure=False): return "test {} = {}{}".format( key_and_file_filter(checksum_file1), key_and_file_filter(checksum_file2), '' if error_on_failure else ' && echo correct || echo invalid') def get_db_name_cmd(dump_file): return "sed -n '/CREATE DATABASE/{s|CREATE DATABASE||;s|WITH.*||;p}' " + pipes.quote(dump_file) def apply_sed_edit_reg_exp_cmd(dump_file, reg_exp): return "sed -i '{}' {}".format(reg_exp, pipes.quote(dump_file)) def replace_db_name_cmd(dump_file, old_name, new_name): return apply_sed_edit_reg_exp_cmd( dump_file, "s|DATABASE {0}|DATABASE {1}|;s|\\\\connect {0}|\\\\connect {1}|".format( old_name, new_name)) def get_table_names_str(keyspaces, tables, delimeter, space): if len(keyspaces) != len(tables): raise BackupException( "Found {} --keyspace keys and {} --table keys. Number of these keys " "must be equal.".format(len(keyspaces), len(tables))) table_names = [] for i in range(0, len(tables)): table_names.append(delimeter.join([keyspaces[i], tables[i]])) return space.join(table_names) def keyspace_type(keyspace): return 'ysql' if ('.' in keyspace) and (keyspace.split('.')[0].lower() == 'ysql') else 'ycql' def is_parent_colocated_table_name(table_name): return table_name.endswith(COLOCATED_NAME_SUFFIX) def get_postgres_oid_from_table_id(table_id): return table_id[-4:] def verify_colocated_table_ids(old_id, new_id): # Assert that the postgres oids are the same. if (get_postgres_oid_from_table_id(old_id) != get_postgres_oid_from_table_id(new_id)): raise BackupException('Colocated tables have different oids: Old oid: {}, New oid: {}' .format(old_id, new_id)) def keyspace_name(keyspace): return keyspace.split('.')[1] if ('.' 
in keyspace) else keyspace def time_to_str(time_value): return time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(time_value)) def get_yb_backup_version_string(): str = "unknown" try: if os.path.isfile(PLATFORM_VERSION_FILE_PATH): with open(PLATFORM_VERSION_FILE_PATH, 'r') as f: ver_info = json.load(f) str = "version {}".format(ver_info['version_number']) if 'build_number' in ver_info: str += " build {}".format(ver_info['build_number']) if 'git_hash' in ver_info: str += " revision {}".format(ver_info['git_hash']) if 'build_type' in ver_info: str += " build_type {}".format(ver_info['build_type']) if 'build_timestamp' in ver_info: str += " built at {}".format(ver_info['build_timestamp']) else: logging.warning("[app] Version file not found: {}".format(PLATFORM_VERSION_FILE_PATH)) except Exception as ex: logging.warning("[app] Cannot parse JSON version file {}: {}". format(PLATFORM_VERSION_FILE_PATH, ex)) finally: return str class BackupOptions: def __init__(self, args): self.args = args class AbstractBackupStorage(object): def __init__(self, options): self.options = options @staticmethod def storage_type(): raise BackupException("Unimplemented") def _command_list_prefix(self): return [] class AzBackupStorage(AbstractBackupStorage): def __init__(self, options): super(AzBackupStorage, self).__init__(options) @staticmethod def storage_type(): return 'az' def _command_list_prefix(self): return "azcopy" def upload_file_cmd(self, src, dest, local=False): if local is True: dest = dest + os.getenv('AZURE_STORAGE_SAS_TOKEN') return [self._command_list_prefix(), "cp", src, dest] src = "'{}'".format(src) dest = "'{}'".format(dest + os.getenv('AZURE_STORAGE_SAS_TOKEN')) return ["{} {} {} {}".format(self._command_list_prefix(), "cp", src, dest)] def download_file_cmd(self, src, dest, local=False): if local is True: src = src + os.getenv('AZURE_STORAGE_SAS_TOKEN') return [self._command_list_prefix(), "cp", src, dest, "--recursive"] src = "'{}'".format(src + os.getenv('AZURE_STORAGE_SAS_TOKEN')) dest = "'{}'".format(dest) return ["{} {} {} {} {}".format(self._command_list_prefix(), "cp", src, dest, "--recursive")] def upload_dir_cmd(self, src, dest): # azcopy will download the top-level directory as well as the contents without "/*". 
src = "'{}'".format(os.path.join(src, '*')) dest = "'{}'".format(dest + os.getenv('AZURE_STORAGE_SAS_TOKEN')) return ["{} {} {} {} {}".format(self._command_list_prefix(), "cp", src, dest, "--recursive")] def download_dir_cmd(self, src, dest): src = "'{}'".format(os.path.join(src, '*') + os.getenv('AZURE_STORAGE_SAS_TOKEN')) dest = "'{}'".format(dest) return ["{} {} {} {} {}".format(self._command_list_prefix(), "cp", src, dest, "--recursive")] def delete_obj_cmd(self, dest): if dest is None or dest == '/' or dest == '': raise BackupException("Destination needs to be well formed.") dest = "'{}'".format(dest + os.getenv('AZURE_STORAGE_SAS_TOKEN')) return ["{} {} {} {}".format(self._command_list_prefix(), "rm", dest, "--recursive=true")] def backup_obj_size_cmd(self, backup_obj_location): sas_token = os.getenv('AZURE_STORAGE_SAS_TOKEN') backup_obj_location = "'{}'".format(backup_obj_location + sas_token) return ["{} {} {} {} {} {} {} {} {}".format(self._command_list_prefix(), "list", backup_obj_location, "--machine-readable", "--running-tally", "|", "grep 'Total file size:'", "|", "grep -Eo '[0-9]*'")] class GcsBackupStorage(AbstractBackupStorage): def __init__(self, options): super(GcsBackupStorage, self).__init__(options) @staticmethod def storage_type(): return 'gcs' def _command_list_prefix(self): return ['gsutil', '-o', 'Credentials:gs_service_key_file=%s' % self.options.cloud_cfg_file_path] def upload_file_cmd(self, src, dest): return self._command_list_prefix() + ["cp", src, dest] def download_file_cmd(self, src, dest): return self._command_list_prefix() + ["cp", src, dest] def upload_dir_cmd(self, src, dest): if self.options.args.disable_parallelism: return self._command_list_prefix() + ["rsync", "-r", src, dest] else: return self._command_list_prefix() + ["-m", "rsync", "-r", src, dest] def download_dir_cmd(self, src, dest): return self._command_list_prefix() + ["-m", "rsync", "-r", src, dest] def delete_obj_cmd(self, dest): if dest is None or dest == '/' or dest == '': raise BackupException("Destination needs to be well formed.") return self._command_list_prefix() + ["rm", "-r", dest] def backup_obj_size_cmd(self, backup_obj_location): return self._command_list_prefix() + ["du", "-s", "-a", backup_obj_location] class S3BackupStorage(AbstractBackupStorage): def __init__(self, options): super(S3BackupStorage, self).__init__(options) @staticmethod def storage_type(): return 's3' def _command_list_prefix(self): # If 's3cmd get' fails it creates zero-length file, '--force' is needed to # override this empty file on the next retry-step. 
return ['s3cmd', '--force', '--no-check-certificate', '--config=%s' % self.options.cloud_cfg_file_path] def upload_file_cmd(self, src, dest): cmd_list = ["put", src, dest] if self.options.args.sse: cmd_list.append("--server-side-encryption") return self._command_list_prefix() + cmd_list def download_file_cmd(self, src, dest): return self._command_list_prefix() + ["get", src, dest] def upload_dir_cmd(self, src, dest): cmd_list = ["sync", "--no-check-md5", src, dest] if self.options.args.sse: cmd_list.append("--server-side-encryption") return self._command_list_prefix() + cmd_list def download_dir_cmd(self, src, dest): return self._command_list_prefix() + ["sync", "--no-check-md5", src, dest] def delete_obj_cmd(self, dest): if dest is None or dest == '/' or dest == '': raise BackupException("Destination needs to be well formed.") return self._command_list_prefix() + ["del", "-r", dest] def backup_obj_size_cmd(self, backup_obj_location): return self._command_list_prefix() + ["du", backup_obj_location] class NfsBackupStorage(AbstractBackupStorage): def __init__(self, options): super(NfsBackupStorage, self).__init__(options) @staticmethod def storage_type(): return 'nfs' def _command_list_prefix(self): result = ['rsync', '-avhW'] if not self.options.args.mac: result.append('--no-compress') return result # This is a single string because that's what we need for doing `mkdir && rsync`. def upload_file_cmd(self, src, dest): return ["mkdir -p {} && {} {} {}".format( pipes.quote(os.path.dirname(dest)), " ".join(self._command_list_prefix()), pipes.quote(src), pipes.quote(dest))] def download_file_cmd(self, src, dest): return self._command_list_prefix() + [src, dest] # This is a list of single string, because a) we need a single string for executing # `mkdir && rsync` and b) we need a list of 1 element, as it goes through a tuple(). def upload_dir_cmd(self, src, dest): return ["mkdir -p {} && {} {} {}".format( pipes.quote(dest), " ".join(self._command_list_prefix()), pipes.quote(src), pipes.quote(dest))] def download_dir_cmd(self, src, dest): if self.options.args.TEST_sleep_during_download_dir: return ["sleep 5 && {} {} {}".format( " ".join(self._command_list_prefix()), pipes.quote(src), pipes.quote(dest))] return self._command_list_prefix() + [src, dest] def delete_obj_cmd(self, dest): if dest is None or dest == '/' or dest == '': raise BackupException("Destination needs to be well formed.") return ["rm", "-rf", pipes.quote(dest)] def backup_obj_size_cmd(self, backup_obj_location): return ["du", "-sb", backup_obj_location] BACKUP_STORAGE_ABSTRACTIONS = { S3BackupStorage.storage_type(): S3BackupStorage, NfsBackupStorage.storage_type(): NfsBackupStorage, GcsBackupStorage.storage_type(): GcsBackupStorage, AzBackupStorage.storage_type(): AzBackupStorage } class KubernetesDetails(): def __init__(self, server_fqdn, config_map): self.namespace = server_fqdn.split('.')[2] self.pod_name = server_fqdn.split('.')[0] # The pod names are <helm fullname>yb-<master|tserver>-n where # n is the pod number. <helm fullname> can be blank. And # yb-master/yb-tserver are the container names. 
self.container = "yb-master" if self.pod_name.find("master") > 0 else "yb-tserver" self.env_config = os.environ.copy() self.env_config["KUBECONFIG"] = config_map[server_fqdn] def get_instance_profile_credentials(): result = () iam_credentials_endpoint = 'meta-data/iam/security-credentials/' metadata = get_instance_metadata(timeout=1, num_retries=1, data=iam_credentials_endpoint) if metadata: instance_credentials = next(iter(metadata.values())) if isinstance(instance_credentials, dict): try: access_key = instance_credentials['AccessKeyId'] secret_key = instance_credentials['SecretAccessKey'] token = instance_credentials['Token'] result = access_key, secret_key, token except KeyError as e: logging.info("Could not find {} in instance metadata".format(e)) return result class YBVersion: def __init__(self, ver_str, verbose=False): self.string = ver_str.strip() if verbose: logging.info("YB cluster version string: " + self.string) matched = YB_VERSION_RE.match(self.string) self.parts = matched.group(1).split('.') if matched else None if self.parts: logging.info("[app] YB cluster version: " + '.'.join(self.parts)) class YBTSConfig: """ Helper class to store TS configuration parameters. """ FS_DATA_DIRS_ARG = 'fs_data_dirs' PLACEMENT_REGION_ARG = 'placement_region' WEBSERVER_PORT_ARG = 'webserver_port' FS_DATA_DIRS_ARG_PREFIX = '--' + FS_DATA_DIRS_ARG + '=' PLACEMENT_REGION_RE = re.compile(r'[\S\s]*--' + PLACEMENT_REGION_ARG + r'=([\S]*)') def __init__(self, backup): self.backup = backup self.params = {} self.clean() def clean(self): # Clean this TS config (keep only web-port). if self.WEBSERVER_PORT_ARG in self.params: web_port = self.params[self.WEBSERVER_PORT_ARG] else: web_port = DEFAULT_TS_WEB_PORT self.params = {} self.set_web_port(web_port) def has_data_dirs(self): return self.FS_DATA_DIRS_ARG in self.params def data_dirs(self): return self.params[self.FS_DATA_DIRS_ARG] def has_region(self): return self.PLACEMENT_REGION_ARG in self.params def region(self): return self.params[self.PLACEMENT_REGION_ARG] def set_web_port(self, web_port): self.params[self.WEBSERVER_PORT_ARG] = web_port def load(self, tserver_ip, read_region=False): """ Load TS properties for this TS IP via TS web-interface. :param tserver_ip: tablet server ip :param read_region: parse --placement_region value from TS configuration """ self.clean() web_port = self.params[self.WEBSERVER_PORT_ARG] if self.backup.args.verbose: logging.info("Loading TS config via Web UI on {}:{}".format(tserver_ip, web_port)) url = "{}:{}/varz".format(tserver_ip, web_port) output = self.backup.run_program(['curl', url], num_retry=10) # Read '--placement_region'. if read_region: suffix_match = self.PLACEMENT_REGION_RE.match(output) if suffix_match is None: msg = "Cannot get region from {}: [[ {} ]]".format(url, output) logging.error("[app] {}".format(msg)) self.clean() raise BackupException(msg) region = suffix_match.group(1) logging.info("[app] Region for TS IP {} is '{}'".format(tserver_ip, region)) self.params[self.PLACEMENT_REGION_ARG] = region # Read '--fs_data_dirs'. data_dirs = [] for line in output.split('\n'): if line.startswith(self.FS_DATA_DIRS_ARG_PREFIX): for data_dir in line[len(self.FS_DATA_DIRS_ARG_PREFIX):].split(','): data_dir = data_dir.strip() if data_dir: data_dirs.append(data_dir) break if not data_dirs: msg = "Did not find any data directories in tserver by querying /varz endpoint"\ " on tserver '{}:{}'. 
Was looking for '{}', got this: [[ {} ]]".format( tserver_ip, web_port, self.FS_DATA_DIRS_ARG_PREFIX, output) logging.error("[app] {}".format(msg)) self.clean() raise BackupException(msg) elif self.backup.args.verbose: logging.info("Found data directories on tablet server '{}': {}".format( tserver_ip, data_dirs)) self.params[self.FS_DATA_DIRS_ARG] = data_dirs class YBManifest: """ Manifest is the top level JSON-based file-descriptor with the backup content and properties. The class is responsible for the data initialization, loading/saving from/into a JSON file. The data can be initialized by default/stub values for old backup format (version='0'). The format can be extended. For example: - metadata files can be listed in the locations - checksum value can be stored for any tablet-folder - any tablet-folder can include complete list of SST files Format: { # This Manifest file format version. "version": "1.0", # Source cluster parameters. "source": { # Array of Master IPs. "yb-masters": [ "127.0.0.1", ... ], # Array of table names in the backup. "tables": [ "ysql.yugabyte.tbl1", "ysql.yugabyte.tbl2", ... ], # Is it YSQL backup? "ysql": true, # Is YSQL authentication enabled? "ysql_authentication": false, # Is Kubernetes used? "k8s": false, # YB cluster version string. Usually it includes: # version_number, build_number, git_hash, build_type, build_timestamp. "yb-database-version": "version 2.11.2.0 build PRE_RELEASE ..." }, # Properties of the created backup. "properties": { # The backup creation start/finish time. "start-time": "Mon, 06 Dec 2021 20:45:25 +0000", "end-time": "Mon, 06 Dec 2021 20:45:54 +0000", # The backup script version string. "platform-version": "version 2.11.2.0 build PRE_RELEASE ...", # Target storage provider (AWS/GCP/NFS/etc.). "storage-type": "nfs", # Was additional 'YSQLDump_tablespaces' dump file created? "use-tablespaces": true, # Parallelism level - number of worker threads. "parallelism": 8, # True for pg dump based backups. "pg_based_backup": false }, # Content of the backup folders/locations. "locations": { # Single main location. <default_backup_location>: { # Default location marker. "default": true, # Tablet folders in the location. "tablet-directories": { <tablet_id>: {}, ... }, }, # Multiple regional locations. <regional_location>: { # The region name. "region": <region_name>, # Tablet folders in the location. "tablet-directories": { <tablet_id>: {}, ... } }, ... } } """ def __init__(self, backup): self.backup = backup self.body = {} self.body['version'] = "0" # Data initialization methods. def init(self, snapshot_bucket, pg_based_backup): # Call basic initialization by default to prevent code duplication. self.create_by_default(self.backup.snapshot_location(snapshot_bucket)) self.body['version'] = "1.0" # Source cluster parameters. source = {} self.body['source'] = source source['ysql'] = self.backup.is_ysql_keyspace() source['ysql_authentication'] = self.backup.args.ysql_enable_auth source['k8s'] = self.backup.is_k8s() source['yb-database-version'] = self.backup.database_version.string source['yb-masters'] = self.backup.args.masters.split(",") if not pg_based_backup: source['tables'] = self.backup.table_names_str().split(" ") # The backup properties. 
properties = self.body['properties'] properties['platform-version'] = get_yb_backup_version_string() properties['parallelism'] = self.backup.args.parallelism properties['use-tablespaces'] = self.backup.args.use_tablespaces properties['pg-based-backup'] = pg_based_backup properties['start-time'] = self.backup.timer.start_time_str() properties['end-time'] = self.backup.timer.end_time_str() properties['size-in-bytes'] = self.backup.calc_size_in_bytes(snapshot_bucket) def init_locations(self, tablet_leaders, snapshot_bucket): locations = self.body['locations'] for (tablet_id, leader_ip, tserver_region) in tablet_leaders: tablet_location = self.backup.snapshot_location(snapshot_bucket, tserver_region) if tablet_location not in locations: # Init the regional location. Single main location was added in init(). assert tablet_location != self.backup.args.backup_location location_data = {} locations[tablet_location] = location_data location_data['tablet-directories'] = {} location_data['region'] = tserver_region locations[tablet_location]['tablet-directories'][tablet_id] = {} self.body['properties']['size-in-bytes'] += len(self.to_string()) # Data saving/loading/printing. def to_string(self): return json.dumps(self.body, indent=2) def save_into_file(self, file_path): logging.info('[app] Exporting manifest data to {}'.format(file_path)) with open(file_path, 'w') as f: f.write(self.to_string()) def load_from_file(self, file_path): logging.info('[app] Loading manifest data from {}'.format(file_path)) with open(file_path, 'r') as f: self.body = json.load(f) def is_loaded(self): return self.body['version'] != "0" # Stub methods for the old backup format. There is no real Manifest file in the backup. def create_by_default(self, default_backup_location): properties = {} self.body['properties'] = properties properties['storage-type'] = self.backup.args.storage_type self.body['locations'] = {} default_location_data = {} # Only one single main location is available in the old backup. self.body['locations'][default_backup_location] = default_location_data default_location_data['default'] = True default_location_data['tablet-directories'] = {} # Helper data getters. def get_tablet_locations(self, tablet_location): assert not tablet_location # Empty. locations = self.body['locations'] for loc in locations: for tablet_id in locations[loc]['tablet-directories']: tablet_location[tablet_id] = loc def is_pg_based_backup(self): pg_based_backup = self.body['properties'].get('pg-based-backup') if pg_based_backup is None: pg_based_backup = False return pg_based_backup def get_backup_size(self): return self.body['properties'].get('size-in-bytes') def get_locations(self): return self.body['locations'].keys() class YBBackup: def __init__(self): signal.signal(signal.SIGINT, self.cleanup_on_exit) signal.signal(signal.SIGTERM, self.cleanup_on_exit) signal.signal(signal.SIGQUIT, self.cleanup_on_exit) self.pools = [] self.leader_master_ip = '' self.ysql_ip = '' self.live_tserver_ip = '' self.tmp_dir_name = '' self.server_ips_with_uploaded_cloud_cfg = {} self.k8s_pod_fqdn_to_cfg = {} self.timer = BackupTimer() self.ts_cfgs = {} self.ip_to_ssh_key_map = {} self.secondary_to_primary_ip_map = {} self.region_to_location = {} self.database_version = YBVersion("unknown") self.manifest = YBManifest(self) self.parse_arguments() def cleanup_on_exit(self, signum, frame): self.terminate_pools() # Runs clean-up callbacks registered to atexit. 
sys.exit() def terminate_pools(self): for pool in self.pools: logging.info("Terminating threadpool ...") try: pool.close() pool.terminate() pool.join() logging.info("Terminated threadpool ...") except Exception as ex: logging.error("Failed to terminate pool: {}".format(ex)) def sleep_or_raise(self, num_retry, timeout, ex): if num_retry > 0: logging.info("Sleep {}... ({} retries left)".format(timeout, num_retry)) time.sleep(timeout) else: raise ex def run_program(self, args, num_retry=1, timeout=10, env=None, **kwargs): """ Runs the given program with the given set of arguments. Arguments are the same as in subprocess.check_output. Logs the program and the output in verbose mode. Also logs the command line in case of failure. """ cmd_as_str = quote_cmd_line_for_bash(args) if self.args.verbose: logging.info("Running command{}: {}".format( "" if num_retry == 1 else " ({} retries)".format(num_retry), cmd_as_str)) while num_retry > 0: num_retry = num_retry - 1 try: proc_env = os.environ.copy() proc_env.update(env if env is not None else {}) subprocess_result = str(subprocess.check_output( args, stderr=subprocess.STDOUT, env=proc_env, **kwargs).decode('utf-8', errors='replace') .encode("ascii", "ignore") .decode("ascii")) if self.args.verbose: logging.info( "Output from running command [[ {} ]]:\n{}\n[[ END OF OUTPUT ]]".format( cmd_as_str, subprocess_result)) return subprocess_result except subprocess.CalledProcessError as e: logging.error("Failed to run command [[ {} ]]: code={} output={}".format( cmd_as_str, e.returncode, str(e.output.decode('utf-8', errors='replace') .encode("ascii", "ignore") .decode("ascii")))) self.sleep_or_raise(num_retry, timeout, e) except Exception as ex: logging.error("Failed to run command [[ {} ]]: {}".format(cmd_as_str, ex)) self.sleep_or_raise(num_retry, timeout, ex) def parse_arguments(self): parser = argparse.ArgumentParser( description='Backup/restore YB table', epilog="Use the following environment variables to provide AWS access and secret " "keys for S3:\n" " export AWS_ACCESS_KEY_ID=<your_aws_access_key>\n" " export AWS_SECRET_ACCESS_KEY=<your_aws_secret_key>\n" "For GCS:\n" " export GCS_CREDENTIALS_JSON=<contents_of_gcp_credentials>\n" "For YCQL tables:\n" " Keys --keyspace, --table and --table_uuid can be repeated several times.\n" " Recommended order for creating backup: --keyspace ks1 --table tbl1 " " --table_uuid uuid1 --keyspace ks2 --table tbl2 --table_uuid uuid2 ...\n" " Recommended order for restoring backup: --keyspace target_ks --table tbl1 " " --table tbl2 ...\n" "For YSQL DB:\n" " Only one key --keyspace is supported. The script processes the whole DB.\n" " For creating backup: --keyspace ysql.db1\n" " For restoring backup: --keyspace ysql.db1_copy\n", formatter_class=RawDescriptionHelpFormatter) parser.add_argument( '--masters', required=True, help="Comma separated list of masters for the cluster") parser.add_argument( '--ts_web_hosts_ports', help="Custom TS HTTP hosts and ports. " "In form: <IP>:<Port>,<IP>:<Port>") parser.add_argument( # Keeping the "ts_" prefix in the name for backward compatibility only. # In fact this is IP mapping for TServers and Masters. 
'--ts_secondary_ip_map', default=None, help="Map of secondary IPs to primary for ensuring ssh connectivity") parser.add_argument( '--ip_to_ssh_key_path', default=None, help="Map of IPs to their SSH keys") parser.add_argument( '--k8s_config', required=False, help="Namespace to use for kubectl in case of kubernetes deployment") parser.add_argument( '--keyspace', action='append', help="Repeatable keyspace of the tables to backup, " "or a target keyspace for the backup restoring") parser.add_argument( '--table', action='append', help="Repeatable name of the tables to backup or restore") parser.add_argument( '--table_uuid', action='append', help="Repeatable UUID of the tables to backup.") parser.add_argument( '--local_yb_admin_binary', help="Path to the local yb-admin binary; " "by default remote yb-admin tool is used") parser.add_argument( '--remote_yb_admin_binary', default=DEFAULT_REMOTE_YB_ADMIN_PATH, help="Path to the remote yb-admin binary") parser.add_argument( '--local_ysql_dump_binary', help="Path to the local ysql_dump binary; " "by default remote ysql_dump tool is used") parser.add_argument( '--remote_ysql_dump_binary', default=DEFAULT_REMOTE_YSQL_DUMP_PATH, help="Path to the remote ysql_dump binary") parser.add_argument( '--local_ysql_shell_binary', help="Path to the local ysql shell binary; " "by default remote ysql shell tool is used") parser.add_argument( '--remote_ysql_shell_binary', default=DEFAULT_REMOTE_YSQL_SHELL_PATH, help="Path to the remote ysql shell binary") parser.add_argument( '--pg_based_backup', action='store_true', default=False, help="Use it to trigger " "pg based backup.") parser.add_argument( '--ssh_key_path', required=False, help="Path to the ssh key file") parser.add_argument( '--ssh_user', default=DEFAULT_YB_USER, help="Username to use for the ssh connection.") parser.add_argument( '--remote_user', default=DEFAULT_YB_USER, help="User that will perform backup tasks.") parser.add_argument( '--ssh_port', default='54422', help="Port to use for the ssh connection.") parser.add_argument( '--no_ssh', action='store_true', default=False, help="Don't use SSH to run commands") parser.add_argument( '--mac', action='store_true', default=False, help="Use MacOS tooling") parser.add_argument( '--ysql_port', help="Custom YSQL process port. " "Default port is used if not specified.") parser.add_argument( '--ysql_host', help="Custom YSQL process host. " "First alive TS host is used if not specified.") parser.add_argument( '--ysql_enable_auth', action='store_true', default=False, help="Whether ysql authentication is required. If specified, will connect using local " "UNIX socket as the host. Overrides --local_ysql_dump_binary to always " "use remote binary.") parser.add_argument( '--disable_checksums', action='store_true', default=False, help="Whether checksums will be created and checked. If specified, will skip using " "checksums.") backup_location_group = parser.add_mutually_exclusive_group(required=True) backup_location_group.add_argument( '--backup_location', help="Directory/bucket under which the snapshots should be created or " "an exact snapshot directory in case of snapshot restoring.") # Deprecated flag for backwards compatibility. backup_location_group.add_argument('--s3bucket', required=False, help=argparse.SUPPRESS) parser.add_argument( '--region', action='append', help="Repeatable region to create geo-partitioned backup. Every '--region' must have " "related '--region_location' value. 
For 'restore' it's not used.") parser.add_argument( '--region_location', action='append', help="Repeatable directory/bucket for a region. For 'create' mode it should be " "related to a '--region'. For 'restore' it's not used.") parser.add_argument( '--no_auto_name', action='store_true', help="Disable automatic generation of a name under the given backup location. If this " "is specified, the backup location will be the exact path of the directory " "storing the snapshot.") parser.add_argument( '--no_snapshot_deleting', action='store_true', help="Disable automatic snapshot deleting after the backup creating or restoring.") parser.add_argument( '--snapshot_id', type=check_uuid, help="Use the existing snapshot ID instead of creating a new one.") parser.add_argument( '--verbose', required=False, action='store_true', help='Verbose mode') parser.add_argument( '-j', '--parallelism', type=check_arg_range(1, 100), default=8, help='Maximum number of parallel commands to launch. ' 'This also affects the amount of outgoing s3cmd sync traffic when copying a ' 'backup to S3.') parser.add_argument( '--disable_parallelism', action='store_true', default=False, help="If specified as False, we add the parallelism flag '-m' during gsutil. " "If speciifed as True, the '-m' flag is not added.") parser.add_argument( '--storage_type', choices=list(BACKUP_STORAGE_ABSTRACTIONS.keys()), default=S3BackupStorage.storage_type(), help="Storage backing for backups, eg: s3, nfs, gcs, ..") parser.add_argument( 'command', choices=['create', 'restore', 'restore_keys', 'delete'], help='Create, restore or delete the backup from the provided backup location.') parser.add_argument( '--certs_dir', required=False, help="The directory containing the certs for secure connections.") parser.add_argument( '--sse', required=False, action='store_true', help='Enable server side encryption on storage') parser.add_argument( '--backup_keys_source', required=False, help="Location of universe encryption keys backup file to upload to backup location" ) parser.add_argument( '--restore_keys_destination', required=False, help="Location to download universe encryption keys backup file to" ) parser.add_argument( '--nfs_storage_path', required=False, help="NFS storage mount path") parser.add_argument( '--restore_time', required=False, help='The Unix microsecond timestamp to which to restore the snapshot.') parser.add_argument( '--use_tablespaces', required=False, action='store_true', default=False, help='Backup/restore YSQL TABLESPACE objects into/from the backup.') parser.add_argument('--upload', dest='upload', action='store_true', default=True) # Please note that we have to use this weird naming (i.e. underscore in the argument name) # style to keep it in sync with YB processes G-flags. parser.add_argument('--no_upload', dest='upload', action='store_false', help="Skip uploading snapshot") parser.add_argument( '--edit_ysql_dump_sed_reg_exp', required=False, help="Regular expression for 'sed' tool to edit on fly YSQL dump file(s) during the " "backup restoring. Example: \"s|OWNER TO yugabyte|OWNER TO admin|\". WARNING: " "Contact support team before use! No any backward compatibility guaranties.") parser.add_argument( '--do_not_disable_splitting', required=False, action='store_true', default=False, help="Do not disable automatic splitting before taking a backup. This is dangerous " "because a tablet might be split and cleaned up just before we try to copy its " "data.") """ Test arguments - Use `argparse.SUPPRESS` to keep these arguments hidden. 
""" # Adds in a sleep before the rsync command to download data during the restore. Used in # tests to hit tablet move race conditions during restores. parser.add_argument( '--TEST_sleep_during_download_dir', required=False, action='store_true', default=False, help=argparse.SUPPRESS) # Adds in a sleep after finding the list of snapshot directories to upload but before # uploading them, to test that they are not deleted by a completed tablet split (tablet # splitting should be disabled). parser.add_argument( '--TEST_sleep_after_find_snapshot_dirs', required=False, action='store_true', default=False, help=argparse.SUPPRESS) # Simulate an older yb-admin which does not support some command. parser.add_argument( '--TEST_yb_admin_unsupported_commands', required=False, action='store_true', default=False, help=argparse.SUPPRESS) self.args = parser.parse_args() def post_process_arguments(self): if self.args.verbose: logging.info("Parsed arguments: {}".format(vars(self.args))) if self.args.storage_type == 'nfs': logging.info('Checking whether NFS backup storage path mounted on TServers or not') with terminating(ThreadPool(self.args.parallelism)) as pool: self.pools.append(pool) tserver_ips = self.get_live_tservers() SingleArgParallelCmd(self.find_nfs_storage, tserver_ips).run(pool) self.args.backup_location = self.args.backup_location or self.args.s3bucket options = BackupOptions(self.args) self.cloud_cfg_file_path = os.path.join(self.get_tmp_dir(), CLOUD_CFG_FILE_NAME) if self.is_s3(): if not os.getenv('AWS_SECRET_ACCESS_KEY') and not os.getenv('AWS_ACCESS_KEY_ID'): metadata = get_instance_profile_credentials() with open(self.cloud_cfg_file_path, 'w') as s3_cfg: if metadata: s3_cfg.write('[default]\n' + 'access_key = ' + metadata[0] + '\n' + 'secret_key = ' + metadata[1] + '\n' + 'access_token = ' + metadata[2] + '\n') else: s3_cfg.write('[default]\n' + 'access_key = ' + '\n' + 'secret_key = ' + '\n' + 'access_token = ' + '\n') elif os.getenv('AWS_SECRET_ACCESS_KEY') and os.getenv('AWS_ACCESS_KEY_ID'): host_base = os.getenv('AWS_HOST_BASE') path_style_access = True if os.getenv('PATH_STYLE_ACCESS', "false") == "true" else False if host_base: if path_style_access: host_base_cfg = 'host_base = {0}\n' \ 'host_bucket = {1}\n'.format( host_base, self.args.backup_location) else: host_base_cfg = 'host_base = {0}\n' \ 'host_bucket = {1}.{0}\n'.format( host_base, self.args.backup_location) else: host_base_cfg = '' with open(self.cloud_cfg_file_path, 'w') as s3_cfg: s3_cfg.write('[default]\n' + 'access_key = ' + os.environ['AWS_ACCESS_KEY_ID'] + '\n' + 'secret_key = ' + os.environ['AWS_SECRET_ACCESS_KEY'] + '\n' + host_base_cfg) else: raise BackupException( "Missing either AWS access key or secret key for S3 " "in AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment variables.") os.chmod(self.cloud_cfg_file_path, 0o400) options.cloud_cfg_file_path = self.cloud_cfg_file_path elif self.is_gcs(): credentials = os.getenv('GCS_CREDENTIALS_JSON') if not credentials: raise BackupException( "Set GCP credential file for GCS in GCS_CREDENTIALS_JSON " "environment variable.") with open(self.cloud_cfg_file_path, 'w') as cloud_cfg: cloud_cfg.write(credentials) options.cloud_cfg_file_path = self.cloud_cfg_file_path elif self.is_az(): sas_token = os.getenv('AZURE_STORAGE_SAS_TOKEN') if not sas_token: raise BackupException( "Set SAS for Azure Storage in AZURE_STORAGE_SAS_TOKEN environment variable.") if '?sv' not in sas_token: raise BackupException( "SAS tokens must begin with '?sv'.") self.storage = 
BACKUP_STORAGE_ABSTRACTIONS[self.args.storage_type](options) if self.args.ts_secondary_ip_map is not None: self.secondary_to_primary_ip_map = json.loads(self.args.ts_secondary_ip_map) if self.args.ip_to_ssh_key_path is not None: self.ip_to_ssh_key_map = json.loads(self.args.ip_to_ssh_key_path) if self.is_k8s(): self.k8s_pod_fqdn_to_cfg = json.loads(self.args.k8s_config) if self.k8s_pod_fqdn_to_cfg is None: raise BackupException("Couldn't load k8s configs") self.args.local_ysql_dumpall_binary = replace_last_substring( self.args.local_ysql_dump_binary, "ysql_dump", "ysql_dumpall") self.args.remote_ysql_dumpall_binary = replace_last_substring( self.args.remote_ysql_dump_binary, "ysql_dump", "ysql_dumpall") if self.args.ts_web_hosts_ports: logging.info('TS Web hosts/ports: {}'.format(self.args.ts_web_hosts_ports)) for host_port in self.args.ts_web_hosts_ports.split(','): (host, port) = host_port.split(':') self.ts_cfgs.setdefault(host, YBTSConfig(self)).set_web_port(port) if self.per_region_backup(): if len(self.args.region) != len(self.args.region_location): raise BackupException( "Found {} --region keys and {} --region_location keys. Number of these keys " "must be equal.".format(len(self.args.region), len(self.args.region_location))) for i in range(len(self.args.region)): self.region_to_location[self.args.region[i]] = self.args.region_location[i] def table_names_str(self, delimeter='.', space=' '): return get_table_names_str(self.args.keyspace, self.args.table, delimeter, space) def per_region_backup(self): return self.args.region_location is not None @staticmethod def get_snapshot_location(location, bucket): if bucket is None: return location else: return os.path.join(location, bucket) def snapshot_location(self, bucket, region=None): if region is not None: if not self.per_region_backup(): raise BackupException( "Requested region location for non-geo-partitioned backup. Region: {}. " "Locations: {}.".format(region, self.args.region_location)) if region in self.region_to_location: return self.get_snapshot_location(self.region_to_location[region], bucket) # Default location. return self.get_snapshot_location(self.args.backup_location, bucket) def is_s3(self): return self.args.storage_type == S3BackupStorage.storage_type() def is_gcs(self): return self.args.storage_type == GcsBackupStorage.storage_type() def is_az(self): return self.args.storage_type == AzBackupStorage.storage_type() def is_nfs(self): return self.args.storage_type == NfsBackupStorage.storage_type() def is_k8s(self): return self.args.k8s_config is not None def is_cloud(self): return self.args.storage_type != NfsBackupStorage.storage_type() def has_cfg_file(self): return self.args.storage_type in [ GcsBackupStorage.storage_type(), S3BackupStorage.storage_type()] def is_ysql_keyspace(self): return self.args.keyspace and keyspace_type(self.args.keyspace[0]) == 'ysql' def needs_change_user(self): return self.args.ssh_user != self.args.remote_user def get_main_host_ip(self): if self.is_k8s(): return self.get_live_tserver_ip() else: return self.get_leader_master_ip() def get_leader_master_ip(self): if not self.leader_master_ip: all_masters = self.args.masters.split(",") # Use first Master's ip in list to get list of all masters. self.leader_master_ip = all_masters[0].split(':')[0] # Get LEADER ip, if it's ALIVE, else any alive master ip. 
output = self.run_yb_admin(['list_all_masters']) for line in output.splitlines(): if LEADING_UUID_RE.match(line): fields = split_by_tab(line) (ip_port, state, role) = (fields[1], fields[2], fields[3]) if state == 'ALIVE': (ip, port) = ip_port.split(':') if self.secondary_to_primary_ip_map: ip = self.secondary_to_primary_ip_map[ip] alive_master_ip = ip if role == 'LEADER': break self.leader_master_ip = alive_master_ip return self.leader_master_ip def get_live_tservers(self): tserver_ips = [] output = self.run_yb_admin(['list_all_tablet_servers']) for line in output.splitlines(): if LEADING_UUID_RE.match(line): fields = split_by_space(line) (ip_port, state) = (fields[1], fields[3]) if state == 'ALIVE': (ip, port) = ip_port.split(':') if self.secondary_to_primary_ip_map: ip = self.secondary_to_primary_ip_map[ip] tserver_ips.append(ip) return tserver_ips def get_live_tserver_ip(self): if not self.live_tserver_ip: alive_ts_ips = self.get_live_tservers() if alive_ts_ips: leader_master = self.get_leader_master_ip() master_hosts = {hp.split(':')[0] for hp in self.args.masters.split(",")} # Exclude the Master Leader because the host has maximum network pressure. # Let's try to use a follower Master node instead. master_hosts.discard(leader_master) selected_ts_ips = master_hosts.intersection(alive_ts_ips) if not selected_ts_ips: # Try the Master Leader if all Master followers are excluded. selected_ts_ips = {leader_master}.intersection(alive_ts_ips) # For rebalancing the user usually adds/removes a TS node. That's why we prefer # to use a Master node to prevent selecting of a just ADDED OR a just REMOVED TS. # Return the first alive TS if the IP is in the list of Master IPs. # Else return just the first alive TS. self.live_tserver_ip =\ selected_ts_ips.pop() if selected_ts_ips else alive_ts_ips[0] if self.args.verbose: logging.info("Selecting alive TS {} from {}".format( self.live_tserver_ip, alive_ts_ips)) else: raise BackupException("Cannot get alive TS: {}".format(alive_ts_ips)) if not self.live_tserver_ip: raise BackupException("No alive TS: {}".format(self.live_tserver_ip)) return self.live_tserver_ip def get_ysql_ip(self): if not self.ysql_ip: output = "" if self.args.ysql_enable_auth: # Note that this requires YSQL commands to be run on the master leader. # In case of k8s, we get live tserver, since master pod does not have # pgsql unix socket. socket_fds = self.run_ssh_cmd( "ls /tmp/.yb.*/.s.PGSQL.*", self.get_main_host_ip()).strip().split() if len(socket_fds): self.ysql_ip = os.path.dirname(socket_fds[0]) else: output = "Failed to find local socket." elif self.args.ysql_host: self.ysql_ip = self.args.ysql_host else: # Get first ALIVE TS. self.ysql_ip = self.get_live_tserver_ip() if not self.ysql_ip: raise BackupException("Cannot get alive TS:\n{}".format(output)) return self.ysql_ip def run_tool(self, local_tool, remote_tool, std_args, cmd_line_args, run_ip=None, env_vars={}): """ Runs the utility from the configured location. :param cmd_line_args: command-line arguments to the tool :return: the standard output of the tool """ # Use local tool if it's specified. if local_tool: if not os.path.exists(local_tool): raise BackupException("Tool binary not found at {}".format(local_tool)) return self.run_program([local_tool] + std_args + cmd_line_args, env=env_vars, num_retry=10) else: if run_ip: run_at_location = run_ip else: run_at_location = self.get_leader_master_ip() # Using remote tool binary on leader master server. 
return self.run_ssh_cmd( [remote_tool] + std_args + cmd_line_args, run_at_location, num_ssh_retry=10, env_vars=env_vars) def get_master_addresses_for_servers(self): def get_key(val): for key, value in self.secondary_to_primary_ip_map.items(): if val == value: return key master_addresses = self.args.masters if self.args.local_yb_admin_binary: # We are using the local yb-admin, so we should use the management addresses return master_addresses if self.secondary_to_primary_ip_map: master_list_for_servers = list() master_address_list = master_addresses.split(',') for master in master_address_list: master_host, master_port = master.split(":") master_for_server = get_key(master_host) master_for_server = master_for_server + ":" + master_port master_list_for_servers.append(master_for_server) master_addresses = ','.join(master_list_for_servers) return master_addresses def run_yb_admin(self, cmd_line_args, run_ip=None): """ Runs the yb-admin utility from the configured location. :param cmd_line_args: command-line arguments to yb-admin :return: the standard output of yb-admin """ # Convert to list, since some callers like SequencedParallelCmd will send in tuples. cmd_line_args = list(cmd_line_args) # Specify cert file in case TLS is enabled. cert_flag = [] if self.args.certs_dir: cert_flag = ["--certs_dir_name", self.args.certs_dir] cmd_line_args = cert_flag + cmd_line_args master_addresses = self.get_master_addresses_for_servers() try: return self.run_tool(self.args.local_yb_admin_binary, self.args.remote_yb_admin_binary, ['--master_addresses', master_addresses], cmd_line_args, run_ip=run_ip) except Exception as ex: if "Invalid operation" in str(ex.output.decode('utf-8')): raise YbAdminOpNotSupportedException("yb-admin does not support command " "{}".format(cmd_line_args)) raise ex def get_ysql_dump_std_args(self): args = ['--host=' + self.get_ysql_ip()] if self.args.ysql_port: args += ['--port=' + self.args.ysql_port] return args def run_cli_tool(self, cli_tool_with_args): """ Runs a command line tool. :param cli_tool_with_args: command-line tool with arguments as a single string :return: the standard output of the tool """ run_at_ip = self.get_live_tserver_ip() if self.is_k8s() else None return self.run_tool(None, cli_tool_with_args, [], [], run_ip=run_at_ip) def run_dump_tool(self, local_tool_binary, remote_tool_binary, cmd_line_args): """ Runs the ysql_dump/ysql_dumpall utility from the configured location. :param cmd_line_args: command-line arguments to the tool :return: the standard output of the tool """ certs_env = {} if self.args.certs_dir: certs_env = { 'FLAGS_certs_dir': self.args.certs_dir, 'FLAGS_use_node_to_node_encryption': 'true', 'FLAGS_use_node_hostname_for_local_tserver': 'true', } run_at_ip = self.get_live_tserver_ip() if self.is_k8s() else None # If --ysql_enable_auth is passed, connect with ysql through the remote socket. local_binary = None if self.args.ysql_enable_auth else local_tool_binary master_addresses = self.get_master_addresses_for_servers() return self.run_tool(local_binary, remote_tool_binary, # Latest tools do not need '--masters', but keep it for backward # compatibility with older YB releases. 
self.get_ysql_dump_std_args() + ['--masters=' + master_addresses], cmd_line_args, run_ip=run_at_ip, env_vars=certs_env) def run_ysql_dump(self, cmd_line_args): return self.run_dump_tool(self.args.local_ysql_dump_binary, self.args.remote_ysql_dump_binary, cmd_line_args) def run_ysql_dumpall(self, cmd_line_args): return self.run_dump_tool(self.args.local_ysql_dumpall_binary, self.args.remote_ysql_dumpall_binary, cmd_line_args) def run_ysql_shell(self, cmd_line_args): """ Runs the ysql shell utility from the configured location. :param cmd_line_args: command-line arguments to ysql shell :return: the standard output of ysql shell """ run_at_ip = None if self.is_k8s(): run_at_ip = self.get_live_tserver_ip() return self.run_tool( self.args.local_ysql_shell_binary, self.args.remote_ysql_shell_binary, # Passing dbname template1 explicitly as ysqlsh fails to connect if # yugabyte database is deleted. We assume template1 will always be there # in ysqlsh. self.get_ysql_dump_std_args() + ['--dbname=template1'], cmd_line_args, run_ip=run_at_ip) def calc_size_in_bytes(self, snapshot_bucket): """ Fetches the backup object size by making a call to respective data source. :param snapshot_bucket: the bucket directory under which data directories were uploaded :return: backup size in bytes """ snapshot_filepath = self.snapshot_location(snapshot_bucket) backup_size = 0 backup_size_cmd = self.storage.backup_obj_size_cmd(snapshot_filepath) try: resp = self.run_ssh_cmd(backup_size_cmd, self.get_main_host_ip()) backup_size = int(resp.strip().split()[0]) logging.info('Backup size in bytes: {}'.format(backup_size)) except Exception as ex: logging.error( 'Failed to get backup size, cmd: {}, exception: {}'.format(backup_size_cmd, ex)) return backup_size def create_snapshot(self): """ Creates a new snapshot of the configured table. :return: snapshot id """ if self.args.table: yb_admin_args = ['create_snapshot'] + self.table_names_str(' ').split(' ') elif self.is_ysql_keyspace(): yb_admin_args = ['create_database_snapshot', self.args.keyspace[0]] else: yb_admin_args = ['create_keyspace_snapshot', self.args.keyspace[0]] output = self.run_yb_admin(yb_admin_args) # Ignores any string before and after the creation string + uuid. # \S\s matches every character including newlines. matched = STARTED_SNAPSHOT_CREATION_RE.match(output) if not matched: raise BackupException( "Couldn't parse create snapshot output! Expected " "'Started snapshot creation: <id>' in the end: {}".format(output)) snapshot_id = matched.group('uuid') if not UUID_ONLY_RE.match(snapshot_id): raise BackupException("Did not get a valid snapshot id out of yb-admin output:\n" + output) return snapshot_id def wait_for_snapshot(self, snapshot_id, op, timeout_sec, update_table_list, complete_state='COMPLETE'): """ Waits for the given snapshot to finish being created or restored. 
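        Illustrative call (values as used elsewhere in this script):
        wait_for_snapshot(snapshot_id, 'creating', CREATE_SNAPSHOT_TIMEOUT_SEC,
        update_table_list=True) polls 'yb-admin list_snapshots' every 5 seconds until the
        snapshot reaches the COMPLETE state, raising on FAILED or on timeout.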
""" start_time = time.time() snapshot_done = False snapshot_tables = [] snapshot_keyspaces = [] snapshot_table_uuids = [] failed_state = 'FAILED' yb_admin_args = ['list_snapshots'] if update_table_list: yb_admin_args += ['SHOW_DETAILS'] while time.time() - start_time < timeout_sec and not snapshot_done: output = self.run_yb_admin(yb_admin_args) # Expected format: # Snapshot UUID State # 0436035d-c4c5-40c6-b45b-19538849b0d9 COMPLETE # {"type":"NAMESPACE","id":"e4c5591446db417f83a52c679de03118","data":{"name":"a",...}} # {"type":"TABLE","id":"d9603c2cab0b48ec807936496ac0e70e","data":{"name":"t2",...}} # {"type":"TABLE","id":"28b5cebe9b0c4cdaa70ce9ceab31b1e5","data":{\ # "name":"t2idx","indexed_table_id":"d9603c2cab0b48ec807936496ac0e70e",...}} # c1ad61bf-a42b-4bbb-94f9-28516985c2c5 COMPLETE # ... keyspaces = {} for line in output.splitlines(): if not snapshot_done: if line.find(snapshot_id) == 0: snapshot_data = line.split() found_snapshot_id = snapshot_data[0] state = snapshot_data[1] if found_snapshot_id == snapshot_id: if state == complete_state: snapshot_done = True if not update_table_list: break elif state == failed_state: raise BackupException( 'Snapshot id %s, %s failed!' % (snapshot_id, op)) elif update_table_list: if line[0] != ' ': break loaded_json = json.loads(line) object_type = loaded_json['type'] object_id = loaded_json['id'] data = loaded_json['data'] if object_type == 'NAMESPACE' and object_id not in keyspaces: keyspace_prefix = 'ysql.' \ if data['database_type'] == 'YQL_DATABASE_PGSQL' else '' keyspaces[object_id] = keyspace_prefix + data['name'] elif object_type == 'TABLE': snapshot_keyspaces.append(keyspaces[data['namespace_id']]) snapshot_tables.append(data['name']) snapshot_table_uuids.append(object_id) if not snapshot_done: logging.info('Waiting for snapshot %s to complete...' % (op)) time.sleep(5) if not snapshot_done: raise BackupException('Timed out waiting for snapshot!') if update_table_list: if len(snapshot_tables) == 0: raise CompatibilityException("Created snapshot does not have tables.") if len(snapshot_keyspaces) != len(snapshot_tables): raise BackupException( "In the snapshot found {} keyspaces and {} tables. The numbers must be equal.". format(len(snapshot_keyspaces), len(snapshot_tables))) self.args.keyspace = snapshot_keyspaces self.args.table = snapshot_tables self.args.table_uuid = snapshot_table_uuids logging.info('Updated list of processing tables: ' + self.table_names_str()) logging.info('Snapshot id %s %s completed successfully' % (snapshot_id, op)) def find_tablet_leaders(self): """ Lists all tablets and their leaders for the table of interest. :return: a list of (tablet id, leader host, leader host region) tuples """ assert self.args.table num_loops = 0 while num_loops < LEADERS_SEARCHING_LOOP_MAX_RETRIES: logging.info('[app] Start searching for tablet leaders (try {})'.format(num_loops)) num_loops += 1 found_bad_ts = False tablet_leaders = [] for i in range(0, len(self.args.table)): # Don't call list_tablets on a parent colocated table. if is_parent_colocated_table_name(self.args.table[i]): continue if self.args.table_uuid: yb_admin_args = ['list_tablets', 'tableid.' 
+ self.args.table_uuid[i], '0'] else: yb_admin_args = ['list_tablets', self.args.keyspace[i], self.args.table[i], '0'] output = self.run_yb_admin(yb_admin_args) for line in output.splitlines(): if LEADING_UUID_RE.match(line): fields = split_by_tab(line) (tablet_id, tablet_leader_host_port) = (fields[0], fields[2]) (ts_host, ts_port) = tablet_leader_host_port.split(":") if self.secondary_to_primary_ip_map: ts_host = self.secondary_to_primary_ip_map[ts_host] need_region = self.per_region_backup() ts_config = self.ts_cfgs.setdefault(ts_host, YBTSConfig(self)) load_cfg = not ts_config.has_data_dirs() or\ (need_region and not ts_config.has_region()) if load_cfg: try: ts_config.load(ts_host, need_region) except Exception as ex: found_bad_ts = True logging.warning("Error in TS {} config loading. Retry tablet " "leaders searching. Error: {}".format( ts_host, str(ex))) break if need_region: region = ts_config.region() # Show the warning only once after this TS config loading. if load_cfg and region not in self.region_to_location: logging.warning("[app] Cannot find tablet {} location for region " "{}. Using default location instead.".format( tablet_id, region)) else: region = None tablet_leaders.append((tablet_id, ts_host, region)) if found_bad_ts: break if not found_bad_ts: return tablet_leaders logging.info("Sleep for {} seconds before the next tablet leaders searching round.". format(SLEEP_IN_LEADERS_SEARCHING_ROUND_SEC)) time.sleep(SLEEP_IN_LEADERS_SEARCHING_ROUND_SEC) raise BackupException( "Exceeded max number of retries for the tablet leaders searching loop ({})!". format(LEADERS_SEARCHING_LOOP_MAX_RETRIES)) def create_remote_tmp_dir(self, server_ip): if self.args.verbose: logging.info("Creating {} on server {}".format(self.get_tmp_dir(), server_ip)) atexit.register(self.cleanup_remote_temporary_directory, server_ip, self.get_tmp_dir()) return self.run_ssh_cmd(['mkdir', '-p', self.get_tmp_dir()], server_ip, upload_cloud_cfg=False) def upload_local_file_to_server(self, server_ip, local_file_path, dest_dir): if self.args.verbose: logging.info("Uploading {} to {} on {}".format(local_file_path, dest_dir, server_ip)) if dest_dir == self.get_tmp_dir(): output = self.create_remote_tmp_dir(server_ip) else: if self.args.verbose: logging.info("Creating {} on server {}".format(dest_dir, server_ip)) output = self.run_ssh_cmd(['mkdir', '-p', dest_dir], server_ip, upload_cloud_cfg=False) output += self.upload_file_from_local(server_ip, local_file_path, dest_dir) if self.args.verbose: logging.info("Uploading {} to {} on {} done: {}".format( local_file_path, dest_dir, server_ip, output)) return output def upload_cloud_config(self, server_ip): if server_ip not in self.server_ips_with_uploaded_cloud_cfg: self.server_ips_with_uploaded_cloud_cfg[server_ip] = self.upload_local_file_to_server( server_ip, self.cloud_cfg_file_path, self.get_tmp_dir()) def upload_file_from_local(self, dest_ip, src, dest): output = '' if self.is_k8s(): k8s_details = KubernetesDetails(dest_ip, self.k8s_pod_fqdn_to_cfg) output += self.run_program([ 'kubectl', 'cp', src, '{}/{}:{}'.format( k8s_details.namespace, k8s_details.pod_name, dest), '-c', k8s_details.container, '--no-preserve=true' ], env=k8s_details.env_config, num_retry=LOCAL_FILE_MAX_RETRIES) elif not self.args.no_ssh: ssh_key_path = self.args.ssh_key_path if self.ip_to_ssh_key_map: ssh_key_path = self.ip_to_ssh_key_map.get(dest_ip, ssh_key_path) if self.needs_change_user(): # TODO: Currently ssh_wrapper_with_sudo.sh will only change users to yugabyte, # not args.remote_user. 
ssh_wrapper_path = os.path.join(SCRIPT_DIR, 'ssh_wrapper_with_sudo.sh') output += self.run_program( ['scp', '-S', ssh_wrapper_path, '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null', '-i', ssh_key_path, '-P', self.args.ssh_port, '-q', src, '%s@%s:%s' % (self.args.ssh_user, dest_ip, dest)], num_retry=LOCAL_FILE_MAX_RETRIES) else: output += self.run_program( ['scp', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null', '-i', ssh_key_path, '-P', self.args.ssh_port, '-q', src, '%s@%s:%s' % (self.args.ssh_user, dest_ip, dest)], num_retry=LOCAL_FILE_MAX_RETRIES) return output def download_file_to_local(self, src_ip, src, dest): output = '' if self.is_k8s(): k8s_details = KubernetesDetails(src_ip, self.k8s_pod_fqdn_to_cfg) output += self.run_program([ 'kubectl', 'cp', '{}/{}:{}'.format( k8s_details.namespace, k8s_details.pod_name, src), dest, '-c', k8s_details.container, '--no-preserve=true' ], env=k8s_details.env_config, num_retry=LOCAL_FILE_MAX_RETRIES) elif not self.args.no_ssh: ssh_key_path = self.args.ssh_key_path if self.ip_to_ssh_key_map: ssh_key_path = self.ip_to_ssh_key_map.get(src_ip, ssh_key_path) if self.needs_change_user(): # TODO: Currently ssh_wrapper_with_sudo.sh will only change users to yugabyte, # not args.remote_user. ssh_wrapper_path = os.path.join(SCRIPT_DIR, 'ssh_wrapper_with_sudo.sh') output += self.run_program( ['scp', '-S', ssh_wrapper_path, '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null', '-i', ssh_key_path, '-P', self.args.ssh_port, '-q', '%s@%s:%s' % (self.args.ssh_user, src_ip, src), dest], num_retry=LOCAL_FILE_MAX_RETRIES) else: output += self.run_program( ['scp', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null', '-i', ssh_key_path, '-P', self.args.ssh_port, '-q', '%s@%s:%s' % (self.args.ssh_user, src_ip, src), dest], num_retry=LOCAL_FILE_MAX_RETRIES) return output def run_ssh_cmd(self, cmd, server_ip, upload_cloud_cfg=True, num_ssh_retry=3, env_vars={}): """ Runs the given command on the given remote server over SSH. :param cmd: either a string, or a list of arguments. In the latter case, each argument is properly escaped before being passed to ssh. :param server_ip: IP address or host name of the server to SSH into. :return: the standard output of the SSH command """ if upload_cloud_cfg and self.has_cfg_file(): self.upload_cloud_config(server_ip) if self.args.verbose: logging.info("Running command {} on server {}".format(cmd, server_ip)) if not isinstance(cmd, str): if len(cmd) == 1: cmd = cmd[0] else: cmd = quote_cmd_line_for_bash(cmd) num_retries = CLOUD_CMD_MAX_RETRIES if self.is_cloud() else num_ssh_retry if env_vars: # Add env vars to the front of the cmd shell-style like "FOO=bar ls -l" bash_env_args = " ".join(["{}={}".format(env_name, pipes.quote(env_val)) for (env_name, env_val) in env_vars.items()]) cmd = "{} {}".format(bash_env_args, cmd) if self.is_k8s(): k8s_details = KubernetesDetails(server_ip, self.k8s_pod_fqdn_to_cfg) return self.run_program([ 'kubectl', 'exec', '-t', '-n={}'.format(k8s_details.namespace), # For k8s, pick the first qualified name, if given a CNAME. 
k8s_details.pod_name, '-c', k8s_details.container, '--', 'bash', '-c', cmd], num_retry=num_retries, env=k8s_details.env_config) elif not self.args.no_ssh: ssh_key_path = self.args.ssh_key_path if self.ip_to_ssh_key_map: ssh_key_path = self.ip_to_ssh_key_map.get(server_ip, ssh_key_path) change_user_cmd = 'sudo -u %s' % (self.args.remote_user) \ if self.needs_change_user() else '' return self.run_program([ 'ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null', # Control flags here are for ssh multiplexing (reuse the same ssh connections). '-o', 'ControlMaster=auto', '-o', 'ControlPath=~/.ssh/ssh-%r@%h:%p', '-o', 'ControlPersist=1m', '-i', ssh_key_path, '-p', self.args.ssh_port, '-q', '%s@%s' % (self.args.ssh_user, server_ip), 'cd / && %s bash -c ' % (change_user_cmd) + pipes.quote(cmd)], num_retry=num_retries) else: return self.run_program(['bash', '-c', cmd]) def join_ssh_cmds(self, list_of_arg_tuples, handle): (tablet_id, tserver_ip) = handle # A list of commands: execute all of the tuples in a single control connection. joined_cmd = 'set -ex;' # Exit as soon as one command fails. for args_tuple in list_of_arg_tuples: assert isinstance(args_tuple, tuple) for args in args_tuple: if (isinstance(args, tuple)): joined_cmd += ' '.join(args) else: joined_cmd += ' ' + args joined_cmd += ';' # Return a single arg tuple with the entire joined command. # Convert to string to handle python2 converting to 'unicode' by default. return [(str(joined_cmd), tserver_ip)] def find_data_dirs(self, tserver_ip): """ Finds the data directories on the given tserver. This queries the /varz endpoint of tserver and extracts --fs_data_dirs flag from response. :param tserver_ip: tablet server ip :return: a list of top-level YB data directories """ ts_config = self.ts_cfgs.setdefault(tserver_ip, YBTSConfig(self)) if not ts_config.has_data_dirs(): try: ts_config.load(tserver_ip) except Exception as ex: logging.warning("Error in TS {} config loading. Skip TS in this " "downloading round. Error: {}".format(tserver_ip, str(ex))) return None return ts_config.data_dirs() def generate_snapshot_dirs(self, data_dir_by_tserver, snapshot_id, tablets_by_tserver_ip, table_ids): """ Generate snapshot directories under the given data directory for the given snapshot id on the given tservers. :param data_dir_by_tserver: data directory on tservers :param snapshot_id: snapshot UUID :param tablets_by_tserver_ip: a map from tserver ip address to all tablets of our table that it is responsible for. :param table_ids: new table UUIDs for all tables :return: a three-level map: tablet server ip address to a tablet id to all snapshot directories for that tablet id that we found. """ tserver_ip_to_tablet_id_to_snapshot_dirs = {} deleted_tablets_by_tserver_ip = {} tserver_ip_to_tablet_dirs = {} for tserver_ip in tablets_by_tserver_ip: tserver_ip_to_tablet_dirs.setdefault(tserver_ip, []) for table_id in table_ids: for tserver_ip in tablets_by_tserver_ip: data_dirs = data_dir_by_tserver[tserver_ip] # In case of TS config loading error 'data_dirs' is None. 
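                # For reference, a successfully loaded value is a small list of the
                # tserver's --fs_data_dirs entries, e.g. ['/mnt/d0', '/mnt/d1']
                # (paths hypothetical).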
if data_dirs is None: logging.warning("No data directories on tablet " "server '{}'.".format(tserver_ip)) continue tablet_dirs = tserver_ip_to_tablet_dirs[tserver_ip] for data_dir in data_dirs: # Find all tablets for this table on this TS in this data_dir: output = self.run_ssh_cmd( ['find', data_dir, '!', '-readable', '-prune', '-o', '-name', TABLET_MASK, '-and', '-wholename', TABLET_DIR_GLOB.format(table_id), '-print'], tserver_ip) tablet_dirs += [line.strip() for line in output.split("\n") if line.strip()] if self.args.verbose: msg = "Found tablet directories for table '{}' on tablet server '{}': {}" logging.info(msg.format(table_id, tserver_ip, tablet_dirs)) if not tablet_dirs: logging.warning("No tablet directory found for table '{}' on " "tablet server '{}'.".format(table_id, tserver_ip)) for tserver_ip in tablets_by_tserver_ip: tablets = tablets_by_tserver_ip[tserver_ip] tablet_dirs = tserver_ip_to_tablet_dirs[tserver_ip] tablet_id_to_snapshot_dirs = \ tserver_ip_to_tablet_id_to_snapshot_dirs.setdefault(tserver_ip, {}) deleted_tablets = deleted_tablets_by_tserver_ip.setdefault(tserver_ip, set()) tablet_dir_by_id = {} for tablet_dir in tablet_dirs: tablet_dir_by_id[tablet_dir[-TABLET_UUID_LEN:]] = tablet_dir for tablet_id in tablets: if tablet_id in tablet_dir_by_id: # Tablet was found in a data dir - use this path. snapshot_dir = tablet_dir_by_id[tablet_id] + '.snapshots/' + snapshot_id tablet_id_to_snapshot_dirs.setdefault(tablet_id, set()).add(snapshot_dir) else: # Tablet was not found. That means that the tablet was deleted from this TS. # Let's ignore the tablet and allow retry-loop to find and process new # tablet location on the next downloading round. # Second case: the TS config is not available (so data directories are not # known). Retry TS config downloading on the next downloading round. deleted_tablets.add(tablet_id) if self.args.verbose: logging.info("Tablet '{}' directory was not found on " "tablet server '{}'.".format(tablet_id, tserver_ip)) if self.args.verbose: logging.info("Downloading list for tablet server '{}': {}".format( tserver_ip, tablet_id_to_snapshot_dirs)) if deleted_tablets: logging.info("No snapshot directories generated on tablet server '{}' " "for tablet ids: '{}'".format(tserver_ip, deleted_tablets)) return (tserver_ip_to_tablet_id_to_snapshot_dirs, deleted_tablets_by_tserver_ip) def find_snapshot_directories(self, data_dir, snapshot_id, tserver_ip): """ Find snapshot directories under the given data directory for the given snapshot id on the given tserver. :param data_dir: top-level data directory :param snapshot_id: snapshot UUID :param tserver_ip: tablet server IP or host name :return: a list of absolute paths of remote snapshot directories for the given snapshot """ output = self.run_ssh_cmd( ['find', data_dir, '!', '-readable', '-prune', '-o', '-name', snapshot_id, '-and', '-wholename', SNAPSHOT_DIR_GLOB, '-print'], tserver_ip) return [line.strip() for line in output.split("\n") if line.strip()] def upload_snapshot_directories(self, tablet_leaders, snapshot_id, snapshot_bucket): """ Uploads snapshot directories from all tablet servers hosting our table to subdirectories of the given target backup directory. 
:param tablet_leaders: a list of (tablet_id, tserver_ip, tserver_region) tuples :param snapshot_id: self-explanatory :param snapshot_bucket: the bucket directory under which to upload the data directories """ with terminating(ThreadPool(self.args.parallelism)) as pool: self.pools.append(pool) tablets_by_leader_ip = {} location_by_tablet = {} for (tablet_id, leader_ip, tserver_region) in tablet_leaders: tablets_by_leader_ip.setdefault(leader_ip, set()).add(tablet_id) location_by_tablet[tablet_id] = self.snapshot_location(snapshot_bucket, tserver_region) tserver_ips = sorted(tablets_by_leader_ip.keys()) data_dir_by_tserver = {} for tserver_ip in tserver_ips: data_dir_by_tserver[tserver_ip] = copy.deepcopy( # Data dirs must be loaded by the moment in find_tablet_leaders(). self.ts_cfgs[tserver_ip].data_dirs()) # Upload config to every TS here to prevent parallel uploading of the config # in 'find_snapshot_directories' below. if self.has_cfg_file(): SingleArgParallelCmd(self.upload_cloud_config, tserver_ips).run(pool) parallel_find_snapshots = MultiArgParallelCmd(self.find_snapshot_directories) tservers_processed = [] while len(tserver_ips) > len(tservers_processed): for tserver_ip in list(tserver_ips): if tserver_ip not in tservers_processed: data_dirs = data_dir_by_tserver[tserver_ip] if len(data_dirs) > 0: data_dir = data_dirs[0] parallel_find_snapshots.add_args(data_dir, snapshot_id, tserver_ip) data_dirs.remove(data_dir) if len(data_dirs) == 0: tservers_processed += [tserver_ip] else: tservers_processed += [tserver_ip] find_snapshot_dir_results = parallel_find_snapshots.run(pool) leader_ip_to_tablet_id_to_snapshot_dirs = self.rearrange_snapshot_dirs( find_snapshot_dir_results, snapshot_id, tablets_by_leader_ip) if (self.args.TEST_sleep_after_find_snapshot_dirs): logging.info("Sleeping to allow a tablet split to take place and delete snapshot " "dirs.") time.sleep(TEST_SLEEP_AFTER_FIND_SNAPSHOT_DIRS_SEC) parallel_uploads = SequencedParallelCmd( self.run_ssh_cmd, preprocess_args_fn=self.join_ssh_cmds) self.prepare_cloud_ssh_cmds( parallel_uploads, leader_ip_to_tablet_id_to_snapshot_dirs, location_by_tablet, snapshot_id, tablets_by_leader_ip, upload=True, snapshot_metadata=None) # Run a sequence of steps for each tablet, handling different tablets in parallel. parallel_uploads.run(pool) def rearrange_snapshot_dirs( self, find_snapshot_dir_results, snapshot_id, tablets_by_tserver_ip): """ :param find_snapshot_dir_results: a map from (data_dir, snapshot_id, tserver_ip) tuples to the list of snapshot directories under that data directory on that tserver. (snapshot_id here is always the single snapshot_id we're dealing with.) :param snapshot_id: the snapshot id! :param tablets_by_tserver_ip: a map from tserver ip address to all tablets of our table that it is responsible for. :return: a three-level map: tablet server ip address to a tablet id to all snapshot directories for that tablet id that we found. 
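        Illustrative shape of the result (IP, ids and paths are hypothetical):
            {'10.0.0.5': {'<tablet_id>': {'.../tablet-<tablet_id>.snapshots/<snapshot_id>'}}}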
""" tserver_ip_to_tablet_id_to_snapshot_dirs = {} for key in find_snapshot_dir_results: (data_dir, snapshot_id_unused, tserver_ip) = key snapshot_dirs = find_snapshot_dir_results[key] assert snapshot_id_unused == snapshot_id tablet_id_to_snapshot_dirs =\ tserver_ip_to_tablet_id_to_snapshot_dirs.setdefault(tserver_ip, {}) for snapshot_dir in snapshot_dirs: suffix_match = SNAPSHOT_DIR_SUFFIX_RE.match(snapshot_dir) if not suffix_match: logging.warning( ("Could not parse tablet id and snapshot id out of snapshot " "directory: '{}'").format(snapshot_dir)) continue if snapshot_id != suffix_match.group(2): raise BackupException( "Snapshot directory does not end with snapshot id: '{}'".format( snapshot_dir)) tablet_id = suffix_match.group(1) # During CREATE BACKUP only the LEADER tablet replicas are needed. # So, ignore the following warning for FOLLOWERS. It's expected because # FOLLOWERS replicas are not in the 'tablets_by_tserver_ip' list # (the list 'tablets_by_tserver_ip' contains only the LEADER replicas). if tablet_id not in tablets_by_tserver_ip[tserver_ip]: logging.warning( ("Found a snapshot directory '{}' on tablet server '{}' that is not " "present in the list of tablets we are interested in that have this " "tserver hosting it ({}), skipping.").format( snapshot_dir, tserver_ip, ", ".join(sorted(tablets_by_tserver_ip[tserver_ip])))) continue tablet_id_to_snapshot_dirs.setdefault(tablet_id, set()).add(snapshot_dir) return tserver_ip_to_tablet_id_to_snapshot_dirs def create_checksum_cmd_not_quoted(self, file_path, checksum_file_path): prefix = pipes.quote(SHA_TOOL_PATH) if not self.args.mac else '/usr/bin/shasum' return "{} {} > {}".format(prefix, file_path, checksum_file_path) def create_checksum_cmd(self, file_path, checksum_file_path): return self.create_checksum_cmd_not_quoted( pipes.quote(file_path), pipes.quote(checksum_file_path)) def create_checksum_cmd_for_dir(self, dir_path): return self.create_checksum_cmd_not_quoted( os.path.join(pipes.quote(strip_dir(dir_path)), '[!i]*'), pipes.quote(checksum_path(strip_dir(dir_path)))) def prepare_upload_command(self, parallel_commands, snapshot_filepath, tablet_id, tserver_ip, snapshot_dir): """ Prepares the command to upload the backup files to backup location from the tservers. :param parallel_commands: result parallel commands to run. :param snapshot_filepath: Filepath/cloud url where the backup must be stored. :param tablet_id: tablet_id for the tablet whose data we would like to upload. :param tserver_ip: tserver ip from which the data needs to be uploaded. :param snapshot_dir: The snapshot directory on the tserver from which we need to upload. 
""" target_tablet_filepath = os.path.join(snapshot_filepath, 'tablet-%s' % (tablet_id)) if not self.args.disable_checksums: logging.info('Creating check-sum for %s on tablet server %s' % ( snapshot_dir, tserver_ip)) create_checksum_cmd = self.create_checksum_cmd_for_dir(snapshot_dir) target_checksum_filepath = checksum_path(target_tablet_filepath) snapshot_dir_checksum = checksum_path(strip_dir(snapshot_dir)) logging.info('Uploading %s from tablet server %s to %s URL %s' % ( snapshot_dir_checksum, tserver_ip, self.args.storage_type, target_checksum_filepath)) upload_checksum_cmd = self.storage.upload_file_cmd( snapshot_dir_checksum, target_checksum_filepath) target_filepath = target_tablet_filepath + '/' logging.info('Uploading %s from tablet server %s to %s URL %s' % ( snapshot_dir, tserver_ip, self.args.storage_type, target_filepath)) upload_tablet_cmd = self.storage.upload_dir_cmd(snapshot_dir, target_filepath) # Commands to be run on TSes over ssh for uploading the tablet backup. if not self.args.disable_checksums: # 1. Create check-sum file (via sha256sum tool). parallel_commands.add_args(create_checksum_cmd) # 2. Upload check-sum file. parallel_commands.add_args(tuple(upload_checksum_cmd)) # 3. Upload tablet folder. parallel_commands.add_args(tuple(upload_tablet_cmd)) def prepare_download_command(self, parallel_commands, tablet_id, tserver_ip, snapshot_dir, snapshot_metadata): """ Prepares the command to download the backup files to the tservers. :param parallel_commands: result parallel commands to run. :param tablet_id: tablet_id for the tablet whose data we would like to download. :param tserver_ip: tserver ip from which the data needs to be downloaded. :param snapshot_dir: The snapshot directory on the tserver to which we need to download. """ if tablet_id not in snapshot_metadata['tablet']: raise BackupException('Could not find metadata for tablet id {}'.format(tablet_id)) old_tablet_id = snapshot_metadata['tablet'][tablet_id] snapshot_filepath = snapshot_metadata['tablet_location'][old_tablet_id] source_filepath = os.path.join(snapshot_filepath, 'tablet-%s/' % (old_tablet_id)) snapshot_dir_tmp = strip_dir(snapshot_dir) + '.tmp/' logging.info('Downloading %s from %s to %s on tablet server %s' % (source_filepath, self.args.storage_type, snapshot_dir_tmp, tserver_ip)) # Download the data to a tmp directory and then move it in place. cmd = self.storage.download_dir_cmd(source_filepath, snapshot_dir_tmp) source_checksum_filepath = checksum_path( os.path.join(snapshot_filepath, 'tablet-%s' % (old_tablet_id))) snapshot_dir_checksum = checksum_path_downloaded(strip_dir(snapshot_dir)) cmd_checksum = self.storage.download_file_cmd( source_checksum_filepath, snapshot_dir_checksum) create_checksum_cmd = self.create_checksum_cmd_for_dir(snapshot_dir_tmp) # Throw an error on failed checksum comparison, this will trigger this entire command # chain to be retried. check_checksum_cmd = compare_checksums_cmd( snapshot_dir_checksum, checksum_path(strip_dir(snapshot_dir_tmp)), error_on_failure=True) rmcmd = ['rm', '-rf', snapshot_dir] mkdircmd = ['mkdir', '-p', snapshot_dir_tmp] mvcmd = ['mv', snapshot_dir_tmp, snapshot_dir] # Commands to be run over ssh for downloading the tablet backup. # 1. Clean-up: delete target tablet folder. parallel_commands.add_args(tuple(rmcmd)) # 2. Create temporary snapshot dir. parallel_commands.add_args(tuple(mkdircmd)) # 3. Download tablet folder. parallel_commands.add_args(tuple(cmd)) if not self.args.disable_checksums: # 4. Download check-sum file. 
parallel_commands.add_args(tuple(cmd_checksum)) # 5. Create new check-sum file. parallel_commands.add_args(create_checksum_cmd) # 6. Compare check-sum files. parallel_commands.add_args(check_checksum_cmd) # 7. Move the backup in place. parallel_commands.add_args(tuple(mvcmd)) def prepare_cloud_ssh_cmds( self, parallel_commands, tserver_ip_to_tablet_id_to_snapshot_dirs, location_by_tablet, snapshot_id, tablets_by_tserver_ip, upload, snapshot_metadata): """ Prepares cloud_command-over-ssh command lines for uploading the snapshot. :param parallel_commands: result parallel commands to run. :param tserver_ip_to_tablet_id_to_snapshot_dirs: the three-level map as returned by rearrange_snapshot_dirs. :param location_by_tablet: target cloud URL for every tablet to create snapshot directories under :param snapshot_id: the snapshot id we're dealing with :param tablets_by_tserver_ip: a map from tserver ip to all tablet ids that tserver is the responsible for. :param upload: True if we are uploading files to cloud, false if we are downloading files from cloud. :param snapshot_metadata: In case of downloading files from cloud to restore a backup, this is the snapshot metadata stored in cloud for the backup. """ tserver_ip_to_tablet_ids_with_data_dirs = {} for tserver_ip in tserver_ip_to_tablet_id_to_snapshot_dirs: tserver_ip_to_tablet_ids_with_data_dirs.setdefault(tserver_ip, set()) tservers_processed = [] while len(tserver_ip_to_tablet_id_to_snapshot_dirs) > len(tservers_processed): for tserver_ip in list(tserver_ip_to_tablet_id_to_snapshot_dirs): if tserver_ip not in tservers_processed: tablet_id_to_snapshot_dirs =\ tserver_ip_to_tablet_id_to_snapshot_dirs[tserver_ip] tablet_ids_with_data_dirs = tserver_ip_to_tablet_ids_with_data_dirs[tserver_ip] if len(tablet_id_to_snapshot_dirs) > 0: tablet_id = list(tablet_id_to_snapshot_dirs)[0] snapshot_dirs = tablet_id_to_snapshot_dirs[tablet_id] if len(snapshot_dirs) > 1: raise BackupException( ('Found multiple snapshot directories on tserver {} for snapshot ' 'id {}: {}').format(tserver_ip, snapshot_id, snapshot_dirs)) assert len(snapshot_dirs) == 1 snapshot_dir = list(snapshot_dirs)[0] + '/' # Pass in the tablet_id and tserver_ip, so if we fail then we know on which # tserver and for what tablet we failed on. 
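                        # Illustrative flow (ids hypothetical):
                        #   start_command(('c0a1...', '10.0.0.5'))
                        # opens a per-tablet sequence of ssh steps; prepare_upload_command /
                        # prepare_download_command append to it, and join_ssh_cmds later folds
                        # the sequence into a single 'set -ex; ...' command for that tablet.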
parallel_commands.start_command((tablet_id, tserver_ip)) if upload: self.prepare_upload_command( parallel_commands, location_by_tablet[tablet_id], tablet_id, tserver_ip, snapshot_dir) else: self.prepare_download_command( parallel_commands, tablet_id, tserver_ip, snapshot_dir, snapshot_metadata) tablet_ids_with_data_dirs.add(tablet_id) tablet_id_to_snapshot_dirs.pop(tablet_id) if len(tablet_id_to_snapshot_dirs) == 0: tservers_processed += [tserver_ip] if tablet_ids_with_data_dirs != tablets_by_tserver_ip[tserver_ip]: for possible_tablet_id in tablets_by_tserver_ip[tserver_ip]: if possible_tablet_id not in tablet_ids_with_data_dirs: logging.error( ("No snapshot directory found for tablet id '{}' on " "tablet server '{}'.").format( possible_tablet_id, tserver_ip)) raise BackupException("Did not find snapshot directories for some " + "tablets on tablet server " + tserver_ip) else: tservers_processed += [tserver_ip] def get_tmp_dir(self): if not self.tmp_dir_name: tmp_dir = '/tmp/yb_backup_' + random_string(16) atexit.register(self.cleanup_temporary_directory, tmp_dir) self.run_program(['mkdir', '-p', tmp_dir]) self.tmp_dir_name = tmp_dir return self.tmp_dir_name def upload_encryption_key_file(self): key_file = os.path.basename(self.args.backup_keys_source) key_file_dest = os.path.join("/".join(self.args.backup_location.split("/")[:-1]), key_file) if self.is_nfs(): # Upload keys file from local to NFS mount path on DB node. self.upload_local_file_to_server(self.get_main_host_ip(), self.args.backup_keys_source, os.path.dirname(key_file_dest)) elif self.is_az(): self.run_program(self.storage.upload_file_cmd(self.args.backup_keys_source, key_file_dest, True)) else: self.run_program(self.storage.upload_file_cmd(self.args.backup_keys_source, key_file_dest)) self.run_program(["rm", self.args.backup_keys_source]) def download_file_from_server(self, server_ip, file_path, dest_dir): if self.args.verbose: logging.info("Downloading {} to local dir {} from {}".format( file_path, dest_dir, server_ip)) output = self.download_file_to_local(server_ip, file_path, dest_dir) if self.args.verbose: logging.info("Downloading {} to local dir {} from {} done: {}".format( file_path, dest_dir, server_ip, output)) def download_encryption_key_file(self): key_file = os.path.basename(self.args.restore_keys_destination) key_file_src = os.path.join("/".join(self.args.backup_location.split("/")[:-1]), key_file) if self.is_nfs(): # Download keys file from NFS mount path on DB node to local. self.download_file_from_server(self.get_main_host_ip(), key_file_src, self.args.restore_keys_destination) elif self.is_az(): self.run_program( self.storage.download_file_cmd(key_file_src, self.args.restore_keys_destination, True) ) else: self.run_program( self.storage.download_file_cmd(key_file_src, self.args.restore_keys_destination) ) def delete_bucket_obj(self, backup_path): logging.info("[app] Removing backup directory '{}'".format(backup_path)) del_cmd = self.storage.delete_obj_cmd(backup_path) if self.is_nfs(): self.run_ssh_cmd(' '.join(del_cmd), self.get_leader_master_ip()) else: self.run_program(del_cmd) def find_nfs_storage(self, tserver_ip): """ Finds the NFS storage path mounted on the given tserver. 
if we don't find storage path mounted on given tserver IP we raise exception :param tserver_ip: tablet server ip """ try: self.run_ssh_cmd(['ls', self.args.nfs_storage_path], tserver_ip) except Exception as ex: raise BackupException( ('Did not find nfs backup storage path: %s mounted on tablet server %s' % (self.args.nfs_storage_path, tserver_ip))) def upload_metadata_and_checksum(self, src_path, dest_path): """ Upload metadata file and checksum file to the target backup location. :param src_path: local metadata file path :param dest_path: destination metadata file path """ src_checksum_path = checksum_path(src_path) dest_checksum_path = checksum_path(dest_path) if self.args.local_yb_admin_binary: if not os.path.exists(src_path): raise BackupException( "Could not find metadata file at '{}'".format(src_path)) if not self.args.disable_checksums: logging.info('Creating check-sum for %s' % (src_path)) self.run_program( self.create_checksum_cmd(src_path, src_checksum_path)) logging.info('Uploading %s to %s' % (src_checksum_path, dest_checksum_path)) self.run_program( self.storage.upload_file_cmd(src_checksum_path, dest_checksum_path)) logging.info('Uploading %s to %s' % (src_path, dest_path)) self.run_program( self.storage.upload_file_cmd(src_path, dest_path)) else: server_ip = self.get_main_host_ip() if not self.args.disable_checksums: logging.info('Creating check-sum for %s on tablet server %s' % ( src_path, server_ip)) self.run_ssh_cmd( self.create_checksum_cmd(src_path, src_checksum_path), server_ip) logging.info('Uploading %s from tablet server %s to %s URL %s' % ( src_checksum_path, server_ip, self.args.storage_type, dest_checksum_path)) self.run_ssh_cmd( self.storage.upload_file_cmd(src_checksum_path, dest_checksum_path), server_ip) logging.info('Uploading %s from tablet server %s to %s URL %s' % ( src_path, server_ip, self.args.storage_type, dest_path)) self.run_ssh_cmd( self.storage.upload_file_cmd(src_path, dest_path), server_ip) def get_ysql_catalog_version(self): """ Get current YSQL Catalog version. :return: YSQL Catalog version """ output = self.run_yb_admin(['ysql_catalog_version']) matched = YSQL_CATALOG_VERSION_RE.match(output) if not matched: raise BackupException( "Couldn't parse ysql_catalog_version output! 
Expected " "'Version: <number>' in the end: {}".format(output)) return matched.group('version') def create_metadata_files(self): """ :return: snapshot_id and list of sql_dump files """ snapshot_id = None dump_files = [] pg_based_backup = self.args.pg_based_backup if self.is_ysql_keyspace(): sql_tbsp_dump_path = os.path.join( self.get_tmp_dir(), SQL_TBSP_DUMP_FILE_NAME) if self.args.use_tablespaces else None sql_dump_path = os.path.join(self.get_tmp_dir(), SQL_DUMP_FILE_NAME) db_name = keyspace_name(self.args.keyspace[0]) ysql_dump_args = ['--include-yb-metadata', '--serializable-deferrable', '--create', '--schema-only', '--dbname=' + db_name, '--file=' + sql_dump_path] if sql_tbsp_dump_path: logging.info("[app] Creating ysql dump for tablespaces to {}".format( sql_tbsp_dump_path)) self.run_ysql_dumpall(['--tablespaces-only', '--file=' + sql_tbsp_dump_path]) dump_files.append(sql_tbsp_dump_path) else: ysql_dump_args.append('--no-tablespaces') logging.info("[app] Creating ysql dump for DB '{}' to {}".format( db_name, sql_dump_path)) self.run_ysql_dump(ysql_dump_args) dump_files.append(sql_dump_path) if pg_based_backup: sql_data_dump_path = os.path.join(self.get_tmp_dir(), SQL_DATA_DUMP_FILE_NAME) logging.info("[app] Performing ysql_dump based backup!") self.run_ysql_dump( ['--include-yb-metadata', '--serializable-deferrable', '--data-only', '--dbname=' + db_name, '--file=' + sql_data_dump_path]) dump_files.append(sql_data_dump_path) if not self.args.snapshot_id and not pg_based_backup: snapshot_id = self.create_snapshot() logging.info("Snapshot started with id: %s" % snapshot_id) # TODO: Remove the following try-catch for compatibility to un-relax the code, after # we ensure nobody uses versions < v2.1.4 (after all move to >= v2.1.8). try: # With 'update_table_list=True' it runs: 'yb-admin list_snapshots SHOW_DETAILS' # to get updated list of backed up namespaces and tables. Note that the last # argument 'SHOW_DETAILS' is not supported in old YB versions (< v2.1.4). self.wait_for_snapshot(snapshot_id, 'creating', CREATE_SNAPSHOT_TIMEOUT_SEC, update_table_list=True) except CompatibilityException as ex: logging.info("Ignoring the exception in the compatibility mode: {}".format(ex)) # In the compatibility mode repeat the command in old style # (without the new command line argument 'SHOW_DETAILS'). # With 'update_table_list=False' it runs: 'yb-admin list_snapshots'. self.wait_for_snapshot(snapshot_id, 'creating', CREATE_SNAPSHOT_TIMEOUT_SEC, update_table_list=False) if not self.args.no_snapshot_deleting: logging.info("Snapshot %s will be deleted at exit...", snapshot_id) atexit.register(self.delete_created_snapshot, snapshot_id) return (snapshot_id, dump_files) def create_and_upload_metadata_files(self, snapshot_bucket): """ Generates and uploads metadata files describing the given snapshot to the target backup location. 
:param snapshot_bucket: Backup subfolder under which to create a path :return: snapshot id """ if self.args.snapshot_id: logging.info("Using existing snapshot ID: '{}'".format(self.args.snapshot_id)) snapshot_id = self.args.snapshot_id if self.args.local_yb_admin_binary: self.run_program(['mkdir', '-p', self.get_tmp_dir()]) else: self.create_remote_tmp_dir(self.get_main_host_ip()) is_ysql = self.is_ysql_keyspace() if is_ysql: start_version = self.get_ysql_catalog_version() stored_keyspaces = self.args.keyspace stored_tables = self.args.table stored_table_uuids = self.args.table_uuid num_retry = CREATE_METAFILES_MAX_RETRIES while num_retry > 0: num_retry = num_retry - 1 (snapshot_id, dump_files) = self.create_metadata_files() if is_ysql: final_version = self.get_ysql_catalog_version() logging.info('[app] YSQL catalog versions: {} - {}'.format( start_version, final_version)) if final_version == start_version: break # Ok. No table schema changes during meta data creating. else: # wait_for_snapshot() can update the variables - restore them back. self.args.keyspace = stored_keyspaces self.args.table = stored_tables self.args.table_uuid = stored_table_uuids start_version = final_version logging.info('[app] Retry creating metafiles ({} retries left)'.format( num_retry)) else: break # Ok. No need to retry for YCQL. if num_retry == 0: raise BackupException("Couldn't create metafiles due to catalog changes") snapshot_filepath = self.snapshot_location(snapshot_bucket) if snapshot_id: metadata_path = os.path.join(self.get_tmp_dir(), METADATA_FILE_NAME) logging.info('[app] Exporting snapshot {} to {}'.format(snapshot_id, metadata_path)) self.run_yb_admin(['export_snapshot', snapshot_id, metadata_path], run_ip=self.get_main_host_ip()) self.upload_metadata_and_checksum(metadata_path, os.path.join(snapshot_filepath, METADATA_FILE_NAME)) for file_path in dump_files: self.upload_metadata_and_checksum( file_path, os.path.join(snapshot_filepath, os.path.basename(file_path))) return snapshot_id def create_and_upload_manifest(self, tablet_leaders, snapshot_bucket, pg_based_backup): """ Generates and uploads metadata file describing the backup properties. :param tablet_leaders: a list of (tablet_id, tserver_ip, tserver_region) tuples :param snapshot_bucket: the bucket directory under which to upload the data directories """ self.manifest.init(snapshot_bucket, pg_based_backup) if not pg_based_backup: self.manifest.init_locations(tablet_leaders, snapshot_bucket) # Create Manifest file and upload it to tmp dir on the main host. metadata_path = os.path.join(self.get_tmp_dir(), MANIFEST_FILE_NAME) self.manifest.save_into_file(metadata_path) os.chmod(metadata_path, 0o400) if not self.args.local_yb_admin_binary: self.upload_local_file_to_server( self.get_main_host_ip(), metadata_path, self.get_tmp_dir()) # Upload Manifest and checksum file from the main host to the target backup path. 
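        # Illustrative target (location hypothetical): with a backup location of
        # 's3://bucket/backup-1', the file lands at 's3://bucket/backup-1/<MANIFEST_FILE_NAME>'
        # together with its checksum file (see upload_metadata_and_checksum), unless
        # checksums are disabled.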
snapshot_filepath = self.snapshot_location(snapshot_bucket) target_filepath = os.path.join(snapshot_filepath, MANIFEST_FILE_NAME) self.upload_metadata_and_checksum(metadata_path, target_filepath) def bg_disable_splitting(self): while (True): logging.info("Disabling splitting for {} milliseconds.".format(DISABLE_SPLITTING_MS)) self.disable_tablet_splitting() time.sleep(DISABLE_SPLITTING_FREQ_SEC) def disable_tablet_splitting(self): self.run_yb_admin(["disable_tablet_splitting", str(DISABLE_SPLITTING_MS), "yb_backup"]) def backup_table(self): """ Creates a backup of the given table by creating a snapshot and uploading it to the provided backup location. """ if not self.args.keyspace: raise BackupException('Need to specify --keyspace') if self.args.table: if self.is_ysql_keyspace(): raise BackupException( "Back up for YSQL is only supported at the database level, " "and not at the table level.") logging.info('[app] Backing up tables: {} to {}'.format( self.table_names_str(), self.args.backup_location)) else: if len(self.args.keyspace) != 1: raise BackupException( "Only one keyspace supported. Found {} --keyspace keys.". format(len(self.args.keyspace))) logging.info('[app] Backing up keyspace: {} to {}'.format( self.args.keyspace[0], self.args.backup_location)) if self.per_region_backup(): logging.info('[app] Geo-partitioned backup for regions: {}'.format(self.args.region)) if self.args.no_auto_name: snapshot_bucket = None else: if self.args.table: snapshot_bucket = 'table-{}'.format(self.table_names_str('.', '-')) else: snapshot_bucket = 'keyspace-{}'.format(self.args.keyspace[0]) if self.args.table_uuid: if len(self.args.table) != len(self.args.table_uuid): raise BackupException( "Found {} --table_uuid keys and {} --table keys. Number of these keys " "must be equal.".format(len(self.args.table_uuid), len(self.args.table))) snapshot_bucket = '{}-{}'.format(snapshot_bucket, '-'.join(self.args.table_uuid)) if not self.args.do_not_disable_splitting: disable_splitting_supported = False try: self.disable_tablet_splitting() disable_splitting_supported = True except YbAdminOpNotSupportedException as ex: # Continue if the disable splitting APIs are not supported, otherwise re-raise and # crash. logging.warning("disable_tablet_splitting operation was not found in yb-admin.") if disable_splitting_supported: disable_splitting_thread = threading.Thread(target=self.bg_disable_splitting) disable_splitting_thread.start() for i in range(IS_SPLITTING_DISABLED_MAX_RETRIES): # Wait for existing splits to complete. 
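                    # Sketch of the check below (output format assumed from the string match):
                    #   $ yb-admin is_tablet_splitting_complete
                    #   is_tablet_splitting_complete: true
                    # Anything else keeps waiting, for up to IS_SPLITTING_DISABLED_MAX_RETRIES
                    # attempts with a 5 second pause between them.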
output = self.run_yb_admin(["is_tablet_splitting_complete"]) if ("is_tablet_splitting_complete: true" in output): break logging.info("Waiting for existing tablet splits to complete.") time.sleep(5) else: raise BackupException('Splitting did not complete in time.') self.timer.log_new_phase("Create and upload snapshot metadata") snapshot_id = self.create_and_upload_metadata_files(snapshot_bucket) pg_based_backup = snapshot_id is None snapshot_locations = {} if self.args.upload: if self.args.backup_keys_source: self.upload_encryption_key_file() snapshot_filepath = self.snapshot_location(snapshot_bucket) snapshot_locations["snapshot_url"] = snapshot_filepath if pg_based_backup: self.create_and_upload_manifest(None, snapshot_bucket, pg_based_backup) logging.info("[app] PG based backup successful!") else: self.timer.log_new_phase("Find tablet leaders") tablet_leaders = self.find_tablet_leaders() self.timer.log_new_phase("Upload snapshot directories") self.upload_snapshot_directories(tablet_leaders, snapshot_id, snapshot_bucket) self.create_and_upload_manifest(tablet_leaders, snapshot_bucket, pg_based_backup) logging.info("[app] Backed up tables {} to {} successfully!".format( self.table_names_str(), snapshot_filepath)) if self.per_region_backup(): for region in self.args.region: regional_filepath = self.snapshot_location(snapshot_bucket, region) logging.info("[app] Path for region '{}': {}".format( region, regional_filepath)) snapshot_locations[region] = regional_filepath snapshot_locations["backup_size_in_bytes"] = self.manifest.get_backup_size() else: snapshot_locations["snapshot_url"] = "UPLOAD_SKIPPED" print(json.dumps(snapshot_locations)) def download_file(self, src_path, target_path): """ Download the file from the external source to the local temporary folder. """ if self.args.local_yb_admin_binary: if not self.args.disable_checksums: checksum_downloaded = checksum_path_downloaded(target_path) self.run_program( self.storage.download_file_cmd(checksum_path(src_path), checksum_downloaded)) self.run_program( self.storage.download_file_cmd(src_path, target_path)) if not self.args.disable_checksums: self.run_program( self.create_checksum_cmd(target_path, checksum_path(target_path))) check_checksum_res = self.run_program( compare_checksums_cmd(checksum_downloaded, checksum_path(target_path))).strip() else: server_ip = self.get_main_host_ip() if not self.args.disable_checksums: checksum_downloaded = checksum_path_downloaded(target_path) self.run_ssh_cmd( self.storage.download_file_cmd(checksum_path(src_path), checksum_downloaded), server_ip) self.run_ssh_cmd( self.storage.download_file_cmd(src_path, target_path), server_ip) if not self.args.disable_checksums: self.run_ssh_cmd( self.create_checksum_cmd(target_path, checksum_path(target_path)), server_ip) check_checksum_res = self.run_ssh_cmd( compare_checksums_cmd(checksum_downloaded, checksum_path(target_path)), server_ip).strip() if (not self.args.disable_checksums) and check_checksum_res != 'correct': raise BackupException('Check-sum for {} is {}'.format( target_path, check_checksum_res)) logging.info( 'Downloaded metadata file %s from %s' % (target_path, src_path)) def load_or_create_manifest(self): """ Download the Manifest file for the backup to the local object. Create the Manifest by default if it's not available (old backup). 
""" if self.args.local_yb_admin_binary: self.run_program(['mkdir', '-p', self.get_tmp_dir()]) else: self.create_remote_tmp_dir(self.get_main_host_ip()) src_manifest_path = os.path.join(self.args.backup_location, MANIFEST_FILE_NAME) manifest_path = os.path.join(self.get_tmp_dir(), MANIFEST_FILE_NAME) try: self.download_file(src_manifest_path, manifest_path) self.download_file_from_server( self.get_main_host_ip(), manifest_path, self.get_tmp_dir()) self.manifest.load_from_file(manifest_path) except subprocess.CalledProcessError as ex: # The file is available for new backup only. if self.args.verbose: logging.info("Exception while downloading Manifest file {}. This must be " "a backup from an older version, ignoring: {}". format(src_manifest_path, ex)) if not self.manifest.is_loaded(): self.manifest.create_by_default(self.args.backup_location) if self.args.verbose: logging.info("{} manifest: {}".format( "Loaded" if self.manifest.is_loaded() else "Generated", self.manifest.to_string())) def download_metadata_file(self): """ Download the metadata file for a backup so as to perform a restore based on it. """ self.load_or_create_manifest() dump_files = [] if self.args.use_tablespaces: src_sql_tbsp_dump_path = os.path.join( self.args.backup_location, SQL_TBSP_DUMP_FILE_NAME) sql_tbsp_dump_path = os.path.join(self.get_tmp_dir(), SQL_TBSP_DUMP_FILE_NAME) self.download_file(src_sql_tbsp_dump_path, sql_tbsp_dump_path) dump_files.append(sql_tbsp_dump_path) else: sql_tbsp_dump_path = None src_sql_dump_path = os.path.join(self.args.backup_location, SQL_DUMP_FILE_NAME) sql_dump_path = os.path.join(self.get_tmp_dir(), SQL_DUMP_FILE_NAME) try: self.download_file(src_sql_dump_path, sql_dump_path) except subprocess.CalledProcessError as ex: if self.is_ysql_keyspace(): raise ex else: # Possibly this is YCQL backup (no way to determite it exactly at this point). # Try to ignore YSQL dump - YSQL table restoring will fail a bit later # on 'import_snapshot' step. logging.info("Ignoring the exception in downloading of {}: {}". format(src_sql_dump_path, ex)) sql_dump_path = None if sql_dump_path: dump_files.append(sql_dump_path) if self.manifest.is_pg_based_backup(): src_sql_data_dump_path = os.path.join( self.args.backup_location, SQL_DATA_DUMP_FILE_NAME) sql_data_dump_path = os.path.join(self.get_tmp_dir(), SQL_DATA_DUMP_FILE_NAME) try: self.download_file(src_sql_data_dump_path, sql_data_dump_path) except subprocess.CalledProcessError as ex: raise ex dump_files.append(sql_data_dump_path) logging.info('Skipping ' + METADATA_FILE_NAME + ' metadata file downloading.') return (None, dump_files) src_metadata_path = os.path.join(self.args.backup_location, METADATA_FILE_NAME) metadata_path = os.path.join(self.get_tmp_dir(), METADATA_FILE_NAME) self.download_file(src_metadata_path, metadata_path) return (metadata_path, dump_files) def import_ysql_dump(self, dump_file_path): """ Import the YSQL dump using the provided file. 
""" new_db_name = None if self.args.keyspace: cmd = get_db_name_cmd(dump_file_path) if self.args.local_yb_admin_binary: old_db_name = self.run_program(cmd).strip() else: old_db_name = self.run_ssh_cmd(cmd, self.get_main_host_ip()).strip() if old_db_name: new_db_name = keyspace_name(self.args.keyspace[0]) if new_db_name == old_db_name: logging.info("[app] Skip renaming because YSQL DB name was not changed: " "'{}'".format(old_db_name)) else: logging.info("[app] Renaming YSQL DB from '{}' into '{}'".format( old_db_name, new_db_name)) cmd = replace_db_name_cmd(dump_file_path, old_db_name, new_db_name) if self.args.local_yb_admin_binary: self.run_program(cmd) else: self.run_ssh_cmd(cmd, self.get_main_host_ip()) else: logging.info("[app] Skip renaming because YSQL DB name was not found in file " "{}".format(dump_file_path)) if self.args.edit_ysql_dump_sed_reg_exp: logging.info("[app] Applying sed regular expression '{}' to {}".format( self.args.edit_ysql_dump_sed_reg_exp, dump_file_path)) cmd = apply_sed_edit_reg_exp_cmd(dump_file_path, self.args.edit_ysql_dump_sed_reg_exp) if self.args.local_yb_admin_binary: self.run_program(cmd) else: self.run_ssh_cmd(cmd, self.get_main_host_ip()) self.run_ysql_shell(['--echo-all', '--file=' + dump_file_path]) return new_db_name def import_snapshot(self, metadata_file_path): """ Import the snapshot metadata using the provided metadata file, process the metadata for the imported snapshot and return the snapshot metadata. The snapshot metadata returned is a map containing all the metadata for the snapshot and mappings from old ids to new ids for table, keyspace, tablets and snapshot. """ yb_admin_args = ['import_snapshot', metadata_file_path] if self.args.keyspace: yb_admin_args += [self.args.keyspace[0]] if self.args.table: yb_admin_args += [' '.join(self.args.table)] output = self.run_yb_admin(yb_admin_args, run_ip=self.get_main_host_ip()) snapshot_metadata = {} snapshot_metadata['keyspace_name'] = [] snapshot_metadata['table_name'] = [] snapshot_metadata['table'] = {} snapshot_metadata['tablet'] = {} snapshot_metadata['snapshot_id'] = {} for idx, line in enumerate(output.splitlines()): table_match = IMPORTED_TABLE_RE.search(line) if table_match: snapshot_metadata['keyspace_name'].append(table_match.group(1)) snapshot_metadata['table_name'].append(table_match.group(2)) logging.info('Imported table: {}.{}'.format(table_match.group(1), table_match.group(2))) elif NEW_OLD_UUID_RE.search(line): (entity, old_id, new_id) = split_by_tab(line) if entity == 'Table': snapshot_metadata['table'][new_id] = old_id logging.info('Imported table id was changed from {} to {}'.format(old_id, new_id)) elif entity.startswith('Tablet'): snapshot_metadata['tablet'][new_id] = old_id elif entity == 'Snapshot': snapshot_metadata['snapshot_id']['old'] = old_id snapshot_metadata['snapshot_id']['new'] = new_id elif COLOCATED_NEW_OLD_UUID_RE.search(line): (entity, old_id, new_id) = split_by_tab(line) if entity == 'ParentColocatedTable': verify_colocated_table_ids(old_id, new_id) snapshot_metadata['table'][new_id] = old_id logging.info('Imported colocated table id was changed from {} to {}' .format(old_id, new_id)) elif entity == 'ColocatedTable': # A colocated table's tablets are kept under its corresponding parent colocated # table, so we just need to verify the table ids now. 
verify_colocated_table_ids(old_id, new_id) logging.info('Imported colocated table id was changed from {} to {}' .format(old_id, new_id)) tablet_locations = {} if self.manifest.is_loaded(): self.manifest.get_tablet_locations(tablet_locations) else: default_location = self.args.backup_location if self.args.verbose: logging.info("Default location for all tablets: {}".format(default_location)) for tablet_id in snapshot_metadata['tablet'].values(): tablet_locations[tablet_id] = default_location snapshot_metadata['tablet_location'] = tablet_locations return snapshot_metadata def find_tablet_replicas(self, snapshot_metadata): """ Finds the tablet replicas for tablets present in snapshot_metadata and returns a list of all tservers that need to be processed. """ # Parallize this using half of the parallelism setting to not overload master with yb-admin. parallelism = min(16, (self.args.parallelism + 1) // 2) pool = ThreadPool(parallelism) self.pools.append(pool) parallel_find_tservers = MultiArgParallelCmd(self.run_yb_admin) # First construct all the yb-admin commands to send. for new_tablet_id in snapshot_metadata['tablet']: parallel_find_tservers.add_args(('list_tablet_servers', new_tablet_id)) num_loops = 0 # Continue searching for TServers until all tablet peers are either LEADER or FOLLOWER # or READ_REPLICA. This is done to avoid errors later in the restore_snapshot phase. while num_loops < REPLICAS_SEARCHING_LOOP_MAX_RETRIES: logging.info('[app] Start searching for tablet replicas (try {})'.format(num_loops)) num_loops += 1 found_bad_ts = False tablets_by_tserver_ip = {} # Run all the list_tablet_servers in parallel. output = parallel_find_tservers.run(pool) # Process the output. for cmd in output: # Pull the new_id value out from the command string. matches = LIST_TABLET_SERVERS_RE.match(str(cmd)) tablet_id = matches.group(1) num_ts = 0 # For each output line, get the tablet servers ips for this tablet id. for line in output[cmd].splitlines(): if LEADING_UUID_RE.match(line): fields = split_by_tab(line) (ts_ip_port, role) = (fields[1], fields[2]) (ts_ip, ts_port) = ts_ip_port.split(':') if role == 'LEADER' or role == 'FOLLOWER' or role == 'READ_REPLICA': if self.secondary_to_primary_ip_map: ts_ip = self.secondary_to_primary_ip_map[ts_ip] tablets_by_tserver_ip.setdefault(ts_ip, set()).add(tablet_id) num_ts += 1 else: # Bad/temporary roles: LEARNER, NON_PARTICIPANT, UNKNOWN_ROLE. found_bad_ts = True logging.warning("Found TS {} with bad role: {} for tablet {}. " "Retry searching.".format(ts_ip, role, tablet_id)) break if found_bad_ts: break if num_ts == 0: raise BackupException( "No alive TS found for tablet {}:\n{}".format(tablet_id, output)) if not found_bad_ts: return tablets_by_tserver_ip logging.info("Sleep for {} seconds before the next tablet replicas searching round.". format(SLEEP_IN_REPLICAS_SEARCHING_ROUND_SEC)) time.sleep(SLEEP_IN_REPLICAS_SEARCHING_ROUND_SEC) raise BackupException( "Exceeded max number of retries for the tablet replicas searching loop ({})!". format(REPLICAS_SEARCHING_LOOP_MAX_RETRIES)) def identify_new_tablet_replicas(self, tablets_by_tserver_ip_old, tablets_by_tserver_ip_new): """ Compare old and new sets of tablets per every TServer, find and return difference. Returns union of the sets per TServer, and delta of the sets. 
""" tablets_by_tserver_union = copy.deepcopy(tablets_by_tserver_ip_old) tablets_by_tserver_delta = {} for ip in tablets_by_tserver_ip_new: tablets = tablets_by_tserver_ip_new[ip] if ip in tablets_by_tserver_ip_old: if not (tablets_by_tserver_ip_old[ip] >= tablets): tablets_by_tserver_union[ip].update(tablets) tablets_by_tserver_delta[ip] = tablets - tablets_by_tserver_ip_old[ip] else: tablets_by_tserver_union[ip] = tablets tablets_by_tserver_delta[ip] = tablets return (tablets_by_tserver_union, tablets_by_tserver_delta) def download_snapshot_directories(self, snapshot_meta, tablets_by_tserver_to_download, snapshot_id, table_ids): with terminating(ThreadPool(self.args.parallelism)) as pool: self.pools.append(pool) self.timer.log_new_phase("Find all table/tablet data dirs on all tservers") tserver_ips = list(tablets_by_tserver_to_download.keys()) data_dir_by_tserver = SingleArgParallelCmd(self.find_data_dirs, tserver_ips).run(pool) if self.args.verbose: logging.info('Found data directories: {}'.format(data_dir_by_tserver)) (tserver_to_tablet_to_snapshot_dirs, tserver_to_deleted_tablets) =\ self.generate_snapshot_dirs( data_dir_by_tserver, snapshot_id, tablets_by_tserver_to_download, table_ids) # Remove deleted tablets from the list of planned to be downloaded tablets. for tserver_ip in tserver_to_deleted_tablets: deleted_tablets = tserver_to_deleted_tablets[tserver_ip] tablets_by_tserver_to_download[tserver_ip] -= deleted_tablets self.timer.log_new_phase("Download data") parallel_downloads = SequencedParallelCmd( self.run_ssh_cmd, preprocess_args_fn=self.join_ssh_cmds, handle_errors=True) self.prepare_cloud_ssh_cmds( parallel_downloads, tserver_to_tablet_to_snapshot_dirs, None, snapshot_id, tablets_by_tserver_to_download, upload=False, snapshot_metadata=snapshot_meta) # Run a sequence of steps for each tablet, handling different tablets in parallel. results = parallel_downloads.run(pool) for k in results: v = results[k] if isinstance(v, tuple) and v[0] == 'failed-cmd': assert len(v) == 2 (tablet_id, tserver_ip) = v[1] # In case we fail a cmd, don't mark this tablet-tserver pair as succeeded, # instead we will retry in the next round of downloads. tserver_to_deleted_tablets.setdefault(tserver_ip, set()).add(tablet_id) return tserver_to_deleted_tablets def restore_table(self): """ Restore a table from the backup stored in the given backup path. """ if self.args.keyspace: if len(self.args.keyspace) > 1: raise BackupException('Only one --keyspace expected for the restore mode.') elif self.args.table: raise BackupException('Need to specify --keyspace') if self.args.region_location is not None: raise BackupException('--region_location is not supported for the restore mode.') # TODO (jhe): Perform verification for restore_time. 
Need to check for: # - Verify that the timestamp given fits in the history retention window for the snapshot # - Verify that we are restoring a keyspace/namespace (no individual tables for pitr) logging.info('[app] Restoring backup from {}'.format(self.args.backup_location)) (metadata_file_path, dump_file_paths) = self.download_metadata_file() if len(dump_file_paths): self.timer.log_new_phase("Create objects via YSQL dumps") for dump_file_path in dump_file_paths: dump_file = os.path.basename(dump_file_path) if dump_file == SQL_TBSP_DUMP_FILE_NAME: logging.info('[app] Create tablespaces from {}'.format(dump_file_path)) self.import_ysql_dump(dump_file_path) elif dump_file == SQL_DUMP_FILE_NAME: logging.info('[app] Create YSQL tables from {}'.format(dump_file_path)) new_db_name = self.import_ysql_dump(dump_file_path) elif dump_file == SQL_DATA_DUMP_FILE_NAME: # Note: YSQLDump_data must be last in the dump file list. self.timer.log_new_phase("Apply complete YSQL data dump") ysqlsh_args = ['--file=' + dump_file_path] if new_db_name: ysqlsh_args += ['--dbname=' + new_db_name] self.run_ysql_shell(ysqlsh_args) # Skipping Snapshot loading & restoring because # PG based backup means only complete YSQL Data Dump applying. logging.info('[app] Restored PG based backup successfully!') print(json.dumps({"success": True})) return self.timer.log_new_phase("Import snapshot") snapshot_metadata = self.import_snapshot(metadata_file_path) snapshot_id = snapshot_metadata['snapshot_id']['new'] table_ids = list(snapshot_metadata['table'].keys()) self.wait_for_snapshot(snapshot_id, 'importing', CREATE_SNAPSHOT_TIMEOUT_SEC, False) if not self.args.no_snapshot_deleting: logging.info("Snapshot %s will be deleted at exit...", snapshot_id) atexit.register(self.delete_created_snapshot, snapshot_id) self.timer.log_new_phase("Generate list of tservers for every tablet") all_tablets_by_tserver = self.find_tablet_replicas(snapshot_metadata) tablets_by_tserver_to_download = all_tablets_by_tserver # The loop must stop after a few rounds because the downloading list includes only new # tablets for downloading. The downloading list should become smaller with every round # and must become empty in the end. num_loops = 0 while tablets_by_tserver_to_download and num_loops < RESTORE_DOWNLOAD_LOOP_MAX_RETRIES: num_loops += 1 logging.info('[app] Downloading tablets onto %d tservers...', len(tablets_by_tserver_to_download)) if self.args.verbose: logging.info('Downloading list: {}'.format(tablets_by_tserver_to_download)) # Download tablets and get list of deleted tablets. tserver_to_deleted_tablets = self.download_snapshot_directories( snapshot_metadata, tablets_by_tserver_to_download, snapshot_id, table_ids) # Remove deleted tablets from the list of all tablets. 
for tserver_ip in tserver_to_deleted_tablets: deleted_tablets = tserver_to_deleted_tablets[tserver_ip] all_tablets_by_tserver[tserver_ip] -= deleted_tablets self.timer.log_new_phase("Regenerate list of tservers for every tablet") tablets_by_tserver_new = self.find_tablet_replicas(snapshot_metadata) # Calculate the new downloading list as a subtraction of sets: # downloading_list = NEW_all_tablet_replicas - OLD_all_tablet_replicas # And extend the list of all tablets (as unioun of sets) for using it on the next # loop iteration: # OLD_all_tablet_replicas = OLD_all_tablet_replicas + NEW_all_tablet_replicas # = OLD_all_tablet_replicas + downloading_list (all_tablets_by_tserver, tablets_by_tserver_to_download) =\ self.identify_new_tablet_replicas(all_tablets_by_tserver, tablets_by_tserver_new) if num_loops >= RESTORE_DOWNLOAD_LOOP_MAX_RETRIES: raise BackupException( "Exceeded max number of retries for the restore download loop ({})!". format(RESTORE_DOWNLOAD_LOOP_MAX_RETRIES)) # Finally, restore the snapshot. logging.info('Downloading is finished. Restoring snapshot %s ...', snapshot_id) self.timer.log_new_phase("Restore the snapshot") restore_snapshot_args = ['restore_snapshot', snapshot_id] # Pass in the timestamp if provided. if self.args.restore_time: restore_snapshot_args.append(self.args.restore_time) output = self.run_yb_admin(restore_snapshot_args) # Transaction-aware snapshots use special restaration id with final state RESTORED, # while previous implementation uses snapshot id and it's state COMPLETE. restoration_id = snapshot_id complete_restoration_state = 'COMPLETE' for line in output.splitlines(): restoration_match = RESTORATION_RE.match(line) if restoration_match: restoration_id = restoration_match.group(1) complete_restoration_state = 'RESTORED' logging.info('[app] Found restoration id: ' + restoration_id) self.wait_for_snapshot(restoration_id, 'restoring', RESTORE_SNAPSHOT_TIMEOUT_SEC, False, complete_restoration_state) logging.info('[app] Restored backup successfully!') print(json.dumps({"success": True})) def delete_backup(self): """ Delete the backup specified by the storage location. """ if not self.args.backup_location: raise BackupException('Need to specify --backup_location') self.load_or_create_manifest() error = None for loc in self.manifest.get_locations(): try: self.delete_bucket_obj(loc) except Exception as ex: logging.warning("Failed to delete '{}'. Error: {}".format(loc, ex)) error = ex if error: raise error logging.info('[app] Deleted backup %s successfully!', self.args.backup_location) print(json.dumps({"success": True})) def restore_keys(self): """ Restore universe keys from the backup stored in the given backup path. """ if self.args.restore_keys_destination: self.download_encryption_key_file() logging.info('[app] Restored backup universe keys successfully!') print(json.dumps({"success": True})) # At exit callbacks def cleanup_temporary_directory(self, tmp_dir): """ Callback run on exit to clean up temporary directories. """ if self.args.verbose: logging.info("Removing temporary directory '{}'".format(tmp_dir)) self.run_program(['rm', '-rf', tmp_dir]) def cleanup_remote_temporary_directory(self, server_ip, tmp_dir): """ Callback run on exit to clean up temporary directories on remote host. 
""" if self.args.verbose: logging.info("Removing remote temporary directory '{}' on {}".format( tmp_dir, server_ip)) self.run_ssh_cmd(['rm', '-rf', tmp_dir], server_ip) def delete_created_snapshot(self, snapshot_id): """ Callback run on exit to delete temporary newly created snapshot. """ if self.args.verbose: logging.info("Deleting snapshot %s ...", snapshot_id) return self.run_yb_admin(['delete_snapshot', snapshot_id]) def TEST_yb_admin_unsupported_commands(self): try: self.run_yb_admin(["fake_command"]) raise BackupException("Expected YbAdminOpNotSupportedException on unsupported command.") except YbAdminOpNotSupportedException as ex: # Required output for the JsonReader to pass the test. print(json.dumps({"success": True})) def run(self): try: self.post_process_arguments() if self.args.TEST_yb_admin_unsupported_commands: self.TEST_yb_admin_unsupported_commands() return try: self.database_version = YBVersion(self.run_yb_admin(['--version']), self.args.verbose) except Exception as ex: logging.error("Cannot identify YB cluster version. Ignoring the exception: {}". format(ex)) if self.args.command == 'restore': self.restore_table() elif self.args.command == 'create': self.backup_table() elif self.args.command == 'restore_keys': self.restore_keys() elif self.args.command == 'delete': self.delete_backup() else: logging.error('Command was not specified') print(json.dumps({"error": "Command was not specified"})) except BackupException as ex: print(json.dumps({"error": "Backup exception: {}".format(str(ex))})) except Exception as ex: print(json.dumps({"error": "Exception: {}".format(str(ex))})) traceback.print_exc() traceback.print_stack() finally: self.timer.print_summary() if __name__ == "__main__": # Setup logging. By default in the config the output stream is: stream=sys.stderr. # Set custom output format and logging level=INFO (DEBUG messages will not be printed). logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s") # Registers the signal handlers. yb_backup = YBBackup() with terminating(ThreadPool(1)) as pool: try: # Main thread cannot be blocked to handle signals. future = pool.apply_async(yb_backup.run) while not future.ready(): # Prevent blocking by waiting with timeout. future.wait(timeout=1) finally: logging.info("Terminating all threadpool ...") yb_backup.terminate_pools() logging.shutdown()
ServerDriver.py
#!/usr/bin/env python3

# Package imports
import logging
import threading

# Local imports
from controllers.ReceiverController import ReceiverServer
from controllers.SenderController import SenderServer


async def Servers():
    # Declared async although it contains no awaits; it only starts the two
    # daemon threads and returns.
    format = "%(asctime)s: %(message)s"
    logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")

    # Run the receiver on its own daemon thread so it does not block the sender.
    logging.info('Main: Before ReceiverServer creating thread')
    thread_1 = threading.Thread(target=ReceiverServer, args=(), daemon=True)
    logging.info('Main: Before ReceiverServer running thread')
    thread_1.start()
    logging.info('Main: After ReceiverServer starting thread')
    logging.info('\n-------------\n')

    # Run the sender on a second daemon thread.
    logging.info('Main: Before SenderServer creating thread')
    thread_2 = threading.Thread(target=SenderServer, args=(), daemon=True)
    logging.info('Main: Before SenderServer running thread')
    thread_2.start()
    logging.info('Main: After SenderServer starting thread')
    logging.info('\n-------------\n')
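Both servers above run on daemon threads, so whatever awaits Servers() has to keep the main thread alive or the process exits as soon as the threads start. Below is a minimal sketch of one possible entry point for the module above; the asyncio.run() call and the sleep loop are assumptions, not part of the original driver.

import asyncio
import time

if __name__ == '__main__':
    # Servers() contains no awaits, so asyncio.run() returns as soon as
    # both daemon threads have been started.
    asyncio.run(Servers())
    # Daemon threads die with the main thread, so block here to keep
    # the receiver and sender alive.
    while True:
        time.sleep(1)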
server.py
from flask import Flask, render_template, request, jsonify from flask_cors import CORS, cross_origin from multiprocessing import Process from configuration import Config import json import boto3 import time import paramiko import os app = Flask(__name__) CORS(app) #Paraminko ssh information dirname = os.path.dirname(__file__) filename = os.path.join(dirname, Config.SSH_KEY_FILE_PATH) key = paramiko.RSAKey.from_private_key_file(filename) sshClient = paramiko.SSHClient() sshClient.set_missing_host_key_policy(paramiko.AutoAddPolicy()) #Waits for the server to reach a valid state so that commands can be executed on the server def serverWaitOk(instanceIp, client): checksPassed = False status = 'initializing' instanceIds=[Config.INSTANCE_ID] while (not checksPassed) and (status == 'initializing'): statusCheckResponse = client.describe_instance_status(InstanceIds = instanceIds) instanceStatuses = statusCheckResponse['InstanceStatuses'] instanceStatus = instanceStatuses[0] instanceStatus = instanceStatus['InstanceStatus'] status = instanceStatus['Status'] checksPassed = status == 'ok' time.sleep(5) if checksPassed: initServerCommands(instanceIp) else: print('An error has occurred booting the server') #SSH connects to server and executes command to boot minecraft server def initServerCommands(instanceIp): # Connect/ssh to an instance try: # Here 'ubuntu' is user name and 'instance_ip' is public IP of EC2 sshClient.connect(hostname=instanceIp, username="ubuntu", pkey=key) # Execute a command(cmd) after connecting/ssh to an instance stdin, stdout, stderr = sshClient.exec_command("screen -dmS minecraft bash -c 'sudo java " + Config.MEMORY_ALLOCATION + "-jar server.jar nogui'") print("COMMAND EXECUTED") # close the client connection once the job is done sshClient.close() except: print('Error running server commands') #Main endpoint for loading the webpage @app.route('/') def loadIndex(): return render_template('index.html') @app.route('/initServerMC', methods = ['POST']) def initServerMC(): inputPass = request.form['pass'] returnData = {} message = "Password Incorrect!" 
if inputPass == Config.SERVER_PASSWORD: #Instantiate server here or return ip address if already running client = boto3.client( 'ec2', aws_access_key_id=Config.ACCESS_KEY, aws_secret_access_key=Config.SECRET_KEY, region_name=Config.ec2_region ) message = manageServer(client) print(message) return render_template('index.html', ipMessage=message) #Gets IP Address for return to webpage otherwise boots server def manageServer(client): returnString = 'ERROR' instanceIds = [Config.INSTANCE_ID] response = client.describe_instances(InstanceIds = instanceIds) reservations = response['Reservations'] reservation = reservations[0] instances = reservation['Instances'] print("\nSERVER INSTANCES\n") print(instances) print("\n") if len(instances) > 0: instance = instances[0] state = instance['State'] stateName = state['Name'] if (stateName == 'stopped') or (stateName == 'shutting-down'): #SETUP MULTIPROCESSING HERE INSTEAD OF REDIS returnString = startServer(client) elif stateName == 'running': returnString = 'IP: ' + instance['PublicIpAddress'] else: returnString = 'ERROR' return returnString #Starts the specified AWS Instance from the configuration def startServer(client): #Gets proper variables to attempt to instantiate EC2 instance and start minecraft server returnString = 'ERROR' instanceIds = [Config.INSTANCE_ID] response = client.start_instances(InstanceIds = instanceIds) stateCode = 0 while not (stateCode == 16): time.sleep(3) print('\nAWS EC2 START RESPONSE\n') print(str(response)) print('\n') response = client.describe_instances(InstanceIds = instanceIds) reservations = response['Reservations'] reservation = reservations[0] instances = reservation['Instances'] instance = instances[0] state = instance['State'] stateCode = state['Code'] print("\nSERVER INSTANCES\n") print(instances) print("\n") ipAddress = instance['PublicIpAddress'] returnString = 'Server is starting, this may take a few minutes.\nIP: ' + ipAddress #SETUP MULTIPROCESSING HERE INSTEAD OF REDIS p = Process(target=serverWaitOk, args=(ipAddress, client)) p.start() return returnString if __name__ == "__main__": app.run()
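startServer above polls describe_instances until the instance reports state code 16 ('running') and only then reads PublicIpAddress. The helper below is a hedged, generalized sketch of that wait; the function name and parameters are illustrative, and credentials come from boto3's default chain rather than the module's Config.

import time
import boto3

def wait_until_running(instance_id, region_name, poll_seconds=3):
    # Poll EC2 until the instance reaches state code 16 ('running'),
    # then return its public IP address.
    client = boto3.client('ec2', region_name=region_name)
    while True:
        response = client.describe_instances(InstanceIds=[instance_id])
        instance = response['Reservations'][0]['Instances'][0]
        if instance['State']['Code'] == 16:
            # PublicIpAddress is only populated once the instance is running.
            return instance.get('PublicIpAddress')
        time.sleep(poll_seconds)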
intersubs_ui.py
#! /usr/bin/env python # v. 2.7 # Interactive subtitles for `mpv` for language learners. import os import subprocess import sys import random import re import time import threading import platform from json import loads import numpy from PyQt5.QtCore import Qt, QThread, QObject, pyqtSignal, pyqtSlot, QSize from PyQt5.QtWidgets import QApplication, QFrame, QVBoxLayout, QHBoxLayout, QLabel, QSizePolicy, QWidget from PyQt5.QtGui import QPalette, QPaintEvent, QPainter, QPainterPath, QFontMetrics, QColor, QPen, QBrush import intersubs_config as config import intersubs_providers as providers def mpv_pause(): os.system( 'echo \'{ "command": ["set_property", "pause", true] }\' | socat - "' + config.mpv_socket + '" > /dev/null') def mpv_resume(): os.system( 'echo \'{ "command": ["set_property", "pause", false] }\' | socat - "' + config.mpv_socket + '" > /dev/null') def mpv_pause_status(): stdoutdata = subprocess.getoutput( 'echo \'{ "command": ["get_property", "pause"] }\' | socat - "' + config.mpv_socket + '"') try: return loads(stdoutdata)['data'] except BaseException: return mpv_pause_status() def mpv_fullscreen_status(): stdoutdata = subprocess.getoutput( 'echo \'{ "command": ["get_property", "fullscreen"] }\' | socat - "' + config.mpv_socket + '"') try: return loads(stdoutdata)['data'] except BaseException: return mpv_fullscreen_status() def mpv_message(message, timeout=3000): os.system( 'echo \'{ "command": ["show-text", "' + message + '", "' + str(timeout) + '"] }\' | socat - "' + config.mpv_socket + '" > /dev/null') def stripsd2(phrase): return ''.join(e for e in phrase.strip().lower() if e == ' ' or (e.isalnum() and not e.isdigit())).strip() def r2l(l): l2 = '' try: l2 = re.findall('(?!%)\W+$', l)[0][::-1] except BaseException: pass l2 += re.sub('^\W+|(?!%)\W+$', '', l) try: l2 += re.findall('^\W+', l)[0][::-1] except BaseException: pass return l2 def split_long_lines(line, chunks=2, max_symbols_per_line=False): if max_symbols_per_line: chunks = 0 while 1: chunks += 1 new_lines = [] for i in range(chunks): new_line = ' '.join( numpy.array_split( line.split(' '), chunks)[i]) new_lines.append(new_line) if len(max(new_lines, key=len)) <= max_symbols_per_line: return '\n'.join(new_lines) else: new_lines = [] for i in range(chunks): new_line = ' '.join(numpy.array_split(line.split(' '), chunks)[i]) new_lines.append(new_line) return '\n'.join(new_lines) def dir2(name): print('\n'.join(dir(name))) sys.exit() class ThreadSubtitles(QObject): update_subtitles = pyqtSignal(bool, bool) @pyqtSlot() def main(self): was_hidden = 0 inc = 0 auto_pause_2_ind = 0 last_updated = time.time() while 1: time.sleep(config.update_time) if platform.system() == 'Linux': # hide subs when mpv isn't in focus or in fullscreen if inc * config.update_time > config.focus_checking_time - \ 0.0001 and not config.testing: while 'mpv' not in subprocess.getoutput('xdotool getwindowfocus getwindowname') or ( config.hide_when_not_fullscreen_B and not mpv_fullscreen_status()) or ( os.path.exists( config.mpv_socket + '_hide')): if not was_hidden: self.update_subtitles.emit(True, False) was_hidden = 1 else: time.sleep(config.focus_checking_time) inc = 0 inc += 1 if was_hidden: was_hidden = 0 self.update_subtitles.emit(False, False) continue try: tmp_file_subs = open(config.sub_file).read() except BaseException: continue if config.extend_subs_duration2max_B and not len(tmp_file_subs): if not config.extend_subs_duration_limit_sec: continue if config.extend_subs_duration_limit_sec > time.time() - last_updated: continue last_updated = 
time.time() # automatically switch into Hebrew if it's detected if config.lang_from != 'he' and config.lang_from != 'iw' and any( (c in set('קראטוןםפשדגכעיחלךףזסבהנמצתץ')) for c in tmp_file_subs): config.lang_from = 'he' frf = random.choice(config.he_fonts) config.style_subs = re.sub( 'font-family: ".*?";', lambda ff: 'font-family: "%s";' % frf, config.style_subs, flags=re.I) config.R2L_from_B = True config.translation_function_names = config.translation_function_names_2 config.listen_via = 'forvo' os.system('notify-send -i none -t 1111 "He"') os.system('notify-send -i none -t 1111 "%s"' % str(frf)) self.update_subtitles.emit(False, True) while tmp_file_subs != config.subs: if config.auto_pause == 2: if not auto_pause_2_ind and len( re.sub( ' +', ' ', stripsd2( config.subs.replace( '\n', ' '))).split(' ')) > config.auto_pause_min_words - 1 and not mpv_pause_status(): mpv_pause() auto_pause_2_ind = 1 if auto_pause_2_ind and mpv_pause_status(): break auto_pause_2_ind = 0 config.subs = tmp_file_subs if config.auto_pause == 1: if len( re.sub( ' +', ' ', stripsd2( config.subs.replace( '\n', ' '))).split(' ')) > config.auto_pause_min_words - 1: mpv_pause() self.update_subtitles.emit(False, False) break class ThreadTranslations(QObject): get_translations = pyqtSignal(str, int, bool) @pyqtSlot() def main(self): while 1: to_new_word = False try: word, globalX = config.queue_to_translate.get(False) except BaseException: time.sleep(config.update_time) continue # changing cursor to hourglass during translation QApplication.setOverrideCursor(Qt.WaitCursor) threads = [] for translation_function_name in config.translation_function_names: threads.append( threading.Thread( target=getattr(providers, translation_function_name), args=( word, ))) for x in threads: x.start() while any(thread.is_alive() for thread in threads): if config.queue_to_translate.qsize(): to_new_word = True break time.sleep(config.update_time) QApplication.restoreOverrideCursor() if to_new_word: continue if config.block_popup: continue self.get_translations.emit(word, globalX, False) # drawing layer # because can't calculate outline with precision class DrawingLayer(QLabel): def __init__(self, line, subs, parent=None): super().__init__(None) self.line = line self.setStyleSheet(config.style_subs) self.psuedo_line = 0 def draw_text_n_outline( self, painter: QPainter, x, y, outline_width, outline_blur, text): outline_color = QColor(config.outline_color) font = self.font() text_path = QPainterPath() if config.R2L_from_B: text_path.addText(x, y, font, ' ' + r2l(text.strip()) + ' ') else: text_path.addText(x, y, font, text) # draw blur range_width = range(outline_width, outline_width + outline_blur) # ~range_width = range(outline_width + outline_blur, outline_width, -1) for width in range_width: if width == min(range_width): alpha = 200 else: alpha = (max(range_width) - width) / max(range_width) * 200 blur_color = QColor( outline_color.red(), outline_color.green(), outline_color.blue(), alpha) blur_brush = QBrush(blur_color, Qt.SolidPattern) blur_pen = QPen( blur_brush, width, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin) painter.setPen(blur_pen) painter.drawPath(text_path) # draw outline outline_color = QColor( outline_color.red(), outline_color.green(), outline_color.blue(), 255) outline_brush = QBrush(outline_color, Qt.SolidPattern) outline_pen = QPen( outline_brush, outline_width, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin) painter.setPen(outline_pen) painter.drawPath(text_path) # draw text color = self.palette().color(QPalette.Text) 
painter.setPen(color) painter.drawText(x, y, text) if config.outline_B: def paintEvent(self, evt: QPaintEvent): # pylint: disable=invalid-name if not self.psuedo_line: self.psuedo_line = 1 return x = y = 0 y += self.fontMetrics().ascent() painter = QPainter(self) self.draw_text_n_outline( painter, x, y + config.outline_top_padding - config.outline_bottom_padding, config.outline_thickness, config.outline_blur, text=self.line ) def resizeEvent(self, *args): # pylint: disable=invalid-name self.setFixedSize( self.fontMetrics().width(self.line), self.fontMetrics().height() + config.outline_bottom_padding + config.outline_top_padding ) def sizeHint(self): # pylint: disable=invalid-name return QSize( self.fontMetrics().width(self.line), self.fontMetrics().height() ) class EventsClass(QLabel): mouseHover = pyqtSignal(str, int, bool) redraw = pyqtSignal(bool, bool) def __init__(self, word, subs, skip=False, parent=None): super().__init__(word) self.setMouseTracking(True) self.word = word self.subs = subs self.skip = skip self.highlight = False self.setStyleSheet('background: transparent; color: transparent;') def highligting(self, color, underline_width): color = QColor(color) color = QColor(color.red(), color.green(), color.blue(), 200) painter = QPainter(self) if config.hover_underline: font_metrics = QFontMetrics(self.font()) text_width = font_metrics.width(self.word) text_height = font_metrics.height() brush = QBrush(color) pen = QPen(brush, underline_width, Qt.SolidLine, Qt.RoundCap) painter.setPen(pen) if not self.skip: painter.drawLine( 0, text_height - underline_width, text_width, text_height - underline_width) if config.hover_hightlight: x = y = 0 y += self.fontMetrics().ascent() painter.setPen(color) painter.drawText( x, y + config.outline_top_padding - config.outline_bottom_padding, self.word) if config.outline_B: def paintEvent(self, evt: QPaintEvent): # pylint: disable=invalid-name if self.highlight: self.highligting( config.hover_color, config.hover_underline_thickness) ##################################################### def resizeEvent(self, event): # pylint: disable=invalid-name text_height = self.fontMetrics().height() text_width = self.fontMetrics().width(self.word) self.setFixedSize( text_width, text_height + config.outline_bottom_padding + config.outline_top_padding) def enterEvent(self, event): # pylint: disable=invalid-name if not self.skip: self.highlight = True self.repaint() config.queue_to_translate.put((self.word, event.globalX())) @pyqtSlot() def leaveEvent(self, event): # pylint: disable=invalid-name if not self.skip: self.highlight = False self.repaint() config.scroll = {} self.mouseHover.emit('', 0, False) QApplication.restoreOverrideCursor() def wheel_scrolling(self, event): if event.y() > 0: return 'ScrollUp' if event.y(): return 'ScrollDown' if event.x() > 0: return 'ScrollLeft' if event.x(): return 'ScrollRight' def wheelEvent(self, event): # pylint: disable=invalid-name for mouse_action in config.mouse_buttons: if self.wheel_scrolling(event.angleDelta()) == mouse_action[0]: if event.modifiers() == eval('Qt.%s' % mouse_action[1]): exec('self.%s(event)' % mouse_action[2]) def mousePressEvent(self, event): # pylint: disable=invalid-name for mouse_action in config.mouse_buttons: if 'Scroll' not in mouse_action[0]: if event.button() == eval('Qt.%s' % mouse_action[0]): if event.modifiers() == eval('Qt.%s' % mouse_action[1]): exec('self.%s(event)' % mouse_action[2]) ##################################################### def f_show_in_browser(self, event): 
config.avoid_resuming = True os.system(config.show_in_browser.replace('${word}', self.word)) def f_auto_pause_options(self, event): if config.auto_pause == 2: config.auto_pause = 0 else: config.auto_pause += 1 mpv_message('auto_pause: %d' % config.auto_pause) def f_listen(self, event): providers.listen(self.word, config.listen_via) @pyqtSlot() def f_subs_screen_edge_padding_decrease(self, event): config.subs_screen_edge_padding -= 5 mpv_message( 'subs_screen_edge_padding: %d' % config.subs_screen_edge_padding) self.redraw.emit(False, True) @pyqtSlot() def f_subs_screen_edge_padding_increase(self, event): config.subs_screen_edge_padding += 5 mpv_message( 'subs_screen_edge_padding: %d' % config.subs_screen_edge_padding) self.redraw.emit(False, True) @pyqtSlot() def f_font_size_decrease(self, event): config.style_subs = re.sub( 'font-size: (\d+)px;', lambda size: [ 'font-size: %dpx;' % (int( size.group(1)) - 1), mpv_message( 'font: %s' % size.group(1))][0], config.style_subs, flags=re.I) self.redraw.emit(False, True) @pyqtSlot() def f_font_size_increase(self, event): config.style_subs = re.sub( 'font-size: (\d+)px;', lambda size: [ 'font-size: %dpx;' % (int( size.group(1)) + 1), mpv_message( 'font: %s' % size.group(1))][0], config.style_subs, flags=re.I) self.redraw.emit(False, True) def f_auto_pause_min_words_decrease(self, event): config.auto_pause_min_words -= 1 mpv_message('auto_pause_min_words: %d' % config.auto_pause_min_words) def f_auto_pause_min_words_increase(self, event): config.auto_pause_min_words += 1 mpv_message('auto_pause_min_words: %d' % config.auto_pause_min_words) @pyqtSlot() def f_deepl_translation(self, event): self.mouseHover.emit(self.subs, event.globalX(), True) def f_save_word_to_file(self, event): if ( os.path.isfile( os.path.expanduser( config.save_word_to_file_fname)) and self.word not in [ x.strip() for x in open( os.path.expanduser( config.save_word_to_file_fname)).readlines()]) or not os.path.isfile( os.path.expanduser( config.save_word_to_file_fname)): print( self.word, file=open( os.path.expanduser( config.save_word_to_file_fname), 'a')) @pyqtSlot() def f_scroll_translations_up(self, event): if self.word in config.scroll and config.scroll[self.word] > 0: config.scroll[self.word] = config.scroll[self.word] - 1 else: config.scroll[self.word] = 0 self.mouseHover.emit(self.word, event.globalX(), False) @pyqtSlot() def f_scroll_translations_down(self, event): if self.word in config.scroll: config.scroll[self.word] = config.scroll[self.word] + 1 else: config.scroll[self.word] = 1 self.mouseHover.emit(self.word, event.globalX(), False) class MainView(QWidget): def __init__(self): super().__init__() self.thread_subs = QThread() self.obj = ThreadSubtitles() self.obj.update_subtitles.connect(self.render_subtitles) self.obj.moveToThread(self.thread_subs) self.thread_subs.started.connect(self.obj.main) self.thread_subs.start() self.thread_translations = QThread() self.obj2 = ThreadTranslations() self.obj2.get_translations.connect(self.render_popup) self.obj2.moveToThread(self.thread_translations) self.thread_translations.started.connect(self.obj2.main) self.thread_translations.start() # start the forms self.subtitles_base() self.subtitles_base2() self.popup_base() def clearLayout(self, layout): # pylint: disable=invalid-name if layout == 'subs': layout = self.subtitles_vbox self.subtitles.hide() elif layout == 'subs2': layout = self.subtitles_vbox2 self.subtitles2.hide() elif layout == 'popup': layout = self.popup_vbox self.popup.hide() if layout is not None: while 
layout.count(): item = layout.takeAt(0) widget = item.widget() if widget is not None: widget.deleteLater() else: self.clearLayout(item.layout()) def subtitles_base(self): self.subtitles = QFrame() self.subtitles.setAttribute(Qt.WA_TranslucentBackground) self.subtitles.setWindowFlags( Qt.WindowStaysOnTopHint | Qt.X11BypassWindowManagerHint | Qt.FramelessWindowHint) self.subtitles.setStyleSheet(config.style_subs) self.subtitles_vbox = QVBoxLayout(self.subtitles) self.subtitles_vbox.setSpacing(config.subs_padding_between_lines) self.subtitles_vbox.setContentsMargins(0, 0, 0, 0) def subtitles_base2(self): self.subtitles2 = QFrame() self.subtitles2.setAttribute(Qt.WA_TranslucentBackground) self.subtitles2.setWindowFlags( Qt.WindowStaysOnTopHint | Qt.X11BypassWindowManagerHint | Qt.FramelessWindowHint) self.subtitles2.setStyleSheet(config.style_subs) self.subtitles_vbox2 = QVBoxLayout(self.subtitles2) self.subtitles_vbox2.setSpacing(config.subs_padding_between_lines) self.subtitles_vbox2.setContentsMargins(0, 0, 0, 0) if config.pause_during_translation_B: self.subtitles2.enterEvent = lambda event: [ mpv_pause(), setattr(config, 'block_popup', False)][0] self.subtitles2.leaveEvent = lambda event: [ mpv_resume(), setattr( config, 'block_popup', True)][0] if not config.avoid_resuming else [ setattr( config, 'avoid_resuming', False), setattr( config, 'block_popup', True)][0] def popup_base(self): self.popup = QFrame() self.popup.setAttribute(Qt.WA_TranslucentBackground) self.popup.setWindowFlags( Qt.WindowStaysOnTopHint | Qt.X11BypassWindowManagerHint | Qt.FramelessWindowHint) self.popup.setStyleSheet(config.style_popup) self.popup_inner = QFrame() outer_box = QVBoxLayout(self.popup) outer_box.addWidget(self.popup_inner) self.popup_vbox = QVBoxLayout(self.popup_inner) self.popup_vbox.setSpacing(0) def render_subtitles(self, hide=False, redraw=False): if hide or not len(config.subs): try: self.subtitles.hide() self.subtitles2.hide() finally: return if redraw: self.subtitles.setStyleSheet(config.style_subs) self.subtitles2.setStyleSheet(config.style_subs) else: self.clearLayout('subs') self.clearLayout('subs2') if hasattr(self, 'popup'): self.popup.hide() # if subtitle consists of one overly long line - split into two if config.split_long_lines_B and len( config.subs.split('\n')) == 1 and len( config.subs.split(' ')) > config.split_long_lines_words_min - 1: subs2 = split_long_lines(config.subs) else: subs2 = config.subs subs2 = re.sub(' +', ' ', subs2).strip() ############################## for line in subs2.split('\n'): line2 = ' %s ' % line.strip() ll = DrawingLayer(line2, subs2) hbox = QHBoxLayout() hbox.setContentsMargins(0, 0, 0, 0) hbox.setSpacing(0) hbox.addStretch() hbox.addWidget(ll) hbox.addStretch() self.subtitles_vbox.addLayout(hbox) #################################### hbox = QHBoxLayout() hbox.setContentsMargins(0, 0, 0, 0) hbox.setSpacing(0) hbox.addStretch() if config.R2L_from_B: line2 = line2[::-1] line2 += '\00' word = '' for smbl in line2: if smbl.isalpha(): word += smbl else: if len(word): if config.R2L_from_B: word = word[::-1] ll = EventsClass(word, subs2) ll.mouseHover.connect(self.render_popup) ll.redraw.connect(self.render_subtitles) hbox.addWidget(ll) word = '' if smbl != '\00': ll = EventsClass(smbl, subs2, skip=True) hbox.addWidget(ll) hbox.addStretch() self.subtitles_vbox2.addLayout(hbox) self.subtitles.adjustSize() self.subtitles2.adjustSize() w = self.subtitles.geometry().width() h = self.subtitles.height = self.subtitles.geometry().height() x = (config.screen_width / 2) 
- (w / 2) if config.subs_top_placement_B: y = config.subs_screen_edge_padding else: y = config.screen_height - config.subs_screen_edge_padding - h self.subtitles.setGeometry(x, y, 0, 0) self.subtitles.show() self.subtitles2.setGeometry(x, y, 0, 0) self.subtitles2.show() def render_popup(self, text, x_cursor_pos, is_line): if text == '': if hasattr(self, 'popup'): self.popup.hide() return self.clearLayout('popup') if is_line: QApplication.setOverrideCursor(Qt.WaitCursor) line = providers.deepl(text) if config.split_long_lines_B and len( line.split('\n')) == 1 and len( line.split(' ')) > config.split_long_lines_words_min - 1: line = split_long_lines(line) ll = QLabel(line) ll.setObjectName("first_line") self.popup_vbox.addWidget(ll) else: word = text for translation_function_name_i, translation_function_name in enumerate( config.translation_function_names): pairs, word_descr = getattr(providers, translation_function_name)(word) if not len(pairs): pairs = [['', '[Not found]']] # return # ~pairs = [ [ str(i) + ' ' + pair[0], pair[1] ] for i, pair in enumerate(pairs) ] if word in config.scroll: if len(pairs[config.scroll[word]:] ) > config.number_of_translations: pairs = pairs[config.scroll[word]:] else: pairs = pairs[-config.number_of_translations:] if len(config.translation_function_names) == 1: config.scroll[word] -= 1 for i1, pair in enumerate(pairs): if i1 == config.number_of_translations: break if config.split_long_lines_in_popup_B: pair[0] = split_long_lines( pair[0], max_symbols_per_line=config.split_long_lines_in_popup_symbols_min) pair[1] = split_long_lines( pair[1], max_symbols_per_line=config.split_long_lines_in_popup_symbols_min) if pair[0] == '-': pair[0] = '' if pair[1] == '-': pair[1] = '' # ~if config.R2L_from_B: # ~pair[0] = pair[0][::-1] # ~if config.R2L_to_B: # ~pair[1] = pair[1][::-1] if pair[0] != '': # to emphasize the exact form of the word # to ignore case on input and match it on output chnks = re.split(word, pair[0], flags=re.I) exct_words = re.findall(word, pair[0], flags=re.I) hbox = QHBoxLayout() hbox.setContentsMargins(0, 0, 0, 0) for i2, chnk in enumerate(chnks): if len(chnk): ll = QLabel(chnk) ll.setObjectName("first_line") hbox.addWidget(ll) if i2 + 1 < len(chnks): ll = QLabel(exct_words[i2]) ll.setObjectName("first_line_emphasize_word") hbox.addWidget(ll) # filling the rest of the line with empty bg ll = QLabel() ll.setSizePolicy( QSizePolicy.Expanding, QSizePolicy.Preferred) hbox.addWidget(ll) self.popup_vbox.addLayout(hbox) if pair[1] != '': ll = QLabel(pair[1]) ll.setObjectName("second_line") self.popup_vbox.addWidget(ll) # padding ll = QLabel() ll.setStyleSheet("font-size: 6px;") self.popup_vbox.addWidget(ll) if len(word_descr[0]): ll = QLabel(word_descr[0]) ll.setProperty("morphology", word_descr[1]) ll.setAlignment(Qt.AlignRight) self.popup_vbox.addWidget(ll) # delimiter between dictionaries if translation_function_name_i + \ 1 < len(config.translation_function_names): ll = QLabel() ll.setObjectName("delimiter") self.popup_vbox.addWidget(ll) self.popup_inner.adjustSize() self.popup.adjustSize() w = self.popup.geometry().width() h = self.popup.geometry().height() if w > config.screen_width: w = config.screen_width - 20 if not is_line: if w < config.screen_width / 3: w = config.screen_width / 3 if x_cursor_pos == -1: x = (config.screen_width / 2) - (w / 2) else: x = x_cursor_pos - w / 5 if x + w > config.screen_width: x = config.screen_width - w if config.subs_top_placement_B: y = self.subtitles.height + config.subs_screen_edge_padding else: y = 
config.screen_height - config.subs_screen_edge_padding - self.subtitles.height - h self.popup.setGeometry(x, y, w, 0) self.popup.show() QApplication.restoreOverrideCursor()
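split_long_lines above either splits a subtitle into a fixed number of chunks or keeps raising the chunk count until every line fits max_symbols_per_line. A tiny usage sketch follows, meant to be run with the module above imported; the sample sentence is invented.

sentence = 'interactive subtitles make watching films in a new language much easier'
# Default: split into two roughly equal lines.
print(split_long_lines(sentence))
# Keep splitting until no line is longer than 20 characters.
print(split_long_lines(sentence, max_symbols_per_line=20))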
iotcore.py
# Copyright 2021 Amazon.com. # SPDX-License-Identifier: MIT from threading import Thread from typing import Callable import awsiot.greengrasscoreipc.client as client from awsiot.greengrasscoreipc import connect from awsiot.greengrasscoreipc.model import ( QOS, PublishToIoTCoreRequest, SubscribeToIoTCoreRequest, IoTCoreMessage ) import concurrent.futures from . import BaseClient class _MqttSubscribeHandler(client.SubscribeToIoTCoreStreamHandler): def __init__(self, handler: Callable[[str, bytes], None], error_handler: Callable[[Exception], None] ): self._handler = handler self._error_handler = error_handler def on_stream_event(self, event: IoTCoreMessage) -> None: msg = event.message t = Thread(target=self._handler, args=[msg.topic_name, msg.payload]) t.start() def on_stream_error(self, error: Exception)-> bool: t = Thread(target=self._error_handler, args=[error]) t.start() return True def on_stream_closed(self) -> None: pass class Client(BaseClient): def publish_async(self, topic: str, message: bytes, qos: QOS) -> concurrent.futures.Future: """Returns a Future Publishes a message on a topic with the given qos. """ request = PublishToIoTCoreRequest() request.topic_name = topic request.payload = message request.qos = qos operation = self._ipc_client.new_publish_to_iot_core() operation.activate(request) future = operation.get_response() return future def publish(self, topic: str, message: bytes, qos: QOS): """Publishes a message synchronously to AWS IoT Core via Greengrass connection Throws an exception if the publish fails """ try: future = self.publish_async(topic, message, qos) future.result(self._timeout) except Exception as ex: raise ex def subscribe_async(self, topic: str, qos: QOS, handler: Callable[[str, bytes], None], error_handler: Callable[[Exception], None]) -> concurrent.futures.Future: """Subscribes to a topic asynchronously with a given QoS. All received messages are sent to the handler. Unhandled exceptions in the handler code will be sent to the error_handler """ request = SubscribeToIoTCoreRequest() request.topic_name = topic request.qos = qos _handler = _MqttSubscribeHandler(handler, error_handler) operation = self._ipc_client.new_subscribe_to_iot_core(_handler) operation.activate(request) future = operation.get_response() return future def subscribe(self, topic: str, qos: QOS, handler: Callable[[str, bytes], None]): """ Subscribes to a topic asynchronously with a given QoS. All received messages are sent to the handler. Throws an exception if there are unhandled exceptions in the handler code. """ try: future = self.subscribe_async(topic, qos, handler, self._sync_error_handler) future.result(self._timeout) except Exception as ex: raise ex
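A hedged usage sketch for the Client above, intended to run inside a Greengrass component. The topic names are placeholders, and because BaseClient's constructor lives elsewhere in this package, the instantiation is left as a comment.

from awsiot.greengrasscoreipc.model import QOS

def on_message(topic: str, payload: bytes) -> None:
    # Runs on a worker thread spawned by _MqttSubscribeHandler.
    print(f'received on {topic}: {payload!r}')

def on_error(error: Exception) -> None:
    print(f'subscription stream error: {error}')

# The BaseClient constructor is defined elsewhere in this package, so the
# calls below are only indicative:
# client = Client()
# client.subscribe_async('devices/+/telemetry', QOS.AT_LEAST_ONCE, on_message, on_error)
# client.publish('devices/demo/telemetry', b'{"temp": 21.5}', QOS.AT_LEAST_ONCE)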
minion.py
# -*- coding: utf-8 -*- ''' Routines to set up a minion ''' # Import python libs from __future__ import print_function import copy import errno import fnmatch import hashlib import logging import multiprocessing import os import re import salt import signal import sys import threading import time import traceback import types from random import randint, shuffle # Import third party libs try: import zmq HAS_ZMQ = True except ImportError: # Running in local, zmq not needed HAS_ZMQ = False HAS_RANGE = False try: import seco.range HAS_RANGE = True except ImportError: pass HAS_PSUTIL = False try: import psutil HAS_PSUTIL = True except ImportError: pass HAS_RESOURCE = False try: import resource HAS_RESOURCE = True except ImportError: pass # Import salt libs from salt.exceptions import ( AuthenticationError, CommandExecutionError, CommandNotFoundError, SaltInvocationError, SaltReqTimeoutError, SaltClientError, SaltSystemExit ) import salt.client import salt.crypt import salt.loader import salt.payload import salt.utils import salt.utils.args import salt.utils.event import salt.utils.schedule from salt._compat import string_types from salt.utils.debug import enable_sigusr1_handler from salt.utils.event import tagify import salt.syspaths log = logging.getLogger(__name__) # To set up a minion: # 1. Read in the configuration # 2. Generate the function mapping dict # 3. Authenticate with the master # 4. Store the AES key # 5. Connect to the publisher # 6. Handle publications def resolve_dns(opts): ''' Resolves the master_ip and master_uri options ''' ret = {} check_dns = True if opts.get('file_client', 'remote') == 'local' and check_dns: check_dns = False if check_dns is True: # Because I import salt.log below I need to re-import salt.utils here import salt.utils try: ret['master_ip'] = \ salt.utils.dns_check(opts['master'], True, opts['ipv6']) except SaltClientError: if opts['retry_dns']: while True: import salt.log msg = ('Master hostname: {0} not found. Retrying in {1} ' 'seconds').format(opts['master'], opts['retry_dns']) if salt.log.is_console_configured(): log.warn(msg) else: print('WARNING: {0}'.format(msg)) time.sleep(opts['retry_dns']) try: ret['master_ip'] = salt.utils.dns_check( opts['master'], True, opts['ipv6'] ) break except SaltClientError: pass else: ret['master_ip'] = '127.0.0.1' except SaltSystemExit: err = 'Master address: {0} could not be resolved. Invalid or unresolveable address.'.format( opts.get('master', 'Unknown')) log.error(err) raise SaltSystemExit(code=42, msg=err) else: ret['master_ip'] = '127.0.0.1' if 'master_ip' in ret and 'master_ip' in opts: if ret['master_ip'] != opts['master_ip']: log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'], ret['master_ip']) ) ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'], port=opts['master_port']) return ret def get_proc_dir(cachedir): ''' Given the cache directory, return the directory that process data is stored in, creating it if it doesn't exist. ''' fn_ = os.path.join(cachedir, 'proc') if not os.path.isdir(fn_): # proc_dir is not present, create it os.makedirs(fn_) return fn_ def parse_args_and_kwargs(func, args, data=None): ''' Wrap load_args_and_kwargs ''' salt.utils.warn_until( 'Boron', 'salt.minion.parse_args_and_kwargs() has been renamed to ' 'salt.minion.load_args_and_kwargs(). Please change this function call ' 'before the Boron release of Salt.' 
) return load_args_and_kwargs(func, args, data=data) def load_args_and_kwargs(func, args, data=None): ''' Detect the args and kwargs that need to be passed to a function call, and check them against what was passed. ''' argspec = salt.utils.get_function_argspec(func) _args = [] _kwargs = {} invalid_kwargs = [] for arg in args: if isinstance(arg, string_types): string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632 if string_arg: # Don't append the version that was just derived from parse_cli # above, that would result in a 2nd call to # salt.utils.cli.yamlify_arg(), which could mangle the input. _args.append(arg) elif string_kwarg: salt.utils.warn_until( 'Boron', 'The list of function args and kwargs should be parsed ' 'by salt.utils.args.parse_input() before calling ' 'salt.minion.load_args_and_kwargs().' ) if argspec.keywords or string_kwarg.keys()[0] in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs.update(string_kwarg) else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. invalid_kwargs.append('{0}'.format(arg)) continue # if the arg is a dict with __kwarg__ == True, then its a kwarg elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True: for key, val in arg.iteritems(): if argspec.keywords or key in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs[key] = val else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. invalid_kwargs.append('{0}'.format(arg)) continue else: _args.append(arg) if invalid_kwargs: raise SaltInvocationError( 'The following keyword arguments are not valid: {0}' .format(', '.join(invalid_kwargs)) ) if argspec.keywords and isinstance(data, dict): # this function accepts **kwargs, pack in the publish data for key, val in data.items(): _kwargs['__pub_{0}'.format(key)] = val return _args, _kwargs class SMinion(object): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. ''' def __init__(self, opts): # Late setup of the opts grains, so we can log from the grains module opts['grains'] = salt.loader.grains(opts) self.opts = opts # Clean out the proc directory (default /var/cache/salt/minion/proc) if self.opts.get('file_client', 'remote') == 'remote': if isinstance(self.opts['master'], list): masters = self.opts['master'] if self.opts['random_master'] is True: shuffle(masters) self.opts['_safe_auth'] = False for master in masters: self.opts['master'] = master self.opts.update(resolve_dns(opts)) try: self.gen_modules() break except SaltClientError: log.warning(('Attempted to authenticate with master ' '{0} and failed'.format(master))) continue else: if self.opts['random_master'] is True: log.warning('random_master is True but there is only one master specified. 
Ignoring.') self.opts.update(resolve_dns(opts)) self.gen_modules() else: self.gen_modules() def gen_modules(self): ''' Load all of the modules for the minion ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['environment'], ).compile_pillar() self.functions = salt.loader.minion_mods(self.opts) self.returners = salt.loader.returners(self.opts, self.functions) self.states = salt.loader.states(self.opts, self.functions) self.rend = salt.loader.render(self.opts, self.functions) self.matcher = Matcher(self.opts, self.functions) self.functions['sys.reload_modules'] = self.gen_modules class MinionBase(object): def __init__(self, opts): self.opts = opts def _init_context_and_poller(self): self.context = zmq.Context() self.poller = zmq.Poller() def _prepare_minion_event_system(self): # Prepare the minion event system # # Start with the publish socket self._init_context_and_poller() hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5')) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(self.opts['id']).hexdigest()[:10] epub_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash) ) if os.path.exists(epub_sock_path): os.unlink(epub_sock_path) epull_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash) ) if os.path.exists(epull_sock_path): os.unlink(epull_sock_path) self.epub_sock = self.context.socket(zmq.PUB) if self.opts.get('ipc_mode', '') == 'tcp': epub_uri = 'tcp://127.0.0.1:{0}'.format( self.opts['tcp_pub_port'] ) epull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts['tcp_pull_port'] ) else: epub_uri = 'ipc://{0}'.format(epub_sock_path) salt.utils.check_ipc_path_max_len(epub_uri) epull_uri = 'ipc://{0}'.format(epull_sock_path) salt.utils.check_ipc_path_max_len(epull_uri) log.debug( '{0} PUB socket URI: {1}'.format( self.__class__.__name__, epub_uri ) ) log.debug( '{0} PULL socket URI: {1}'.format( self.__class__.__name__, epull_uri ) ) # Check to make sure the sock_dir is available, create if not default_minion_sock_dir = os.path.join( salt.syspaths.SOCK_DIR, 'minion' ) minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir) if not os.path.isdir(minion_sock_dir): # Let's try to create the directory defined on the configuration # file try: os.makedirs(minion_sock_dir, 0755) except OSError as exc: log.error('Could not create SOCK_DIR: {0}'.format(exc)) # Let's not fail yet and try using the default path if minion_sock_dir == default_minion_sock_dir: # We're already trying the default system path, stop now! 
raise if not os.path.isdir(default_minion_sock_dir): try: os.makedirs(default_minion_sock_dir, 0755) except OSError as exc: log.error('Could not create SOCK_DIR: {0}'.format(exc)) # Let's stop at this stage raise # Create the pull socket self.epull_sock = self.context.socket(zmq.PULL) # Securely bind the event sockets if self.opts.get('ipc_mode', '') != 'tcp': old_umask = os.umask(0177) try: log.info('Starting pub socket on {0}'.format(epub_uri)) self.epub_sock.bind(epub_uri) log.info('Starting pull socket on {0}'.format(epull_uri)) self.epull_sock.bind(epull_uri) finally: if self.opts.get('ipc_mode', '') != 'tcp': os.umask(old_umask) @staticmethod def process_schedule(minion, loop_interval): try: minion.schedule.eval() # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error( 'Exception {0} occurred in scheduled job'.format(exc) ) return loop_interval class MasterMinion(object): ''' Create a fully loaded minion function object for generic use on the master. What makes this class different is that the pillar is omitted, otherwise everything else is loaded cleanly. ''' def __init__( self, opts, returners=True, states=True, rend=True, matcher=True, whitelist=None): self.opts = salt.config.minion_config(opts['conf_file']) self.opts.update(opts) self.whitelist = whitelist self.opts['grains'] = salt.loader.grains(opts) self.opts['pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend self.mk_matcher = matcher self.gen_modules() def gen_modules(self): ''' Load all of the modules for the minion ''' self.functions = salt.loader.minion_mods( self.opts, whitelist=self.whitelist) if self.mk_returners: self.returners = salt.loader.returners(self.opts, self.functions) if self.mk_states: self.states = salt.loader.states(self.opts, self.functions) if self.mk_rend: self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matcher = Matcher(self.opts, self.functions) self.functions['sys.reload_modules'] = self.gen_modules class MultiMinion(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. 
''' def __init__(self, opts): super(MultiMinion, self).__init__(opts) def _gen_minions(self): ''' Set up and tune in the minion options ''' if not isinstance(self.opts['master'], list): log.error( 'Attempting to start a multimaster system with one master') return False minions = [] for master in set(self.opts['master']): s_opts = copy.copy(self.opts) s_opts['master'] = master try: minions.append(Minion(s_opts, 5, False)) except SaltClientError: minions.append(s_opts) return minions def minions(self): ''' Return a list of minion generators bound to the tune_in method ''' ret = {} minions = self._gen_minions() for minion in minions: if isinstance(minion, dict): ret[minion['master']] = minion else: ret[minion.opts['master']] = { 'minion': minion, 'generator': minion.tune_in_no_block()} return ret # Multi Master Tune In def tune_in(self): ''' Bind to the masters ''' self._prepare_minion_event_system() self.poller.register(self.epull_sock, zmq.POLLIN) module_refresh = False pillar_refresh = False # Prepare the minion generators minions = self.minions() loop_interval = int(self.opts['loop_interval']) last = time.time() auth_wait = self.opts['acceptance_wait_time'] max_wait = auth_wait * 6 while True: for minion in minions.values(): if isinstance(minion, dict): continue if not hasattr(minion, 'schedule'): continue loop_interval = self.process_schedule(minion, loop_interval) socks = dict(self.poller.poll(1)) if socks.get(self.epull_sock) == zmq.POLLIN: try: while True: package = self.epull_sock.recv(zmq.NOBLOCK) if package.startswith('module_refresh'): module_refresh = True elif package.startswith('pillar_refresh'): pillar_refresh = True elif package.startswith('fire_master'): tag, data = salt.utils.event.MinionEvent.unpack(package) log.debug('Forwarding master event tag={tag}'.format(tag=data['tag'])) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) self.epub_sock.send(package) except Exception: pass # get commands from each master for master, minion in minions.items(): if 'generator' not in minion: if time.time() - auth_wait > last: last = time.time() if auth_wait < max_wait: auth_wait += auth_wait try: if not isinstance(minion, dict): minions[master] = {'minion': minion} t_minion = Minion(minion, 5, False) minions[master]['minion'] = t_minion minions[master]['generator'] = t_minion.tune_in_no_block() auth_wait = self.opts['acceptance_wait_time'] except SaltClientError: continue else: continue if module_refresh: minion['minion'].module_refresh() if pillar_refresh: minion['minion'].pillar_refresh() minion['generator'].next() class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True): ''' Pass in the options dict ''' self._running = None # Warn if ZMQ < 3.2 if HAS_ZMQ and (not(hasattr(zmq, 'zmq_version_info')) or zmq.zmq_version_info() < (3, 2)): # PyZMQ 2.1.9 does not have zmq_version_info log.warning('You have a version of ZMQ less than ZMQ 3.2! There ' 'are known connection keep-alive issues with ZMQ < ' '3.2 which may result in loss of contact with ' 'minions. 
Please upgrade your ZMQ!') # Late setup the of the opts grains, so we can log from the grains # module opts['grains'] = salt.loader.grains(opts) # if master_type was changed, we might want to load our # master-variable from a user defined modules function if opts['master_type'] != 'str': # check for a valid keyword if opts['master_type'] == 'func': # split module and function and try loading the module mod, fun = opts['master'].split('.') try: master_mod = salt.loader.raw_mod(opts, mod, fun) if not master_mod: raise TypeError # we take whatever the module returns as master address opts['master'] = master_mod[mod + '.' + fun]() except TypeError: msg = ('Failed to evaluate master address from ' 'module \'{0}\''.format(opts['master'])) log.error(msg) sys.exit(1) log.info('Evaluated master from module: {0}'.format(opts['master'])) else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(1) opts.update(resolve_dns(opts)) super(Minion, self).__init__(opts) self.authenticate(timeout, safe) self.opts['pillar'] = salt.pillar.get_pillar( opts, opts['grains'], opts['id'], opts['environment'], ).compile_pillar() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() self.functions, self.returners = self._load_modules() self.matcher = Matcher(self.opts, self.functions) self.proc_dir = get_proc_dir(opts['cachedir']) self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners) self.grains_cache = self.opts['grains'] if 'proxy' in self.opts['pillar']: log.debug('I am {0} and I need to start some proxies for {0}'.format(self.opts['id'], self.opts['pillar']['proxy'])) for p in self.opts['pillar']['proxy']: log.debug('Starting {0} proxy.'.format(p)) pid = os.fork() if pid > 0: continue else: proxyminion = salt.ProxyMinion() proxyminion.start(self.opts['pillar']['proxy'][p]) self.clean_die(signal.SIGTERM, None) else: log.debug('I am {0} and I am not supposed to start any proxies. ' '(Likely not a problem)'.format(self.opts['id'])) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' mod_opts = {} for key, val in self.opts.items(): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self): ''' Return the functions and the returners loaded up from the loader module ''' # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory'])) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).get_memory_info() mem_limit = rss + vms + self.opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif self.opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') self.opts['grains'] = salt.loader.grains(self.opts) functions = salt.loader.minion_mods(self.opts) returners = salt.loader.returners(self.opts, functions) # we're done, reset the limits! 
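        # Restore the address-space limit recorded above so that only module
        # loading, not the rest of the minion's life, is capped by
        # modules_max_memory.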
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) return functions, returners def _fire_master(self, data=None, tag=None, events=None, pretag=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag else: return sreq = salt.payload.SREQ(self.opts['master_uri']) try: result = sreq.send('aes', self.crypticle.dumps(load)) try: data = self.crypticle.loads(result) except AuthenticationError: log.info("AES key changed, re-authenticating") # We can't decode the master's response to our event, # so we will need to re-authenticate. self.authenticate() except SaltReqTimeoutError: log.info("Master failed to respond. Preforming re-authenticating") self.authenticate() except Exception: log.info("fire_master failed: {0}".format(traceback.format_exc())) def _handle_payload(self, payload): ''' Takes a payload from the master publisher and does whatever the master wants done. ''' {'aes': self._handle_aes, 'pub': self._handle_pub, 'clear': self._handle_clear}[payload['enc']](payload['load'], payload['sig'] if 'sig' in payload else None) def _handle_aes(self, load, sig=None): ''' Takes the AES encrypted load, checks the signature if pub signatures are turned on, decrypts it, and runs the encapsulated instructions ''' # Verify that the signature is valid master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub') if sig and self.functions['config.get']('sign_pub_messages'): if not salt.crypt.verify_signature(master_pubkey_path, load, sig): raise AuthenticationError('Message signature failed to validate.') try: data = self.crypticle.loads(load) except AuthenticationError: # decryption of the payload failed, try to re-auth but wait # random seconds if set in config with random_reauth_delay if 'random_reauth_delay' in self.opts: reauth_delay = randint(0, int(self.opts['random_reauth_delay'])) log.debug('Waiting {0} seconds to re-authenticate'.format(reauth_delay)) time.sleep(reauth_delay) self.authenticate() data = self.crypticle.loads(load) # Verify that the publication is valid if 'tgt' not in data or 'jid' not in data or 'fun' not in data \ or 'arg' not in data: return # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
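        # Dispatch on tgt_type to the corresponding Matcher method
        # (grain_match, pcre_match, list_match, ...); when no tgt_type is
        # given the target is treated as a hostname glob.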
if 'tgt_type' in data: match_func = getattr(self.matcher, '{0}_match'.format(data['tgt_type']), None) if match_func is None or not match_func(data['tgt']): return else: if not self.matcher.glob_match(data['tgt']): return # If the minion does not have the function, don't execute, # this prevents minions that could not load a minion module # from returning a predictable exception #if data['fun'] not in self.functions: # return if 'user' in data: log.info( 'User {0[user]} Executing command {0[fun]} with jid ' '{0[jid]}'.format(data) ) else: log.info( 'Executing command {0[fun]} with jid {0[jid]}'.format(data) ) log.debug('Command details {0}'.format(data)) self._handle_decoded_payload(data) def _handle_pub(self, load): ''' Handle public key payloads ''' pass def _handle_clear(self, load): ''' Handle un-encrypted transmissions ''' pass def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' if isinstance(data['fun'], string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): target = Minion._thread_multi_return else: target = Minion._thread_return # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self if self.opts['multiprocessing']: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None process = multiprocessing.Process( target=target, args=(instance, self.opts, data) ) else: process = threading.Thread( target=target, args=(instance, self.opts, data), name=data['jid'] ) process.start() if not sys.platform.startswith('win'): process.join() @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' # this seems awkward at first, but it's a workaround for Windows # multiprocessing communication. 
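        # On Windows the Minion instance cannot be pickled into the child
        # process, so it arrives here as None and is rebuilt from opts.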
if not minion_instance: minion_instance = cls(opts) fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing']: salt.utils.daemonize_if(opts) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID {0}'.format(sdata['pid'])) with salt.utils.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] if function_name in minion_instance.functions: try: func = minion_instance.functions[data['fun']] args, kwargs = load_args_and_kwargs( func, data['arg'], data) sys.modules[func.__module__].__context__['retcode'] = 0 return_data = func(*args, **kwargs) if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, list): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data ret['retcode'] = sys.modules[func.__module__].__context__.get( 'retcode', 0 ) ret['success'] = True except CommandNotFoundError as exc: msg = 'Command required for {0!r} not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' except CommandExecutionError as exc: log.error( 'A command in {0!r} had a problem: {1}'.format( function_name, exc ), exc_info=log.isEnabledFor(logging.DEBUG) ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' except SaltInvocationError as exc: log.error( 'Problem executing {0!r}: {1}'.format( function_name, exc ), exc_info=log.isEnabledFor(logging.DEBUG) ) ret['return'] = 'ERROR executing {0!r}: {1}'.format( function_name, exc ) ret['out'] = 'nested' except TypeError as exc: trb = traceback.format_exc() aspec = salt.utils.get_function_argspec( minion_instance.functions[data['fun']] ) msg = ('TypeError encountered executing {0}: {1}. See ' 'debug log for more info. Possibly a missing ' 'arguments issue: {2}').format(function_name, exc, aspec) log.warning(msg, exc_info=log.isEnabledFor(logging.DEBUG)) ret['return'] = msg ret['out'] = 'nested' except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info=log.isEnabledFor(logging.DEBUG)) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' else: ret['return'] = '{0!r} is not available.'.format(function_name) ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] minion_instance._return_pub(ret) if data['ret']: ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job {0} {1}'.format( data['jid'], exc ) ) log.error(traceback.format_exc()) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' # this seems awkward at first, but it's a workaround for Windows # multiprocessing communication. 
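        # Same Windows workaround as _thread_return: rebuild the minion from
        # opts when the instance could not be pickled across processes.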
if not minion_instance: minion_instance = cls(opts) ret = { 'return': {}, 'success': {}, } for ind in range(0, len(data['fun'])): ret['success'][data['fun'][ind]] = False try: func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) ret['return'][data['fun'][ind]] = func(*args, **kwargs) ret['success'][data['fun'][ind]] = True except Exception as exc: trb = traceback.format_exc() log.warning( 'The minion function caused an exception: {0}'.format( exc ) ) ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] minion_instance._return_pub(ret) if data['ret']: for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job {0} {1}'.format( data['jid'], exc ) ) def _return_pub(self, ret, ret_cmd='_return'): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: {0}'.format(jid)) sreq = salt.payload.SREQ(self.opts['master_uri']) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['id'], 'jid': jid, 'fun': fun, 'load': ret.get('__load__')} load['return'] = {} for key, value in ret.items(): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in ret.items(): load[key] = value if 'out' in ret: if isinstance(ret['out'], string_types): load['out'] = ret['out'] else: log.error('Invalid outputter {0}. This is likely a bug.' .format(ret['out'])) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled fn_ = os.path.join( self.opts['cachedir'], 'minion_jobs', load['jid'], 'return.p') jdir = os.path.dirname(fn_) if not os.path.isdir(jdir): os.makedirs(jdir) salt.utils.fopen(fn_, 'w+b').write(self.serial.dumps(ret)) try: ret_val = sreq.send('aes', self.crypticle.dumps(load)) except SaltReqTimeoutError: msg = ('The minion failed to return the job information for job ' '{0}. This is often due to the master being shut down or ' 'overloaded. 
If the master is running consider incresing ' 'the worker_threads value.').format(jid) log.warn(msg) return '' if isinstance(ret_val, string_types) and not ret_val: # The master AES key has changed, reauth self.authenticate() ret_val = sreq.send('aes', self.crypticle.dumps(load)) log.trace('ret_val = {0}'.format(ret_val)) return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if not 'schedule' in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _set_tcp_keepalive(self): if hasattr(zmq, 'TCP_KEEPALIVE'): self.socket.setsockopt( zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive'] ) self.socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle'] ) self.socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt'] ) self.socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl'] ) def _set_reconnect_ivl(self): recon_delay = self.opts['recon_default'] if self.opts['recon_randomize']: recon_delay = randint(self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'] ) log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format( self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'], recon_delay) ) log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay)) self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay) def _set_reconnect_ivl_max(self): if hasattr(zmq, 'RECONNECT_IVL_MAX'): log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format( self.opts['recon_default'] + self.opts['recon_max']) ) self.socket.setsockopt( zmq.RECONNECT_IVL_MAX, self.opts['recon_max'] ) def _set_ipv4only(self): if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses self.socket.setsockopt(zmq.IPV4ONLY, 0) def _fire_master_minion_start(self): # Send an event to the master that the minion is live self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # dup name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def _setsockopts(self): self.socket.setsockopt(zmq.SUBSCRIBE, '') self.socket.setsockopt(zmq.IDENTITY, self.opts['id']) self._set_ipv4only() self._set_reconnect_ivl_max() self._set_tcp_keepalive() @property def master_pub(self): ''' Return the master publish port ''' return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'], port=self.publish_port) def authenticate(self, timeout=60, safe=True): ''' Authenticate with the master, this method breaks the functional paradigm, it will update the master information 
from a fresh sign in, signing in can occur as often as needed to keep up with the revolving master AES key. ''' log.debug( 'Attempting to authenticate with the Salt Master at {0}'.format( self.opts['master_ip'] ) ) auth = salt.crypt.Auth(self.opts) self.tok = auth.gen_token('salt') acceptance_wait_time = self.opts['acceptance_wait_time'] acceptance_wait_time_max = self.opts['acceptance_wait_time_max'] if not acceptance_wait_time_max: acceptance_wait_time_max = acceptance_wait_time tries = self.opts.get('auth_tries', 1) safe = self.opts.get('auth_safemode', safe) while True: creds = auth.sign_in(timeout, safe, tries) if creds != 'retry': log.info('Authentication with master successful!') break log.info('Waiting for minion key to be accepted by the master.') if acceptance_wait_time: log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time)) time.sleep(acceptance_wait_time) if acceptance_wait_time < acceptance_wait_time_max: acceptance_wait_time += acceptance_wait_time log.debug('Authentication wait time is {0}'.format(acceptance_wait_time)) self.aes = creds['aes'] if self.opts.get('syndic_master_publish_port'): self.publish_port = self.opts.get('syndic_master_publish_port') else: self.publish_port = creds['publish_port'] self.crypticle = salt.crypt.Crypticle(self.opts, self.aes) def module_refresh(self): ''' Refresh the functions and returners. ''' self.functions, self.returners = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners def pillar_refresh(self): ''' Refresh the pillar ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['environment'], ).compile_pillar() self.module_refresh() def environ_setenv(self, package): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' tag, data = salt.utils.event.MinionEvent.unpack(package) environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def clean_die(self, signum, frame): ''' Python does not handle the SIGTERM cleanly, if it is signaled exit the minion process cleanly ''' self._running = False exit(0) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This {0} was scheduled to stop. Not running ' '{0}.tune_in()'.format(self.__class__.__name__) ) return elif self._running is True: log.error( 'This {0} is already running. Not running ' '{0}.tune_in()'.format(self.__class__.__name__) ) return try: log.info( '{0} is starting as user \'{1}\''.format( self.__class__.__name__, salt.utils.get_user() ) ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. log.log( salt.utils.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting {0}'.format( self.__class__.__name__ ), exc_info=err ) # Main Minion Tune In def tune_in(self): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() # Properly exit if a SIGTERM is signalled signal.signal(signal.SIGTERM, self.clean_die) log.debug('Minion {0!r} trying to tune in'.format(self.opts['id'])) self._prepare_minion_event_system() self.socket = self.context.socket(zmq.SUB) self._set_reconnect_ivl() self._setsockopts() self.socket.connect(self.master_pub) self.poller.register(self.socket, zmq.POLLIN) self.poller.register(self.epull_sock, zmq.POLLIN) self._fire_master_minion_start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT salt.utils.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() time.sleep(.5) loop_interval = int(self.opts['loop_interval']) try: if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds! if self.opts['grains_refresh_every'] > 1: log.debug( 'Enabling the grains refresher. Will run every {0} minutes.'.format( self.opts['grains_refresh_every']) ) else: # Clean up minute vs. minutes in log message log.debug( 'Enabling the grains refresher. Will run every {0} minute.'.format( self.opts['grains_refresh_every']) ) self._refresh_grains_watcher( abs(self.opts['grains_refresh_every']) ) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format( exc) ) ping_interval = self.opts.get('ping_interval', 0) * 60 ping_at = None while self._running is True: loop_interval = self.process_schedule(self, loop_interval) try: socks = self._do_poll(loop_interval) if ping_interval > 0: if socks or not ping_at: ping_at = time.time() + ping_interval if ping_at < time.time(): log.debug('Ping master') self._fire_master('ping', 'minion_ping') ping_at = time.time() + ping_interval self._do_socket_recv(socks) # Check the event system if socks.get(self.epull_sock) == zmq.POLLIN: package = self.epull_sock.recv(zmq.NOBLOCK) log.debug('Handling event {0!r}'.format(package)) try: if package.startswith('module_refresh'): self.module_refresh() elif package.startswith('pillar_refresh'): self.pillar_refresh() elif package.startswith('grains_refresh'): if self.grains_cache != self.opts['grains']: self.pillar_refresh() self.grains_cache = self.opts['grains'] elif package.startswith('environ_setenv'): self.environ_setenv(package) elif package.startswith('fire_master'): tag, data = salt.utils.event.MinionEvent.unpack(package) log.debug('Forwarding master event tag={tag}'.format(tag=data['tag'])) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) self.epub_sock.send(package) except Exception: log.debug('Exception while handling events', exc_info=True) # Add an extra fallback in case a forked process leeks through multiprocessing.active_children() except zmq.ZMQError as exc: # The interrupt caused by python handling the # SIGCHLD. Throws this error with errno == EINTR. # Nothing to recieve on the zmq socket throws this error # with EAGAIN. 
# Both are safe to ignore if exc.errno != errno.EAGAIN and exc.errno != errno.EINTR: log.critical('Unexpected ZMQError while polling minion', exc_info=True) continue except SaltClientError: raise except Exception: log.critical( 'An exception occurred while polling the minion', exc_info=True ) def tune_in_no_block(self): ''' Executes the tune_in sequence but omits extra logging and the management of the event bus assuming that these are handled outside the tune_in sequence ''' self._pre_tune() self._init_context_and_poller() self.socket = self.context.socket(zmq.SUB) self._setsockopts() self.socket.connect(self.master_pub) self.poller.register(self.socket, zmq.POLLIN) self._fire_master_minion_start() loop_interval = int(self.opts['loop_interval']) # On first startup execute a state run if configured to do so self._state_run() time.sleep(.5) while self._running is True: try: socks = self._do_poll(loop_interval) self._do_socket_recv(socks) # Check the event system except zmq.ZMQError: # If a zeromq error happens recover yield True except Exception: log.critical( 'An exception occurred while polling the minion', exc_info=True ) yield True def _do_poll(self, loop_interval): log.trace('Check main poller timeout {0}'.format(loop_interval)) return dict(self.poller.poll( loop_interval * 1000) ) def _do_socket_recv(self, socks): if socks.get(self.socket) == zmq.POLLIN: payload = self.serial.loads(self.socket.recv(zmq.NOBLOCK)) log.trace('Handling payload') self._handle_payload(payload) def destroy(self): ''' Tear down the minion ''' self._running = False if getattr(self, 'poller', None) is not None: if isinstance(self.poller.sockets, dict): for socket in self.poller.sockets.keys(): if socket.closed is False: socket.close() self.poller.unregister(socket) else: for socket in self.poller.sockets: if socket[0].closed is False: socket[0].close() self.poller.unregister(socket[0]) if hasattr(self, 'epub_sock') and self.epub_sock.closed is False: self.epub_sock.close() if hasattr(self, 'epull_sock') and self.epull_sock.closed is False: self.epull_sock.close() if hasattr(self, 'socket') and self.socket.closed is False: self.socket.close() if hasattr(self, 'context') and self.context.closed is False: self.context.term() def __del__(self): self.destroy() class Syndic(Minion): ''' Make a Syndic minion, this minion will use the minion keys on the master to authenticate with a higher level master. ''' def __init__(self, opts): self._syndic_interface = opts.get('interface') self._syndic = True opts['loop_interval'] = 1 super(Syndic, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) def _handle_aes(self, load, sig=None): ''' Takes the AES encrypted load, decrypts it, and runs the encapsulated instructions ''' # If the AES authentication has changed, re-authenticate try: data = self.crypticle.loads(load) except AuthenticationError: self.authenticate() data = self.crypticle.loads(load) # Verify that the publication is valid if 'tgt' not in data or 'jid' not in data or 'fun' not in data \ or 'to' not in data or 'arg' not in data: return data['to'] = int(data['to']) - 1 if 'user' in data: log.debug( 'User {0[user]} Executing syndic command {0[fun]} with ' 'jid {0[jid]}'.format( data ) ) else: log.debug( 'Executing syndic command {0[fun]} with jid {0[jid]}'.format( data ) ) log.debug('Command details: {0}'.format(data)) self._handle_decoded_payload(data) def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. 
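        For the syndic this simply hands the publication to syndic_cmd(),
        which re-publishes it through the local client.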
''' self.syndic_cmd(data) def syndic_cmd(self, data): ''' Take the now clear load and forward it on to the client cmd ''' # Set up default tgt_type if 'tgt_type' not in data: data['tgt_type'] = 'glob' # Send out the publication self.local.pub(data['tgt'], data['fun'], data['arg'], data['tgt_type'], data['ret'], data['jid'], data['to']) # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the syndic ''' # Instantiate the local client self.local = salt.client.get_local_client(self.opts['_minion_conf_file']) self.local.event.subscribe('') self.local.opts['interface'] = self._syndic_interface signal.signal(signal.SIGTERM, self.clean_die) log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id'])) self.context = zmq.Context() # Start with the publish socket # Share the poller with the event object self.poller = self.local.event.poller self.socket = self.context.socket(zmq.SUB) self.socket.setsockopt(zmq.SUBSCRIBE, '') self.socket.setsockopt(zmq.IDENTITY, self.opts['id']) self._set_reconnect_ivl_max() self._set_tcp_keepalive() self.socket.connect(self.master_pub) self.poller.register(self.socket, zmq.POLLIN) # Send an event to the master that the minion is live self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'syndic_start' ) self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'syndic'), ) # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() loop_interval = int(self.opts['loop_interval']) self._reset_event_aggregation() while True: try: # Do all the maths in seconds timeout = loop_interval if self.event_forward_timeout is not None: timeout = min(timeout, self.event_forward_timeout - time.time()) if timeout >= 0: log.trace('Polling timeout: %f', timeout) socks = dict(self.poller.poll(timeout * 1000)) else: # This shouldn't really happen. # But there's no harm being defensive log.warning('Negative timeout in syndic main loop') socks = {} if socks.get(self.socket) == zmq.POLLIN: self._process_cmd_socket() if socks.get(self.local.event.sub) == zmq.POLLIN: self._process_event_socket() if (self.event_forward_timeout is not None and self.event_forward_timeout < time.time()): self._forward_events() # We don't handle ZMQErrors like the other minions # I've put explicit handling around the recieve calls # in the process_*_socket methods. If we see any other # errors they may need some kind of handling so log them # for now. 
except Exception: log.critical( 'An exception occurred while polling the syndic', exc_info=True ) def _process_cmd_socket(self): try: payload = self.serial.loads(self.socket.recv(zmq.NOBLOCK)) except zmq.ZMQError as e: # Swallow errors for bad wakeups or signals needing processing if e.errno != errno.EAGAIN and e.errno != errno.EINTR: raise log.trace('Handling payload') self._handle_payload(payload) def _reset_event_aggregation(self): self.jids = {} self.raw_events = [] self.event_forward_timeout = None def _process_event_socket(self): tout = time.time() + self.opts['syndic_max_event_process_time'] while tout > time.time(): try: event = self.local.event.get_event_noblock() except zmq.ZMQError as e: # EAGAIN indicates no more events at the moment # EINTR some kind of signal maybe someone trying # to get us to quit so escape our timeout if e.errno == errno.EAGAIN or e.errno == errno.EINTR: break raise log.trace('Got event {0}'.format(event['tag'])) if self.event_forward_timeout is None: self.event_forward_timeout = ( time.time() + self.opts['syndic_event_forward_timeout'] ) if salt.utils.is_jid(event['tag']) and 'return' in event['data']: if not 'jid' in event['data']: # Not a job return continue jdict = self.jids.setdefault(event['tag'], {}) if not jdict: jdict['__fun__'] = event['data'].get('fun') jdict['__jid__'] = event['data']['jid'] jdict['__load__'] = {} fstr = '{0}.get_jid'.format(self.opts['master_job_cache']) jdict['__load__'].update( self.mminion.returners[fstr](event['data']['jid']) ) jdict[event['data']['id']] = event['data']['return'] else: # Add generic event aggregation here if not 'retcode' in event['data']: self.raw_events.append(event) def _forward_events(self): log.trace('Forwarding events') if self.raw_events: self._fire_master(events=self.raw_events, pretag=tagify(self.opts['id'], base='syndic'), ) for jid in self.jids: self._return_pub(self.jids[jid], '_syndic_return') self._reset_event_aggregation() def destroy(self): ''' Tear down the syndic minion ''' # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. 
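        # Dropping the reference here keeps Minion.destroy() from closing
        # sockets that are owned by the local client's event bus.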
self.poller = None super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local class Matcher(object): ''' Use to return the value for matching calls from the master ''' def __init__(self, opts, functions=None): self.opts = opts if functions is None: functions = salt.loader.minion_mods(self.opts) self.functions = functions def confirm_top(self, match, data, nodegroups=None): ''' Takes the data passed to a top file environment and determines if the data matches this minion ''' matcher = 'compound' if not data: log.error('Received bad data when setting the match from the top ' 'file') return False for item in data: if isinstance(item, dict): if 'match' in item: matcher = item['match'] if hasattr(self, matcher + '_match'): funcname = '{0}_match'.format(matcher) if matcher == 'nodegroup': return getattr(self, funcname)(match, nodegroups) return getattr(self, funcname)(match) else: log.error('Attempting to match with unknown matcher: {0}'.format( matcher )) return False def glob_match(self, tgt): ''' Returns true if the passed glob matches the id ''' if type(tgt) != str: return False return fnmatch.fnmatch(self.opts['id'], tgt) def pcre_match(self, tgt): ''' Returns true if the passed pcre regex matches ''' return bool(re.match(tgt, self.opts['id'])) def list_match(self, tgt): ''' Determines if this host is on the list ''' if isinstance(tgt, string_types): tgt = tgt.split(',') return bool(self.opts['id'] in tgt) def grain_match(self, tgt, delim=':'): ''' Reads in the grains glob match ''' log.debug('grains target: {0}'.format(tgt)) if delim not in tgt: log.error('Got insufficient arguments for grains match ' 'statement from master') return False return salt.utils.subdict_match(self.opts['grains'], tgt, delim=delim) def grain_pcre_match(self, tgt, delim=':'): ''' Matches a grain based on regex ''' log.debug('grains pcre target: {0}'.format(tgt)) if delim not in tgt: log.error('Got insufficient arguments for grains pcre match ' 'statement from master') return False return salt.utils.subdict_match(self.opts['grains'], tgt, delim=delim, regex_match=True) def data_match(self, tgt): ''' Match based on the local data store on the minion ''' comps = tgt.split(':') if len(comps) < 2: return False val = self.functions['data.getval'](comps[0]) if val is None: # The value is not defined return False if isinstance(val, list): # We are matching a single component to a single list member for member in val: if fnmatch.fnmatch(str(member).lower(), comps[1].lower()): return True return False if isinstance(val, dict): if comps[1] in val: return True return False return bool(fnmatch.fnmatch( val, comps[1], )) def pillar_match(self, tgt, delim=':'): ''' Reads in the pillar glob match ''' log.debug('pillar target: {0}'.format(tgt)) if delim not in tgt: log.error('Got insufficient arguments for pillar match ' 'statement from master') return False return salt.utils.subdict_match(self.opts['pillar'], tgt, delim=delim) def ipcidr_match(self, tgt): ''' Matches based on ip address or CIDR notation ''' num_parts = len(tgt.split('/')) if num_parts > 2: # Target is not valid CIDR return False elif num_parts == 2: # Target is CIDR return salt.utils.network.in_subnet( tgt, addrs=self.opts['grains'].get('ipv4', []) ) else: # Target is an IPv4 address import socket try: socket.inet_aton(tgt) except socket.error: # Not a valid IPv4 address return False else: return tgt in self.opts['grains'].get('ipv4', []) def range_match(self, tgt): ''' Matches based on range cluster ''' if HAS_RANGE: range_ = 
seco.range.Range(self.opts['range_server']) try: return self.opts['grains']['fqdn'] in range_.expand(tgt) except seco.range.RangeException as exc: log.debug('Range exception in compound match: {0}'.format(exc)) return False return False def compound_match(self, tgt): ''' Runs the compound target check ''' if not isinstance(tgt, string_types): log.debug('Compound target received that is not a string') return False ref = {'G': 'grain', 'P': 'grain_pcre', 'I': 'pillar', 'L': 'list', 'S': 'ipcidr', 'E': 'pcre'} if HAS_RANGE: ref['R'] = 'range' results = [] opers = ['and', 'or', 'not', '(', ')'] tokens = tgt.split() for match in tokens: # Try to match tokens from the compound target, first by using # the 'G, X, I, L, S, E' matcher types, then by hostname glob. if '@' in match and match[1] == '@': comps = match.split('@') matcher = ref.get(comps[0]) if not matcher: # If an unknown matcher is called at any time, fail out return False results.append( str( getattr(self, '{0}_match'.format(matcher))( '@'.join(comps[1:]) ) ) ) elif match in opers: # We didn't match a target, so append a boolean operator or # subexpression if results or match in ['(', ')']: if match == 'not': if results[-1] == 'and': pass elif results[-1] == 'or': pass else: results.append('and') results.append(match) else: # seq start with oper, fail if match not in ['(', ')']: return False else: # The match is not explicitly defined, evaluate it as a glob results.append(str(self.glob_match(match))) results = ' '.join(results) try: return eval(results) # pylint: disable=W0123 except Exception: log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results)) return False return False def nodegroup_match(self, tgt, nodegroups): ''' This is a compatibility matcher and is NOT called when using nodegroups for remote execution, but is called when the nodegroups matcher is used in states ''' if tgt in nodegroups: return self.compound_match( salt.utils.minions.nodegroup_comp(tgt, nodegroups) ) return False class ProxyMinion(Minion): ''' This class instantiates a 'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. ''' def __init__(self, opts, timeout=60, safe=True): # pylint: disable=W0231 ''' Pass in the options dict ''' self._running = None # Warn if ZMQ < 3.2 if HAS_ZMQ and (not(hasattr(zmq, 'zmq_version_info')) or zmq.zmq_version_info() < (3, 2)): # PyZMQ 2.1.9 does not have zmq_version_info log.warning('You have a version of ZMQ less than ZMQ 3.2! There ' 'are known connection keep-alive issues with ZMQ < ' '3.2 which may result in loss of contact with ' 'minions. 
Please upgrade your ZMQ!') # Late setup the of the opts grains, so we can log from the grains # module # print opts['proxymodule'] fq_proxyname = 'proxy.'+opts['proxy']['proxytype'] self.proxymodule = salt.loader.proxy(opts, fq_proxyname) opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy']) opts['id'] = opts['proxyobject'].id(opts) opts.update(resolve_dns(opts)) self.opts = opts self.authenticate(timeout, safe) self.opts['pillar'] = salt.pillar.get_pillar( opts, opts['grains'], opts['id'], opts['environment'], ).compile_pillar() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() self.functions, self.returners = self._load_modules() self.matcher = Matcher(self.opts, self.functions) self.proc_dir = get_proc_dir(opts['cachedir']) self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners) self.grains_cache = self.opts['grains'] # self._running = True def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' return super(ProxyMinion, self)._prep_mod_opts() def _load_modules(self): ''' Return the functions and the returners loaded up from the loader module ''' return super(ProxyMinion, self)._load_modules()
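# ---------------------------------------------------------------------------
# Illustrative sketch, not part of Salt: the Matcher.compound_match() method
# above maps each '<letter>@<expr>' token of a compound target to one of its
# *_match methods, joins the boolean results with the and/or/not operators,
# and eval()s the resulting string. The standalone toy below reproduces that
# token -> boolean -> eval flow for just the 'G' (grain) and 'L' (list)
# matcher types, using made-up minion data; the names and data here are
# assumptions for demonstration only.
# ---------------------------------------------------------------------------
import fnmatch


def _toy_compound_match(tgt, minion_id, grains):
    '''Evaluate a tiny subset of compound targets against toy minion data.'''
    def grain_match(expr):
        # 'os:Ubu*' style: grain key and glob pattern separated by ':'
        key, _, pattern = expr.partition(':')
        return fnmatch.fnmatch(str(grains.get(key, '')), pattern)

    def list_match(expr):
        return minion_id in expr.split(',')

    ref = {'G': grain_match, 'L': list_match}
    opers = ('and', 'or', 'not', '(', ')')
    results = []
    for token in tgt.split():
        if '@' in token and token[1] == '@':
            prefix, expr = token.split('@', 1)
            matcher = ref.get(prefix)
            if matcher is None:
                # unknown matcher type: fail the whole expression, as
                # compound_match does
                return False
            results.append(str(matcher(expr)))
        elif token in opers:
            results.append(token)
        else:
            # bare words fall back to a glob match on the minion id
            results.append(str(fnmatch.fnmatch(minion_id, token)))
    # same eval-a-boolean-string trick the real compound_match uses
    return eval(' '.join(results))

# Example (hypothetical data):
#   _toy_compound_match('G@os:Ubu* and not L@web1,web2', 'db1', {'os': 'Ubuntu'})
#   -> True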
test_forward.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=import-self, invalid-name, unused-argument """ Tensorflow testcases ==================== This article is a test script to test tensorflow operator with Relay. """ from __future__ import print_function import threading import numpy as np import pytest try: import tensorflow.compat.v1 as tf except ImportError: import tensorflow as tf from tensorflow.python.framework import constant_op from tensorflow.python.framework import graph_util from tensorflow.python.ops import nn_ops from tensorflow.python.ops import nn from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.ops import init_ops from tensorflow.python.framework import function from tensorflow.python.framework import ops from tensorflow.python.framework import dtypes from tensorflow.python.ops import gen_functional_ops from distutils.version import LooseVersion import tvm from tvm import te from tvm import relay import tvm.relay.testing.tf as tf_testing from tvm.runtime.vm import VirtualMachine from packaging import version as package_version import tvm.testing ####################################################################### # Generic run functions for TVM & tensorflow # ------------------------------------------ def convert_to_list(x): if not isinstance(x, list): x = [x] return x tf_dtypes = { "float32": tf.float32, "float16": tf.float16, "float64": tf.float64, "int32": tf.int32, "uint8": tf.uint8, "int8": tf.int8, "int16": tf.int16, "uint16": tf.uint16, "int64": tf.int64, } def vmobj_to_list(o): if isinstance(o, tvm.nd.NDArray): return [o.asnumpy()] elif isinstance(o, tvm.runtime.container.ADT): result = [] for f in o: result.extend(vmobj_to_list(f)) return result elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue): if o.constructor.name_hint == "Cons": tl = vmobj_to_list(o.fields[1]) hd = vmobj_to_list(o.fields[0]) hd.extend(tl) return hd elif o.constructor.name_hint == "Nil": return [] elif "tensor_nil" in o.constructor.name_hint: return [0] elif "tensor" in o.constructor.name_hint: return [o.fields[0].asnumpy()] else: raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint) else: raise RuntimeError("Unknown object type: %s" % type(o)) def run_tvm_graph( graph_def, input_data, input_node, num_output=1, target="llvm", out_names=None, opt_level=3, mode="graph_runtime", cuda_layout="NCHW", layout=None, disabled_pass=None, ignore_in_shape=False, serialize=False, ): """ Generic function to compile on relay and execute on tvm """ input_data = convert_to_list(input_data) input_node = convert_to_list(input_node) if target == "cuda": layout = cuda_layout target_host = None if 
ignore_in_shape: shape_dict = None else: shape_dict = { e: i.shape if hasattr(i, "shape") else () for e, i in zip(input_node, input_data) } mod, params = relay.frontend.from_tensorflow( graph_def, layout=layout, shape=shape_dict, outputs=out_names ) ctx = tvm.context(target, 0) if mode == "debug": ex = relay.create_executor(mode, mod=mod, ctx=tvm.cpu(), target="llvm") inputs = [] for param in mod["main"].params: found = False for i, n in enumerate(input_node): if n == param.name_hint: found = True inputs.append(tvm.nd.array(input_data[i])) break # Interpreter doesn't bind constants, so still need to find in params if not found: inputs.append(tvm.nd.array(params[param.name_hint])) result = ex.evaluate()(*inputs) return vmobj_to_list(result) elif mode == "vm": with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass): print(mod["main"]) mod = relay.transform.InferType()(mod) vm_exec = relay.vm.compile(mod, target="llvm", params=params) if serialize: code, lib = vm_exec.save() vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib) vm = VirtualMachine(vm_exec, tvm.cpu()) inputs = {} for e, i in zip(input_node, input_data): inputs[e] = tvm.nd.array(i) result = vm.invoke("main", **inputs) return vmobj_to_list(result) else: with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass): graph, lib, params = relay.build(mod, target, target_host, params) from tvm.contrib import graph_runtime m = graph_runtime.create(graph, lib, ctx) # set inputs for e, i in zip(input_node, input_data): if e != "": m.set_input(e, tvm.nd.array(i)) m.set_input(**params) # execute m.run() # get outputs assert out_names is None or num_output == len( out_names ), "out_names: {} num_output: {}".format(out_names, num_output) tvm_output_list = [m.get_output(i).asnumpy() for i in range(num_output)] return tvm_output_list def run_tf_graph(sess, input_data, input_node, output_node): """ Generic function to execute tensorflow """ input_data = convert_to_list(input_data) input_node = convert_to_list(input_node) output_node = convert_to_list(output_node) tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node] input_dict = {e: input_data[i] for i, e in enumerate(input_node)} if len(input_node) == 1 and input_node[0] == "": output_data = sess.run(tensor) else: output_data = sess.run(tensor, input_dict) return output_data def compare_tf_with_tvm( in_data, in_name, out_name, init_global_variables=False, no_gpu=False, opt_level=3, mode="graph_runtime", cuda_layout="NCHW", add_shapes_to_graph_def=True, ): """Generic function to generate and compare tensorflow and TVM output""" def name_without_num(name): return name.split(":")[0] if ":" in name else name out_name = convert_to_list(out_name) out_node = [name_without_num(name) for name in out_name] in_data = convert_to_list(in_data) in_name = convert_to_list(in_name) in_node = [name_without_num(name) for name in in_name] with tf.Session() as sess: if init_global_variables: sess.run(variables.global_variables_initializer()) final_graph_def = ( tf_testing.AddShapesToGraphDef(sess, out_node) if add_shapes_to_graph_def else tf.get_default_graph().as_graph_def() ) tf_output = run_tf_graph(sess, in_data, in_name, out_name) for device in ["llvm", "cuda"]: ctx = tvm.context(device, 0) if not tvm.testing.device_enabled(device): print("Skip because %s is not enabled" % device) continue if no_gpu and device == "cuda": continue tvm_output = run_tvm_graph( final_graph_def, in_data, in_node, target=device, out_names=out_name, 
num_output=len(out_name), opt_level=opt_level, mode=mode, cuda_layout=cuda_layout, ) # since the names from tensorflow and relay runs are not exactly same, # first len(tf_output) will be compared for i in range(len(tf_output)): if not isinstance(tf_output[i], np.ndarray): assert len(tvm_output[i].shape) == 0 tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5) sess.close() def is_gpu_available(): from tensorflow.python.client import device_lib local_device_protos = device_lib.list_local_devices() gpu_list = [x.name for x in local_device_protos if x.device_type == "GPU"] if len(gpu_list) > 0: print("Tensorflow GPU:", gpu_list) return True else: return False ####################################################################### # Pooling # ------- def _test_pooling_iteration(input_shape, **kwargs): """ One iteration of pool operation with given shapes and attributes """ x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1 with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=input_shape, dtype="float32") nn_ops.pool(in_data, **kwargs) if kwargs["pooling_type"] == "MAX": out_name = "max_pool:0" else: out_name = "avg_pool:0" compare_tf_with_tvm(x, "Placeholder:0", out_name) def _test_pooling(input_shape, **kwargs): _test_pooling_iteration(input_shape, **kwargs) if is_gpu_available(): if len(input_shape) == 4: input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)] kwargs["data_format"] = "NCHW" _test_pooling_iteration(input_shape, **kwargs) @tvm.testing.uses_gpu def test_forward_pooling(): """ Pooling """ # TensorFlow only supports NDHWC for max_pool3d on CPU for pool_type in ["AVG", "MAX"]: # NDHWC is the default layout for max_pool3d and avg_pool3d in TensorFlow _test_pooling( input_shape=[1, 3, 32, 32, 32], window_shape=[2, 2, 2], padding="VALID", pooling_type=pool_type, dilation_rate=[1, 1, 1], strides=[2, 2, 2], ) _test_pooling( input_shape=[1, 3, 32, 32, 32], window_shape=[1, 1, 1], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1, 1], strides=[1, 1, 1], ) _test_pooling( input_shape=[1, 3, 32, 32, 32], window_shape=[2, 2, 2], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1, 1], strides=[2, 2, 2], ) # test cases for max_pool3d & avg_pool3d with layout NCDHW # TensorFlow pool3d doesn't support NCDHW on cpu if is_gpu_available(): _test_pooling( input_shape=[1, 3, 32, 32, 32], window_shape=[1, 1, 1], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1, 1], strides=[1, 1, 1], data_format="NCDHW", ) _test_pooling( input_shape=[1, 3, 32, 32, 32], window_shape=[2, 2, 2], padding="VALID", pooling_type=pool_type, dilation_rate=[1, 1, 1], strides=[2, 2, 2], data_format="NCDHW", ) _test_pooling( input_shape=[2, 9, 10, 2], window_shape=[1, 1], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1], strides=[1, 1], ) _test_pooling( input_shape=[2, 10, 9, 2], window_shape=[1, 1], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1], strides=[1, 1], ) _test_pooling( input_shape=[2, 9, 10, 2], window_shape=[2, 1], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1], strides=[1, 1], ) _test_pooling( input_shape=[2, 10, 9, 2], window_shape=[2, 3], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1], strides=[2, 1], ) # Tests involving SpaceToBatchND _test_pooling( input_shape=[1, 1, 2, 1], window_shape=[1, 1], padding="VALID", pooling_type=pool_type, dilation_rate=[1, 2], ) _test_pooling( input_shape=[1, 2, 1], window_shape=[1], padding="VALID", pooling_type=pool_type, 
dilation_rate=[2], ) ####################################################################### # Convolution # ----------- def _test_convolution( opname, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, deconv_output_shape=[], add_shapes_to_graph_def=True, ): """ One iteration of convolution with given shapes and attributes """ total_size_1 = np.prod(tensor_in_sizes) total_size_2 = np.prod(filter_in_sizes) # Initializes the input tensor with array containing incrementing # numbers from 1. data_array = [f * 1.0 for f in range(1, total_size_1 + 1)] filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)] with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32") in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32") if data_format == "NHWC": strides = [1] + strides + [1] dilations = [1] + dilations + [1] else: strides = [1, 1] + strides dilations = [1, 1] + dilations if opname == "conv": nn_ops.conv2d( in_data, in_filter, strides=strides, dilations=dilations, padding=padding, data_format=data_format, ) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "Conv2D:0", add_shapes_to_graph_def=add_shapes_to_graph_def, ) elif opname == "conv_transpose": nn_ops.conv2d_transpose( in_data, in_filter, output_shape=deconv_output_shape, strides=strides, padding=padding, data_format=data_format, ) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "conv2d_transpose:0", add_shapes_to_graph_def=add_shapes_to_graph_def, ) else: nn_ops.depthwise_conv2d_native( in_data, in_filter, strides=strides, dilations=dilations, padding=padding, data_format=data_format, ) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "DepthwiseConv2dNative:0", add_shapes_to_graph_def=add_shapes_to_graph_def, ) @tvm.testing.uses_gpu def test_forward_convolution(): if is_gpu_available(): _test_convolution("conv", [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW") _test_convolution("conv", [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW") _test_convolution("conv", [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW") _test_convolution("conv", [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW") _test_convolution( "depthwise", [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NCHW" ) _test_convolution( "depthwise", [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NCHW" ) _test_convolution( "depthwise", [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NCHW" ) _test_convolution( "depthwise", [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NCHW" ) _test_convolution( "depthwise", [4, 12, 17, 17], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NCHW" ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW", [4, 176, 8, 8], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [2, 2, 176, 32], [1, 1], [1, 1], "SAME", "NCHW", [4, 176, 8, 8], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [2, 2, 176, 32], [1, 1], [2, 2], "SAME", "NCHW", [4, 176, 15, 15], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [3, 3, 176, 32], [1, 1], [1, 1], "SAME", "NCHW", [4, 176, 8, 8], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [3, 3, 176, 32], [1, 1], [2, 2], "SAME", "NCHW", [4, 176, 15, 15], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [3, 3, 176, 32], [1, 1], [2, 
2], "SAME", "NCHW", [4, 176, 16, 16], ) _test_convolution( "conv_transpose", [4, 19, 8, 8], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW", [4, 19, 17, 17], ) _test_convolution( "conv_transpose", [4, 19, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW", [4, 124, 17, 17], ) _test_convolution( "conv_transpose", [4, 19, 17, 17], [3, 3, 124, 19], [1, 1], [1, 1], "SAME", "NCHW", [4, 124, 17, 17], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW", [4, 12, 17, 17], ) # kernel 2x2, strides (2,2) _test_convolution( "conv_transpose", [4, 19, 8, 8], [2, 2, 19, 19], [1, 1], [2, 2], "VALID", "NCHW", [4, 19, 16, 16], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [2, 2, 12, 32], [1, 1], [2, 2], "VALID", "NCHW", [4, 12, 16, 16], ) # output channel is 1 _test_convolution( "conv_transpose", [1, 19, 8, 8], [1, 1, 1, 19], [1, 1], [1, 1], "VALID", "NCHW", [1, 1, 8, 8], ) _test_convolution("conv", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC") _test_convolution("conv", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC") _test_convolution("conv", [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC") _test_convolution("conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC") _test_convolution( "conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC", add_shapes_to_graph_def=False, ) _test_convolution("depthwise", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NHWC") _test_convolution("depthwise", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NHWC") _test_convolution("depthwise", [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NHWC") _test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC") _test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC") _test_convolution( "depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC", add_shapes_to_graph_def=False, ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC", [4, 8, 8, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [2, 2, 176, 32], [1, 1], [1, 1], "SAME", "NHWC", [4, 8, 8, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [2, 2, 176, 32], [1, 1], [2, 2], "SAME", "NHWC", [4, 15, 15, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [3, 3, 176, 32], [1, 1], [1, 1], "SAME", "NHWC", [4, 8, 8, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [3, 3, 176, 32], [1, 1], [2, 2], "SAME", "NHWC", [4, 15, 15, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [3, 3, 176, 32], [1, 1], [2, 2], "SAME", "NHWC", [4, 16, 16, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC", [4, 17, 17, 19], ) _test_convolution( "conv_transpose", [4, 17, 17, 19], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC", [4, 17, 17, 124], ) _test_convolution( "conv_transpose", [4, 17, 17, 19], [3, 3, 124, 19], [1, 1], [1, 1], "SAME", "NHWC", [4, 17, 17, 124], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC", [4, 17, 17, 12], ) # kernel 2x2, strides (2,2) _test_convolution( "conv_transpose", [4, 8, 8, 19], [2, 2, 19, 19], [1, 1], [2, 2], "VALID", "NHWC", [4, 16, 16, 19], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [2, 2, 12, 32], [1, 1], [2, 2], "VALID", "NHWC", [4, 16, 16, 12], ) # output channel is 1 _test_convolution( 
"conv_transpose", [1, 8, 8, 19], [1, 1, 1, 19], [1, 1], [1, 1], "VALID", "NHWC", [1, 8, 8, 1], ) # Test without adding shapes to graph def _test_convolution( "conv_transpose", [4, 8, 8, 32], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC", [4, 8, 8, 176], add_shapes_to_graph_def=False, ) ####################################################################### # Convolution3D # ------------- def _test_convolution3d( opname, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, deconv_output_shape=[], add_shapes_to_graph_def=True, ): """ One iteration of 3D convolution with given shapes and attributes """ total_size_1 = np.prod(tensor_in_sizes) total_size_2 = np.prod(filter_in_sizes) # Initializes the input tensor with array containing incrementing # numbers from 1. data_array = [f * 1.0 for f in range(1, total_size_1 + 1)] filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)] with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32") in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32") if data_format == "NDHWC": strides = [1] + strides + [1] dilations = [1] + dilations + [1] else: strides = [1, 1] + strides dilations = [1, 1] + dilations if opname == "conv": nn_ops.conv3d( in_data, in_filter, strides=strides, dilations=dilations, padding=padding, data_format=data_format, ) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "Conv3D:0", cuda_layout="NCDHW", add_shapes_to_graph_def=add_shapes_to_graph_def, ) @tvm.testing.uses_gpu def test_forward_convolution3d(): if is_gpu_available(): _test_convolution3d( "conv", [4, 176, 8, 8, 8], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW" ) _test_convolution3d( "conv", [4, 19, 17, 17, 17], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW" ) _test_convolution3d( "conv", [4, 124, 17, 17, 17], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW" ) _test_convolution3d( "conv", [4, 12, 17, 17, 17], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW" ) _test_convolution3d( "conv", [4, 8, 8, 8, 176], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC" ) _test_convolution3d( "conv", [4, 17, 17, 17, 19], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC" ) _test_convolution3d( "conv", [4, 17, 17, 17, 124], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC" ) _test_convolution3d( "conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC" ) # Test without adding shapes to graph def _test_convolution3d( "conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC", add_shapes_to_graph_def=False, ) ####################################################################### # Convolution3D Transpose # ----------------------- def _test_convolution3d_transpose( data_shape, filter_shape, strides, padding, output_shape, data_format="NCDHW", add_shapes_to_graph_def=True, ): """ One iteration of 3D convolution transpose with given shapes and attributes """ dtype = "float32" data_array = np.random.uniform(size=data_shape).astype(dtype) filter_array = np.random.uniform(size=filter_shape).astype(dtype) if data_format == "NDHWC": strides = [1] + strides + [1] else: strides = [1, 1] + strides with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data_shape, dtype=dtype) in_filter = constant_op.constant(filter_array, shape=filter_shape, dtype=dtype) nn_ops.conv3d_transpose( in_data, in_filter, output_shape=output_shape, 
strides=strides, padding=padding, data_format=data_format, ) compare_tf_with_tvm( data_array, "Placeholder:0", "conv3d_transpose:0", cuda_layout="NDHWC", add_shapes_to_graph_def=add_shapes_to_graph_def, ) @tvm.testing.uses_gpu def test_forward_convolution3d_transpose(): if is_gpu_available(): _test_convolution3d_transpose( data_shape=[1, 10, 8, 8, 8], filter_shape=[1, 1, 1, 6, 10], strides=[1, 1, 1], padding="VALID", output_shape=[1, 6, 8, 8, 8], ) _test_convolution3d_transpose( data_shape=[4, 9, 8, 8, 8], filter_shape=[1, 1, 1, 6, 9], strides=[1, 1, 1], padding="VALID", output_shape=[4, 6, 8, 8, 8], ) _test_convolution3d_transpose( data_shape=[1, 3, 8, 8, 8], filter_shape=[1, 1, 1, 6, 3], strides=[2, 2, 2], padding="SAME", output_shape=[1, 6, 15, 15, 15], ) _test_convolution3d_transpose( data_shape=[1, 16, 8, 8, 8], filter_shape=[3, 3, 3, 6, 16], strides=[3, 3, 3], padding="VALID", output_shape=[1, 6, 24, 24, 24], ) _test_convolution3d_transpose( data_shape=[1, 8, 8, 8, 10], filter_shape=[1, 1, 1, 6, 10], strides=[1, 1, 1], padding="VALID", output_shape=[1, 8, 8, 8, 6], data_format="NDHWC", ) _test_convolution3d_transpose( data_shape=[4, 8, 8, 8, 9], filter_shape=[1, 1, 1, 6, 9], strides=[1, 1, 1], padding="VALID", output_shape=[4, 8, 8, 8, 6], data_format="NDHWC", ) _test_convolution3d_transpose( data_shape=[1, 8, 8, 8, 3], filter_shape=[1, 1, 1, 6, 3], strides=[2, 2, 2], padding="SAME", output_shape=[1, 15, 15, 15, 6], data_format="NDHWC", ) _test_convolution3d_transpose( data_shape=[1, 8, 8, 8, 16], filter_shape=[3, 3, 3, 6, 16], strides=[3, 3, 3], padding="VALID", output_shape=[1, 24, 24, 24, 6], data_format="NDHWC", ) # Test without adding shapes to graph def _test_convolution3d_transpose( data_shape=[1, 8, 8, 8, 16], filter_shape=[3, 3, 3, 6, 16], strides=[3, 3, 3], padding="VALID", output_shape=[1, 24, 24, 24, 6], data_format="NDHWC", add_shapes_to_graph_def=False, ) ####################################################################### # BiasAdd # ----------- def _test_biasadd(tensor_in_sizes, data_format): """ One iteration of biasadd with given shapes and attributes """ total_size_1 = 1 for s in tensor_in_sizes: total_size_1 *= s tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == "NCHW" else [tensor_in_sizes[3]] total_size_2 = tensor_bias_sizes[0] # Initializes the input tensor with array containing incrementing # numbers from 1. 
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)] bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)] with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32") in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype="float32") nn_ops.bias_add(in_data, in_bias, data_format=data_format) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "BiasAdd:0" ) @tvm.testing.uses_gpu def test_forward_biasadd(): if is_gpu_available(): _test_biasadd([4, 176, 8, 8], "NCHW") _test_biasadd([1, 100, 1, 1], "NCHW") _test_biasadd([4, 19, 17, 17], "NCHW") _test_biasadd([4, 124, 3, 3], "NCHW") _test_biasadd([4, 8, 8, 176], "NHWC") _test_biasadd([1, 1, 1, 100], "NHWC") _test_biasadd([4, 17, 17, 19], "NHWC") _test_biasadd([4, 3, 3, 124], "NHWC") def _test_forward_where(input_shape): with tf.Graph().as_default(): dtype = tf.float32 t = tf.constant( np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape).astype(dtype.name) ) out = tf.where(t) compare_tf_with_tvm([], [], out.name, mode="debug") compare_tf_with_tvm([], [], out.name, mode="vm") def test_forward_argwhere(): _test_forward_where((5,)) _test_forward_where((5, 5)) _test_forward_where((5, 5, 5)) _test_forward_where((5, 5, 5, 5)) _test_forward_where((5, 5, 5, 5, 5)) ####################################################################### # SpaceToBatchND # -------------- def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"): data = np.random.uniform(0, 5, size=input_shape).astype(dtype) with tf.Graph().as_default(): in_data = tf.placeholder(shape=input_shape, dtype=dtype) out = tf.space_to_batch_nd(in_data, block_shape, paddings) compare_tf_with_tvm(data, in_data.name, out.name) def _test_space_to_batch_nd_infer_paddings(input_shape, block_shape, dtype="int32"): data = np.random.uniform(0, 5, size=input_shape).astype(dtype) padding_np = np.array([0, 1]).astype(np.int32).reshape((1, 2)) with tf.Graph().as_default(): in_data = tf.placeholder(shape=input_shape, dtype=dtype) const1 = tf.constant(padding_np, dtype=tf.int32) # make paddings an input to tf.transpose, but not an input to the graph, # so it can be extracted with infer_value_simulated paddings = tf.reverse(const1, axis=[-1]) out = tf.space_to_batch_nd(in_data, block_shape, paddings) compare_tf_with_tvm(data, in_data.name, out.name) def test_forward_space_to_batch_nd(): # test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d _test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]]) _test_space_to_batch_nd(input_shape=[1, 2, 2, 3], block_shape=[2, 2], paddings=[[0, 0], [0, 0]]) _test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]]) _test_space_to_batch_nd( input_shape=[2, 2, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [2, 0]], dtype="int64" ) # pylint: disable=line-too-long # https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/spacetobatch_op_test.py _test_space_to_batch_nd(input_shape=[2, 3], block_shape=[2], paddings=[[1, 0]], dtype="float32") _test_space_to_batch_nd( input_shape=[2, 3, 2], block_shape=[2], paddings=[[1, 0]], dtype="float64" ) _test_space_to_batch_nd_infer_paddings(input_shape=[2, 3, 2], block_shape=[2]) ####################################################################### # BatchToSpaceND # -------------- def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"): 
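    """ One iteration of batch_to_space_nd operation with given input shape, block shape and crops """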
data = np.random.uniform(0, 5, size=input_shape).astype(dtype) with tf.Graph().as_default(): in_data = tf.placeholder(shape=input_shape, dtype=dtype) out = tf.batch_to_space_nd(in_data, block_shape, crops) compare_tf_with_tvm(data, in_data.name, out.name) def test_forward_batch_to_space_nd(): # test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d _test_batch_to_space_nd(input_shape=[4, 1, 1, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]]) _test_batch_to_space_nd(input_shape=[4, 1, 1, 3], block_shape=[2, 2], crops=[[0, 0], [0, 0]]) _test_batch_to_space_nd(input_shape=[4, 2, 2, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]]) _test_batch_to_space_nd( input_shape=[8, 1, 3, 1], block_shape=[2, 2], crops=[[0, 0], [2, 0]], dtype="int64" ) # pylint: disable=line-too-long # https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/batchtospace_op_test.py _test_batch_to_space_nd( input_shape=[18, 2, 1, 2], block_shape=[2, 3], crops=[[1, 1], [0, 0]], dtype="float32" ) _test_batch_to_space_nd( input_shape=[20, 5, 8, 7], block_shape=[2, 2], crops=[[1, 1], [1, 1]], dtype="float64" ) ####################################################################### # Reshape # ------- def _test_reshape(data, out_shape): """ One iteration of reshape operation with given data and out shape """ with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) array_ops.reshape(in_data, out_shape) compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0") def _test_reshape_with_call(): """ relay.expr.Call as shape """ data = np.zeros((6, 4, 2)) with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) out_shape = tf.constant([1, 2, 3], dtype="int32") out_shape = tf.multiply(out_shape, 2) array_ops.reshape(in_data, out_shape) compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0") def _test_reshape_like(data, shape_like): """ A special case for reshape. 
""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) in_shape_like = array_ops.placeholder(shape=shape_like.shape, dtype=data.dtype) out_shape = array_ops.shape(in_shape_like) array_ops.reshape(in_data, out_shape) compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0") def _test_reshape_symbolic(data, a_data, b_data): with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) a = array_ops.placeholder(shape=a_data.shape, dtype=a_data.dtype) b = array_ops.placeholder(shape=b_data.shape, dtype=b_data.dtype) newshape = tf.add(a, b) out = array_ops.reshape(in_data, newshape) for mode in ["debug", "vm"]: compare_tf_with_tvm( [data, a_data, b_data], [in_data.name, a.name, b.name], out.name, mode=mode ) def test_forward_reshape(): _test_reshape(np.arange(6.0), [2, 3]) _test_reshape(np.arange(6), [-1, 2]) _test_reshape(np.arange(6), [3, -1]) _test_reshape(np.arange(6), [-1]) _test_reshape_with_call() _test_reshape_like(np.zeros((3, 6)), np.zeros((9, 2))) _test_reshape_symbolic(np.arange(6.0), np.array([2, 0]), np.array([0, 3])) _test_reshape_symbolic(np.arange(6), np.array([-1, 0]), np.array([0, 2])) _test_reshape_symbolic(np.arange(6), np.array([3, 0]), np.array([3, -1])) _test_reshape_symbolic(np.arange(6), np.array([0]), np.array([-1])) ####################################################################### # DepthToSpace # ------------ def _test_depthtospace(data, block_size): """ One iteration of depth_to_space operation with given data and block size """ with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) array_ops.depth_to_space(in_data, block_size) compare_tf_with_tvm(data, "Placeholder:0", "DepthToSpace:0") def test_forward_depthtospace(): _test_depthtospace(np.random.normal(size=[1, 32, 32, 4]), 2) _test_depthtospace(np.random.normal(size=[1, 16, 8, 32]), 4) ####################################################################### # SpaceToDepth # ------------ def _test_spacetodepth(data, block_size): """ One iteration of space_to_depth operation with given data and block size """ with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) array_ops.space_to_depth(in_data, block_size) compare_tf_with_tvm(data, "Placeholder:0", "SpaceToDepth:0") def test_forward_spacetodepth(): _test_spacetodepth(np.random.normal(size=[1, 32, 32, 4]), 2) _test_spacetodepth(np.random.normal(size=[1, 16, 8, 32]), 4) ####################################################################### # Squeeze # ------- def _test_squeeze(data, squeeze_dims=None): """ One iteration of squeeze """ if squeeze_dims is None: squeeze_dims = [] with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) if squeeze_dims: array_ops.squeeze(in_data, squeeze_dims) else: array_ops.squeeze(in_data) compare_tf_with_tvm(data, "Placeholder:0", "Squeeze:0") def test_forward_squeeze(): """ Squeeze """ # Nothing to squeeze. _test_squeeze(np.arange(2).reshape((2))) _test_squeeze(np.arange(6).reshape((2, 3))) # Squeeze the middle element away. _test_squeeze(np.arange(4).reshape((2, 1, 2))) # Squeeze on both ends. _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1))) # Positive squeeze dim index. _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0]) _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [2, 4]) _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0, 4, 2]) # Negative squeeze dim index. 
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-1]) _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5]) _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5, -1]) ####################################################################### # TensorArray # ----------- def test_tensor_array_write_read(): if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"): pytest.skip("Needs fixing for tflite >= 1.15.0") def run(dtype_str, infer_shape, element_shape): with tf.Graph().as_default(): dtype = tf_dtypes[dtype_str] np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str) in_data = [np_data, np_data] t1 = tf.constant(np_data, dtype=dtype) t2 = tf.constant(np_data, dtype=dtype) ta1 = tf.TensorArray( dtype=dtype, size=2, infer_shape=infer_shape, element_shape=element_shape ) ta2 = ta1.write(0, t1) ta3 = ta2.write(1, t2) out = ta3.read(0) g = tf.get_default_graph() compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="vm") for dtype in ["float32", "int8"]: run(dtype, False, None) run(dtype, False, tf.TensorShape([None, 2])) run(dtype, True, None) def test_tensor_array_scatter(): if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"): pytest.skip("Needs fixing for tflite >= 1.15.0") def run(dtype_str, infer_shape): with tf.Graph().as_default(): dtype = tf_dtypes[dtype_str] if infer_shape: element_shape = tf.TensorShape([tf.Dimension(None)]) else: element_shape = None t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str), dtype=dtype) indices = tf.constant([2, 1, 0]) ta1 = tf.TensorArray( dtype=dtype, size=3, infer_shape=infer_shape, element_shape=element_shape ) ta2 = ta1.scatter(indices, t) out0 = ta2.read(0) out1 = ta2.read(1) out2 = ta2.read(2) g = tf.get_default_graph() compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="vm") compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="vm") compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="vm") for dtype in ["float32", "int8"]: run(dtype, False) run(dtype, True) def test_tensor_array_gather(): if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"): pytest.skip("Needs fixing for tflite >= 1.15.0") def run(dtype_str, infer_shape): with tf.Graph().as_default(): dtype = tf_dtypes[dtype_str] t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str)) scatter_indices = tf.constant([2, 1, 0]) gather_indices = tf.constant([1, 2]) ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape) ta2 = ta1.scatter(scatter_indices, t) t1 = ta2.gather(gather_indices) g = tf.get_default_graph() compare_tf_with_tvm([], [], ["TensorArrayGatherV3:0"], mode="vm") for dtype in ["float32", "int8"]: run(dtype, True) def test_tensor_array_split(): if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"): pytest.skip("Needs fixing for tflite >= 1.15.0") def run(dtype_str, infer_shape): with tf.Graph().as_default(): dtype = tf_dtypes[dtype_str] t = tf.constant( np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype( dtype_str ), dtype=dtype, ) split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32) ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape) ta2 = ta1.split(t, split_length) out0 = ta2.read(0) out1 = ta2.read(1) out2 = ta2.read(2) out3 = ta2.read(3) g = tf.get_default_graph() compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="debug") compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="debug") compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="debug") 
compare_tf_with_tvm([], [], ["TensorArrayReadV3_3:0"], mode="debug") for dtype in ["float32", "int8"]: run(dtype, False) run(dtype, True) def test_tensor_array_concat(): if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"): pytest.skip("Needs fixing for tflite >= 1.15.0") def run(dtype_str, infer_shape): with tf.Graph().as_default(): dtype = tf_dtypes[dtype_str] t = tf.constant( np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype( dtype_str ), dtype=dtype, ) split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32) ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape) ta2 = ta1.split(t, split_length) t = ta2.concat() out = tf.identity(t) compare_tf_with_tvm([], [], ["Identity:0"], mode="debug") for dtype in ["float32", "int8"]: run(dtype, False) run(dtype, True) def test_tensor_array_size(): if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"): pytest.skip("Needs fixing for tflite >= 1.15.0") def run(dtype_str, infer_shape): with tf.Graph().as_default(): dtype = tf_dtypes[dtype_str] np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str) in_data = [np_data, np_data] t1 = tf.constant(np_data, dtype=dtype) t2 = tf.constant(np_data, dtype=dtype) ta1 = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape) ta2 = ta1.write(0, t1) ta3 = ta2.write(1, t2) out = ta3.size() g = tf.get_default_graph() compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug") for dtype in ["float32", "int8"]: run(dtype, False) run(dtype, True) def test_tensor_array_stack(): def run(dtype_str, infer_shape): if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"): pytest.skip("Needs fixing for tflite >= 1.15.0") with tf.Graph().as_default(): dtype = tf_dtypes[dtype_str] t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str)) scatter_indices = tf.constant([2, 1, 0]) ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape) ta2 = ta1.scatter(scatter_indices, t) t1 = ta2.stack() print(t1) g = tf.get_default_graph() compare_tf_with_tvm([], [], ["TensorArrayStack/TensorArrayGatherV3:0"], mode="vm") for dtype in ["float32", "int8"]: run(dtype, True) def test_tensor_array_unstack(): def run(dtype_str, input_shape, infer_shape): if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"): pytest.skip("Needs fixing for tflite >= 1.15.0") with tf.Graph().as_default(): dtype = tf_dtypes[dtype_str] t = tf.constant(np.random.choice([0, 1, 2, 3], size=input_shape).astype(dtype.name)) ta1 = tf.TensorArray(dtype=dtype, infer_shape=infer_shape, size=input_shape[0]) ta2 = ta1.unstack(t) out0 = ta2.size() out1 = ta2.read(0) compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug") compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="debug") for dtype in ["float32", "int8"]: run(dtype, (5,), False) run(dtype, (5, 5), True) run(dtype, (5, 5, 5), False) run(dtype, (5, 5, 5, 5), True) ####################################################################### # ConcatV2 # -------- def _test_concat_v2(shape1, shape2, dim): """ One iteration of ConcatV2 """ with tf.Graph().as_default(): dtype = "float32" in1 = tf.placeholder(shape=shape1, dtype=dtype, name="in1") in2 = tf.placeholder(shape=shape2, dtype=dtype, name="in2") array_ops.concat_v2([in1, in2], dim) np_data1 = np.random.uniform(size=shape1).astype(dtype) np_data2 = np.random.uniform(size=shape2).astype(dtype) compare_tf_with_tvm([np_data1, np_data2], ["in1:0", "in2:0"], "ConcatV2:0") def test_forward_concat_v2(): if tf.__version__ < 
LooseVersion("1.4.1"): return _test_concat_v2([2, 3], [2, 3], 0) _test_concat_v2([10, 3, 5], [2, 3, 5], 0) _test_concat_v2([2, 3], [2, 3], 1) _test_concat_v2([5, 8], [5, 4], 1) _test_concat_v2([2, 8, 5], [2, 8, 6], -1) ####################################################################### # Sigmoid # ------- def _test_sigmoid(data): """ One iteration of sigmoid """ with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) sigmoid_out = math_ops.sigmoid(in_data) compare_tf_with_tvm(data, "Placeholder:0", "Sigmoid:0") def test_forward_sigmoid(): """ Sigmoid """ _test_sigmoid(np.random.uniform(size=(3, 4, 4, 3)).astype("float32")) ####################################################################### # Argmin/Argmax # ------------- def _test_argx(func, data, **kwargs): with tf.Graph().as_default(): inp = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="c0") func(inp, name="argx0", **kwargs) compare_tf_with_tvm(data, "c0:0", "argx0:0") def test_forward_argminmax(): for output_type in [tf.int64, tf.int32]: for axis in [None, 0, 1, 2]: data = np.random.uniform(size=(8, 4, 9)).astype("float32") _test_argx(tf.argmax, data=data, axis=axis, output_type=output_type) _test_argx(tf.argmin, data=data, axis=axis, output_type=output_type) ####################################################################### # Variable # -------- def _test_variable(data): """ One iteration of a variable """ tf.reset_default_graph() with tf.Graph().as_default(): input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype) input_tensor = array_ops.reshape(input_op, data.shape) size = input_tensor.shape.dims[1] with variable_scope.variable_scope("linear", reuse=None): w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype) math_ops.matmul(input_tensor, w) compare_tf_with_tvm(data, "Placeholder:0", "MatMul:0", init_global_variables=True) def test_forward_variable(): """Variable type op test""" _test_variable(np.random.uniform(size=(32, 100)).astype("float32")) @tvm.testing.parametrize_targets("llvm", "cuda") def test_read_variable_op(target, ctx): """ Read Variable op test """ tf.reset_default_graph() data = np.random.uniform(size=(32, 100)).astype("float32") input_tensor = array_ops.placeholder(shape=data.shape, dtype=data.dtype) size = input_tensor.shape.dims[1] var_data = np.random.uniform(-5, 5, size=[size, size]).astype(np.float32) input_var = tf.Variable(var_data, name="var1", use_resource=True) math_ops.matmul(input_tensor, input_var) out_name = ["MatMul:0"] out_node = ["MatMul"] in_name = ["Placeholder:0"] in_node = ["Placeholder"] in_data = [data] with tf.Session() as sess: sess.run(variables.global_variables_initializer()) final_graph_def = sess.graph.as_graph_def(add_shapes=True) tf_output = run_tf_graph(sess, in_data, in_name, out_name) shape_dict = {e: i.shape for e, i in zip(in_name, in_data)} with pytest.raises(Exception) as execinfo: mod, params = relay.frontend.from_tensorflow( final_graph_def, layout=None, shape=shape_dict, outputs=None ) assert execinfo.value.args[0].startswith("Graph is not frozen. 
Provide a frozen graph") # Now convert the variables to constant and run inference on the converted graph final_graph_def = tf.graph_util.convert_variables_to_constants( sess, sess.graph.as_graph_def(add_shapes=True), out_node, ) tvm_output = run_tvm_graph( final_graph_def, in_data, in_node, target=target, out_names=out_name, num_output=len(out_name), ) for i in range(len(tf_output)): tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-4, rtol=1e-5) sess.close() ####################################################################### # MatMul, BatchMatMul, BatchMatMulV2 # ---------------------------------- def _test_matmul(i, j, k, dtype, outer=None): """ One iteration of matmul """ A_shape_init = [i, j] B_shape_init = [j, k] for transpose_a in [False, True]: for transpose_b in [False, True]: outer = outer or [] A_shape = outer + (A_shape_init[::-1] if transpose_a else A_shape_init) B_shape = outer + (B_shape_init[::-1] if transpose_b else B_shape_init) with tf.Graph().as_default(): A = tf.placeholder(shape=A_shape, dtype=dtype, name="A") B = tf.placeholder(shape=B_shape, dtype=dtype, name="B") result = tf.matmul(A, B, transpose_a=transpose_a, transpose_b=transpose_b) A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype) B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype) compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name) def test_forward_matmul(): """ MatMul op test""" _test_matmul(1, 3, 6, "int32") _test_matmul(5, 3, 1, "float64") def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False): with tf.Graph().as_default(): A = tf.placeholder(shape=A_shape, dtype=dtype, name="A") B = tf.placeholder(shape=B_shape, dtype=dtype, name="B") result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul") A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype) B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype) compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name) def test_forward_batch_matmul(): """ TF op BatchMatMul, BatchMatMulV2 test""" _test_batch_matmul((3, 5, 4), (3, 4, 5), "int32") _test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True) _test_batch_matmul((3, 5, 4), (3, 5, 4), "int32", True, False) _test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", False, True) _test_batch_matmul((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "int32") _test_batch_matmul((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 6, 5), "float32", True, True) _test_batch_matmul((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False) _test_batch_matmul((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True) ####################################################################### # SparseTensorDenseMatMul # ---------------------------------- def _test_sparse_dense_matmul(indices, values, A_shape, B_shape, dtype, flip=False): """ One iteration of sparse_dense_matmul """ # TODO(ANSHUMAN87): Support adjoint options too for adjoint_a in [False]: for adjoint_b in [False]: with tf.Graph().as_default(): A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape) B = tf.placeholder(shape=B_shape, dtype=dtype, name="B") if flip: result = tf.sparse.sparse_dense_matmul( B, A_sp, adjoint_a=adjoint_a, adjoint_b=adjoint_b ) else: result = tf.sparse.sparse_dense_matmul( A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b ) B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype) compare_tf_with_tvm([B_np], [B.name], result.name) def test_forward_sparse_dense_matmul(): """ sparse_dense_matmul op 
test""" ################################################################### # # In order to create a SparseTensor, it requires 3 input as below: # SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) # # Above Sparse can be represented in Dense as below : # [[1, 0, 0, 0] # [0, 0, 2, 0] # [0, 0, 0, 0]] # # ------------------------------------------------------------------ _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], "float32") _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32") _test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32") _test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [7, 9], [9, 5], "float32") _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [4, 3], [3, 4], "float32", True) _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32", True) _test_sparse_dense_matmul( [[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32", True ) _test_sparse_dense_matmul( [[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], "float32", True ) ####################################################################### # StridedSlice # ------------ def _test_stridedslice( ip_shape, begin, end, stride, dtype, begin_mask=0, end_mask=0, new_axis_mask=0, shrink_axis_mask=0, ellipsis_mask=0, ): """ One iteration of a Stridedslice """ tf.reset_default_graph() np_data = np.random.uniform(size=ip_shape).astype(dtype) with tf.Graph().as_default(): if len(ip_shape) == 0: in_data = tf.constant(np_data, dtype) else: in_data = tf.placeholder(dtype, ip_shape, name="in_data") tf.strided_slice( in_data, begin, end, stride, begin_mask=begin_mask, end_mask=end_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask, ellipsis_mask=ellipsis_mask, name="strided_slice", ) if len(ip_shape) == 0: compare_tf_with_tvm(None, "", "strided_slice:0") else: compare_tf_with_tvm(np_data, "in_data:0", "strided_slice:0") def test_forward_stridedslice(): """test StridedSlice""" _test_stridedslice([], [0], [0], [1], "float32", new_axis_mask=1) _test_stridedslice([2], [1], [1], [1], "float32", shrink_axis_mask=1) _test_stridedslice([2, 1], [0], [1], [1], "float32", shrink_axis_mask=1) _test_stridedslice([2, 3, 4], [0], [1], [1], "float32", shrink_axis_mask=8) _test_stridedslice([3, 4, 3], [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32") _test_stridedslice([3, 4, 3], [1, 0], [4, 3], [2, 1], "float32", ellipsis_mask=8) _test_stridedslice([3, 4, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2) _test_stridedslice([3, 4, 5, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2) _test_stridedslice([3, 4, 5, 3], [1, 0, 1], [4, 2, 2], [2, 1, 1], "float32", ellipsis_mask=2) _test_stridedslice([3, 4, 3], [1, 1, 0], [4, 4, 2], [2, 1, 1], "float32", new_axis_mask=5) _test_stridedslice( [3, 4, 3], [1, 1, 1], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=4 ) _test_stridedslice( [6, 4, 5], [1, 1, 1], [6, 3, 4], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=5 ) _test_stridedslice( [3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=4, new_axis_mask=2 ) _test_stridedslice( [3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3 ) _test_stridedslice( [3, 4, 3], [1, 1, 0], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3 ) _test_stridedslice( [3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=2 ) _test_stridedslice((3, 4), [1, 0], 
[4, 4], [1, 1], "float32", shrink_axis_mask=2) _test_stridedslice( [3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=2 ) _test_stridedslice( [3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=1, new_axis_mask=2 ) _test_stridedslice( [3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=1 ) _test_stridedslice( [3, 4, 5, 4, 5, 6], [0, 0], [2, 3], [1, 1], "float32", shrink_axis_mask=5, new_axis_mask=1 ) _test_stridedslice( [3, 4, 5, 4, 5, 6], [0, 0, 1, 2, 1], [2, 3, 4, 5, 3], [1, 1, 2, 2, 1], "float32", shrink_axis_mask=5, new_axis_mask=1, ellipsis_mask=2, begin_mask=8, end_mask=8, ) _test_stridedslice( [3, 4, 5, 4, 5, 6], [0, 0, 1, 2, 1], [2, 3, 4, 5, 3], [1, 1, 2, 2, 1], "float32", shrink_axis_mask=8, new_axis_mask=1, ellipsis_mask=2, begin_mask=5, end_mask=5, ) _test_stridedslice( [3, 4, 5, 4, 5, 6], [0, 0, 1, 2, 1], [2, 3, 4, 5, 3], [1, 1, 2, 2, 1], "float32", shrink_axis_mask=16, new_axis_mask=1, ellipsis_mask=2, begin_mask=5, end_mask=5, ) _test_stridedslice( [3, 4, 5, 4, 5, 6], [1, 2, 0, -3], [4, 5, 3, 3], [2, 2, 1, 1], "float32", shrink_axis_mask=8, new_axis_mask=1, ellipsis_mask=2, begin_mask=5, end_mask=8, ) _test_stridedslice( [1, 13, 13, 3, 2], [0, 0], [1, 1], [1, -1], "float32", ellipsis_mask=1, begin_mask=2, end_mask=2, ) ####################################################################### # FloorDiv, RealDiv # ----------------- def _test_forward_divide(ip_shape, dtype): np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype) np_denomin = np.random.uniform(1, 100, size=ip_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): numerator = tf.placeholder(dtype, ip_shape, name="numer") denominator = tf.placeholder(dtype, ip_shape, name="denomin") tf.math.divide(numerator, denominator, name="RealDiv") compare_tf_with_tvm([np_numer, np_denomin], ["numer:0", "denomin:0"], "RealDiv:0") def _test_forward_floordiv(ip_shape, dtype): np_numer = np.random.uniform(1, 100, size=ip_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): numerator = tf.placeholder(dtype, ip_shape, name="numer") tf.math.floordiv(numerator, tf.constant(5, dtype=dtype), name="FloorDiv") compare_tf_with_tvm([np_numer], ["numer:0"], "FloorDiv:0") def test_forward_divide(): """test FloorDiv, RealDiv""" _test_forward_divide((4,), "int32") _test_forward_divide((4, 3, 7), "float32") _test_forward_floordiv((4, 3, 7), "float32") _test_forward_floordiv((4, 3, 7), "int32") ####################################################################### # FloorMod # -------- def _test_forward_floormod(in_shape, if_shape, dtype): np_numer = np.random.uniform(1, 100, size=in_shape).astype(dtype) np_factor = np.random.uniform(1, 100, size=if_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): numerator = tf.placeholder(dtype, in_shape, name="numer") factor = tf.placeholder(dtype, if_shape, name="factor") tf.floormod(numerator, factor, name="FloorMod") compare_tf_with_tvm([np_numer, np_factor], ["numer:0", "factor:0"], "FloorMod:0") def test_forward_floormod(): """test FloorMod""" _test_forward_floormod((10,), (10,), "float32") _test_forward_floormod((8, 2), (1,), "float32") _test_forward_floormod((4, 3, 7), (4, 3, 7), "float32") _test_forward_floormod((4, 3, 7), (4, 3, 7), "int32") ####################################################################### # TruncateMod # ----------- def _test_forward_truncatemod(ip_shape, dtype): np_data_1 = np.random.uniform(-100, 
                                  100, size=ip_shape).astype(dtype)
    np_data_2 = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data_1 = tf.placeholder(dtype, ip_shape, name="in_data_1")
        in_data_2 = tf.placeholder(dtype, ip_shape, name="in_data_2")
        tf.truncatemod(in_data_1, in_data_2, name="truncatemod")
        compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "truncatemod:0")


def test_forward_truncatemod():
    """test TruncateMod"""
    _test_forward_truncatemod((4, 3, 7), "int32")


#######################################################################
# Gather, GatherV2
# --------------------------


def _test_gather(ip_shape, indice_shape, indice_value, axis, dtype):
    """ One iteration of a GatherV2 """
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        indices = tf.placeholder("int32", indice_shape, name="indices")
        out = tf.gather(in_data, indices, axis=axis)
        np_data = np.random.uniform(1, 10, size=ip_shape).astype(dtype)

        def _fill_indices(indice_value):
            if isinstance(indice_value, int):
                indices = np.array([indice_value], dtype="int32")
            else:
                indices = np.asarray(indice_value, dtype="int32")
            return indices

        np_indices = _fill_indices(indice_value)
        compare_tf_with_tvm([np_data, np_indices], ["in_data:0", "indices:0"], out.name)


def test_forward_gather():
    """test Gather/GatherV2 layer"""
    _test_gather((4,), (1,), 1, 0, "int32")
    _test_gather((4,), (1,), 1, 0, "float32")
    _test_gather((1, 4), (1,), [0], 0, "int32")
    _test_gather((4,), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "float32")
    _test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "int32")
    _test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 1, "int32")
    _test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "float32")
    _test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 0, "int32")
    _test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 2, "int32")
    _test_gather((4, 3, 5, 6), (1, 4), [[2, 1, 0, 0]], 0, "float32")


#######################################################################
# GatherND
# --------------------------


def _test_gather_nd(ip_shape, indice_value, dtype):
    """test operator GatherNd"""
    np_data = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.gather_nd(in_data, indices=indice_value, name="gather_nd")
        compare_tf_with_tvm([np_data], ["in_data:0"], "gather_nd:0")


def test_forward_gather_nd():
    """test operator GatherNd"""
    _test_gather_nd((2, 2), [[0, 0], [1, 1]], "float32")
    _test_gather_nd((2, 2, 2), [[1, 0, 0], [0, 0, 0]], "float32")
    _test_gather_nd((4,), [1], "float32")
    _test_gather_nd((4,), [1], "int32")
    _test_gather_nd((1, 4), [0, 3], "int32")
    _test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "int32")
    _test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "float32")
    _test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
    _test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
    _test_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], "float32")
    _test_gather_nd((3, 3, 3), [[[2, 1]]], "int32")


#######################################################################
# BiasAdd
# -------


def test_forward_bias_add():
    """test Op BiasAdd"""

    def check_bias_add(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lh_data = np.random.uniform(size=lh_shape).astype(dtype)
        rh_data = np.random.uniform(size=rh_shape).astype(dtype)
        with tf.Graph().as_default():
            lft_data = tf.placeholder(dtype, name="lft_data")
            rgt_data = tf.placeholder(dtype, name="rgt_data")
            tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
            compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "BiasAdd:0")

    check_bias_add((10, 8, 16, 32), (32,), dtype="int32")
    check_bias_add((10, 20), (20,), dtype="float32")


#######################################################################
# Split
# -----


def _test_split(in_shape, axis, num_or_size_splits, dtype):
    """ One iteration of a Split """
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, in_shape, name="in_data")
        num_split = (
            len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits
        )
        split = tf.split(in_data, num_or_size_splits, axis=axis)
        relu = [tf.nn.relu(i) for i in split]
        compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in relu])

    # and now test together with concat
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, in_shape, name="in_data")
        splitted = tf.split(in_data, num_or_size_splits, axis=axis)
        concat = tf.concat(splitted, axis)
        compare_tf_with_tvm([np_data], "in_data:0", concat.name)


def test_forward_split():
    """test split layer"""
    # rank 1
    _test_split((3,), 0, 1, "float32")
    _test_split((3,), 0, 3, "float32")
    _test_split((6,), 0, 3, "float32")
    # rank 2
    _test_split((6, 2), 0, 3, "float32")
    _test_split((2, 6), 1, 6, "float32")
    # rank 3
    _test_split((6, 2, 4), 0, 2, "int32")
    _test_split((2, 6, 4), 1, 3, "float32")
    _test_split((2, 4, 6), 2, 1, "float32")
    # rank 4
    _test_split((6, 1, 3, 5), 0, 3, "float32")
    _test_split((1, 6, 3, 5), 1, 3, "float32")
    _test_split((1, 3, 6, 5), 2, 3, "float32")
    _test_split((1, 3, 5, 6), 3, 3, "float32")
    # split along negative axis
    _test_split((6, 1, 3, 5), -4, 3, "float32")
    _test_split((1, 6, 3, 5), -3, 3, "float32")
    _test_split((1, 3, 6, 5), -2, 3, "float32")
    _test_split((1, 3, 5, 6), -1, 3, "float32")
    # size_splits list
    _test_split((6,), 0, [1, 2, 3], "int32")
    _test_split((3, 6, 4), -2, [1, 4, 1], "float32")


######################################################################
# TopKV2
# ------


def _test_forward_top_k_v2(in_shape, k):
    np_data = np.random.uniform(-100, 100, size=in_shape).astype("float32")
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder("float32", in_shape, name="in_data")
        tf.math.top_k(in_data, k, name="TopK")
        compare_tf_with_tvm([np_data], ["in_data:0"], "TopK:0")


def test_forward_top_k_v2():
    _test_forward_top_k_v2((3,), 1)
    _test_forward_top_k_v2((3,), 3)
    _test_forward_top_k_v2((3, 5, 7), 3)
    _test_forward_top_k_v2((3, 5, 7), 3)


#######################################################################
# Unstack
# -------


def _test_unstack(ip_shape, axis, dtype):
    np_data = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        unstack = tf.unstack(in_data, axis=axis)
        compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in unstack])

    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.stack(tf.unstack(in_data, axis=axis), axis=axis)
        compare_tf_with_tvm([np_data], ["in_data:0"], "stack:0")


def test_forward_unstack():
    """test unstack layer"""
    _test_unstack((6,), 0, "int32")
    _test_unstack((2, 6), 1, "float64")
    # negative axis
    _test_unstack((1, 4), -1, "int32")
    _test_unstack((3, 6, 4), -2, "float32")
####################################################################### # Tile # ---- def _test_tile(in_shape, multiples, dtype): np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(dtype, in_shape, name="in_data") tf.tile(in_data, multiples=multiples, name="tile") compare_tf_with_tvm([np_data], ["in_data:0"], "tile:0") def test_forward_tile(): """test Tile""" _test_tile((2,), (3,), "int32") _test_tile((2, 2), (2, 3), "float32") _test_tile((2, 4, 6), (6, 7, 8), "float64") ####################################################################### # ClipByValue # ----------- def _test_forward_clip_by_value(ip_shape, clip_value_min, clip_value_max, dtype): tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(dtype, ip_shape, name="in_data") tf.clip_by_value(in_data, clip_value_min, clip_value_max, name="ClipByValue") np_data = np.random.uniform(-100, 100, size=ip_shape).astype(dtype) compare_tf_with_tvm([np_data], ["in_data:0"], "ClipByValue:0") def test_forward_clip_by_value(): """test ClipByValue op""" if tf.__version__ < LooseVersion("1.9"): _test_forward_clip_by_value((4,), 0.1, 5.0, "float32") _test_forward_clip_by_value((4, 4), 1, 5, "int32") ####################################################################### # Multi Input to graph # -------------------- def test_forward_multi_input(): with tf.Graph().as_default(): in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1") in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2") in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3") in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4") out1 = tf.add(in1, in2, name="out1") out2 = tf.subtract(in3, in4, name="out2") out = tf.multiply(out1, out2, name="out") in_data = np.arange(9, dtype="int32").reshape([3, 3]) compare_tf_with_tvm( [in_data, in_data, in_data, in_data], ["in1:0", "in2:0", "in3:0", "in4:0"], "out:0" ) ####################################################################### # Multi Output to Graph # --------------------- def test_forward_multi_output(): with tf.Graph().as_default(): in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1") in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2") in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3") in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4") out1 = tf.add(in1, in2, name="out1") out2 = tf.subtract(in3, in4, name="out2") in_data = np.arange(9, dtype="int32").reshape([3, 3]) in_data = [in_data] * 4 in_name = ["in1:0", "in2:0", "in3:0", "in4:0"] out_name = ["out1:0", "out2:0"] out_node = [out.strip(":0") for out in out_name] in_node = [inp.strip(":0") for inp in in_name] with tf.Session() as sess: final_graph_def = tf.graph_util.convert_variables_to_constants( sess, sess.graph.as_graph_def(add_shapes=True), out_node, ) tf_output = run_tf_graph(sess, in_data, in_name, out_name) tvm_output = run_tvm_graph( final_graph_def, in_data, in_node, target="llvm", out_names=out_node, num_output=2 ) for i in range(len(tf_output)): tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5) ####################################################################### # Resize Bilinear, Nearest_Neighbor # --------------------------------- def _test_resize_bilinear(in_shape, to_shape, align_corners): """ One iteration of resize bilinear """ data = np.random.uniform(size=in_shape).astype("float32") shape_data = np.array(to_shape).astype("int32") with tf.Graph().as_default(): in_data = 
array_ops.placeholder(shape=data.shape, dtype=data.dtype) shape_data = constant_op.constant( shape_data, shape=shape_data.shape, dtype=shape_data.dtype ) tf.image.resize_bilinear(in_data, shape_data, align_corners=align_corners) compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0") def _test_resize_bilinear_from_tensor(in_shape, align_corners): """One iteration of resize bilinear with non-constant output shape, requires value inference to get proper output shape.""" data = np.random.uniform(size=in_shape).astype("float32") with tf.Graph().as_default(): in_data = array_ops.placeholder( shape=[in_shape[0], None, None, in_shape[3]], dtype=data.dtype ) to_shape = tf.shape(in_data)[1:3] tf.image.resize_bilinear(in_data, to_shape, align_corners=align_corners) compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0") def _test_resize_nearest_neighbor(in_shape, to_shape): """ One iteration of resize nearest neighbor """ data = np.random.uniform(size=in_shape).astype("float32") shape_data = np.array(to_shape).astype("int32") with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) shape_data = constant_op.constant( shape_data, shape=shape_data.shape, dtype=shape_data.dtype ) tf.image.resize_nearest_neighbor(in_data, shape_data, name="resize_nearest_neighbor") compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0") def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale): """ One iteration of resize nearest neighbor for graph with dynamic input shape """ data = np.random.uniform(size=in_shape).astype("float32") with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=None, dtype=data.dtype) # multiply input shape by scale factor new_shape = tf.shape(in_data)[1:3] * tf.constant(scale, dtype=tf.int32) tf.image.resize_nearest_neighbor(in_data, new_shape, name="resize_nearest_neighbor") compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0") def test_forward_resize(): """ Resize Bilinear, Nearest_Neighbor """ # TF default layout is NHWC _test_resize_bilinear((4, 32, 32, 3), [50, 50], False) _test_resize_bilinear((6, 32, 32, 3), [20, 20], True) _test_resize_bilinear_from_tensor((4, 32, 32, 3), False) _test_resize_bilinear_from_tensor((6, 50, 50, 3), True) _test_resize_nearest_neighbor((6, 32, 32, 3), [20, 20]) _test_resize_nearest_neighbor_dynamic_shape((1, 16, 16, 3), scale=[2, 2]) ####################################################################### # BroadcastTo # ----------- def _test_broadcast_to(in_shape, to_shape): """ One iteration of broadcast_to""" data = np.random.uniform(size=in_shape).astype("float32") shape_data = np.array(to_shape).astype("int32") with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) shape_data = constant_op.constant( shape_data, shape=shape_data.shape, dtype=shape_data.dtype ) tf.broadcast_to(in_data, shape_data) compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0", opt_level=0) def _test_broadcast_to_from_tensor(in_shape): """ One iteration of broadcast_to with unknown shape at graph build""" data = np.random.uniform(size=in_shape).astype("float32") with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=[None], dtype=data.dtype) shape_data = tf.multiply(tf.shape(in_data), 32) tf.broadcast_to(in_data, shape_data) compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0") def test_forward_broadcast_to(): """ Resize Bilinear """ _test_broadcast_to((4, 1, 32, 32), [4, 8, 32, 32]) _test_broadcast_to((6, 32, 
32, 1), [6, 32, 32, 16]) _test_broadcast_to_from_tensor((1)) ####################################################################### # Fill # ---- def _test_fill(in_shape): """ Use the fill op to create a tensor of ones with non-constant shape.""" with tf.Graph().as_default(): tf.ones(shape=in_shape, dtype="float32") compare_tf_with_tvm(in_shape, [], "ones:0", opt_level=1) def _test_fill_from_tensor(in_shape): """Use the fill op to create a tensor of ones with non-constant shape. Some extra ops need to be added here to prevent the graph from being fully constant and folded away.""" data = np.random.uniform(size=in_shape).astype("float32") with tf.Graph().as_default(): in_data = array_ops.placeholder( shape=[in_shape[0], in_shape[1], None, None], dtype=data.dtype ) x = tf.ones(shape=2 * tf.shape(in_data), dtype=data.dtype) y = tf.math.add(in_data, tf.reduce_mean(x), name="out1") compare_tf_with_tvm(data, "Placeholder:0", "out1:0") def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype): with tf.Graph().as_default(): in_shape = tf.placeholder(shape=[in_shape_data.shape[0]], dtype=in_shape_data.dtype) in_value = tf.placeholder(shape=(), dtype=dtype) out = tf.fill(in_shape, in_value) for mode in ["debug", "vm"]: compare_tf_with_tvm( [in_shape_data, in_value_data], [in_shape.name, in_value.name], out.name, mode=mode ) def test_forward_fill(): """ Resize Bilinear """ _test_fill((32)) _test_fill((6, 32, 64, 64)) _test_fill_from_tensor((6, 32, 64, 64)) _test_fill_symbolic_inputs(np.array((2,)), np.int32(9), tf.int32) _test_fill_symbolic_inputs(np.array((2, 3)), 9, tf.int64) _test_fill_symbolic_inputs(np.array((2, 3, 4)), np.float32(9.0), tf.float32) ####################################################################### # Crop to bounding box # -------------------- def _test_crop(in_shape, off_h, off_w, tar_h, tar_w): """ Crop to bounding box """ data = np.random.uniform(size=in_shape).astype("float32") with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) tf.image.crop_to_bounding_box(in_data, off_h, off_w, tar_h, tar_w) compare_tf_with_tvm(data, "Placeholder:0", "crop_to_bounding_box/Slice:0") def test_forward_crop(): """ Crop to bounding box """ _test_crop((1, 224, 224, 3), 20, 20, 120, 120) ####################################################################### # CropAndResize # ------------- def _test_forward_crop_and_resize( img_shape, boxes, box_idx, crop_size, extrapolation_value=0.0, method="bilinear", dtype="float32", ): image = np.random.uniform(0, 10, size=img_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): in_data = array_ops.placeholder(dtype, image.shape, name="in_data") tf.image.crop_and_resize( in_data, boxes=boxes, box_ind=box_idx, crop_size=crop_size, method=method, extrapolation_value=extrapolation_value, name="crop_and_resize", ) compare_tf_with_tvm([image], ["in_data:0"], "crop_and_resize:0") def test_forward_crop_and_resize(): """ CropAndResize """ _test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3]) _test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2) _test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "nearest") _test_forward_crop_and_resize([1, 11, 11, 3], [[0.3, 0.3, 1, 1]], [0], [21, 21]) _test_forward_crop_and_resize([1, 41, 41, 3], [[0.2, 0.4, 0.8, 0.8]], [0], [21, 11]) _test_forward_crop_and_resize([1, 100, 100, 3], [[0, 0, 0.9, 0.9]], [0], [30, 30]) _test_forward_crop_and_resize([1, 224, 224, 3], [[0.1, 0.2, 1, 
1]], [0], [9, 9]) _test_forward_crop_and_resize([1, 249, 249, 3], [[0, 0, 1, 1]], [0], [9, 9]) _test_forward_crop_and_resize([1, 201, 301, 3], [[0.2, 0.3, 0.7, 0.8]], [0], [51, 51]) _test_forward_crop_and_resize( img_shape=[10, 11, 11, 3], boxes=[[0, 0, 0.9, 0.9], [0.2, 0.2, 0.8, 0.8]], box_idx=[0, 1], crop_size=[5, 5], ) _test_forward_crop_and_resize( img_shape=[20, 576, 576, 3], boxes=[[0, 0, 1, 1], [0, 0, 0.8, 0.8], [0.1, 0.2, 0.9, 1], [0.2, 0, 1, 1]], box_idx=[1, 0, 2, 3], crop_size=[24, 24], extrapolation_value=0.3, ) _test_forward_crop_and_resize( img_shape=[20, 229, 229, 3], boxes=[[0, 0, 0.9, 0.9], [0.3, 0.3, 1, 1], [0.2, 0.1, 0.7, 0.8], [0, 0, 1, 1]], box_idx=[3, 0, 2, 1], crop_size=[58, 58], extrapolation_value=0.2, method="nearest", ) ####################################################################### # Non Max Suppression # ------------------- def _test_forward_nms_v3( bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32" ): boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype) scores = np.random.uniform(size=score_shape).astype(dtype) max_output_size = np.int32(out_size) tf.reset_default_graph() in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1") in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2") in_data_3 = tf.placeholder(tf.int32, name="in_data_3") tf.image.non_max_suppression( boxes=in_data_1, scores=in_data_2, max_output_size=in_data_3, iou_threshold=iou_threshold, score_threshold=score_threshold, name="nms", ) compare_tf_with_tvm( [boxes, scores, max_output_size], ["in_data_1:0", "in_data_2:0", "in_data_3:0"], "nms/NonMaxSuppressionV3:0", mode="vm", ) compare_tf_with_tvm( [boxes, scores, max_output_size], ["in_data_1:0", "in_data_2:0", "in_data_3:0"], "nms/NonMaxSuppressionV3:0", mode="debug", ) def _test_forward_nms_v4( bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32" ): boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype) scores = np.random.uniform(size=score_shape).astype(dtype) max_output_size = np.int32(out_size) tf.reset_default_graph() in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1") in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2") in_data_3 = tf.placeholder(tf.int32, name="in_data_3") indices_padded, num_valid = tf.image.non_max_suppression_padded( boxes=in_data_1, scores=in_data_2, max_output_size=in_data_3, iou_threshold=iou_threshold, score_threshold=score_threshold, name="nms", pad_to_max_output_size=True, ) num_valid = tf.reshape(num_valid, shape=(-1,)) indices_padded = tf.reshape(indices_padded, shape=(-1,)) tf.slice(indices_padded, tf.constant([0]), num_valid, name="SlicedIndices") compare_tf_with_tvm( [boxes, scores, max_output_size], ["in_data_1:0", "in_data_2:0", "in_data_3:0"], ["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"], mode="vm", ) compare_tf_with_tvm( [boxes, scores, max_output_size], ["in_data_1:0", "in_data_2:0", "in_data_3:0"], ["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"], mode="debug", ) def _test_forward_nms_v5( bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32" ): boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype) scores = np.random.uniform(size=score_shape).astype(dtype) max_output_size = np.int32(out_size) tf.reset_default_graph() in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1") in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2") in_data_3 = tf.placeholder(tf.int32, name="in_data_3") tf.image.non_max_suppression_with_scores( 
boxes=in_data_1, scores=in_data_2, max_output_size=in_data_3, iou_threshold=iou_threshold, score_threshold=score_threshold, name="nms", ) compare_tf_with_tvm( [boxes, scores, max_output_size], ["in_data_1:0", "in_data_2:0", "in_data_3:0"], ["nms/NonMaxSuppressionV5:0", "nms/NonMaxSuppressionV5:1"], mode="vm", ) def test_forward_nms(): """ NonMaxSuppressionV3,5 """ for _test_forward_nms in [_test_forward_nms_v3, _test_forward_nms_v5]: _test_forward_nms((5, 4), (5,), 0.7, 0.5, 5) _test_forward_nms((20, 4), (20,), 0.5, 0.6, 10) _test_forward_nms((1000, 4), (1000,), 0.3, 0.7, 1000) _test_forward_nms((2000, 4), (2000,), 0.4, 0.6, 7) ####################################################################### # LSTM # ---- def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype): """ One iteration of a LSTM cell """ tf.reset_default_graph() input_size = num_hidden input_data = np.full((batch_size, input_size), 1.0, dtype=dtype) in_state_c = np.full((batch_size, num_hidden), 0.1, dtype=dtype) in_state_h = np.full((batch_size, num_hidden), 0.1, dtype=dtype) def _get_tensorflow_output(): with tf.Session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5) ): m0 = tf.placeholder(dtype, [batch_size, num_hidden], name="m0") m1 = tf.placeholder(dtype, [batch_size, num_hidden], name="m1") x = tf.placeholder(shape=(batch_size, input_size), dtype=dtype, name="input") g, ((out_m0, out_m1)) = tensorflow.contrib.rnn.LSTMBlockCell( num_hidden, forget_bias=forget_bias )(x, (m0, m1)) sess.run([variables.global_variables_initializer()]) res = sess.run( [g, out_m0, out_m1], { x.name: np.array([[1.0, 1.0]]), m0.name: in_state_c, m1.name: in_state_h, }, ) graph_def = sess.graph.as_graph_def(add_shapes=True) final_graph_def = graph_util.convert_variables_to_constants( sess, graph_def, ["root/lstm_cell/LSTMBlockCell"] ) return final_graph_def, res graph_def, tf_out = _get_tensorflow_output() tvm_output = run_tvm_graph( graph_def, [input_data, in_state_c, in_state_h], ["root/input", "root/m0", "root/m1"], num_output=7, ) assert isinstance(tvm_output, list) tvm.testing.assert_allclose(tf_out[0], tvm_output[6], rtol=1e-3, atol=1e-3) tvm.testing.assert_allclose(tf_out[1], tvm_output[1], rtol=1e-3, atol=1e-3) def test_forward_lstm(): """test LSTM block cell""" if package_version.parse(tf.VERSION) < package_version.parse("2.0.0"): # in 2.0, tf.contrib.rnn.LSTMBlockCell is removed _test_lstm_cell(1, 2, 1, 0.5, "float32") ####################################################################### # Pack # --- def _test_pack(axis, shape, **kwargs): a = np.arange(np.prod(shape), dtype=np.float32).reshape(shape) b = np.arange(np.prod(shape), dtype=np.float32).reshape(shape) with tf.Graph().as_default(): tf_a = array_ops.placeholder(shape=shape, dtype="float32", name="pl_a") tf_b = array_ops.placeholder(shape=shape, dtype="float32", name="pl_b") tf_c = tf.stack([tf_a, tf_b], axis=axis, **kwargs) assert tf_c.op.op_def.name == "Pack", "tf.stack() is expected to produce 'Pack' operation" compare_tf_with_tvm([a, b], ["pl_a:0", "pl_b:0"], "stack:0") def test_forward_pack(): for axis in range(-3, 3): _test_pack(axis, [3, 2, 1]) for axis in range(-1, 1): _test_pack(axis, [3]) _test_pack(0, []) ####################################################################### # Unpack # ------ def _test_forward_unpack(in_shape, axis, dtype): """test operator Unpack""" np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): 
in_data = tf.placeholder(dtype, in_shape, name="in_data") tf.unstack(in_data, axis=axis, name="Unpack") compare_tf_with_tvm([np_data], ["in_data:0"], "Unpack:0") def test_forward_unpack(): _test_forward_unpack((3,), 0, "int32") _test_forward_unpack((3,), -1, "int16") _test_forward_unpack((21, 23, 3), 2, "float32") ####################################################################### # Range # ----- def test_forward_range(): """test operator Range""" for dtype in [tf.int32, tf.int64]: tf.reset_default_graph() with tf.Graph().as_default(): tf.range(1, 18, 3, name="range", dtype=dtype) compare_tf_with_tvm([], [], "range:0") """test type assignment for operator Range""" tf.reset_default_graph() with tf.Graph().as_default(): tf.range(1, 256 + 1, 1, dtype=tf.float32) compare_tf_with_tvm([], [], "range:0") ####################################################################### # Pad # --- def _test_pad(input_shape, paddings, mode, **kwargs): """ One iteration of pad operation with given shape""" x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=input_shape, dtype="float32") pad_values = constant_op.constant(paddings) pad = tf.pad(in_data, paddings=pad_values, mode=mode, **kwargs) if mode == "CONSTANT": if "constant_values" in kwargs: out_name = "PadV2:0" else: out_name = "Pad:0" else: out_name = "MirrorPad:0" compare_tf_with_tvm(x, "Placeholder:0", out_name) def test_forward_pad(): """ Pad """ _test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT") _test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT", constant_values=1.0) _test_pad((2, 3), [[1, 1], [2, 2]], mode="SYMMETRIC") _test_pad((2, 3), [[1, 1], [2, 2]], mode="REFLECT") ####################################################################### # Logical operators # -------------------- def test_logical_and(): with tf.Graph().as_default(): in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1") in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2") out = tf.logical_and(in1, in2, name="out") in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool") in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool") compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0") def test_logical_or(): with tf.Graph().as_default(): in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1") in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2") out = tf.logical_or(in1, in2, name="out") in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool") in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool") compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0") def test_logical_xor(): with tf.Graph().as_default(): in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1") in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2") out = tf.logical_xor(in1, in2, name="out") in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool") in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool") compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0") def test_logical_not(): with tf.Graph().as_default(): in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1") out = tf.logical_not(in1, name="out") in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool") compare_tf_with_tvm(in_data1, "in1:0", "out:0") def test_forward_logical(): test_logical_and() test_logical_or() 
test_logical_xor() test_logical_not() ####################################################################### # Where, Select # ------------- def test_forward_where(): """ Where: return elements depending on conditions""" with tf.Graph().as_default(): with tf.Session() as sess: input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1") input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2") mask = input1 > input2 tf.where(mask, input1 + 1, input2 * 2) in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32") in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32") compare_tf_with_tvm([in_data1, in_data2], ["input1:0", "input2:0"], "Select:0") ####################################################################### # Inception V3 # ------------ def test_forward_inception_v3(): """test inception V3 model""" with tf.Graph().as_default(): graph_def = tf_testing.get_workload( "InceptionV3/inception_v3_2016_08_28_frozen-with_shapes.pb" ) # Call the utility to import the graph definition into default graph. graph_def = tf_testing.ProcessGraphDefParam(graph_def) data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32") with tf.Session() as sess: tf_output = run_tf_graph(sess, data, "input:0", "InceptionV3/Predictions/Reshape_1:0") tvm_output = run_tvm_graph(graph_def, data, "input") tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5) ####################################################################### # Inception V1 # ------------ def test_forward_inception_v1(): """test inception V1 model""" with tf.Graph().as_default(): graph_def = tf_testing.get_workload("InceptionV1/classify_image_graph_def-with_shapes.pb") # Call the utility to import the graph definition into default graph. graph_def = tf_testing.ProcessGraphDefParam(graph_def) # Build an image from random data. from PIL import Image from tvm.contrib import utils img_array = np.random.uniform(size=(1, 600, 600, 3)).astype("uint8") img = Image.frombuffer("RGB", (600, 600), img_array.tostring(), "raw", "RGB", 0, 1) temp = utils.tempdir() img_path = temp.relpath("tf-test.jpg") img.save(img_path) import os.path if not tf.gfile.Exists(os.path.join(img_path)): tf.logging.fatal("File does not exist %s", img_path) data = tf.gfile.FastGFile(os.path.join(img_path), "rb").read() temp.remove() # Extract tensorflow decoded image frame for tvm input with tf.Session() as sess: tvm_data = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "DecodeJpeg:0") with tf.Session() as sess: tf_output = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "softmax:0") tvm_output = run_tvm_graph(graph_def, tvm_data, "DecodeJpeg/contents") tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5) ####################################################################### # Mobilenet # --------- def test_forward_mobilenet(): """test mobilenet model""" # MobilenetV2 with tf.Graph().as_default(): graph_def = tf_testing.get_workload( "https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz", "mobilenet_v2_1.4_224_frozen.pb", ) # Call the utility to import the graph definition into default graph. graph_def = tf_testing.ProcessGraphDefParam(graph_def) data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32") out_node = "MobilenetV2/Predictions/Reshape_1" with tf.Session() as sess: # Add shapes to the graph. 
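            # (tf_testing.AddShapesToGraphDef, called on the next line, exports the
            # session graph with output-shape attributes attached and variables frozen
            # to constants, so the TVM importer can pick up static shapes without
            # explicit shape hints; description based on the helper's usage in this file.)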
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node) tf_output = run_tf_graph(sess, data, "input:0", out_node + ":0") tvm_output = run_tvm_graph(graph_def, data, "input") tvm.testing.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5 ) ####################################################################### # ResnetV2 # -------- @tvm.testing.requires_gpu def test_forward_resnetv2(): """test resnet model""" if is_gpu_available(): with tf.Graph().as_default(): graph_def = tf_testing.get_workload( "ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb" ) # Call the utility to import the graph definition into default graph. graph_def = tf_testing.ProcessGraphDefParam(graph_def) data = np.random.uniform(size=(128, 224, 224, 3)).astype("float32") out_node = "ArgMax" with tf.Session() as sess: tf_output = run_tf_graph(sess, data, "input_tensor:0", out_node + ":0") for device in ["llvm", "cuda"]: ctx = tvm.context(device, 0) if not tvm.testing.device_enabled(device): print("Skip because %s is not enabled" % device) continue tvm_output = run_tvm_graph( graph_def, data, "input_tensor", len(tf_output), target=device ) tvm.testing.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5 ) ####################################################################### # SSD # --- def _test_ssd_impl(): """Test SSD with backbone MobileNet V1""" with tf.Graph().as_default(): graph_def = tf_testing.get_workload( "object_detection/ssd_mobilenet_v1_ppn_shared_" "box_predictor_300x300_coco14_sync_2018_07_03.pb" ) # Call the utility to import the graph definition into default graph. graph_def = tf_testing.ProcessGraphDefParam(graph_def) data = np.random.uniform(0.0, 255.0, size=(1, 512, 512, 3)).astype("uint8") in_node = "image_tensor" out_node = ["detection_boxes", "detection_scores", "detection_classes"] with tf.Session() as sess: tf_output = run_tf_graph( sess, data, "{}:0".format(in_node), ["{}:0".format(oname) for oname in out_node] ) # TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready. for device in ["llvm"]: ctx = tvm.context(device, 0) if not tvm.testing.device_enabled(device): print("Skip because %s is not enabled" % device) continue tvm_output = run_tvm_graph( graph_def, data, in_node, len(out_node), target=device, layout="NCHW", out_names=out_node, mode="vm", disabled_pass=["FoldScaleAxis"], serialize=True, ) for i in range(len(out_node)): tvm.testing.assert_allclose(tvm_output[i], tf_output[i], rtol=1e-3, atol=1e-3) @pytest.mark.skip("neo-ai/tvm: skip because stack limit of 100mb is exceeded by WellFormedChecker") def test_forward_ssd(): run_thread = threading.Thread(target=_test_ssd_impl, args=()) old_stack_size = threading.stack_size(100 * 1024 * 1024) run_thread.start() run_thread.join() threading.stack_size(old_stack_size) ####################################################################### # Placeholder # ----------- def test_forward_placeholder(): """test a simple pb with Placeholder node in the end of GraphDef""" with tf.Graph().as_default(): graph_def = tf_testing.get_workload("Custom/placeholder.pb") # Call the utility to import the graph definition into default graph. graph_def = tf_testing.ProcessGraphDefParam(graph_def) data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32") out_node = "mul" with tf.Session() as sess: # Add shapes to the graph. 
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node) tf_output = run_tf_graph(sess, data, "Placeholder:0", out_node + ":0") tvm_output = run_tvm_graph(graph_def, data, "Placeholder") tvm.testing.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5 ) ####################################################################### # PTB # --- try: # Load contrib for running ptb model in tf version before 2.0 import tensorflow.contrib except: pass def test_forward_ptb(): """test ptb model""" config = tf_testing.get_config() num_steps = config.num_steps num_hidden = config.hidden_size num_layers = config.num_layers batch_size = config.batch_size vocab_size = config.vocab_size out_sample_shape = (batch_size, vocab_size) out_state_shape = (batch_size, num_hidden) # Sample input inpt = "we have no useful information on" cnt_sample = 20 def _pretty_print(items, is_char_model, id2word): if not is_char_model: return " ".join([id2word[x] for x in items]) else: return "".join([id2word[x] for x in items]).replace("_", " ") def _get_tvm_graph_module(graph_def): # Cell inputs 'c and 'h' consist of all layers values shape_dict = {"Model/Placeholder": (batch_size, num_steps)} mod, params = relay.frontend.from_tensorflow( graph_def, shape=shape_dict, outputs=[ "Model/Softmax:0", "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1", "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6", "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1", "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6", ], ) target = "llvm" with tvm.transform.PassContext(opt_level=0): graph, lib, params = relay.build(mod, target, params=params) from tvm.contrib import graph_runtime ctx = tvm.cpu(0) return params, graph_runtime.create(graph, lib, ctx) def _do_tvm_sample(model, data, in_states, params, num_samples): """Sampled from the model""" samples = [] state = in_states sample = None def _get_sample(data, state): input_data = np.full((batch_size, num_steps), data, dtype="int32") model.set_input("Model/Placeholder", tvm.nd.array(input_data.astype("int32"))) model.set_input( "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros", tvm.nd.array(state[0].astype("float32")), ) model.set_input( "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1", tvm.nd.array(state[1].astype("float32")), ) model.set_input( "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros", tvm.nd.array(state[2].astype("float32")), ) model.set_input( "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1", tvm.nd.array(state[3].astype("float32")), ) model.set_input(**params) model.run() tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).asnumpy() state_output = [] for i in range(4): state_output.append( model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).asnumpy() ) sample = tf_testing.pick_from_weight(tvm_output[0]) return sample, state_output for x in data: sample, state = _get_sample(x, state) if sample is not None: samples.append(sample) else: samples.append(0) k = 1 while k < num_samples: sample, state = _get_sample(samples[-1], state) samples.append(sample) k += 1 return samples, state with tf.Graph().as_default(): word_to_id, id_to_word, graph_def = tf_testing.get_workload_ptb() vocab_size = len(word_to_id) # Call the utility to import the graph definition into default graph. 
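        # Below, both the TF session and the TVM graph module are seeded with the same
        # word ids, and the test asserts that the sampled token ids (and the resulting
        # de-tokenized strings) match exactly for each of the ten generated sentences.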
graph_def = tf_testing.ProcessGraphDefParam(graph_def) sess = tf.Session() # TVM graph module creation params, m = _get_tvm_graph_module(graph_def) # Create 10 predicted statments of 20 words cnt_stm = 0 while cnt_stm < 10: cnt_stm += 1 in_state = [np.full((batch_size, num_hidden), 0, dtype="float32")] * 2 * num_layers seed_for_sample = inpt.split() tvm_samples, tvm_state = _do_tvm_sample( m, [word_to_id[word] for word in seed_for_sample], in_state, params, cnt_sample ) tvm_sample_str = _pretty_print(tvm_samples, False, id_to_word) tf_samples, tf_state = tf_testing.do_tf_sample( sess, [word_to_id[word] for word in seed_for_sample], in_state, cnt_sample ) tf_sample_str = _pretty_print(tf_samples, False, id_to_word) inpt = tvm_sample_str tvm.testing.assert_allclose(tf_samples, tvm_samples, rtol=1e-5, atol=1e-5) assert tvm_sample_str == tf_sample_str ####################################################################### # LRN (Local Response Normalization) # ---------------------------------- def _test_lrn(ishape, size, axis, bias, alpha, beta): """ testing local response normalization """ lrn_depth_radius = size / 2 inp_array = np.random.uniform(size=ishape).astype(np.float32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype, name="lrn0_data") nn_ops.local_response_normalization( in1, name="lrn", depth_radius=lrn_depth_radius, bias=bias, alpha=alpha, beta=beta ) compare_tf_with_tvm(inp_array, "lrn0_data:0", "lrn:0") def test_forward_lrn(): _test_lrn((1, 3, 20, 20), 3, 1, 1.0, 1.0, 0.5) ####################################################################### # l2_normalize # ------------ def _test_l2_normalize(ishape, eps, axis): """ testing l2 normalize (uses max, sum, square, sqrt frontend operators)""" inp_array = np.random.uniform(size=ishape).astype(np.float32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype) nn.l2_normalize(in1, axis=axis, epsilon=eps, name=None, dim=None) compare_tf_with_tvm(inp_array, "Placeholder:0", "l2_normalize:0") def test_forward_l2_normalize(): _test_l2_normalize((1, 3, 20, 20), 0.001, (0,)) ####################################################################### # transpose # --------- def _test_forward_transpose(ishape, axes=None): data = np.random.uniform(size=ishape).astype(np.float32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data") if axes is None: tf.transpose(in1) else: tf.transpose(in1, perm=axes) compare_tf_with_tvm(data, "transpose_data:0", "transpose:0") def _test_forward_tranapose_axes_input(ishape, axes): data = np.random.uniform(size=ishape).astype(np.float32) axes_np = np.array(axes).astype(np.int32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data") const1 = tf.constant(axes_np, dtype=tf.int32) # make axes an input to tf.transpose, but not an input to the graph, # so it can be extracted with infer_value_simulated axes = tf.reverse(const1, axis=[-1]) tf.transpose(in1, axes) compare_tf_with_tvm([data], ["transpose_data:0"], "transpose:0") def test_forward_transpose(): _test_forward_transpose((2, 3, 4), (1, 2, 0)) _test_forward_transpose((2, 3, 4)) _test_forward_transpose((7, 8, 8, 10)) _test_forward_transpose((2, 3, 4), (1, 2, 0)) _test_forward_transpose((2, 3, 4), (0, 1, 2)) _test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2)) _test_forward_tranapose_axes_input((2, 3, 4), (1, 2, 0)) _test_forward_tranapose_axes_input((2, 3, 4, 5), (3, 
0, 1, 2)) def _test_forward_slice_operation_input(input_value, begin_value, size_value): input_data = np.array(input_value, dtype=np.float32) with tf.Graph().as_default(): input_tensor = tf.placeholder(shape=input_data.shape, dtype=input_data.dtype, name="input") tf.slice(input_tensor, begin_value, size_value, name="slice_output") compare_tf_with_tvm([input_data], ["input:0"], "slice_output:0") def test_forward_slice(): _test_forward_slice_operation_input([1, 1], [0], [2]) _test_forward_slice_operation_input([0, 1, 2, 3], [3], [-1]) _test_forward_slice_operation_input( [[0, 1, 2, 3], [4, 5, 6, 7]], begin_value=[0, 1], size_value=[-1, -1] ) def test_forward_ceil(): ishape = (1, 3, 10, 10) inp_array = np.random.uniform(size=ishape).astype(np.float32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype) tf.ceil(in1) compare_tf_with_tvm(inp_array, "Placeholder:0", "Ceil:0") def test_forward_floor(): ishape = (1, 3, 10, 10) inp_array = np.random.uniform(size=ishape).astype(np.float32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype) tf.floor(in1) compare_tf_with_tvm(inp_array, "Placeholder:0", "Floor:0") def test_forward_relu(): ishape = (1, 3, 10, 10) inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32) for mode in ["graph_runtime", "vm"]: with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype) tf.nn.relu(in1) compare_tf_with_tvm(inp_array, "Placeholder:0", "Relu:0", mode=mode) def test_forward_leaky_relu(): ishape = (1, 3, 10, 10) inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32) for mode in ["graph_runtime", "vm"]: with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype) tf.nn.leaky_relu(in1, alpha=0.4) compare_tf_with_tvm(inp_array, "Placeholder:0", "LeakyRelu:0", mode=mode) def test_forward_elu(): ishape = (1, 3, 10, 10) inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype) tf.nn.elu(in1) compare_tf_with_tvm(inp_array, "Placeholder:0", "Elu:0") def test_forward_selu(): ishape = (1, 3, 10, 10) inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype) tf.nn.selu(in1) compare_tf_with_tvm(inp_array, "Placeholder:0", "Selu:0") def test_forward_tanh(): ishape = (1, 3, 10, 10) inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype) tf.nn.tanh(in1) compare_tf_with_tvm(inp_array, "Placeholder:0", "Tanh:0") ####################################################################### # Softmax # ------- def test_forward_softmax(): """test operator Softmax """ def check_softmax(in_shape, axis, dtype): np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(dtype, in_shape, name="in_data") tf.nn.softmax(in_data, axis=axis, name="Softmax") compare_tf_with_tvm([np_data], ["in_data:0"], "Softmax:0") check_softmax((2, 3, 5), 2, "float32") check_softmax((2, 3, 5), -1, "float32") ####################################################################### # Tensor # ------ def test_forward_round(): """test Round""" np_data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32) 
tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(tf.float32, (5, 7), name="in_data") tf.round(in_data, name="round") compare_tf_with_tvm([np_data], ["in_data:0"], "round:0") def test_forward_abs(): """test operator Abs""" np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(tf.float32, (9, 11), name="in_data") tf.math.abs(in_data, name="abs") compare_tf_with_tvm([np_data], ["in_data:0"], "abs:0") def _test_forward_zeros_like(in_shape, dtype): np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(dtype, in_shape, name="in_data") tf.zeros_like(in_data, name="zeros_like") compare_tf_with_tvm([np_data], ["in_data:0"], "zeros_like:0") def test_forward_zeros_like(): if tf.__version__ < LooseVersion("1.2"): _test_forward_zeros_like((2, 3), "int32") _test_forward_zeros_like((2, 3, 5), "int8") _test_forward_zeros_like((2, 3, 5, 7), "uint16") _test_forward_zeros_like((2, 3, 11), "float32") _test_forward_zeros_like((2, 3, 11), "float64") def test_forward_squared_difference(): ishape = (1, 3, 10, 14) inp_array_a = np.random.uniform(-5, 5, size=ishape).astype(np.float32) inp_array_b = np.random.uniform(-5, 5, size=ishape).astype(np.float32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array_a.shape, dtype=inp_array_a.dtype, name="in1") in2 = tf.placeholder(shape=inp_array_b.shape, dtype=inp_array_b.dtype, name="in2") out = tf.math.squared_difference(in1, in2) compare_tf_with_tvm([inp_array_a, inp_array_b], [in1.name, in2.name], out.name) def _test_forward_reverse_v2(in_shape, axis, dtype): np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(dtype, in_shape, name="in_data") tf.reverse(in_data, axis=[axis], name="reverse") compare_tf_with_tvm([np_data], ["in_data:0"], "reverse:0") def test_forward_reverse_v2(): """test ReverseV2""" _test_forward_reverse_v2((2, 3), 0, "int32") _test_forward_reverse_v2((2, 3, 5), 2, "float32") _test_forward_reverse_v2((2, 3, 5, 7), 1, "float32") _test_forward_reverse_v2((2, 3, 5), -1, "float64") _test_forward_reverse_v2((2, 3, 5), -3, "float64") def test_forward_sign(): """test Sign""" np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data") tf.sign(in_data, name="sign") compare_tf_with_tvm([np_data], ["in_data:0"], "sign:0") def test_forward_square(): """test operator Square """ np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data") tf.square(in_data, name="square") compare_tf_with_tvm([np_data], ["in_data:0"], "square:0") def test_forward_pow_exp(): """test Pow and Exp """ np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32) np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): in1 = tf.placeholder(tf.float32, (5, 7, 11), name="in1") in2 = tf.placeholder(tf.float32, (5, 7, 11), name="in2") out1 = tf.pow(in1, in2, name="pow") out = tf.exp(in1, name="exp") compare_tf_with_tvm([np_in1, np_in2], ["in1:0", "in2:0"], "pow:0") compare_tf_with_tvm([np_in1], ["in1:0"], "exp:0") def test_forward_unary(): 
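    # The helper below builds a single-op graph and compares TF vs. TVM output.
    # a_min/a_max bound the random input domain; ops such as acos, asin and atanh
    # are only defined on [-1, 1], hence the narrower range passed for them below.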
def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32): """test unary operators""" np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data") out = op(in_data) compare_tf_with_tvm([np_data], ["in_data:0"], out.name) _test_forward_unary(tf.acos, -1, 1) _test_forward_unary(tf.asin, -1, 1) _test_forward_unary(tf.atanh, -1, 1) _test_forward_unary(tf.sinh) _test_forward_unary(tf.cosh) _test_forward_unary(tf.acosh) _test_forward_unary(tf.asinh) _test_forward_unary(tf.atan) _test_forward_unary(tf.sin) _test_forward_unary(tf.cos) _test_forward_unary(tf.tan) _test_forward_unary(tf.tanh) _test_forward_unary(tf.erf) _test_forward_unary(tf.log) _test_forward_unary(tf.log1p) def test_forward_atan2(): """test operator tan """ tf.disable_eager_execution() np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32) np_data_2 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32) tf.reset_default_graph() in_data_1 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_1") in_data_2 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_2") tf.atan2(in_data_1, in_data_2, name="atan2") compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "atan2:0") def test_forward_expm1(): """test operator expm1 """ def _test_forward_expm1(shape): tf.disable_eager_execution() np_data = np.random.uniform(1, 10, size=shape).astype(np.float32) tf.reset_default_graph() in_data = tf.placeholder(tf.float32, shape, name="in_data") tf.expm1(in_data, name="expm1") compare_tf_with_tvm([np_data], ["in_data:0"], "expm1:0") _test_forward_expm1([1, 100]) _test_forward_expm1([1, 10, 10]) _test_forward_expm1([2, 5, 2, 5]) def test_forward_softsign(): """test operator softsign """ def _test_forward_softsign(shape): tf.disable_eager_execution() np_data = np.random.uniform(1, 100, size=shape).astype(np.float32) tf.reset_default_graph() in_data = tf.placeholder(tf.float32, shape, name="in_data") tf.nn.softsign(in_data, name="softsign") compare_tf_with_tvm([np_data], ["in_data:0"], "softsign:0") _test_forward_softsign([1, 100]) _test_forward_softsign([1, 10, 10]) _test_forward_softsign([2, 5, 2, 5]) def test_forward_rint(): """test operator rint """ def _test_forward_rint(shape): tf.disable_eager_execution() np_data = np.random.uniform(-100, 100, size=shape).astype(np.float32) tf.reset_default_graph() in_data = tf.placeholder(tf.float32, shape, name="in_data") tf.math.rint(in_data, name="rint") compare_tf_with_tvm([np_data], ["in_data:0"], "rint:0") _test_forward_rint([100]) _test_forward_rint([1, 100]) _test_forward_rint([1, 10, 10]) _test_forward_rint([2, 5, 2, 5]) def test_forward_negative(): """test tf operator Neg """ np_data = np.random.uniform(-100, 255, size=(224, 224, 3)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(tf.float32, (224, 224, 3), name="in_data") tf.negative(in_data, name="negative") compare_tf_with_tvm([np_data], ["in_data:0"], "negative:0") def test_forward_log_softmax(): """test operator LogSoftmax""" np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(tf.float32, (9, 11), name="in_data") tf.math.log_softmax(in_data, name="LogSoftmax") compare_tf_with_tvm([np_data], ["in_data:0"], "LogSoftmax:0") def test_forward_softplus(): """test operator Softplus""" np_data = np.random.uniform(1, 10, size=(2, 
3, 5)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data") tf.nn.softplus(in_data, name="softplus") compare_tf_with_tvm([np_data], ["in_data:0"], "softplus:0") def test_forward_rsqrt(): """test Rsqrt """ np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data") tf.rsqrt(in_data, name="rsqrt") compare_tf_with_tvm([np_data], ["in_data:0"], "rsqrt:0") def test_forward_sqrt(): """test Sqrt """ np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data") tf.sqrt(in_data, name="sqrt") compare_tf_with_tvm([np_data], ["in_data:0"], "sqrt:0") def _test_forward_right_shift(in_shape, dtype): """test operator RightShift""" lh_data = np.random.randint(1, 3, size=in_shape).astype(dtype) rh_data = np.random.randint(1, 8, size=in_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): lft_data = tf.placeholder(dtype, in_shape, name="lft_data") rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data") tf.bitwise.right_shift(lft_data, rgt_data, name="RightShift") compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "RightShift:0") def test_forward_right_shift(): _test_forward_right_shift((7,), "int32") _test_forward_right_shift((3, 11), "int16") def _test_forward_left_shift(in_shape, dtype): """test operator LeftShift""" lh_data = np.random.randint(100, 1000000, size=in_shape).astype(dtype) rh_data = np.random.randint(1, 3, size=in_shape).astype(dtype) tf.reset_default_graph() with tf.Graph().as_default(): lft_data = tf.placeholder(dtype, in_shape, name="lft_data") rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data") tf.bitwise.left_shift(lft_data, rgt_data, name="LeftShift") compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "LeftShift:0") def test_forward_left_shift(): _test_forward_left_shift((10,), "int32") _test_forward_left_shift((224, 224, 3), "int16") ####################################################################### # Mean # ---- def test_forward_mean(): def check_mean(ishape, **kwargs): inp_array = np.random.uniform(size=ishape).astype(np.float32) with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype) tf.keras.backend.mean(in1, **kwargs) compare_tf_with_tvm(inp_array, "Placeholder:0", "Mean:0", no_gpu=True) check_mean((10, 8, 16, 32)) check_mean((10, 8, 16, 32), axis=(2, 3)) check_mean((10, 8, 16, 32), axis=(1, 2), keepdims=True) ####################################################################### # Size # ---- def test_forward_size(): def check_size(ishape): np_input = np.random.uniform(size=ishape).astype(np.float32) # if all dimensions are constant, TF will optimize away size operator into constant tf_input_shape = list(np_input.shape) tf_input_shape[0] = None with tf.Graph().as_default(): input = tf.placeholder(shape=tf_input_shape, dtype=np_input.dtype, name="input") tf.size(input, name="size") compare_tf_with_tvm([np_input], ["input:0"], "size:0") check_size((10, 8, 16, 32)) check_size((10,)) ####################################################################### # All, Any, Max, Min, Prod, variance, std, logsumexp, euclidean_norm # ------------------------------------------------------------------ def test_forward_reduce(): def 
_check_op(tf_op, ishape, axis, keepdims, dtype="float32"): tf.reset_default_graph() if dtype == "bool": np_data = np.random.choice([True, False], size=ishape) else: np_data = np.random.uniform(size=ishape).astype(dtype) if tf_op == tf.math.reduce_prod: axis = 1 np_data = np_data.reshape(1, -1) with tf.Graph().as_default(): in_data = tf.placeholder(dtype, name="in_data") reduce_op = tf_op(in_data, axis=axis, keepdims=keepdims, name="reduce_std") compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name) def _test_math_op(op, dtypes=["int32", "float32"]): for dtype in dtypes: _check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype) _check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype) _check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype) _check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype) _test_math_op(tf.math.reduce_all, dtypes=["bool"]) _test_math_op(tf.math.reduce_any, dtypes=["bool"]) _test_math_op(tf.math.reduce_max) _test_math_op(tf.math.reduce_min) _test_math_op(tf.math.reduce_prod) _test_math_op(tf.math.reduce_variance, dtypes=["float32"]) _test_math_op(tf.math.reduce_std, dtypes=["float32"]) _test_math_op(tf.math.reduce_logsumexp, dtypes=["float32"]) if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"): _test_math_op(tf.math.reduce_euclidean_norm) ####################################################################### # Relational operators # -------------------- def _test_forward_rel_op(data, func): with tf.Graph().as_default(): in1 = tf.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in1") in2 = tf.placeholder(shape=data[1].shape, dtype=data[1].dtype, name="in2") op = func(in1, in2, name="op") out = tf.cast(op, tf.int32, name="out1") compare_tf_with_tvm([data[0], data[1]], ["in1:0", "in2:0"], "out1:0") def test_forward_rel_ops(): t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]]) _test_forward_rel_op([t1, t2], math_ops.less) _test_forward_rel_op([t1, t2], math_ops.greater) _test_forward_rel_op([t1, t2], math_ops.less_equal) _test_forward_rel_op([t1, t2], math_ops.greater_equal) _test_forward_rel_op([t1, t2], math_ops.equal) _test_forward_rel_op([t1, t2], math_ops.not_equal) ####################################################################### # ExpandDims # ---------- def _test_forward_expand_dims(data, axis): with tf.Graph().as_default(): in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="in1") out = tf.expand_dims(in1, axis) compare_tf_with_tvm([data], [in1.name], out.name) def test_forward_expand_dims(): _test_forward_expand_dims(np.int32(1), 0) _test_forward_expand_dims(np.array([1]), 0) _test_forward_expand_dims(np.array([1]), -1) _test_forward_expand_dims(np.array([[1], [2]]), 0) _test_forward_expand_dims(np.array([[1], [2]]), 1) _test_forward_expand_dims(np.array([[1], [2]]), -1) ####################################################################### # Maximum, Minimum # ---------------- def test_forward_maximum(): """test Op Maximum""" def check_maximum(lh_shape, rh_shape, dtype): tf.reset_default_graph() lh_data = np.random.uniform(size=lh_shape).astype(dtype) rh_data = np.random.uniform(size=rh_shape).astype(dtype) with tf.Graph().as_default(): lft_data = tf.placeholder(dtype, name="lft_data") rgt_data = tf.placeholder(dtype, name="rgt_data") tf.math.maximum(lft_data, rgt_data, name="maximum") compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "maximum:0") check_maximum((10, 8, 16, 32), (1,), dtype="int32") 
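    # The (1,) right-hand shape above exercises broadcasting of a length-one operand
    # against the full 4-D tensor; the call below is the matching-shape elementwise case.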
check_maximum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32") def test_forward_minimum(): """test Op Minimum""" def check_minimum(lh_shape, rh_shape, dtype): tf.reset_default_graph() lh_data = np.random.uniform(size=lh_shape).astype(dtype) rh_data = np.random.uniform(size=rh_shape).astype(dtype) with tf.Graph().as_default(): lft_data = tf.placeholder(dtype, name="lft_data") rgt_data = tf.placeholder(dtype, name="rgt_data") tf.math.minimum(lft_data, rgt_data, name="minimum") compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "minimum:0") check_minimum((10, 8, 16, 32), (1,), dtype="int32") check_minimum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32") ####################################################################### # PlaceholderWithDefault # ---------------------- def test_placeholder(): with tf.Graph().as_default(): in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32) var1 = tf.Variable(in_data1, name="in1") var2 = array_ops.placeholder_with_default(var1, None, name="place1") in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32) place1 = array_ops.placeholder(shape=in_data1.shape, dtype=in_data1.dtype, name="in2") out1 = tf.math.add(var1, var2, name="out1") out2 = tf.math.add(out1, place1, name="out2") compare_tf_with_tvm( [in_data1, in_data2], ["place1:0", "in2:0"], "out2:0", init_global_variables=True ) ####################################################################### # OneHot # ---------------------- def _test_forward_one_hot(indices_shape, depth, on_value, off_value, axis, out_dtype): inp_array1 = np.random.randint(0, 5, size=indices_shape) with tf.Graph().as_default(): in1 = tf.placeholder(shape=inp_array1.shape, dtype=inp_array1.dtype) out = tf.one_hot(in1, depth, on_value, off_value, axis, dtype=out_dtype) compare_tf_with_tvm(inp_array1, in1.name, out.name) def test_forward_one_hot(): _test_forward_one_hot((3,), 3, 1, 0, -1, "int32") _test_forward_one_hot((3,), 3, 1.0, 0.0, -1, "float32") _test_forward_one_hot((2, 2), 5, 2, -2, 0, "int32") _test_forward_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32") _test_forward_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32") _test_forward_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32") ####################################################################### # AddN # ---------------------- def _test_forward_add_n(inputs): tf.reset_default_graph() with tf.Graph().as_default(): temp = [] for each in inputs: temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype)) output = tf.add_n(temp) compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name) def test_forward_add_n(): x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32) y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32) z = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32) m, n, o = x.astype(np.float32), y.astype(np.float32), z.astype(np.float32) in0 = x in1 = [x, y] in2 = (x, y, z) in3 = m in4 = [m, n] in5 = (m, n, o) _test_forward_add_n(in0) _test_forward_add_n(in1) _test_forward_add_n(in2) _test_forward_add_n(in3) _test_forward_add_n(in4) _test_forward_add_n(in5) ####################################################################### # Sharing params case # ---------------------- def test_sharing_node(): """Test the sharing params case.""" np_data = np.random.uniform(size=(2, 2, 2)).astype("float32") with tf.Graph().as_default(): in_data = tf.placeholder(tf.float32, shape=(2, 2, 2), name="in_data") axis = tf.constant([-1], dtype=tf.int32, name="axis") mean0 = 
tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean0") mean1 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean1") out = tf.add(mean0, mean1, name="out") compare_tf_with_tvm([np_data], ["in_data:0"], "out:0") ####################################################################### # Unravel Index # ---------------------- def _test_forward_unravel_index(inputs): tf.reset_default_graph() with tf.Graph().as_default(): temp = [] for each in inputs: temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype)) output = tf.unravel_index(temp[0], temp[1]) compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name) def _test_forward_unravel_index_scalar(x, y, dtype="int32"): tf.reset_default_graph() with tf.Graph().as_default(): indices_1 = constant_op.constant(x, dtype=dtype) dims_1 = constant_op.constant(y, dtype=dtype) out_1 = array_ops.unravel_index(indices_1, dims_1) compare_tf_with_tvm([], [], out_1.name) def test_forward_unravel_index(): x = np.array([0, 1, 2, 3]) y = np.array([2, 2]) _test_forward_unravel_index([x, y]) x = np.array([0, 1, 2, 5]) y = np.array([2, 3]) _test_forward_unravel_index([x, y]) x = np.array([0, 1, 2, 5]) y = np.array([6]) _test_forward_unravel_index([x, y]) x = np.array([102, 300, 16]) y = np.array([10, 10, 9, 6]) _test_forward_unravel_index([x, y]) x = np.array([100]) y = np.array([10, 10, 9, 6]) _test_forward_unravel_index([x, y]) # Test scalar input _test_forward_unravel_index_scalar(13, [1, 4, 5, 2]) ####################################################################### # Dilation2d # ---------------------- def _test_dilation2d(tensor_in_sizes, filter_in_sizes, strides, dilations, padding): """ One iteration of dilation2d with given shapes and attributes """ total_size_1 = np.prod(tensor_in_sizes) total_size_2 = np.prod(filter_in_sizes) # Initializes the input tensor with array containing incrementing # numbers from 1. 
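    # Deterministic ramp data (1, 2, 3, ...) is used for both the input and the filter,
    # keeping the TF-vs-TVM comparison reproducible without relying on a random seed.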
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)] filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)] with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32") in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32") nn_ops.dilation2d(in_data, in_filter, strides=strides, rates=dilations, padding=padding) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "Dilation2D:0", no_gpu=True, ) def test_forward_dilation(): _test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID") _test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME") _test_dilation2d([1, 5, 5, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID") _test_dilation2d([1, 5, 5, 1], [3, 3, 1], [1, 1, 1, 1], [1, 2, 2, 1], "VALID") _test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 1, 1], [1, 1, 1, 1], "SAME") _test_dilation2d([1, 28, 28, 3], [5, 5, 3], [1, 2, 2, 1], [1, 1, 1, 1], "VALID") _test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 1, 1, 1], [1, 1, 1, 1], "VALID") _test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME") _test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID") _test_dilation2d([1, 5, 5, 1], [7, 2, 1], [1, 3, 1, 1], [1, 1, 1, 1], "SAME") _test_dilation2d([1, 5, 5, 1], [3, 4, 1], [1, 2, 1, 1], [1, 2, 2, 1], "SAME") _test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 4, 1], [1, 1, 1, 1], "VALID") _test_dilation2d([1, 28, 28, 3], [5, 6, 3], [1, 1, 2, 1], [1, 1, 1, 1], "SAME") _test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 3, 1, 1], [1, 1, 1, 1], "SAME") _test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 2, 2, 1], "SAME") _test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 2, 1], "VALID") ####################################################################### # Sparse To Dense # --------------- def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape): with tf.Graph().as_default(): indices = tf.placeholder( shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices" ) values = tf.placeholder( shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values" ) oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype)) if default_value == None: output = tf.sparse_to_dense(indices, oshape, values) compare_tf_with_tvm( [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name ) else: dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value") output = tf.sparse_to_dense(indices, oshape, values, dv) compare_tf_with_tvm( [sparse_indices, sparse_values, default_value], ["indices:0", "values:0", "default_value:0"], output.name, ) def test_forward_sparse_to_dense(): # scalar _test_sparse_to_dense( sparse_indices=np.int32(1), sparse_values=np.int32(3), default_value=np.int32(0), output_shape=np.array([5]).astype("int32"), ) # vector _test_sparse_to_dense( sparse_indices=np.array([0, 1, 4]).astype("int32"), sparse_values=np.array([3, 3, 3]).astype("int32"), default_value=np.int32(0), output_shape=np.array([5]).astype("int32"), ) # vector nXd _test_sparse_to_dense( sparse_indices=np.array([[0, 0], [1, 2]]).astype("int32"), sparse_values=np.array([1, 2]).astype("int32"), default_value=np.int32(0), output_shape=np.array([3, 4]).astype("int32"), ) _test_sparse_to_dense( sparse_indices=np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"), sparse_values=np.array([1, 
2]).astype("int32"), default_value=np.int32(4), output_shape=np.array([2, 3, 4]).astype("int32"), ) # floats _test_sparse_to_dense( sparse_indices=np.array([0, 1, 4]).astype("int32"), sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"), default_value=np.float32(3.5), output_shape=np.array([5]).astype("int32"), ) # default value not specified _test_sparse_to_dense( sparse_indices=np.array([0, 1, 4]).astype("int32"), sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"), default_value=None, output_shape=np.array([5]).astype("int32"), ) ####################################################################### # infinity ops # ------------ def _verify_infiniteness_ops(tf_op, name): """test operator infinity ops""" # Only float types are allowed in Tensorflow for isfinite and isinf # float16 is failing on cuda tf_dtypes = ["float32", "float64"] for tf_dtype in tf_dtypes: shape = (8, 8) data = np.random.uniform(size=shape).astype(tf_dtype) data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.infty data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan tf.reset_default_graph() in_data = tf.placeholder(tf_dtype, shape, name="in_data") tf_op(in_data, name=name) compare_tf_with_tvm([data], ["in_data:0"], "{}:0".format(name)) def test_forward_isinf(): _verify_infiniteness_ops(tf.is_inf, "isinf") def test_forward_isfinite(): _verify_infiniteness_ops(tf.is_finite, "isfinite") def _test_spop_placeholder_without_shape_info(): with tf.Graph().as_default(): @function.Defun(*[tf.int32] * 2) def Forward(x, y): print(x.name) print(y.name) b = tf.add(x, y) return b pl1 = tf.placeholder(tf.int32, name="pl1") pl2 = tf.placeholder(tf.int32, name="pl2") pl3 = tf.placeholder(tf.int32, name="pl3") data = np.array([[-1, 1], [2, -2]], dtype=np.int32) data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32) data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32) z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward) z2 = z1 + pl3 compare_tf_with_tvm( [data, data2, data3], ["pl1:0", "pl2:0", "pl3:0"], ["StatefulPartitionedCall:0", z2.name], mode="vm", init_global_variables=True, ) def _test_spop_placeholder_with_shape_and_default_value(): with tf.Graph().as_default(): data = np.ones([1], dtype=int).astype(np.int32) dataVar = tf.Variable(data, shape=data.shape) pl1 = array_ops.placeholder_with_default(dataVar, shape=data.shape, name="pl1") tpl = tf.convert_to_tensor(pl1, dtype=tf.int32) @function.Defun(*[tf.int32]) def pl_with_default(pl): return tf.expand_dims(tf.multiply(pl, pl), 0) z = gen_functional_ops.StatefulPartitionedCall( args=[tpl], Tout=[tf.int32], f=pl_with_default ) compare_tf_with_tvm( data, ["pl1:0"], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True ) def _test_spop_placeholder_numpy_arange_feed(): with tf.Graph().as_default(): t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2") t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) @tf.function def add(x, y): return tf.add(x, y, "add_t1_t2") t3 = add(t1, t2) compare_tf_with_tvm( [t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True ) def _test_spop_placeholder_numpy_array_feed(): with tf.Graph().as_default(): t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32) t1 = tf.placeholder(tf.int32, name="t1") t2 
= tf.placeholder(tf.int32, name="t2") @tf.function def add(x, y): return tf.add(x, y, "add_t1_t2") t3 = add(t1, t2) compare_tf_with_tvm( [t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True ) def _test_spop_function_invocation_basic(): with tf.Graph().as_default(): def fun1(a): return tf.multiply(a, a) def fun2(b): return tf.multiply(b, 10) @tf.function def fun3(x, y): x = fun2(x) y = fun1(y) z = tf.add(x, y) return z t3 = fun3(tf.constant(10.5), tf.constant(20.4)) compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True) def _test_spop_function_invocation_nested(): with tf.Graph().as_default(): t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1") t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) t2 = tf.placeholder(tf.int32, name="t2") t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) @tf.function def myfunc(x, y): return tf.add(x, y, "myfunc") @tf.function def myfunc2(x, y): z = myfunc(x, y) l = myfunc(z, y) m = myfunc(l, z) return tf.add(l, m, "myfunc2") res1 = myfunc(t1, t2) res2 = myfunc2(res1, t1) compare_tf_with_tvm( [t1_data, t2_data], ["t1:0", "t2:0"], [res2.name], mode="vm", init_global_variables=True ) def _test_spop_function_invocation_no_autograph(): with tf.Graph().as_default(): @tf.function(autograph=False) def fun1(a): return tf.multiply(a, a) @tf.function(autograph=False) def fun2(b): return tf.multiply(b, 10) @tf.function def fun3(x, y): x = fun2(x) y = fun1(y) z = tf.add(x, y) return z t3 = fun3(tf.constant(10.5), tf.constant(20.4)) compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True) def _test_spop_function_invocation_defun(): with tf.Graph().as_default(): def fun1(a): return tf.multiply(a, a) def fun2(b): return tf.multiply(b, b) @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3") def fun3(x, y): x = fun2(x) y = fun1(y) z = tf.add(x, y) return z op = gen_functional_ops.StatefulPartitionedCall( args=[tf.constant(10.5), tf.constant(20.4)], Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation", ) compare_tf_with_tvm([], [], "SpopFnInvocation:0", mode="vm", init_global_variables=True) def _test_spop_arithmetic(): with tf.Graph().as_default(): @function.Defun(*[dtypes.int32] * 3) def arithmetic(m, x, c): z = tf.add(tf.multiply(m, x), c) return z m = tf.constant(10) x = tf.constant(20) c = tf.constant(2) spopFn = gen_functional_ops.StatefulPartitionedCall( args=[m, x, c], Tout=[tf.int32], f=arithmetic ) compare_tf_with_tvm( [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True ) def _test_spop_control_flow(): with tf.Graph().as_default(): @function.Defun(*[dtypes.float32] * 2) def Body1(x, y): with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"): z = math_ops.multiply(x, y) i = 0 while i < 10: i += 1 if i == 5: continue z = math_ops.multiply(x, y * i) return z op = gen_functional_ops.StatefulPartitionedCall( args=[constant_op.constant(32.0), constant_op.constant(100.0)], Tout=[dtypes.float32], f=Body1, ) compare_tf_with_tvm( [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True ) def _test_spop_variables(): with tf.Graph().as_default(): const1 = tf.constant(10) const2 = tf.constant(20) var1 = tf.Variable(const1, dtype=tf.int32) var2 = tf.Variable(const2, dtype=tf.int32) @function.Defun(tf.int32, tf.int32) def Forward(x, y): return tf.multiply(x, y) z = gen_functional_ops.StatefulPartitionedCall( args=[var1, var2], Tout=[tf.int32], f=Forward ) compare_tf_with_tvm( [], [], "StatefulPartitionedCall:0", 
init_global_variables=True, mode="vm" ) def _test_spop_constants(): with tf.Graph().as_default(): @function.Defun(*[dtypes.int32] * 2) def constantsFn(x, y): vv = tf.constant([2, 3, 4], name="vv") z = tf.add(vv + x, y) return z a = tf.constant(20000, name="a") b = tf.constant(40000, name="b") spopFn = gen_functional_ops.StatefulPartitionedCall( args=[a, b], Tout=[tf.int32], f=constantsFn ) compare_tf_with_tvm( [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True ) def _test_spop_stateful(): # This test case is to test that TVM rejects any TF stateful operations # (including Resource Variables) except StatefulPartitionedCall/PartitionedCall # (as these two operators can still be used as container graphs to execute # "stateless" operations internally. tf.reset_default_graph() with tf.Graph().as_default(): @tf.function def FunctionWithStatefulOp_One(i): b = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10) y = tf.multiply(b, i) return y @tf.function def FunctionWithStatefulOp(m, n): a = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10) x = tf.multiply(a, m) y = FunctionWithStatefulOp_One(n) z = tf.multiply(x, y) return z op = FunctionWithStatefulOp(constant_op.constant(1.0), constant_op.constant(2.0)) with pytest.raises(Exception) as execinfo: compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm") assert execinfo.value.args[0].startswith("The following operators are not implemented") def _test_spop_device_assignment(): # This test case is to test that TVM rejects inconsistent device assignment # while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will # be used as container graphs to internally execute "stateless" operations. tf.reset_default_graph() with tf.Graph().as_default(): def fun1(a): with ops.device("/GPU:0"): return tf.multiply(a, a) def fun2(b): with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"): return tf.multiply(b, b) @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3") def fun3(x, y): with ops.device("/CPU:0"): x = fun2(x) with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"): y = fun1(y) with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"): z = tf.add(x, y) return z op = gen_functional_ops.StatefulPartitionedCall( args=[tf.constant(10.5), tf.constant(20.4)], Tout=[dtypes.float32], f=fun3 ) with pytest.raises(Exception) as execinfo: compare_tf_with_tvm( [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True ) assert execinfo.value.args[0].startswith("Found inconsistent Device assignment") def _test_spop_resource_variables(): # This test case is to test that TVM rejects any graph containing # resource variables with StatefulPartitionedOp. tf.reset_default_graph() with tf.Graph().as_default(): const1 = tf.constant(10) const2 = tf.constant(20) var1 = tf.Variable(const1, dtype=tf.int32, use_resource=True) var2 = tf.Variable(const2, dtype=tf.int32, use_resource=True) @tf.function def resourceVariablesTest(x, y): return tf.multiply(x, y) op = resourceVariablesTest(var1, var2) with pytest.raises(Exception) as execinfo: compare_tf_with_tvm( [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True ) assert execinfo.value.args[0].startswith("Graph is not frozen." 
" Provide a frozen graph") def test_forward_spop(): _test_spop_stateful() _test_spop_device_assignment() _test_spop_resource_variables() # Placeholder test cases _test_spop_placeholder_without_shape_info() _test_spop_placeholder_with_shape_and_default_value() _test_spop_placeholder_numpy_arange_feed() _test_spop_placeholder_numpy_array_feed() # Function Invocation test cases _test_spop_function_invocation_basic() _test_spop_function_invocation_nested() _test_spop_function_invocation_no_autograph() _test_spop_function_invocation_defun() # Test cases for various other TF constructs _test_spop_arithmetic() _test_spop_control_flow() _test_spop_variables() _test_spop_constants() ####################################################################### # Dynamic input shape # ------------------- def test_forward_dynamic_input_shape(): tf.reset_default_graph() with tf.Graph().as_default(): data = tf.placeholder(tf.float32, name="data", shape=(None,)) out = data + 1 np_data = np.random.uniform(size=(2,)).astype("float32") out_name = "add" with tf.Session() as sess: graph_def = tf_testing.AddShapesToGraphDef(sess, out_name) tf_output = run_tf_graph(sess, np_data, "data:0", ["{}:0".format(out_name)]) # TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready. for device in ["llvm"]: ctx = tvm.context(device, 0) if not tvm.testing.device_enabled(device): print("Skip because %s is not enabled" % device) continue tvm_output = run_tvm_graph( graph_def, np_data, ["data"], 1, target=device, layout="NCHW", out_names=[out_name], mode="vm", ignore_in_shape=True, ) tvm.testing.assert_allclose(tvm_output[0], tf_output[0], rtol=1e-5, atol=1e-5) def test_forward_dynmaic_rnn_lstmblockcell(): if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"): return total_series_length = 50000 truncated_backprop_length = 15 state_size = 4 echo_step = 3 batch_size = 5 num_layers = 5 def generateData(): x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5])) y = np.roll(x, echo_step) y[0:echo_step] = 0 x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows y = y.reshape((batch_size, -1)) return (x, y) batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length]) init_state = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size]) state_per_layer_list = tf.unstack(init_state, axis=0) rnn_tuple_state = tuple( [ tf.nn.rnn_cell.LSTMStateTuple( state_per_layer_list[idx][0], state_per_layer_list[idx][1] ) for idx in range(num_layers) ] ) # Forward passes def lstm_cell(): return tensorflow.contrib.rnn.LSTMBlockCell(state_size) cell = tf.nn.rnn_cell.MultiRNNCell( [lstm_cell() for _ in range(num_layers)], state_is_tuple=True ) states_series, current_state = tf.nn.dynamic_rnn( cell, tf.expand_dims(batchX_placeholder, -1), initial_state=rnn_tuple_state ) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) x, y = generateData() _current_state = np.zeros((num_layers, 2, batch_size, state_size)) start_idx = 0 end_idx = start_idx + truncated_backprop_length batchX = x[:, start_idx:end_idx] # Save current state for TVM current_state_tvm = _current_state _current_state, _states_series = sess.run( [current_state, states_series], feed_dict={batchX_placeholder: batchX, init_state: _current_state}, ) # Organize results and corresponding names tf_output = [_states_series] for c in _current_state: tf_output.append(c.c) tf_output.append(c.h) name = [states_series.name.split(":")[0]] for t in current_state: 
name.append(t.c.name.split(":")[0]) name.append(t.h.name.split(":")[0]) graph_def = sess.graph.as_graph_def(add_shapes=True) final_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, name) tvm_output = run_tvm_graph( final_graph_def, [batchX.astype("float32"), current_state_tvm.astype("float32")], ["Placeholder", "Placeholder_1"], out_names=name, num_output=len(name), mode="vm", disabled_pass=["FoldScaleAxis"], ) # Compare result for i in range(len(tf_output)): tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5) if __name__ == "__main__": pytest.main([__file__])
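#######################################################################
# StatefulPartitionedCall graph pattern (illustrative sketch, not a test)
# -----------------------------------------------------------------------
# The SPOP tests above all build the same kind of graph: a Python function
# attached with @function.Defun (or @tf.function) and invoked through
# gen_functional_ops.StatefulPartitionedCall, whose "StatefulPartitionedCall:0"
# output name is then handed to compare_tf_with_tvm. The helper below is a
# minimal, self-contained version of that pattern, run with a plain TF1-style
# session instead of TVM so the graph it produces can be inspected on its own.
# It reuses the module-level imports already used in this file (tf, dtypes,
# function, gen_functional_ops); the function name "AddMul" and the constant
# inputs are made up for illustration, and the helper is deliberately not
# named test_* so pytest does not collect it.
def _spop_graph_sketch():
    with tf.Graph().as_default() as g:

        @function.Defun(dtypes.int32, dtypes.int32, func_name="AddMul")
        def add_mul(x, y):
            # the body is stored as a FunctionDef in the graph's function library
            return tf.add(tf.multiply(x, y), y)

        gen_functional_ops.StatefulPartitionedCall(
            args=[tf.constant(3), tf.constant(4)], Tout=[dtypes.int32], f=add_mul
        )

        with tf.Session(graph=g) as sess:
            # same output tensor name the tests above pass to compare_tf_with_tvm
            return sess.run("StatefulPartitionedCall:0")  # -> 16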
manage.py
from multiprocessing import Process, Pipe, Queue
import tcp_recv
import process
import time
import threading


def main():
    queue1 = Queue()
    queue2 = Queue()
    queues = [queue1, queue2]

    thread = threading.Thread(target=tcp_recv.OpenRecv, args=(queues,))
    p1 = Process(target=process.func, args=("proc1", queue1,))
    p2 = Process(target=process.func, args=("proc2", queue2,))

    thread.start()
    p1.start()
    p2.start()

    input("Running>")

    thread.join()
    p1.join()
    p2.join()


if __name__ == '__main__':
    main()
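# Self-contained sketch of the same fan-out pattern used above: one receiver
# thread feeding per-worker multiprocessing queues. tcp_recv.OpenRecv and
# process.func live in local modules that are not shown in this file, so
# `produce` and `consume` below are hypothetical stand-ins; `_demo()` is
# defined but never called, so the script's behaviour is unchanged.
def produce(queues):
    # stand-in for tcp_recv.OpenRecv: push a few messages onto every queue,
    # then send a None sentinel so each consumer knows it can stop
    for i in range(5):
        for q in queues:
            q.put(f"msg-{i}")
    for q in queues:
        q.put(None)


def consume(name, queue):
    # stand-in for process.func: drain one queue until the sentinel arrives
    while True:
        item = queue.get()
        if item is None:
            break
        print(name, "got", item)


def _demo():
    q1, q2 = Queue(), Queue()
    recv = threading.Thread(target=produce, args=([q1, q2],))
    workers = [Process(target=consume, args=("proc1", q1)),
               Process(target=consume, args=("proc2", q2))]
    recv.start()
    for w in workers:
        w.start()
    recv.join()
    for w in workers:
        w.join()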
test_weakref.py
import gc import sys import unittest import collections import weakref import operator import contextlib import copy import threading import time import random from test import support from test.support import script_helper # Used in ReferencesTestCase.test_ref_created_during_del() . ref_from_del = None # Used by FinalizeTestCase as a global that may be replaced by None # when the interpreter shuts down. _global_var = 'foobar' class C: def method(self): pass class Callable: bar = None def __call__(self, x): self.bar = x def create_function(): def f(): pass return f def create_bound_method(): return C().method class Object: def __init__(self, arg): self.arg = arg def __repr__(self): return "<Object %r>" % self.arg def __eq__(self, other): if isinstance(other, Object): return self.arg == other.arg return NotImplemented def __lt__(self, other): if isinstance(other, Object): return self.arg < other.arg return NotImplemented def __hash__(self): return hash(self.arg) def some_method(self): return 4 def other_method(self): return 5 class RefCycle: def __init__(self): self.cycle = self class TestBase(unittest.TestCase): def setUp(self): self.cbcalled = 0 def callback(self, ref): self.cbcalled += 1 @contextlib.contextmanager def collect_in_thread(period=0.0001): """ Ensure GC collections happen in a different thread, at a high frequency. """ please_stop = False def collect(): while not please_stop: time.sleep(period) gc.collect() with support.disable_gc(): t = threading.Thread(target=collect) t.start() try: yield finally: please_stop = True t.join() class ReferencesTestCase(TestBase): def test_basic_ref(self): self.check_basic_ref(C) self.check_basic_ref(create_function) self.check_basic_ref(create_bound_method) # Just make sure the tp_repr handler doesn't raise an exception. # Live reference: o = C() wr = weakref.ref(o) repr(wr) # Dead reference: del o repr(wr) def test_basic_callback(self): self.check_basic_callback(C) self.check_basic_callback(create_function) self.check_basic_callback(create_bound_method) @support.cpython_only def test_cfunction(self): import _testcapi create_cfunction = _testcapi.create_cfunction f = create_cfunction() wr = weakref.ref(f) self.assertIs(wr(), f) del f self.assertIsNone(wr()) self.check_basic_ref(create_cfunction) self.check_basic_callback(create_cfunction) def test_multiple_callbacks(self): o = C() ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del o self.assertIsNone(ref1(), "expected reference to be invalidated") self.assertIsNone(ref2(), "expected reference to be invalidated") self.assertEqual(self.cbcalled, 2, "callback not called the right number of times") def test_multiple_selfref_callbacks(self): # Make sure all references are invalidated before callbacks are called # # What's important here is that we're using the first # reference in the callback invoked on the second reference # (the most recently created ref is cleaned up first). This # tests that all references to the object are invalidated # before any of the callbacks are invoked, so that we only # have one invocation of _weakref.c:cleanup_helper() active # for a particular object at a time. 
# def callback(object, self=self): self.ref() c = C() self.ref = weakref.ref(c, callback) ref1 = weakref.ref(c, callback) del c def test_constructor_kwargs(self): c = C() self.assertRaises(TypeError, weakref.ref, c, callback=None) def test_proxy_ref(self): o = C() o.bar = 1 ref1 = weakref.proxy(o, self.callback) ref2 = weakref.proxy(o, self.callback) del o def check(proxy): proxy.bar self.assertRaises(ReferenceError, check, ref1) self.assertRaises(ReferenceError, check, ref2) self.assertRaises(ReferenceError, bool, weakref.proxy(C())) self.assertEqual(self.cbcalled, 2) def check_basic_ref(self, factory): o = factory() ref = weakref.ref(o) self.assertIsNotNone(ref(), "weak reference to live object should be live") o2 = ref() self.assertIs(o, o2, "<ref>() should return original object if live") def check_basic_callback(self, factory): self.cbcalled = 0 o = factory() ref = weakref.ref(o, self.callback) del o self.assertEqual(self.cbcalled, 1, "callback did not properly set 'cbcalled'") self.assertIsNone(ref(), "ref2 should be dead after deleting object reference") def test_ref_reuse(self): o = C() ref1 = weakref.ref(o) # create a proxy to make sure that there's an intervening creation # between these two; it should make no difference proxy = weakref.proxy(o) ref2 = weakref.ref(o) self.assertIs(ref1, ref2, "reference object w/out callback should be re-used") o = C() proxy = weakref.proxy(o) ref1 = weakref.ref(o) ref2 = weakref.ref(o) self.assertIs(ref1, ref2, "reference object w/out callback should be re-used") self.assertEqual(weakref.getweakrefcount(o), 2, "wrong weak ref count for object") del proxy self.assertEqual(weakref.getweakrefcount(o), 1, "wrong weak ref count for object after deleting proxy") def test_proxy_reuse(self): o = C() proxy1 = weakref.proxy(o) ref = weakref.ref(o) proxy2 = weakref.proxy(o) self.assertIs(proxy1, proxy2, "proxy object w/out callback should have been re-used") def test_basic_proxy(self): o = C() self.check_proxy(o, weakref.proxy(o)) L = collections.UserList() p = weakref.proxy(L) self.assertFalse(p, "proxy for empty UserList should be false") p.append(12) self.assertEqual(len(L), 1) self.assertTrue(p, "proxy for non-empty UserList should be true") p[:] = [2, 3] self.assertEqual(len(L), 2) self.assertEqual(len(p), 2) self.assertIn(3, p, "proxy didn't support __contains__() properly") p[1] = 5 self.assertEqual(L[1], 5) self.assertEqual(p[1], 5) L2 = collections.UserList(L) p2 = weakref.proxy(L2) self.assertEqual(p, p2) ## self.assertEqual(repr(L2), repr(p2)) L3 = collections.UserList(range(10)) p3 = weakref.proxy(L3) self.assertEqual(L3[:], p3[:]) self.assertEqual(L3[5:], p3[5:]) self.assertEqual(L3[:5], p3[:5]) self.assertEqual(L3[2:5], p3[2:5]) def test_proxy_unicode(self): # See bug 5037 class C(object): def __str__(self): return "string" def __bytes__(self): return b"bytes" instance = C() self.assertIn("__bytes__", dir(weakref.proxy(instance))) self.assertEqual(bytes(weakref.proxy(instance)), b"bytes") def test_proxy_index(self): class C: def __index__(self): return 10 o = C() p = weakref.proxy(o) self.assertEqual(operator.index(p), 10) def test_proxy_div(self): class C: def __floordiv__(self, other): return 42 def __ifloordiv__(self, other): return 21 o = C() p = weakref.proxy(o) self.assertEqual(p // 5, 42) p //= 5 self.assertEqual(p, 21) def test_proxy_matmul(self): class C: def __matmul__(self, other): return 1729 def __rmatmul__(self, other): return -163 def __imatmul__(self, other): return 561 o = C() p = weakref.proxy(o) self.assertEqual(p @ 5, 
1729) self.assertEqual(5 @ p, -163) p @= 5 self.assertEqual(p, 561) # The PyWeakref_* C API is documented as allowing either NULL or # None as the value for the callback, where either means "no # callback". The "no callback" ref and proxy objects are supposed # to be shared so long as they exist by all callers so long as # they are active. In Python 2.3.3 and earlier, this guarantee # was not honored, and was broken in different ways for # PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.) def test_shared_ref_without_callback(self): self.check_shared_without_callback(weakref.ref) def test_shared_proxy_without_callback(self): self.check_shared_without_callback(weakref.proxy) def check_shared_without_callback(self, makeref): o = Object(1) p1 = makeref(o, None) p2 = makeref(o, None) self.assertIs(p1, p2, "both callbacks were None in the C API") del p1, p2 p1 = makeref(o) p2 = makeref(o, None) self.assertIs(p1, p2, "callbacks were NULL, None in the C API") del p1, p2 p1 = makeref(o) p2 = makeref(o) self.assertIs(p1, p2, "both callbacks were NULL in the C API") del p1, p2 p1 = makeref(o, None) p2 = makeref(o) self.assertIs(p1, p2, "callbacks were None, NULL in the C API") def test_callable_proxy(self): o = Callable() ref1 = weakref.proxy(o) self.check_proxy(o, ref1) self.assertIs(type(ref1), weakref.CallableProxyType, "proxy is not of callable type") ref1('twinkies!') self.assertEqual(o.bar, 'twinkies!', "call through proxy not passed through to original") ref1(x='Splat.') self.assertEqual(o.bar, 'Splat.', "call through proxy not passed through to original") # expect due to too few args self.assertRaises(TypeError, ref1) # expect due to too many args self.assertRaises(TypeError, ref1, 1, 2, 3) def check_proxy(self, o, proxy): o.foo = 1 self.assertEqual(proxy.foo, 1, "proxy does not reflect attribute addition") o.foo = 2 self.assertEqual(proxy.foo, 2, "proxy does not reflect attribute modification") del o.foo self.assertFalse(hasattr(proxy, 'foo'), "proxy does not reflect attribute removal") proxy.foo = 1 self.assertEqual(o.foo, 1, "object does not reflect attribute addition via proxy") proxy.foo = 2 self.assertEqual(o.foo, 2, "object does not reflect attribute modification via proxy") del proxy.foo self.assertFalse(hasattr(o, 'foo'), "object does not reflect attribute removal via proxy") def test_proxy_deletion(self): # Test clearing of SF bug #762891 class Foo: result = None def __delitem__(self, accessor): self.result = accessor g = Foo() f = weakref.proxy(g) del f[0] self.assertEqual(f.result, 0) def test_proxy_bool(self): # Test clearing of SF bug #1170766 class List(list): pass lyst = List() self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst)) def test_getweakrefcount(self): o = C() ref1 = weakref.ref(o) ref2 = weakref.ref(o, self.callback) self.assertEqual(weakref.getweakrefcount(o), 2, "got wrong number of weak reference objects") proxy1 = weakref.proxy(o) proxy2 = weakref.proxy(o, self.callback) self.assertEqual(weakref.getweakrefcount(o), 4, "got wrong number of weak reference objects") del ref1, ref2, proxy1, proxy2 self.assertEqual(weakref.getweakrefcount(o), 0, "weak reference objects not unlinked from" " referent when discarded.") # assumes ints do not support weakrefs self.assertEqual(weakref.getweakrefcount(1), 0, "got wrong number of weak reference objects for int") def test_getweakrefs(self): o = C() ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del ref1 self.assertEqual(weakref.getweakrefs(o), [ref2], "list of refs does not match") o = C() 
ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del ref2 self.assertEqual(weakref.getweakrefs(o), [ref1], "list of refs does not match") del ref1 self.assertEqual(weakref.getweakrefs(o), [], "list of refs not cleared") # assumes ints do not support weakrefs self.assertEqual(weakref.getweakrefs(1), [], "list of refs does not match for int") def test_newstyle_number_ops(self): class F(float): pass f = F(2.0) p = weakref.proxy(f) self.assertEqual(p + 1.0, 3.0) self.assertEqual(1.0 + p, 3.0) # this used to SEGV def test_callbacks_protected(self): # Callbacks protected from already-set exceptions? # Regression test for SF bug #478534. class BogusError(Exception): pass data = {} def remove(k): del data[k] def encapsulate(): f = lambda : () data[weakref.ref(f, remove)] = None raise BogusError try: encapsulate() except BogusError: pass else: self.fail("exception not properly restored") try: encapsulate() except BogusError: pass else: self.fail("exception not properly restored") def test_sf_bug_840829(self): # "weakref callbacks and gc corrupt memory" # subtype_dealloc erroneously exposed a new-style instance # already in the process of getting deallocated to gc, # causing double-deallocation if the instance had a weakref # callback that triggered gc. # If the bug exists, there probably won't be an obvious symptom # in a release build. In a debug build, a segfault will occur # when the second attempt to remove the instance from the "list # of all objects" occurs. import gc class C(object): pass c = C() wr = weakref.ref(c, lambda ignore: gc.collect()) del c # There endeth the first part. It gets worse. del wr c1 = C() c1.i = C() wr = weakref.ref(c1.i, lambda ignore: gc.collect()) c2 = C() c2.c1 = c1 del c1 # still alive because c2 points to it # Now when subtype_dealloc gets called on c2, it's not enough just # that c2 is immune from gc while the weakref callbacks associated # with c2 execute (there are none in this 2nd half of the test, btw). # subtype_dealloc goes on to call the base classes' deallocs too, # so any gc triggered by weakref callbacks associated with anything # torn down by a base class dealloc can also trigger double # deallocation of c2. del c2 def test_callback_in_cycle_1(self): import gc class J(object): pass class II(object): def acallback(self, ignore): self.J I = II() I.J = J I.wr = weakref.ref(J, I.acallback) # Now J and II are each in a self-cycle (as all new-style class # objects are, since their __mro__ points back to them). I holds # both a weak reference (I.wr) and a strong reference (I.J) to class # J. I is also in a cycle (I.wr points to a weakref that references # I.acallback). When we del these three, they all become trash, but # the cycles prevent any of them from getting cleaned up immediately. # Instead they have to wait for cyclic gc to deduce that they're # trash. # # gc used to call tp_clear on all of them, and the order in which # it does that is pretty accidental. The exact order in which we # built up these things manages to provoke gc into running tp_clear # in just the right order (I last). Calling tp_clear on II leaves # behind an insane class object (its __mro__ becomes NULL). Calling # tp_clear on J breaks its self-cycle, but J doesn't get deleted # just then because of the strong reference from I.J. Calling # tp_clear on I starts to clear I's __dict__, and just happens to # clear I.J first -- I.wr is still intact. That removes the last # reference to J, which triggers the weakref callback. 
The callback # tries to do "self.J", and instances of new-style classes look up # attributes ("J") in the class dict first. The class (II) wants to # search II.__mro__, but that's NULL. The result was a segfault in # a release build, and an assert failure in a debug build. del I, J, II gc.collect() def test_callback_in_cycle_2(self): import gc # This is just like test_callback_in_cycle_1, except that II is an # old-style class. The symptom is different then: an instance of an # old-style class looks in its own __dict__ first. 'J' happens to # get cleared from I.__dict__ before 'wr', and 'J' was never in II's # __dict__, so the attribute isn't found. The difference is that # the old-style II doesn't have a NULL __mro__ (it doesn't have any # __mro__), so no segfault occurs. Instead it got: # test_callback_in_cycle_2 (__main__.ReferencesTestCase) ... # Exception exceptions.AttributeError: # "II instance has no attribute 'J'" in <bound method II.acallback # of <?.II instance at 0x00B9B4B8>> ignored class J(object): pass class II: def acallback(self, ignore): self.J I = II() I.J = J I.wr = weakref.ref(J, I.acallback) del I, J, II gc.collect() def test_callback_in_cycle_3(self): import gc # This one broke the first patch that fixed the last two. In this # case, the objects reachable from the callback aren't also reachable # from the object (c1) *triggering* the callback: you can get to # c1 from c2, but not vice-versa. The result was that c2's __dict__ # got tp_clear'ed by the time the c2.cb callback got invoked. class C: def cb(self, ignore): self.me self.c1 self.wr c1, c2 = C(), C() c2.me = c2 c2.c1 = c1 c2.wr = weakref.ref(c1, c2.cb) del c1, c2 gc.collect() def test_callback_in_cycle_4(self): import gc # Like test_callback_in_cycle_3, except c2 and c1 have different # classes. c2's class (C) isn't reachable from c1 then, so protecting # objects reachable from the dying object (c1) isn't enough to stop # c2's class (C) from getting tp_clear'ed before c2.cb is invoked. # The result was a segfault (C.__mro__ was NULL when the callback # tried to look up self.me). class C(object): def cb(self, ignore): self.me self.c1 self.wr class D: pass c1, c2 = D(), C() c2.me = c2 c2.c1 = c1 c2.wr = weakref.ref(c1, c2.cb) del c1, c2, C, D gc.collect() @support.requires_type_collecting def test_callback_in_cycle_resurrection(self): import gc # Do something nasty in a weakref callback: resurrect objects # from dead cycles. For this to be attempted, the weakref and # its callback must also be part of the cyclic trash (else the # objects reachable via the callback couldn't be in cyclic trash # to begin with -- the callback would act like an external root). # But gc clears trash weakrefs with callbacks early now, which # disables the callbacks, so the callbacks shouldn't get called # at all (and so nothing actually gets resurrected). alist = [] class C(object): def __init__(self, value): self.attribute = value def acallback(self, ignore): alist.append(self.c) c1, c2 = C(1), C(2) c1.c = c2 c2.c = c1 c1.wr = weakref.ref(c2, c1.acallback) c2.wr = weakref.ref(c1, c2.acallback) def C_went_away(ignore): alist.append("C went away") wr = weakref.ref(C, C_went_away) del c1, c2, C # make them all trash self.assertEqual(alist, []) # del isn't enough to reclaim anything gc.collect() # c1.wr and c2.wr were part of the cyclic trash, so should have # been cleared without their callbacks executing. 
OTOH, the weakref # to C is bound to a function local (wr), and wasn't trash, so that # callback should have been invoked when C went away. self.assertEqual(alist, ["C went away"]) # The remaining weakref should be dead now (its callback ran). self.assertEqual(wr(), None) del alist[:] gc.collect() self.assertEqual(alist, []) def test_callbacks_on_callback(self): import gc # Set up weakref callbacks *on* weakref callbacks. alist = [] def safe_callback(ignore): alist.append("safe_callback called") class C(object): def cb(self, ignore): alist.append("cb called") c, d = C(), C() c.other = d d.other = c callback = c.cb c.wr = weakref.ref(d, callback) # this won't trigger d.wr = weakref.ref(callback, d.cb) # ditto external_wr = weakref.ref(callback, safe_callback) # but this will self.assertIs(external_wr(), callback) # The weakrefs attached to c and d should get cleared, so that # C.cb is never called. But external_wr isn't part of the cyclic # trash, and no cyclic trash is reachable from it, so safe_callback # should get invoked when the bound method object callback (c.cb) # -- which is itself a callback, and also part of the cyclic trash -- # gets reclaimed at the end of gc. del callback, c, d, C self.assertEqual(alist, []) # del isn't enough to clean up cycles gc.collect() self.assertEqual(alist, ["safe_callback called"]) self.assertEqual(external_wr(), None) del alist[:] gc.collect() self.assertEqual(alist, []) def test_gc_during_ref_creation(self): self.check_gc_during_creation(weakref.ref) def test_gc_during_proxy_creation(self): self.check_gc_during_creation(weakref.proxy) def check_gc_during_creation(self, makeref): thresholds = gc.get_threshold() gc.set_threshold(1, 1, 1) gc.collect() class A: pass def callback(*args): pass referenced = A() a = A() a.a = a a.wr = makeref(referenced) try: # now make sure the object and the ref get labeled as # cyclic trash: a = A() weakref.ref(referenced, callback) finally: gc.set_threshold(*thresholds) def test_ref_created_during_del(self): # Bug #1377858 # A weakref created in an object's __del__() would crash the # interpreter when the weakref was cleaned up since it would refer to # non-existent memory. This test should not segfault the interpreter. class Target(object): def __del__(self): global ref_from_del ref_from_del = weakref.ref(self) w = Target() def test_init(self): # Issue 3634 # <weakref to class>.__init__() doesn't check errors correctly r = weakref.ref(Exception) self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0) # No exception should be raised here gc.collect() def test_classes(self): # Check that classes are weakrefable. class A(object): pass l = [] weakref.ref(int) a = weakref.ref(A, l.append) A = None gc.collect() self.assertEqual(a(), None) self.assertEqual(l, [a]) def test_equality(self): # Alive weakrefs defer equality testing to their underlying object. x = Object(1) y = Object(1) z = Object(2) a = weakref.ref(x) b = weakref.ref(y) c = weakref.ref(z) d = weakref.ref(x) # Note how we directly test the operators here, to stress both # __eq__ and __ne__. self.assertTrue(a == b) self.assertFalse(a != b) self.assertFalse(a == c) self.assertTrue(a != c) self.assertTrue(a == d) self.assertFalse(a != d) del x, y, z gc.collect() for r in a, b, c: # Sanity check self.assertIs(r(), None) # Dead weakrefs compare by identity: whether `a` and `d` are the # same weakref object is an implementation detail, since they pointed # to the same original object and didn't have a callback. # (see issue #16453). 
self.assertFalse(a == b) self.assertTrue(a != b) self.assertFalse(a == c) self.assertTrue(a != c) self.assertEqual(a == d, a is d) self.assertEqual(a != d, a is not d) def test_ordering(self): # weakrefs cannot be ordered, even if the underlying objects can. ops = [operator.lt, operator.gt, operator.le, operator.ge] x = Object(1) y = Object(1) a = weakref.ref(x) b = weakref.ref(y) for op in ops: self.assertRaises(TypeError, op, a, b) # Same when dead. del x, y gc.collect() for op in ops: self.assertRaises(TypeError, op, a, b) def test_hashing(self): # Alive weakrefs hash the same as the underlying object x = Object(42) y = Object(42) a = weakref.ref(x) b = weakref.ref(y) self.assertEqual(hash(a), hash(42)) del x, y gc.collect() # Dead weakrefs: # - retain their hash is they were hashed when alive; # - otherwise, cannot be hashed. self.assertEqual(hash(a), hash(42)) self.assertRaises(TypeError, hash, b) def test_trashcan_16602(self): # Issue #16602: when a weakref's target was part of a long # deallocation chain, the trashcan mechanism could delay clearing # of the weakref and make the target object visible from outside # code even though its refcount had dropped to 0. A crash ensued. class C: def __init__(self, parent): if not parent: return wself = weakref.ref(self) def cb(wparent): o = wself() self.wparent = weakref.ref(parent, cb) d = weakref.WeakKeyDictionary() root = c = C(None) for n in range(100): d[c] = c = C(c) del root gc.collect() def test_callback_attribute(self): x = Object(1) callback = lambda ref: None ref1 = weakref.ref(x, callback) self.assertIs(ref1.__callback__, callback) ref2 = weakref.ref(x) self.assertIsNone(ref2.__callback__) def test_callback_attribute_after_deletion(self): x = Object(1) ref = weakref.ref(x, self.callback) self.assertIsNotNone(ref.__callback__) del x support.gc_collect() self.assertIsNone(ref.__callback__) def test_set_callback_attribute(self): x = Object(1) callback = lambda ref: None ref1 = weakref.ref(x, callback) with self.assertRaises(AttributeError): ref1.__callback__ = lambda ref: None def test_callback_gcs(self): class ObjectWithDel(Object): def __del__(self): pass x = ObjectWithDel(1) ref1 = weakref.ref(x, lambda ref: support.gc_collect()) del x support.gc_collect() class SubclassableWeakrefTestCase(TestBase): def test_subclass_refs(self): class MyRef(weakref.ref): def __init__(self, ob, callback=None, value=42): self.value = value super().__init__(ob, callback) def __call__(self): self.called = True return super().__call__() o = Object("foo") mr = MyRef(o, value=24) self.assertIs(mr(), o) self.assertTrue(mr.called) self.assertEqual(mr.value, 24) del o self.assertIsNone(mr()) self.assertTrue(mr.called) def test_subclass_refs_dont_replace_standard_refs(self): class MyRef(weakref.ref): pass o = Object(42) r1 = MyRef(o) r2 = weakref.ref(o) self.assertIsNot(r1, r2) self.assertEqual(weakref.getweakrefs(o), [r2, r1]) self.assertEqual(weakref.getweakrefcount(o), 2) r3 = MyRef(o) self.assertEqual(weakref.getweakrefcount(o), 3) refs = weakref.getweakrefs(o) self.assertEqual(len(refs), 3) self.assertIs(r2, refs[0]) self.assertIn(r1, refs[1:]) self.assertIn(r3, refs[1:]) def test_subclass_refs_dont_conflate_callbacks(self): class MyRef(weakref.ref): pass o = Object(42) r1 = MyRef(o, id) r2 = MyRef(o, str) self.assertIsNot(r1, r2) refs = weakref.getweakrefs(o) self.assertIn(r1, refs) self.assertIn(r2, refs) def test_subclass_refs_with_slots(self): class MyRef(weakref.ref): __slots__ = "slot1", "slot2" def __new__(type, ob, callback, slot1, slot2): 
return weakref.ref.__new__(type, ob, callback) def __init__(self, ob, callback, slot1, slot2): self.slot1 = slot1 self.slot2 = slot2 def meth(self): return self.slot1 + self.slot2 o = Object(42) r = MyRef(o, None, "abc", "def") self.assertEqual(r.slot1, "abc") self.assertEqual(r.slot2, "def") self.assertEqual(r.meth(), "abcdef") self.assertFalse(hasattr(r, "__dict__")) def test_subclass_refs_with_cycle(self): """Confirm https://bugs.python.org/issue3100 is fixed.""" # An instance of a weakref subclass can have attributes. # If such a weakref holds the only strong reference to the object, # deleting the weakref will delete the object. In this case, # the callback must not be called, because the ref object is # being deleted. class MyRef(weakref.ref): pass # Use a local callback, for "regrtest -R::" # to detect refcounting problems def callback(w): self.cbcalled += 1 o = C() r1 = MyRef(o, callback) r1.o = o del o del r1 # Used to crash here self.assertEqual(self.cbcalled, 0) # Same test, with two weakrefs to the same object # (since code paths are different) o = C() r1 = MyRef(o, callback) r2 = MyRef(o, callback) r1.r = r2 r2.o = o del o del r2 del r1 # Used to crash here self.assertEqual(self.cbcalled, 0) class WeakMethodTestCase(unittest.TestCase): def _subclass(self): """Return an Object subclass overriding `some_method`.""" class C(Object): def some_method(self): return 6 return C def test_alive(self): o = Object(1) r = weakref.WeakMethod(o.some_method) self.assertIsInstance(r, weakref.ReferenceType) self.assertIsInstance(r(), type(o.some_method)) self.assertIs(r().__self__, o) self.assertIs(r().__func__, o.some_method.__func__) self.assertEqual(r()(), 4) def test_object_dead(self): o = Object(1) r = weakref.WeakMethod(o.some_method) del o gc.collect() self.assertIs(r(), None) def test_method_dead(self): C = self._subclass() o = C(1) r = weakref.WeakMethod(o.some_method) del C.some_method gc.collect() self.assertIs(r(), None) def test_callback_when_object_dead(self): # Test callback behaviour when object dies first. C = self._subclass() calls = [] def cb(arg): calls.append(arg) o = C(1) r = weakref.WeakMethod(o.some_method, cb) del o gc.collect() self.assertEqual(calls, [r]) # Callback is only called once. C.some_method = Object.some_method gc.collect() self.assertEqual(calls, [r]) def test_callback_when_method_dead(self): # Test callback behaviour when method dies first. C = self._subclass() calls = [] def cb(arg): calls.append(arg) o = C(1) r = weakref.WeakMethod(o.some_method, cb) del C.some_method gc.collect() self.assertEqual(calls, [r]) # Callback is only called once. del o gc.collect() self.assertEqual(calls, [r]) @support.cpython_only def test_no_cycles(self): # A WeakMethod doesn't create any reference cycle to itself. 
o = Object(1) def cb(_): pass r = weakref.WeakMethod(o.some_method, cb) wr = weakref.ref(r) del r self.assertIs(wr(), None) def test_equality(self): def _eq(a, b): self.assertTrue(a == b) self.assertFalse(a != b) def _ne(a, b): self.assertTrue(a != b) self.assertFalse(a == b) x = Object(1) y = Object(1) a = weakref.WeakMethod(x.some_method) b = weakref.WeakMethod(y.some_method) c = weakref.WeakMethod(x.other_method) d = weakref.WeakMethod(y.other_method) # Objects equal, same method _eq(a, b) _eq(c, d) # Objects equal, different method _ne(a, c) _ne(a, d) _ne(b, c) _ne(b, d) # Objects unequal, same or different method z = Object(2) e = weakref.WeakMethod(z.some_method) f = weakref.WeakMethod(z.other_method) _ne(a, e) _ne(a, f) _ne(b, e) _ne(b, f) del x, y, z gc.collect() # Dead WeakMethods compare by identity refs = a, b, c, d, e, f for q in refs: for r in refs: self.assertEqual(q == r, q is r) self.assertEqual(q != r, q is not r) def test_hashing(self): # Alive WeakMethods are hashable if the underlying object is # hashable. x = Object(1) y = Object(1) a = weakref.WeakMethod(x.some_method) b = weakref.WeakMethod(y.some_method) c = weakref.WeakMethod(y.other_method) # Since WeakMethod objects are equal, the hashes should be equal. self.assertEqual(hash(a), hash(b)) ha = hash(a) # Dead WeakMethods retain their old hash value del x, y gc.collect() self.assertEqual(hash(a), ha) self.assertEqual(hash(b), ha) # If it wasn't hashed when alive, a dead WeakMethod cannot be hashed. self.assertRaises(TypeError, hash, c) class MappingTestCase(TestBase): COUNT = 10 def check_len_cycles(self, dict_type, cons): N = 20 items = [RefCycle() for i in range(N)] dct = dict_type(cons(o) for o in items) # Keep an iterator alive it = dct.items() try: next(it) except StopIteration: pass del items gc.collect() n1 = len(dct) del it gc.collect() n2 = len(dct) # one item may be kept alive inside the iterator self.assertIn(n1, (0, 1)) self.assertEqual(n2, 0) def test_weak_keyed_len_cycles(self): self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1)) def test_weak_valued_len_cycles(self): self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k)) def check_len_race(self, dict_type, cons): # Extended sanity checks for len() in the face of cyclic collection self.addCleanup(gc.set_threshold, *gc.get_threshold()) for th in range(1, 100): N = 20 gc.collect(0) gc.set_threshold(th, th, th) items = [RefCycle() for i in range(N)] dct = dict_type(cons(o) for o in items) del items # All items will be collected at next garbage collection pass it = dct.items() try: next(it) except StopIteration: pass n1 = len(dct) del it n2 = len(dct) self.assertGreaterEqual(n1, 0) self.assertLessEqual(n1, N) self.assertGreaterEqual(n2, 0) self.assertLessEqual(n2, n1) def test_weak_keyed_len_race(self): self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1)) def test_weak_valued_len_race(self): self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k)) def test_weak_values(self): # # This exercises d.copy(), d.items(), d[], del d[], len(d). 
# dict, objects = self.make_weak_valued_dict() for o in objects: self.assertEqual(weakref.getweakrefcount(o), 1) self.assertIs(o, dict[o.arg], "wrong object returned by weak dict!") items1 = list(dict.items()) items2 = list(dict.copy().items()) items1.sort() items2.sort() self.assertEqual(items1, items2, "cloning of weak-valued dictionary did not work!") del items1, items2 self.assertEqual(len(dict), self.COUNT) del objects[0] self.assertEqual(len(dict), self.COUNT - 1, "deleting object did not cause dictionary update") del objects, o self.assertEqual(len(dict), 0, "deleting the values did not clear the dictionary") # regression on SF bug #447152: dict = weakref.WeakValueDictionary() self.assertRaises(KeyError, dict.__getitem__, 1) dict[2] = C() self.assertRaises(KeyError, dict.__getitem__, 2) def test_weak_keys(self): # # This exercises d.copy(), d.items(), d[] = v, d[], del d[], # len(d), k in d. # dict, objects = self.make_weak_keyed_dict() for o in objects: self.assertEqual(weakref.getweakrefcount(o), 1, "wrong number of weak references to %r!" % o) self.assertIs(o.arg, dict[o], "wrong object returned by weak dict!") items1 = dict.items() items2 = dict.copy().items() self.assertEqual(set(items1), set(items2), "cloning of weak-keyed dictionary did not work!") del items1, items2 self.assertEqual(len(dict), self.COUNT) del objects[0] self.assertEqual(len(dict), (self.COUNT - 1), "deleting object did not cause dictionary update") del objects, o self.assertEqual(len(dict), 0, "deleting the keys did not clear the dictionary") o = Object(42) dict[o] = "What is the meaning of the universe?" self.assertIn(o, dict) self.assertNotIn(34, dict) def test_weak_keyed_iters(self): dict, objects = self.make_weak_keyed_dict() self.check_iters(dict) # Test keyrefs() refs = dict.keyrefs() self.assertEqual(len(refs), len(objects)) objects2 = list(objects) for wr in refs: ob = wr() self.assertIn(ob, dict) self.assertIn(ob, dict) self.assertEqual(ob.arg, dict[ob]) objects2.remove(ob) self.assertEqual(len(objects2), 0) # Test iterkeyrefs() objects2 = list(objects) self.assertEqual(len(list(dict.keyrefs())), len(objects)) for wr in dict.keyrefs(): ob = wr() self.assertIn(ob, dict) self.assertIn(ob, dict) self.assertEqual(ob.arg, dict[ob]) objects2.remove(ob) self.assertEqual(len(objects2), 0) def test_weak_valued_iters(self): dict, objects = self.make_weak_valued_dict() self.check_iters(dict) # Test valuerefs() refs = dict.valuerefs() self.assertEqual(len(refs), len(objects)) objects2 = list(objects) for wr in refs: ob = wr() self.assertEqual(ob, dict[ob.arg]) self.assertEqual(ob.arg, dict[ob.arg].arg) objects2.remove(ob) self.assertEqual(len(objects2), 0) # Test itervaluerefs() objects2 = list(objects) self.assertEqual(len(list(dict.itervaluerefs())), len(objects)) for wr in dict.itervaluerefs(): ob = wr() self.assertEqual(ob, dict[ob.arg]) self.assertEqual(ob.arg, dict[ob.arg].arg) objects2.remove(ob) self.assertEqual(len(objects2), 0) def check_iters(self, dict): # item iterator: items = list(dict.items()) for item in dict.items(): items.remove(item) self.assertFalse(items, "items() did not touch all items") # key iterator, via __iter__(): keys = list(dict.keys()) for k in dict: keys.remove(k) self.assertFalse(keys, "__iter__() did not touch all keys") # key iterator, via iterkeys(): keys = list(dict.keys()) for k in dict.keys(): keys.remove(k) self.assertFalse(keys, "iterkeys() did not touch all keys") # value iterator: values = list(dict.values()) for v in dict.values(): values.remove(v) 
self.assertFalse(values, "itervalues() did not touch all values") def check_weak_destroy_while_iterating(self, dict, objects, iter_name): n = len(dict) it = iter(getattr(dict, iter_name)()) next(it) # Trigger internal iteration # Destroy an object del objects[-1] gc.collect() # just in case # We have removed either the first consumed object, or another one self.assertIn(len(list(it)), [len(objects), len(objects) - 1]) del it # The removal has been committed self.assertEqual(len(dict), n - 1) def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext): # Check that we can explicitly mutate the weak dict without # interfering with delayed removal. # `testcontext` should create an iterator, destroy one of the # weakref'ed objects and then return a new key/value pair corresponding # to the destroyed object. with testcontext() as (k, v): self.assertNotIn(k, dict) with testcontext() as (k, v): self.assertRaises(KeyError, dict.__delitem__, k) self.assertNotIn(k, dict) with testcontext() as (k, v): self.assertRaises(KeyError, dict.pop, k) self.assertNotIn(k, dict) with testcontext() as (k, v): dict[k] = v self.assertEqual(dict[k], v) ddict = copy.copy(dict) with testcontext() as (k, v): dict.update(ddict) self.assertEqual(dict, ddict) with testcontext() as (k, v): dict.clear() self.assertEqual(len(dict), 0) def check_weak_del_and_len_while_iterating(self, dict, testcontext): # Check that len() works when both iterating and removing keys # explicitly through various means (.pop(), .clear()...), while # implicit mutation is deferred because an iterator is alive. # (each call to testcontext() should schedule one item for removal # for this test to work properly) o = Object(123456) with testcontext(): n = len(dict) # Since underlaying dict is ordered, first item is popped dict.pop(next(dict.keys())) self.assertEqual(len(dict), n - 1) dict[o] = o self.assertEqual(len(dict), n) # last item in objects is removed from dict in context shutdown with testcontext(): self.assertEqual(len(dict), n - 1) # Then, (o, o) is popped dict.popitem() self.assertEqual(len(dict), n - 2) with testcontext(): self.assertEqual(len(dict), n - 3) del dict[next(dict.keys())] self.assertEqual(len(dict), n - 4) with testcontext(): self.assertEqual(len(dict), n - 5) dict.popitem() self.assertEqual(len(dict), n - 6) with testcontext(): dict.clear() self.assertEqual(len(dict), 0) self.assertEqual(len(dict), 0) def test_weak_keys_destroy_while_iterating(self): # Issue #7105: iterators shouldn't crash when a key is implicitly removed dict, objects = self.make_weak_keyed_dict() self.check_weak_destroy_while_iterating(dict, objects, 'keys') self.check_weak_destroy_while_iterating(dict, objects, 'items') self.check_weak_destroy_while_iterating(dict, objects, 'values') self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs') dict, objects = self.make_weak_keyed_dict() @contextlib.contextmanager def testcontext(): try: it = iter(dict.items()) next(it) # Schedule a key/value for removal and recreate it v = objects.pop().arg gc.collect() # just in case yield Object(v), v finally: it = None # should commit all removals gc.collect() self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext) # Issue #21173: len() fragile when keys are both implicitly and # explicitly removed. 
dict, objects = self.make_weak_keyed_dict() self.check_weak_del_and_len_while_iterating(dict, testcontext) def test_weak_values_destroy_while_iterating(self): # Issue #7105: iterators shouldn't crash when a key is implicitly removed dict, objects = self.make_weak_valued_dict() self.check_weak_destroy_while_iterating(dict, objects, 'keys') self.check_weak_destroy_while_iterating(dict, objects, 'items') self.check_weak_destroy_while_iterating(dict, objects, 'values') self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs') self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs') dict, objects = self.make_weak_valued_dict() @contextlib.contextmanager def testcontext(): try: it = iter(dict.items()) next(it) # Schedule a key/value for removal and recreate it k = objects.pop().arg gc.collect() # just in case yield k, Object(k) finally: it = None # should commit all removals gc.collect() self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext) dict, objects = self.make_weak_valued_dict() self.check_weak_del_and_len_while_iterating(dict, testcontext) def test_make_weak_keyed_dict_from_dict(self): o = Object(3) dict = weakref.WeakKeyDictionary({o:364}) self.assertEqual(dict[o], 364) def test_make_weak_keyed_dict_from_weak_keyed_dict(self): o = Object(3) dict = weakref.WeakKeyDictionary({o:364}) dict2 = weakref.WeakKeyDictionary(dict) self.assertEqual(dict[o], 364) def make_weak_keyed_dict(self): dict = weakref.WeakKeyDictionary() objects = list(map(Object, range(self.COUNT))) for o in objects: dict[o] = o.arg return dict, objects def test_make_weak_valued_dict_from_dict(self): o = Object(3) dict = weakref.WeakValueDictionary({364:o}) self.assertEqual(dict[364], o) def test_make_weak_valued_dict_from_weak_valued_dict(self): o = Object(3) dict = weakref.WeakValueDictionary({364:o}) dict2 = weakref.WeakValueDictionary(dict) self.assertEqual(dict[364], o) def test_make_weak_valued_dict_misc(self): # errors self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__) self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {}) self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ()) # special keyword arguments o = Object(3) for kw in 'self', 'dict', 'other', 'iterable': d = weakref.WeakValueDictionary(**{kw: o}) self.assertEqual(list(d.keys()), [kw]) self.assertEqual(d[kw], o) def make_weak_valued_dict(self): dict = weakref.WeakValueDictionary() objects = list(map(Object, range(self.COUNT))) for o in objects: dict[o.arg] = o return dict, objects def check_popitem(self, klass, key1, value1, key2, value2): weakdict = klass() weakdict[key1] = value1 weakdict[key2] = value2 self.assertEqual(len(weakdict), 2) k, v = weakdict.popitem() self.assertEqual(len(weakdict), 1) if k is key1: self.assertIs(v, value1) else: self.assertIs(v, value2) k, v = weakdict.popitem() self.assertEqual(len(weakdict), 0) if k is key1: self.assertIs(v, value1) else: self.assertIs(v, value2) def test_weak_valued_dict_popitem(self): self.check_popitem(weakref.WeakValueDictionary, "key1", C(), "key2", C()) def test_weak_keyed_dict_popitem(self): self.check_popitem(weakref.WeakKeyDictionary, C(), "value 1", C(), "value 2") def check_setdefault(self, klass, key, value1, value2): self.assertIsNot(value1, value2, "invalid test" " -- value parameters must be distinct objects") weakdict = klass() o = weakdict.setdefault(key, value1) self.assertIs(o, value1) self.assertIn(key, weakdict) self.assertIs(weakdict.get(key), value1) self.assertIs(weakdict[key], value1) o = 
weakdict.setdefault(key, value2) self.assertIs(o, value1) self.assertIn(key, weakdict) self.assertIs(weakdict.get(key), value1) self.assertIs(weakdict[key], value1) def test_weak_valued_dict_setdefault(self): self.check_setdefault(weakref.WeakValueDictionary, "key", C(), C()) def test_weak_keyed_dict_setdefault(self): self.check_setdefault(weakref.WeakKeyDictionary, C(), "value 1", "value 2") def check_update(self, klass, dict): # # This exercises d.update(), len(d), d.keys(), k in d, # d.get(), d[]. # weakdict = klass() weakdict.update(dict) self.assertEqual(len(weakdict), len(dict)) for k in weakdict.keys(): self.assertIn(k, dict, "mysterious new key appeared in weak dict") v = dict.get(k) self.assertIs(v, weakdict[k]) self.assertIs(v, weakdict.get(k)) for k in dict.keys(): self.assertIn(k, weakdict, "original key disappeared in weak dict") v = dict[k] self.assertIs(v, weakdict[k]) self.assertIs(v, weakdict.get(k)) def test_weak_valued_dict_update(self): self.check_update(weakref.WeakValueDictionary, {1: C(), 'a': C(), C(): C()}) # errors self.assertRaises(TypeError, weakref.WeakValueDictionary.update) d = weakref.WeakValueDictionary() self.assertRaises(TypeError, d.update, {}, {}) self.assertRaises(TypeError, d.update, (), ()) self.assertEqual(list(d.keys()), []) # special keyword arguments o = Object(3) for kw in 'self', 'dict', 'other', 'iterable': d = weakref.WeakValueDictionary() d.update(**{kw: o}) self.assertEqual(list(d.keys()), [kw]) self.assertEqual(d[kw], o) def test_weak_keyed_dict_update(self): self.check_update(weakref.WeakKeyDictionary, {C(): 1, C(): 2, C(): 3}) def test_weak_keyed_delitem(self): d = weakref.WeakKeyDictionary() o1 = Object('1') o2 = Object('2') d[o1] = 'something' d[o2] = 'something' self.assertEqual(len(d), 2) del d[o1] self.assertEqual(len(d), 1) self.assertEqual(list(d.keys()), [o2]) def test_weak_valued_delitem(self): d = weakref.WeakValueDictionary() o1 = Object('1') o2 = Object('2') d['something'] = o1 d['something else'] = o2 self.assertEqual(len(d), 2) del d['something'] self.assertEqual(len(d), 1) self.assertEqual(list(d.items()), [('something else', o2)]) def test_weak_keyed_bad_delitem(self): d = weakref.WeakKeyDictionary() o = Object('1') # An attempt to delete an object that isn't there should raise # KeyError. It didn't before 2.3. self.assertRaises(KeyError, d.__delitem__, o) self.assertRaises(KeyError, d.__getitem__, o) # If a key isn't of a weakly referencable type, __getitem__ and # __setitem__ raise TypeError. __delitem__ should too. self.assertRaises(TypeError, d.__delitem__, 13) self.assertRaises(TypeError, d.__getitem__, 13) self.assertRaises(TypeError, d.__setitem__, 13, 13) def test_weak_keyed_cascading_deletes(self): # SF bug 742860. For some reason, before 2.3 __delitem__ iterated # over the keys via self.data.iterkeys(). If things vanished from # the dict during this (or got added), that caused a RuntimeError. d = weakref.WeakKeyDictionary() mutate = False class C(object): def __init__(self, i): self.value = i def __hash__(self): return hash(self.value) def __eq__(self, other): if mutate: # Side effect that mutates the dict, by removing the # last strong reference to a key. del objs[-1] return self.value == other.value objs = [C(i) for i in range(4)] for o in objs: d[o] = o.value del o # now the only strong references to keys are in objs # Find the order in which iterkeys sees the keys. objs = list(d.keys()) # Reverse it, so that the iteration implementation of __delitem__ # has to keep looping to find the first object we delete. 
objs.reverse() # Turn on mutation in C.__eq__. The first time through the loop, # under the iterkeys() business the first comparison will delete # the last item iterkeys() would see, and that causes a # RuntimeError: dictionary changed size during iteration # when the iterkeys() loop goes around to try comparing the next # key. After this was fixed, it just deletes the last object *our* # "for o in obj" loop would have gotten to. mutate = True count = 0 for o in objs: count += 1 del d[o] self.assertEqual(len(d), 0) self.assertEqual(count, 2) def test_make_weak_valued_dict_repr(self): dict = weakref.WeakValueDictionary() self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>') def test_make_weak_keyed_dict_repr(self): dict = weakref.WeakKeyDictionary() self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>') def test_threaded_weak_valued_setdefault(self): d = weakref.WeakValueDictionary() with collect_in_thread(): for i in range(100000): x = d.setdefault(10, RefCycle()) self.assertIsNot(x, None) # we never put None in there! del x def test_threaded_weak_valued_pop(self): d = weakref.WeakValueDictionary() with collect_in_thread(): for i in range(100000): d[10] = RefCycle() x = d.pop(10, 10) self.assertIsNot(x, None) # we never put None in there! def test_threaded_weak_valued_consistency(self): # Issue #28427: old keys should not remove new values from # WeakValueDictionary when collecting from another thread. d = weakref.WeakValueDictionary() with collect_in_thread(): for i in range(200000): o = RefCycle() d[10] = o # o is still alive, so the dict can't be empty self.assertEqual(len(d), 1) o = None # lose ref def check_threaded_weak_dict_copy(self, type_, deepcopy): # `type_` should be either WeakKeyDictionary or WeakValueDictionary. # `deepcopy` should be either True or False. exc = [] class DummyKey: def __init__(self, ctr): self.ctr = ctr class DummyValue: def __init__(self, ctr): self.ctr = ctr def dict_copy(d, exc): try: if deepcopy is True: _ = copy.deepcopy(d) else: _ = d.copy() except Exception as ex: exc.append(ex) def pop_and_collect(lst): gc_ctr = 0 while lst: i = random.randint(0, len(lst) - 1) gc_ctr += 1 lst.pop(i) if gc_ctr % 10000 == 0: gc.collect() # just in case self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary)) d = type_() keys = [] values = [] # Initialize d with many entries for i in range(70000): k, v = DummyKey(i), DummyValue(i) keys.append(k) values.append(v) d[k] = v del k del v t_copy = threading.Thread(target=dict_copy, args=(d, exc,)) if type_ is weakref.WeakKeyDictionary: t_collect = threading.Thread(target=pop_and_collect, args=(keys,)) else: # weakref.WeakValueDictionary t_collect = threading.Thread(target=pop_and_collect, args=(values,)) t_copy.start() t_collect.start() t_copy.join() t_collect.join() # Test exceptions if exc: raise exc[0] def test_threaded_weak_key_dict_copy(self): # Issue #35615: Weakref keys or values getting GC'ed during dict # copying should not result in a crash. self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False) def test_threaded_weak_key_dict_deepcopy(self): # Issue #35615: Weakref keys or values getting GC'ed during dict # copying should not result in a crash. self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True) def test_threaded_weak_value_dict_copy(self): # Issue #35615: Weakref keys or values getting GC'ed during dict # copying should not result in a crash. 
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False) def test_threaded_weak_value_dict_deepcopy(self): # Issue #35615: Weakref keys or values getting GC'ed during dict # copying should not result in a crash. self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True) @support.cpython_only def test_remove_closure(self): d = weakref.WeakValueDictionary() self.assertIsNone(d._remove.__closure__) from test import mapping_tests class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol): """Check that WeakValueDictionary conforms to the mapping protocol""" __ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)} type2test = weakref.WeakValueDictionary def _reference(self): return self.__ref.copy() class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol): """Check that WeakKeyDictionary conforms to the mapping protocol""" __ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3} type2test = weakref.WeakKeyDictionary def _reference(self): return self.__ref.copy() class FinalizeTestCase(unittest.TestCase): class A: pass def _collect_if_necessary(self): # we create no ref-cycles so in CPython no gc should be needed if sys.implementation.name != 'cpython': support.gc_collect() def test_finalize(self): def add(x,y,z): res.append(x + y + z) return x + y + z a = self.A() res = [] f = weakref.finalize(a, add, 67, 43, z=89) self.assertEqual(f.alive, True) self.assertEqual(f.peek(), (a, add, (67,43), {'z':89})) self.assertEqual(f(), 199) self.assertEqual(f(), None) self.assertEqual(f(), None) self.assertEqual(f.peek(), None) self.assertEqual(f.detach(), None) self.assertEqual(f.alive, False) self.assertEqual(res, [199]) res = [] f = weakref.finalize(a, add, 67, 43, 89) self.assertEqual(f.peek(), (a, add, (67,43,89), {})) self.assertEqual(f.detach(), (a, add, (67,43,89), {})) self.assertEqual(f(), None) self.assertEqual(f(), None) self.assertEqual(f.peek(), None) self.assertEqual(f.detach(), None) self.assertEqual(f.alive, False) self.assertEqual(res, []) res = [] f = weakref.finalize(a, add, x=67, y=43, z=89) del a self._collect_if_necessary() self.assertEqual(f(), None) self.assertEqual(f(), None) self.assertEqual(f.peek(), None) self.assertEqual(f.detach(), None) self.assertEqual(f.alive, False) self.assertEqual(res, [199]) def test_arg_errors(self): def fin(*args, **kwargs): res.append((args, kwargs)) a = self.A() res = [] f = weakref.finalize(a, fin, 1, 2, func=3, obj=4) self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4})) f() self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})]) res = [] with self.assertWarns(DeprecationWarning): f = weakref.finalize(a, func=fin, arg=1) self.assertEqual(f.peek(), (a, fin, (), {'arg': 1})) f() self.assertEqual(res, [((), {'arg': 1})]) res = [] with self.assertWarns(DeprecationWarning): f = weakref.finalize(obj=a, func=fin, arg=1) self.assertEqual(f.peek(), (a, fin, (), {'arg': 1})) f() self.assertEqual(res, [((), {'arg': 1})]) self.assertRaises(TypeError, weakref.finalize, a) self.assertRaises(TypeError, weakref.finalize) def test_order(self): a = self.A() res = [] f1 = weakref.finalize(a, res.append, 'f1') f2 = weakref.finalize(a, res.append, 'f2') f3 = weakref.finalize(a, res.append, 'f3') f4 = weakref.finalize(a, res.append, 'f4') f5 = weakref.finalize(a, res.append, 'f5') # make sure finalizers can keep themselves alive del f1, f4 self.assertTrue(f2.alive) self.assertTrue(f3.alive) self.assertTrue(f5.alive) self.assertTrue(f5.detach()) self.assertFalse(f5.alive) f5() # nothing 
because previously unregistered res.append('A') f3() # => res.append('f3') self.assertFalse(f3.alive) res.append('B') f3() # nothing because previously called res.append('C') del a self._collect_if_necessary() # => res.append('f4') # => res.append('f2') # => res.append('f1') self.assertFalse(f2.alive) res.append('D') f2() # nothing because previously called by gc expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D'] self.assertEqual(res, expected) def test_all_freed(self): # we want a weakrefable subclass of weakref.finalize class MyFinalizer(weakref.finalize): pass a = self.A() res = [] def callback(): res.append(123) f = MyFinalizer(a, callback) wr_callback = weakref.ref(callback) wr_f = weakref.ref(f) del callback, f self.assertIsNotNone(wr_callback()) self.assertIsNotNone(wr_f()) del a self._collect_if_necessary() self.assertIsNone(wr_callback()) self.assertIsNone(wr_f()) self.assertEqual(res, [123]) @classmethod def run_in_child(cls): def error(): # Create an atexit finalizer from inside a finalizer called # at exit. This should be the next to be run. g1 = weakref.finalize(cls, print, 'g1') print('f3 error') 1/0 # cls should stay alive till atexit callbacks run f1 = weakref.finalize(cls, print, 'f1', _global_var) f2 = weakref.finalize(cls, print, 'f2', _global_var) f3 = weakref.finalize(cls, error) f4 = weakref.finalize(cls, print, 'f4', _global_var) assert f1.atexit == True f2.atexit = False assert f3.atexit == True assert f4.atexit == True def test_atexit(self): prog = ('from test.test_weakref import FinalizeTestCase;'+ 'FinalizeTestCase.run_in_child()') rc, out, err = script_helper.assert_python_ok('-c', prog) out = out.decode('ascii').splitlines() self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar']) self.assertTrue(b'ZeroDivisionError' in err) libreftest = """ Doctest for examples in the library reference: weakref.rst >>> import weakref >>> class Dict(dict): ... pass ... >>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable >>> r = weakref.ref(obj) >>> print(r() is obj) True >>> import weakref >>> class Object: ... pass ... >>> o = Object() >>> r = weakref.ref(o) >>> o2 = r() >>> o is o2 True >>> del o, o2 >>> print(r()) None >>> import weakref >>> class ExtendedRef(weakref.ref): ... def __init__(self, ob, callback=None, **annotations): ... super().__init__(ob, callback) ... self.__counter = 0 ... for k, v in annotations.items(): ... setattr(self, k, v) ... def __call__(self): ... '''Return a pair containing the referent and the number of ... times the reference has been called. ... ''' ... ob = super().__call__() ... if ob is not None: ... self.__counter += 1 ... ob = (ob, self.__counter) ... return ob ... >>> class A: # not in docs from here, just testing the ExtendedRef ... pass ... >>> a = A() >>> r = ExtendedRef(a, foo=1, bar="baz") >>> r.foo 1 >>> r.bar 'baz' >>> r()[1] 1 >>> r()[1] 2 >>> r()[0] is a True >>> import weakref >>> _id2obj_dict = weakref.WeakValueDictionary() >>> def remember(obj): ... oid = id(obj) ... _id2obj_dict[oid] = obj ... return oid ... >>> def id2obj(oid): ... return _id2obj_dict[oid] ... >>> a = A() # from here, just testing >>> a_id = remember(a) >>> id2obj(a_id) is a True >>> del a >>> try: ... id2obj(a_id) ... except KeyError: ... print('OK') ... else: ... 
print('WeakValueDictionary error')
OK

"""

__test__ = {'libreftest' : libreftest}


def test_main():
    support.run_unittest(
        ReferencesTestCase,
        WeakMethodTestCase,
        MappingTestCase,
        WeakValueDictionaryTestCase,
        WeakKeyDictionaryTestCase,
        SubclassableWeakrefTestCase,
        FinalizeTestCase,
        )
    support.run_doctest(sys.modules[__name__])


if __name__ == "__main__":
    test_main()
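
# A minimal sketch (not part of the test suite above) of the weakref.finalize
# behaviour FinalizeTestCase relies on: a finalizer fires its callback at most
# once, either when the referent is collected or when it is called explicitly.
# TempResource and the "res-1" tag are illustrative names only.
def _finalize_sketch():
    import weakref

    class TempResource:
        pass

    def _cleanup(tag):
        print("cleaned up", tag)

    res = TempResource()
    fin = weakref.finalize(res, _cleanup, "res-1")
    assert fin.alive
    fin()                 # runs the callback now and marks the finalizer dead
    assert not fin.alive
    fin()                 # every later call is a no-op and returns None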
train.py
"""Initial experiments with the lenet network to check the trends of the time with k, batch and parallelism""" import argparse import time from multiprocessing import Process from typing import Tuple from common.experiment import * from common.metrics import start_api from common.utils import * output_folder = './tests/' EPOCHS = 30 def run_lenet(k: int, batch: int, parallelism: int): req = TrainRequest( model_type='lenet', batch_size=batch, epochs=EPOCHS, dataset='mnist', lr=0.01, function_name='lenet', options=TrainOptions( default_parallelism=parallelism, static_parallelism=True, k=k, validate_every=1, goal_accuracy=100 ) ) exp = KubemlExperiment(get_title(req), req) exp.run() # exp._fake_history() exp.save(output_folder) def run_resnet(k: int, batch: int, parallelism: int): req = TrainRequest( model_type='resnet34', batch_size=batch, epochs=EPOCHS, dataset='cifar10', lr=0.1, function_name='resnet', options=TrainOptions( default_parallelism=parallelism, static_parallelism=True, k=k, validate_every=1, goal_accuracy=100 ) ) exp = KubemlExperiment(get_title(req), req) exp.run() # print(exp.to_dataframe()) exp.save(output_folder) def run_api(path=None) -> Process: """Starts the API for setting the metrics""" print('Starting api') if path is not None: p = Process(target=start_api, args=(path,)) else: p = Process(target=start_api) p.start() print('Process started...') return p def full_parameter_grid(network: str) -> List[Tuple[int, int, int]]: """Generator for the full experiments""" if network == 'lenet': grid = lenet_grid else: grid = resnet_grid exps = [] for b in grid['batch']: for k in grid['k']: for p in grid['parallelism']: exps.append((b, k, p)) return exps def resume_parameter_grid(network: str, folder: str, replications: int = 1): # find the missing experiments from the folder missing = check_missing_experiments(network, folder, replications) return missing def check_folder(path: str) -> bool: return os.path.isdir(path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--network', help='Network type for the experiments from [lenet, resnet]') parser.add_argument('--resume', dest='resume', action='store_true', help='''Whether to check for missing experiments and just run those (best in case of errors preventing the execution of part of the experiments)''') parser.add_argument('--folder', help='''if resume is true, path to the folder where all the finished experiments reside''') parser.add_argument('--dry', dest='dry', action='store_true', help='If true, just print the experiments') parser.add_argument('-o', help='Folder to save the experiment results to') parser.add_argument('-m', help='folder to save the metrics to') parser.add_argument('-r', help='Number of replications to run', default=1, type=int) parser.set_defaults(resume=False, dry=False) args = parser.parse_args() net = args.network if not net: print("Network not set") exit(-1) elif net not in ('lenet', 'resnet'): print('Network', net, 'not among accepted (lenet, resnet)') exit(-1) if args.o: if not check_folder(args.o): print('Given folder does not exist', args.o) raise ValueError print("Using", args.o, 'as output folder') output_folder = args.o if args.resume: if not args.folder: print("Error: Folder not specified with resume") exit(-1) exps = resume_parameter_grid(net, args.folder, args.r) output_folder = args.folder print("Using", args.folder, 'as output folder') else: exps = full_parameter_grid(net) # if dry, simply print the experiments and return if args.dry: for e in exps: print(e) 
        exit(0)

    api: Process = None
    try:
        if args.m:
            if not check_folder(args.m):
                print('Given folder does not exist', args.m)
                raise ValueError
            api = run_api(path=args.m)
        else:
            # Start the API to collect the metrics
            api = run_api()

        time.sleep(5)

        # based on the arg determine the function
        func = run_resnet if net == 'resnet' else run_lenet
        print('Using func', func)

        replications = args.r

        # if resume, the experiments already come with the replications implicit
        if args.resume:
            for batch, k, parallelism in exps:
                print(batch, k, parallelism)
                func(k, batch, parallelism)
                time.sleep(25)
        else:
            for i in range(1, replications + 1):
                print('Starting with replication', i)
                for batch, k, parallelism in exps:
                    print(batch, k, parallelism)
                    func(k, batch, parallelism)
                    time.sleep(25)
                print('Replication', i, 'finished')

    finally:
        print("all experiments finished")
        if api is not None:
            print(api.pid)
            api.terminate()
            api.join()
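
# A small, self-contained sketch of how the (batch, k, parallelism) grid above
# expands; itertools.product yields the same tuples, in the same order, as the
# three nested loops in full_parameter_grid().  The grid values here are
# placeholders, not the real lenet/resnet grids.
from itertools import product
from typing import Dict, List, Tuple


def expand_grid(grid: Dict[str, List[int]]) -> List[Tuple[int, int, int]]:
    # One tuple per combination, ordered batch -> k -> parallelism.
    return list(product(grid['batch'], grid['k'], grid['parallelism']))


_example_grid = {'batch': [64, 128], 'k': [1, 4], 'parallelism': [2, 8]}
assert len(expand_grid(_example_grid)) == 8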
store.py
import json import logging import os import threading import time import uuid as uuid_builder from copy import deepcopy from os import mkdir, path, unlink from threading import Lock from changedetectionio.notification import ( default_notification_body, default_notification_format, default_notification_title, ) # Is there an existing library to ensure some data store (JSON etc) is in sync with CRUD methods? # Open a github issue if you know something :) # https://stackoverflow.com/questions/6190468/how-to-trigger-function-on-value-change class ChangeDetectionStore: lock = Lock() def __init__(self, datastore_path="/datastore", include_default_watches=True, version_tag="0.0.0"): # Should only be active for docker # logging.basicConfig(filename='/dev/stdout', level=logging.INFO) self.needs_write = False self.datastore_path = datastore_path self.json_store_path = "{}/url-watches.json".format(self.datastore_path) self.stop_thread = False self.__data = { 'note': "Hello! If you change this file manually, please be sure to restart your changedetection.io instance!", 'watching': {}, 'settings': { 'headers': { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Encoding': 'gzip, deflate', # No support for brolti in python requests yet. 'Accept-Language': 'en-GB,en-US;q=0.9,en;' }, 'requests': { 'timeout': 15, # Default 15 seconds 'minutes_between_check': 3 * 60, # Default 3 hours 'workers': 10 # Number of threads, lower is better for slow connections }, 'application': { 'password': False, 'base_url' : None, 'extract_title_as_title': False, 'fetch_backend': 'html_requests', 'global_ignore_text': [], # List of text to ignore when calculating the comparison checksum 'global_subtractive_selectors': [], 'ignore_whitespace': False, 'notification_urls': [], # Apprise URL list # Custom notification content 'notification_title': default_notification_title, 'notification_body': default_notification_body, 'notification_format': default_notification_format, 'real_browser_save_screenshot': True, } } } # Base definition for all watchers self.generic_definition = { 'url': None, 'tag': None, 'last_checked': 0, 'last_changed': 0, 'paused': False, 'last_viewed': 0, # history key value of the last viewed via the [diff] link 'newest_history_key': "", 'title': None, # Re #110, so then if this is set to None, we know to use the default value instead # Requires setting to None on submit if it's the same as the default 'minutes_between_check': None, 'previous_md5': "", 'uuid': str(uuid_builder.uuid4()), 'headers': {}, # Extra headers to send 'body': None, 'method': 'GET', 'history': {}, # Dict of timestamp and output stripped filename 'ignore_text': [], # List of text to ignore when calculating the comparison checksum # Custom notification content 'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise) 'notification_title': default_notification_title, 'notification_body': default_notification_body, 'notification_format': default_notification_format, 'css_filter': "", 'subtractive_selectors': [], 'trigger_text': [], # List of text or regex to wait for until a change is detected 'fetch_backend': None, 'extract_title_as_title': False } if path.isfile('changedetectionio/source.txt'): with open('changedetectionio/source.txt') as f: # Should be set in Dockerfile to look 
for /source.txt , this will give us the git commit # # So when someone gives us a backup file to examine, we know exactly what code they were running. self.__data['build_sha'] = f.read() try: # @todo retest with ", encoding='utf-8'" with open(self.json_store_path) as json_file: from_disk = json.load(json_file) # @todo isnt there a way todo this dict.update recursively? # Problem here is if the one on the disk is missing a sub-struct, it wont be present anymore. if 'watching' in from_disk: self.__data['watching'].update(from_disk['watching']) if 'app_guid' in from_disk: self.__data['app_guid'] = from_disk['app_guid'] if 'settings' in from_disk: if 'headers' in from_disk['settings']: self.__data['settings']['headers'].update(from_disk['settings']['headers']) if 'requests' in from_disk['settings']: self.__data['settings']['requests'].update(from_disk['settings']['requests']) if 'application' in from_disk['settings']: self.__data['settings']['application'].update(from_disk['settings']['application']) # Reinitialise each `watching` with our generic_definition in the case that we add a new var in the future. # @todo pretty sure theres a python we todo this with an abstracted(?) object! for uuid, watch in self.__data['watching'].items(): _blank = deepcopy(self.generic_definition) _blank.update(watch) self.__data['watching'].update({uuid: _blank}) self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid) print("Watching:", uuid, self.__data['watching'][uuid]['url']) # First time ran, doesnt exist. except (FileNotFoundError, json.decoder.JSONDecodeError): if include_default_watches: print("Creating JSON store at", self.datastore_path) self.add_watch(url='http://www.quotationspage.com/random.php', tag='test') self.add_watch(url='https://news.ycombinator.com/', tag='Tech news') self.add_watch(url='https://www.gov.uk/coronavirus', tag='Covid') self.add_watch(url='https://changedetection.io/CHANGELOG.txt') self.__data['version_tag'] = version_tag # Helper to remove password protection password_reset_lockfile = "{}/removepassword.lock".format(self.datastore_path) if path.isfile(password_reset_lockfile): self.__data['settings']['application']['password'] = False unlink(password_reset_lockfile) if not 'app_guid' in self.__data: import os import sys if "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ: self.__data['app_guid'] = "test-" + str(uuid_builder.uuid4()) else: self.__data['app_guid'] = str(uuid_builder.uuid4()) # Generate the URL access token for RSS feeds if not 'rss_access_token' in self.__data['settings']['application']: import secrets secret = secrets.token_hex(16) self.__data['settings']['application']['rss_access_token'] = secret self.needs_write = True # Finally start the thread that will manage periodic data saves to JSON save_data_thread = threading.Thread(target=self.save_datastore).start() # Returns the newest key, but if theres only 1 record, then it's counted as not being new, so return 0. 
def get_newest_history_key(self, uuid): if len(self.__data['watching'][uuid]['history']) == 1: return 0 dates = list(self.__data['watching'][uuid]['history'].keys()) # Convert to int, sort and back to str again # @todo replace datastore getter that does this automatically dates = [int(i) for i in dates] dates.sort(reverse=True) if len(dates): # always keyed as str return str(dates[0]) return 0 def set_last_viewed(self, uuid, timestamp): self.data['watching'][uuid].update({'last_viewed': int(timestamp)}) self.needs_write = True def update_watch(self, uuid, update_obj): with self.lock: # In python 3.9 we have the |= dict operator, but that still will lose data on nested structures... for dict_key, d in self.generic_definition.items(): if isinstance(d, dict): if update_obj is not None and dict_key in update_obj: self.__data['watching'][uuid][dict_key].update(update_obj[dict_key]) del (update_obj[dict_key]) self.__data['watching'][uuid].update(update_obj) self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid) self.needs_write = True @property def data(self): has_unviewed = False for uuid, v in self.__data['watching'].items(): self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid) if int(v['newest_history_key']) <= int(v['last_viewed']): self.__data['watching'][uuid]['viewed'] = True else: self.__data['watching'][uuid]['viewed'] = False has_unviewed = True # #106 - Be sure this is None on empty string, False, None, etc # Default var for fetch_backend if not self.__data['watching'][uuid]['fetch_backend']: self.__data['watching'][uuid]['fetch_backend'] = self.__data['settings']['application']['fetch_backend'] # Re #152, Return env base_url if not overriden, @todo also prefer the proxy pass url env_base_url = os.getenv('BASE_URL','') if not self.__data['settings']['application']['base_url']: self.__data['settings']['application']['base_url'] = env_base_url.strip('" ') self.__data['has_unviewed'] = has_unviewed return self.__data def get_all_tags(self): tags = [] for uuid, watch in self.data['watching'].items(): # Support for comma separated list of tags. for tag in watch['tag'].split(','): tag = tag.strip() if tag not in tags: tags.append(tag) tags.sort() return tags def unlink_history_file(self, path): try: unlink(path) except (FileNotFoundError, IOError): pass # Delete a single watch by UUID def delete(self, uuid): with self.lock: if uuid == 'all': self.__data['watching'] = {} # GitHub #30 also delete history records for uuid in self.data['watching']: for path in self.data['watching'][uuid]['history'].values(): self.unlink_history_file(path) else: for path in self.data['watching'][uuid]['history'].values(): self.unlink_history_file(path) del self.data['watching'][uuid] self.needs_write = True # Clone a watch by UUID def clone(self, uuid): url = self.data['watching'][uuid]['url'] tag = self.data['watching'][uuid]['tag'] extras = self.data['watching'][uuid] new_uuid = self.add_watch(url=url, tag=tag, extras=extras) return new_uuid def url_exists(self, url): # Probably their should be dict... for watch in self.data['watching'].values(): if watch['url'] == url: return True return False def get_val(self, uuid, val): # Probably their should be dict... 
return self.data['watching'][uuid].get(val) # Remove a watchs data but keep the entry (URL etc) def scrub_watch(self, uuid, limit_timestamp = False): import hashlib del_timestamps = [] changes_removed = 0 for timestamp, path in self.data['watching'][uuid]['history'].items(): if not limit_timestamp or (limit_timestamp is not False and int(timestamp) > limit_timestamp): self.unlink_history_file(path) del_timestamps.append(timestamp) changes_removed += 1 if not limit_timestamp: self.data['watching'][uuid]['last_checked'] = 0 self.data['watching'][uuid]['last_changed'] = 0 self.data['watching'][uuid]['previous_md5'] = "" for timestamp in del_timestamps: del self.data['watching'][uuid]['history'][str(timestamp)] # If there was a limitstamp, we need to reset some meta data about the entry # This has to happen after we remove the others from the list if limit_timestamp: newest_key = self.get_newest_history_key(uuid) if newest_key: self.data['watching'][uuid]['last_checked'] = int(newest_key) # @todo should be the original value if it was less than newest key self.data['watching'][uuid]['last_changed'] = int(newest_key) try: with open(self.data['watching'][uuid]['history'][str(newest_key)], "rb") as fp: content = fp.read() self.data['watching'][uuid]['previous_md5'] = hashlib.md5(content).hexdigest() except (FileNotFoundError, IOError): self.data['watching'][uuid]['previous_md5'] = "" pass self.needs_write = True return changes_removed def add_watch(self, url, tag="", extras=None): if extras is None: extras = {} with self.lock: # @todo use a common generic version of this new_uuid = str(uuid_builder.uuid4()) _blank = deepcopy(self.generic_definition) _blank.update({ 'url': url, 'tag': tag }) # Incase these are copied across, assume it's a reference and deepcopy() apply_extras = deepcopy(extras) for k in ['uuid', 'history', 'last_checked', 'last_changed', 'newest_history_key', 'previous_md5', 'viewed']: if k in apply_extras: del apply_extras[k] _blank.update(apply_extras) self.data['watching'][new_uuid] = _blank # Get the directory ready output_path = "{}/{}".format(self.datastore_path, new_uuid) try: mkdir(output_path) except FileExistsError: print(output_path, "already exists.") self.sync_to_json() return new_uuid # Save some text file to the appropriate path and bump the history # result_obj from fetch_site_status.run() def save_history_text(self, watch_uuid, contents): import uuid output_path = "{}/{}".format(self.datastore_path, watch_uuid) # Incase the operator deleted it, check and create. if not os.path.isdir(output_path): mkdir(output_path) fname = "{}/{}.stripped.txt".format(output_path, uuid.uuid4()) with open(fname, 'wb') as f: f.write(contents) f.close() return fname def get_screenshot(self, watch_uuid): output_path = "{}/{}".format(self.datastore_path, watch_uuid) fname = "{}/last-screenshot.png".format(output_path) if path.isfile(fname): return fname return False # Save as PNG, PNG is larger but better for doing visual diff in the future def save_screenshot(self, watch_uuid, screenshot: bytes): output_path = "{}/{}".format(self.datastore_path, watch_uuid) fname = "{}/last-screenshot.png".format(output_path) with open(fname, 'wb') as f: f.write(screenshot) f.close() def sync_to_json(self): logging.info("Saving JSON..") try: data = deepcopy(self.__data) except RuntimeError as e: # Try again in 15 seconds time.sleep(15) logging.error ("! Data changed when writing to JSON, trying again.. 
%s", str(e)) self.sync_to_json() return else: try: # Re #286 - First write to a temp file, then confirm it looks OK and rename it # This is a fairly basic strategy to deal with the case that the file is corrupted, # system was out of memory, out of RAM etc with open(self.json_store_path+".tmp", 'w') as json_file: json.dump(data, json_file, indent=4) os.replace(self.json_store_path+".tmp", self.json_store_path) except Exception as e: logging.error("Error writing JSON!! (Main JSON file save was skipped) : %s", str(e)) self.needs_write = False # Thread runner, this helps with thread/write issues when there are many operations that want to update the JSON # by just running periodically in one thread, according to python, dict updates are threadsafe. def save_datastore(self): while True: if self.stop_thread: print("Shutting down datastore thread") return if self.needs_write: self.sync_to_json() # Once per minute is enough, more and it can cause high CPU usage # better here is to use something like self.app.config.exit.wait(1), but we cant get to 'app' from here for i in range(30): time.sleep(2) if self.stop_thread: break # Go through the datastore path and remove any snapshots that are not mentioned in the index # This usually is not used, but can be handy. def remove_unused_snapshots(self): print ("Removing snapshots from datastore that are not in the index..") index=[] for uuid in self.data['watching']: for id in self.data['watching'][uuid]['history']: index.append(self.data['watching'][uuid]['history'][str(id)]) import pathlib # Only in the sub-directories for item in pathlib.Path(self.datastore_path).rglob("*/*txt"): if not str(item) in index: print ("Removing",item) unlink(item)
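
# A minimal sketch of the "write to a temp file, then os.replace()" strategy
# sync_to_json() uses above, so that a crash or full disk mid-write cannot
# leave a truncated url-watches.json behind.  The path used below is
# illustrative only.
def atomic_json_dump(data, json_store_path):
    import json
    import os

    tmp_path = json_store_path + ".tmp"
    with open(tmp_path, 'w') as f:
        json.dump(data, f, indent=4)
    # os.replace() swaps the file in a single rename, so readers only ever see
    # the old complete file or the new complete file, never a partial write.
    os.replace(tmp_path, json_store_path)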
test_decimal.py
# Copyright (c) 2004 Python Software Foundation. # All rights reserved. # Written by Eric Price <eprice at tjhsst.edu> # and Facundo Batista <facundo at taniquetil.com.ar> # and Raymond Hettinger <python at rcn.com> # and Aahz (aahz at pobox.com) # and Tim Peters """ These are the test cases for the Decimal module. There are two groups of tests, Arithmetic and Behaviour. The former test the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter test the pythonic behaviour according to PEP 327. Cowlishaw's tests can be downloaded from: www2.hursley.ibm.com/decimal/dectest.zip This test module can be called from command line with one parameter (Arithmetic or Behaviour) to test each part, or without parameter to test both parts. If you're working through IDLE, you can import this test module and call test_main() with the corresponding argument. """ from __future__ import division import unittest import glob import os, sys import pickle, copy from decimal import * from test.test_support import TestSkipped, run_unittest, run_doctest, is_resource_enabled import threading import random # Useful Test Constant Signals = getcontext().flags.keys() # Tests are built around these assumed context defaults DefaultContext.prec=9 DefaultContext.rounding=ROUND_HALF_EVEN DefaultContext.traps=dict.fromkeys(Signals, 0) setcontext(DefaultContext) TESTDATADIR = 'decimaltestdata' if __name__ == '__main__': file = sys.argv[0] else: file = __file__ testdir = os.path.dirname(file) or os.curdir dir = testdir + os.sep + TESTDATADIR + os.sep skip_expected = not os.path.isdir(dir) # Make sure it actually raises errors when not expected and caught in flags # Slower, since it runs some things several times. EXTENDEDERRORTEST = False #Map the test cases' error names to the actual errors ErrorNames = {'clamped' : Clamped, 'conversion_syntax' : InvalidOperation, 'division_by_zero' : DivisionByZero, 'division_impossible' : InvalidOperation, 'division_undefined' : InvalidOperation, 'inexact' : Inexact, 'invalid_context' : InvalidOperation, 'invalid_operation' : InvalidOperation, 'overflow' : Overflow, 'rounded' : Rounded, 'subnormal' : Subnormal, 'underflow' : Underflow} def Nonfunction(*args): """Doesn't do anything.""" return None RoundingDict = {'ceiling' : ROUND_CEILING, #Maps test-case names to roundings. 'down' : ROUND_DOWN, 'floor' : ROUND_FLOOR, 'half_down' : ROUND_HALF_DOWN, 'half_even' : ROUND_HALF_EVEN, 'half_up' : ROUND_HALF_UP, 'up' : ROUND_UP} # Name adapter to be able to change the Decimal and Context # interface without changing the test files from Cowlishaw nameAdapter = {'toeng':'to_eng_string', 'tosci':'to_sci_string', 'samequantum':'same_quantum', 'tointegral':'to_integral', 'remaindernear':'remainder_near', 'divideint':'divide_int', 'squareroot':'sqrt', 'apply':'_apply', } class DecimalTest(unittest.TestCase): """Class which tests the Decimal class against the test cases. Changed for unittest. """ def setUp(self): global dir self.context = Context() for key in DefaultContext.traps.keys(): DefaultContext.traps[key] = 1 self.ignore_list = ['#'] # Basically, a # means return NaN InvalidOperation. 
# Different from a sNaN in trim self.ChangeDict = {'precision' : self.change_precision, 'rounding' : self.change_rounding_method, 'maxexponent' : self.change_max_exponent, 'minexponent' : self.change_min_exponent, 'clamp' : self.change_clamp} def tearDown(self): """Cleaning up enviroment.""" # leaving context in original state for key in DefaultContext.traps.keys(): DefaultContext.traps[key] = 0 return def eval_file(self, file): global skip_expected if skip_expected: raise TestSkipped return for line in open(file).xreadlines(): line = line.replace('\r\n', '').replace('\n', '') #print line try: t = self.eval_line(line) except InvalidOperation: print 'Error in test cases:' print line continue except DecimalException, exception: #Exception raised where there shoudn't have been one. self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line) return def eval_line(self, s): if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'): s = (s.split('->')[0] + '->' + s.split('->')[1].split('--')[0]).strip() else: s = s.split('--')[0].strip() for ignore in self.ignore_list: if s.find(ignore) >= 0: #print s.split()[0], 'NotImplemented--', ignore return if not s: return elif ':' in s: return self.eval_directive(s) else: return self.eval_equation(s) def eval_directive(self, s): funct, value = map(lambda x: x.strip().lower(), s.split(':')) if funct == 'rounding': value = RoundingDict[value] else: try: value = int(value) except ValueError: pass funct = self.ChangeDict.get(funct, Nonfunction) funct(value) def eval_equation(self, s): #global DEFAULT_PRECISION #print DEFAULT_PRECISION if not TEST_ALL and random.random() < 0.90: return try: Sides = s.split('->') L = Sides[0].strip().split() id = L[0] # print id, funct = L[1].lower() valstemp = L[2:] L = Sides[1].strip().split() ans = L[0] exceptions = L[1:] except (TypeError, AttributeError, IndexError): raise InvalidOperation def FixQuotes(val): val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote') val = val.replace("'", '').replace('"', '') val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"') return val fname = nameAdapter.get(funct, funct) if fname == 'rescale': return funct = getattr(self.context, fname) vals = [] conglomerate = '' quote = 0 theirexceptions = [ErrorNames[x.lower()] for x in exceptions] for exception in Signals: self.context.traps[exception] = 1 #Catch these bugs... 
for exception in theirexceptions: self.context.traps[exception] = 0 for i, val in enumerate(valstemp): if val.count("'") % 2 == 1: quote = 1 - quote if quote: conglomerate = conglomerate + ' ' + val continue else: val = conglomerate + val conglomerate = '' v = FixQuotes(val) if fname in ('to_sci_string', 'to_eng_string'): if EXTENDEDERRORTEST: for error in theirexceptions: self.context.traps[error] = 1 try: funct(self.context.create_decimal(v)) except error: pass except Signals, e: self.fail("Raised %s in %s when %s disabled" % \ (e, s, error)) else: self.fail("Did not raise %s in %s" % (error, s)) self.context.traps[error] = 0 v = self.context.create_decimal(v) else: v = Decimal(v) vals.append(v) ans = FixQuotes(ans) if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'): for error in theirexceptions: self.context.traps[error] = 1 try: funct(*vals) except error: pass except Signals, e: self.fail("Raised %s in %s when %s disabled" % \ (e, s, error)) else: self.fail("Did not raise %s in %s" % (error, s)) self.context.traps[error] = 0 try: result = str(funct(*vals)) if fname == 'same_quantum': result = str(int(eval(result))) # 'True', 'False' -> '1', '0' except Signals, error: self.fail("Raised %s in %s" % (error, s)) except: #Catch any error long enough to state the test case. print "ERROR:", s raise myexceptions = self.getexceptions() self.context.clear_flags() myexceptions.sort() theirexceptions.sort() self.assertEqual(result, ans, 'Incorrect answer for ' + s + ' -- got ' + result) self.assertEqual(myexceptions, theirexceptions, 'Incorrect flags set in ' + s + ' -- got ' \ + str(myexceptions)) return def getexceptions(self): return [e for e in Signals if self.context.flags[e]] def change_precision(self, prec): self.context.prec = prec def change_rounding_method(self, rounding): self.context.rounding = rounding def change_min_exponent(self, exp): self.context.Emin = exp def change_max_exponent(self, exp): self.context.Emax = exp def change_clamp(self, clamp): self.context._clamp = clamp # Dynamically build custom test definition for each file in the test # directory and add the definitions to the DecimalTest class. This # procedure insures that new files do not get skipped. 
for filename in os.listdir(dir): if '.decTest' not in filename: continue head, tail = filename.split('.') tester = lambda self, f=filename: self.eval_file(dir + f) setattr(DecimalTest, 'test_' + head, tester) del filename, head, tail, tester # The following classes test the behaviour of Decimal according to PEP 327 class DecimalExplicitConstructionTest(unittest.TestCase): '''Unit tests for Explicit Construction cases of Decimal.''' def test_explicit_empty(self): self.assertEqual(Decimal(), Decimal("0")) def test_explicit_from_None(self): self.assertRaises(TypeError, Decimal, None) def test_explicit_from_int(self): #positive d = Decimal(45) self.assertEqual(str(d), '45') #very large positive d = Decimal(500000123) self.assertEqual(str(d), '500000123') #negative d = Decimal(-45) self.assertEqual(str(d), '-45') #zero d = Decimal(0) self.assertEqual(str(d), '0') def test_explicit_from_string(self): #empty self.assertEqual(str(Decimal('')), 'NaN') #int self.assertEqual(str(Decimal('45')), '45') #float self.assertEqual(str(Decimal('45.34')), '45.34') #engineer notation self.assertEqual(str(Decimal('45e2')), '4.5E+3') #just not a number self.assertEqual(str(Decimal('ugly')), 'NaN') def test_explicit_from_tuples(self): #zero d = Decimal( (0, (0,), 0) ) self.assertEqual(str(d), '0') #int d = Decimal( (1, (4, 5), 0) ) self.assertEqual(str(d), '-45') #float d = Decimal( (0, (4, 5, 3, 4), -2) ) self.assertEqual(str(d), '45.34') #weird d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(str(d), '-4.34913534E-17') #wrong number of items self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) ) #bad sign self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) ) #bad exp self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') ) #bad coefficients self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) ) self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) ) def test_explicit_from_Decimal(self): #positive d = Decimal(45) e = Decimal(d) self.assertEqual(str(e), '45') self.assertNotEqual(id(d), id(e)) #very large positive d = Decimal(500000123) e = Decimal(d) self.assertEqual(str(e), '500000123') self.assertNotEqual(id(d), id(e)) #negative d = Decimal(-45) e = Decimal(d) self.assertEqual(str(e), '-45') self.assertNotEqual(id(d), id(e)) #zero d = Decimal(0) e = Decimal(d) self.assertEqual(str(e), '0') self.assertNotEqual(id(d), id(e)) def test_explicit_context_create_decimal(self): nc = copy.copy(getcontext()) nc.prec = 3 # empty d = Decimal() self.assertEqual(str(d), '0') d = nc.create_decimal() self.assertEqual(str(d), '0') # from None self.assertRaises(TypeError, nc.create_decimal, None) # from int d = nc.create_decimal(456) self.failUnless(isinstance(d, Decimal)) self.assertEqual(nc.create_decimal(45678), nc.create_decimal('457E+2')) # from string d = Decimal('456789') self.assertEqual(str(d), '456789') d = nc.create_decimal('456789') self.assertEqual(str(d), '4.57E+5') # from tuples d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(str(d), '-4.34913534E-17') d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(str(d), '-4.35E-17') # from Decimal prevdec = Decimal(500000123) d = Decimal(prevdec) self.assertEqual(str(d), '500000123') d = nc.create_decimal(prevdec) self.assertEqual(str(d), '5.00E+8') class DecimalImplicitConstructionTest(unittest.TestCase): '''Unit tests for Implicit Construction cases of Decimal.''' def test_implicit_from_None(self): self.assertRaises(TypeError, eval, 'Decimal(5) + 
None', globals()) def test_implicit_from_int(self): #normal self.assertEqual(str(Decimal(5) + 45), '50') #exceeding precision self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000)) def test_implicit_from_string(self): self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', globals()) def test_implicit_from_float(self): self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', globals()) def test_implicit_from_Decimal(self): self.assertEqual(Decimal(5) + Decimal(45), Decimal(50)) class DecimalArithmeticOperatorsTest(unittest.TestCase): '''Unit tests for all arithmetic operators, binary and unary.''' def test_addition(self): d1 = Decimal('-11.1') d2 = Decimal('22.2') #two Decimals self.assertEqual(d1+d2, Decimal('11.1')) self.assertEqual(d2+d1, Decimal('11.1')) #with other type, left c = d1 + 5 self.assertEqual(c, Decimal('-6.1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 5 + d1 self.assertEqual(c, Decimal('-6.1')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 += d2 self.assertEqual(d1, Decimal('11.1')) #inline with other type d1 += 5 self.assertEqual(d1, Decimal('16.1')) def test_subtraction(self): d1 = Decimal('-11.1') d2 = Decimal('22.2') #two Decimals self.assertEqual(d1-d2, Decimal('-33.3')) self.assertEqual(d2-d1, Decimal('33.3')) #with other type, left c = d1 - 5 self.assertEqual(c, Decimal('-16.1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 5 - d1 self.assertEqual(c, Decimal('16.1')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 -= d2 self.assertEqual(d1, Decimal('-33.3')) #inline with other type d1 -= 5 self.assertEqual(d1, Decimal('-38.3')) def test_multiplication(self): d1 = Decimal('-5') d2 = Decimal('3') #two Decimals self.assertEqual(d1*d2, Decimal('-15')) self.assertEqual(d2*d1, Decimal('-15')) #with other type, left c = d1 * 5 self.assertEqual(c, Decimal('-25')) self.assertEqual(type(c), type(d1)) #with other type, right c = 5 * d1 self.assertEqual(c, Decimal('-25')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 *= d2 self.assertEqual(d1, Decimal('-15')) #inline with other type d1 *= 5 self.assertEqual(d1, Decimal('-75')) def test_division(self): d1 = Decimal('-5') d2 = Decimal('2') #two Decimals self.assertEqual(d1/d2, Decimal('-2.5')) self.assertEqual(d2/d1, Decimal('-0.4')) #with other type, left c = d1 / 4 self.assertEqual(c, Decimal('-1.25')) self.assertEqual(type(c), type(d1)) #with other type, right c = 4 / d1 self.assertEqual(c, Decimal('-0.8')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 /= d2 self.assertEqual(d1, Decimal('-2.5')) #inline with other type d1 /= 4 self.assertEqual(d1, Decimal('-0.625')) def test_floor_division(self): d1 = Decimal('5') d2 = Decimal('2') #two Decimals self.assertEqual(d1//d2, Decimal('2')) self.assertEqual(d2//d1, Decimal('0')) #with other type, left c = d1 // 4 self.assertEqual(c, Decimal('1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 7 // d1 self.assertEqual(c, Decimal('1')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 //= d2 self.assertEqual(d1, Decimal('2')) #inline with other type d1 //= 2 self.assertEqual(d1, Decimal('1')) def test_powering(self): d1 = Decimal('5') d2 = Decimal('2') #two Decimals self.assertEqual(d1**d2, Decimal('25')) self.assertEqual(d2**d1, Decimal('32')) #with other type, left c = d1 ** 4 self.assertEqual(c, Decimal('625')) self.assertEqual(type(c), type(d1)) #with other type, right c = 7 ** d1 self.assertEqual(c, Decimal('16807')) self.assertEqual(type(c), 
type(d1)) #inline with decimal d1 **= d2 self.assertEqual(d1, Decimal('25')) #inline with other type d1 **= 4 self.assertEqual(d1, Decimal('390625')) def test_module(self): d1 = Decimal('5') d2 = Decimal('2') #two Decimals self.assertEqual(d1%d2, Decimal('1')) self.assertEqual(d2%d1, Decimal('2')) #with other type, left c = d1 % 4 self.assertEqual(c, Decimal('1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 7 % d1 self.assertEqual(c, Decimal('2')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 %= d2 self.assertEqual(d1, Decimal('1')) #inline with other type d1 %= 4 self.assertEqual(d1, Decimal('1')) def test_floor_div_module(self): d1 = Decimal('5') d2 = Decimal('2') #two Decimals (p, q) = divmod(d1, d2) self.assertEqual(p, Decimal('2')) self.assertEqual(q, Decimal('1')) self.assertEqual(type(p), type(d1)) self.assertEqual(type(q), type(d1)) #with other type, left (p, q) = divmod(d1, 4) self.assertEqual(p, Decimal('1')) self.assertEqual(q, Decimal('1')) self.assertEqual(type(p), type(d1)) self.assertEqual(type(q), type(d1)) #with other type, right (p, q) = divmod(7, d1) self.assertEqual(p, Decimal('1')) self.assertEqual(q, Decimal('2')) self.assertEqual(type(p), type(d1)) self.assertEqual(type(q), type(d1)) def test_unary_operators(self): self.assertEqual(+Decimal(45), Decimal(+45)) # + self.assertEqual(-Decimal(45), Decimal(-45)) # - self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs # The following are two functions used to test threading in the next class def thfunc1(cls): d1 = Decimal(1) d3 = Decimal(3) cls.assertEqual(d1/d3, Decimal('0.333333333')) cls.synchro.wait() cls.assertEqual(d1/d3, Decimal('0.333333333')) cls.finish1.set() return def thfunc2(cls): d1 = Decimal(1) d3 = Decimal(3) cls.assertEqual(d1/d3, Decimal('0.333333333')) thiscontext = getcontext() thiscontext.prec = 18 cls.assertEqual(d1/d3, Decimal('0.333333333333333333')) cls.synchro.set() cls.finish2.set() return class DecimalUseOfContextTest(unittest.TestCase): '''Unit tests for Use of Context cases in Decimal.''' import threading # Take care executing this test from IDLE, there's an issue in threading # that hangs IDLE and I couldn't find it def test_threading(self): #Test the "threading isolation" of a Context. 
self.synchro = threading.Event() self.finish1 = threading.Event() self.finish2 = threading.Event() th1 = threading.Thread(target=thfunc1, args=(self,)) th2 = threading.Thread(target=thfunc2, args=(self,)) th1.start() th2.start() self.finish1.wait() self.finish1.wait() return class DecimalUsabilityTest(unittest.TestCase): '''Unit tests for Usability cases of Decimal.''' def test_comparison_operators(self): da = Decimal('23.42') db = Decimal('23.42') dc = Decimal('45') #two Decimals self.failUnless(dc > da) self.failUnless(dc >= da) self.failUnless(da < dc) self.failUnless(da <= dc) self.failUnless(da == db) self.failUnless(da != dc) self.failUnless(da <= db) self.failUnless(da >= db) self.assertEqual(cmp(dc,da), 1) self.assertEqual(cmp(da,dc), -1) self.assertEqual(cmp(da,db), 0) #a Decimal and an int self.failUnless(dc > 23) self.failUnless(23 < dc) self.failUnless(dc == 45) self.assertEqual(cmp(dc,23), 1) self.assertEqual(cmp(23,dc), -1) self.assertEqual(cmp(dc,45), 0) #a Decimal and uncomparable self.assertNotEqual(da, 'ugly') self.assertNotEqual(da, 32.7) self.assertNotEqual(da, object()) self.assertNotEqual(da, object) # sortable a = map(Decimal, xrange(100)) b = a[:] random.shuffle(a) a.sort() self.assertEqual(a, b) def test_copy_and_deepcopy_methods(self): d = Decimal('43.24') c = copy.copy(d) self.assertEqual(id(c), id(d)) dc = copy.deepcopy(d) self.assertEqual(id(dc), id(d)) def test_hash_method(self): #just that it's hashable hash(Decimal(23)) #the same hash that to an int self.assertEqual(hash(Decimal(23)), hash(23)) def test_min_and_max_methods(self): d1 = Decimal('15.32') d2 = Decimal('28.5') l1 = 15 l2 = 28 #between Decimals self.failUnless(min(d1,d2) is d1) self.failUnless(min(d2,d1) is d1) self.failUnless(max(d1,d2) is d2) self.failUnless(max(d2,d1) is d2) #between Decimal and long self.failUnless(min(d1,l2) is d1) self.failUnless(min(l2,d1) is d1) self.failUnless(max(l1,d2) is d2) self.failUnless(max(d2,l1) is d2) def test_as_nonzero(self): #as false self.failIf(Decimal(0)) #as true self.failUnless(Decimal('0.372')) def test_tostring_methods(self): #Test str and repr methods. d = Decimal('15.32') self.assertEqual(str(d), '15.32') # str self.assertEqual(repr(d), 'Decimal("15.32")') # repr def test_tonum_methods(self): #Test float, int and long methods. d1 = Decimal('66') d2 = Decimal('15.32') #int self.assertEqual(int(d1), 66) self.assertEqual(int(d2), 15) #long self.assertEqual(long(d1), 66) self.assertEqual(long(d2), 15) #float self.assertEqual(float(d1), 66) self.assertEqual(float(d2), 15.32) def test_eval_round_trip(self): #with zero d = Decimal( (0, (0,), 0) ) self.assertEqual(d, eval(repr(d))) #int d = Decimal( (1, (4, 5), 0) ) self.assertEqual(d, eval(repr(d))) #float d = Decimal( (0, (4, 5, 3, 4), -2) ) self.assertEqual(d, eval(repr(d))) #weird d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(d, eval(repr(d))) def test_as_tuple(self): #with zero d = Decimal(0) self.assertEqual(d.as_tuple(), (0, (0,), 0) ) #int d = Decimal(-45) self.assertEqual(d.as_tuple(), (1, (4, 5), 0) ) #complicated string d = Decimal("-4.34913534E-17") self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) #inf d = Decimal("Infinity") self.assertEqual(d.as_tuple(), (0, (0,), 'F') ) def test_immutability_operations(self): # Do operations and check that it didn't change change internal objects. d1 = Decimal('-25e55') b1 = Decimal('-25e55') d2 = Decimal('33e-33') b2 = Decimal('33e-33') def checkSameDec(operation, useOther=False): if useOther: eval("d1." 
+ operation + "(d2)") self.assertEqual(d1._sign, b1._sign) self.assertEqual(d1._int, b1._int) self.assertEqual(d1._exp, b1._exp) self.assertEqual(d2._sign, b2._sign) self.assertEqual(d2._int, b2._int) self.assertEqual(d2._exp, b2._exp) else: eval("d1." + operation + "()") self.assertEqual(d1._sign, b1._sign) self.assertEqual(d1._int, b1._int) self.assertEqual(d1._exp, b1._exp) return Decimal(d1) self.assertEqual(d1._sign, b1._sign) self.assertEqual(d1._int, b1._int) self.assertEqual(d1._exp, b1._exp) checkSameDec("__abs__") checkSameDec("__add__", True) checkSameDec("__div__", True) checkSameDec("__divmod__", True) checkSameDec("__cmp__", True) checkSameDec("__float__") checkSameDec("__floordiv__", True) checkSameDec("__hash__") checkSameDec("__int__") checkSameDec("__long__") checkSameDec("__mod__", True) checkSameDec("__mul__", True) checkSameDec("__neg__") checkSameDec("__nonzero__") checkSameDec("__pos__") checkSameDec("__pow__", True) checkSameDec("__radd__", True) checkSameDec("__rdiv__", True) checkSameDec("__rdivmod__", True) checkSameDec("__repr__") checkSameDec("__rfloordiv__", True) checkSameDec("__rmod__", True) checkSameDec("__rmul__", True) checkSameDec("__rpow__", True) checkSameDec("__rsub__", True) checkSameDec("__str__") checkSameDec("__sub__", True) checkSameDec("__truediv__", True) checkSameDec("adjusted") checkSameDec("as_tuple") checkSameDec("compare", True) checkSameDec("max", True) checkSameDec("min", True) checkSameDec("normalize") checkSameDec("quantize", True) checkSameDec("remainder_near", True) checkSameDec("same_quantum", True) checkSameDec("sqrt") checkSameDec("to_eng_string") checkSameDec("to_integral") class DecimalPythonAPItests(unittest.TestCase): def test_pickle(self): d = Decimal('-3.141590000') p = pickle.dumps(d) e = pickle.loads(p) self.assertEqual(d, e) def test_int(self): for x in range(-250, 250): s = '%0.2f' % (x / 100.0) # should work the same as for floats self.assertEqual(int(Decimal(s)), int(float(s))) # should work the same as to_integral in the ROUND_DOWN mode d = Decimal(s) r = d.to_integral(ROUND_DOWN) self.assertEqual(Decimal(int(d)), r) class ContextAPItests(unittest.TestCase): def test_pickle(self): c = Context() e = pickle.loads(pickle.dumps(c)) for k in vars(c): v1 = vars(c)[k] v2 = vars(e)[k] self.assertEqual(v1, v2) def test_equality_with_other_types(self): self.assert_(Decimal(10) in ['a', 1.0, Decimal(10), (1,2), {}]) self.assert_(Decimal(10) not in ['a', 1.0, (1,2), {}]) def test_copy(self): # All copies should be deep c = Context() d = c.copy() self.assertNotEqual(id(c), id(d)) self.assertNotEqual(id(c.flags), id(d.flags)) self.assertNotEqual(id(c.traps), id(d.traps)) def test_main(arith=False, verbose=None): """ Execute the tests. Runs all arithmetic tests if arith is True or if the "decimal" resource is enabled in regrtest.py """ global TEST_ALL TEST_ALL = arith or is_resource_enabled('decimal') test_classes = [ DecimalExplicitConstructionTest, DecimalImplicitConstructionTest, DecimalArithmeticOperatorsTest, DecimalUseOfContextTest, DecimalUsabilityTest, DecimalPythonAPItests, ContextAPItests, DecimalTest, ] run_unittest(*test_classes) import decimal as DecimalModule run_doctest(DecimalModule, verbose) if __name__ == '__main__': # Calling with no arguments runs all tests. # Calling with "Skip" will skip over 90% of the arithmetic tests. 
if len(sys.argv) == 1: test_main(arith=True, verbose=True) elif len(sys.argv) == 2: arith = sys.argv[1].lower() != 'skip' test_main(arith=arith, verbose=True) else: raise ValueError("test called with wrong arguments, use test_Decimal [Skip]")
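
# A short sketch of the context behaviour DecimalUseOfContextTest exercises:
# precision belongs to the active context, so raising it inside a
# localcontext() block (or in another thread's context) leaves arithmetic done
# under the default context untouched.  This helper assumes a decimal module
# that provides localcontext() and is illustrative only; the tests above do
# not call it.
def _context_isolation_sketch():
    from decimal import Decimal, getcontext, localcontext

    getcontext().prec = 9
    print(Decimal(1) / Decimal(3))      # 0.333333333

    with localcontext() as ctx:
        ctx.prec = 18
        print(Decimal(1) / Decimal(3))  # 0.333333333333333333

    print(Decimal(1) / Decimal(3))      # back to 9 significant digits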
utils.py
import multiprocessing from multiprocessing.queues import Queue as BaseQueue class Queue(BaseQueue): """ Multiprocessing queue that has a stable qsize value, and supports these methods for OSX. Also taken from https://github.com/vterron/lemon/commit/9ca6b4b1212228dbd4f69b88aaf88b12952d7d6f """ def __init__(self, *args, **kwargs): self._internal_size = multiprocessing.Value("i", 0) if "ctx" not in kwargs: kwargs["ctx"] = multiprocessing.get_context() super().__init__(*args, **kwargs) def put(self, *args, **kwargs): with self._internal_size.get_lock(): self._internal_size.value += 1 super().put(*args, **kwargs) def get(self, *args, **kwargs): res = super().get(*args, **kwargs) # Ensure the size only decrements once the element has been gained. with self._internal_size.get_lock(): self._internal_size.value -= 1 return res def qsize(self): return self._internal_size.value def empty(self): return not self.qsize() def recursive_merge(dict1, dict2): # Recursively merge two dictionaries into dict1. for key in dict2: if key not in dict1 or type(dict1[key]) != type(dict2[key]): dict1[key] = dict2[key] elif isinstance(dict2[key], dict): # Recursively merge. recursive_merge(dict1[key], dict2[key]) else: dict1[key] = dict2[key] def _latest_version(q, i): from ev3sim import __version__ import requests q._internal_size = i try: r = requests.get("https://pypi.org/pypi/ev3sim/json") j = r.json() v = j["info"]["version"] q.put(v) except: q.put(__version__) def canUpdate(): from multiprocessing import Process from ev3sim import __version__ Q = Queue() process = Process(target=_latest_version, args=(Q, Q._internal_size)) process.start() process.join(2) if process.is_alive(): process.terminate() return False else: def transform_to_list(version_string): s = version_string.split(".") for x in range(len(s)): try: s[x] = int(s[x]) except: # This ensures that an ordering can be made. # If the version number is text, we can assume this is a development version, # and so updates that match previous numbers but not the last don't need to be updated. s[x] = 9999 return s online_version = transform_to_list(Q.get()) local_version = transform_to_list(__version__) return online_version > local_version APP_VSCODE = "VSCODE" APP_MINDSTORMS = "MINDSTORMS" APP_EXPLORER = "EXPLORER" def open_file(filepath, pref_app, folder=""): import os import platform import subprocess if pref_app != APP_EXPLORER: # Try opening with vs or mindstorms if platform.system() == "Windows": paths = [ os.path.join(os.environ["ALLUSERSPROFILE"], "Microsoft", "Windows", "Start Menu", "Programs"), os.path.join(os.environ["APPDATA"], "Microsoft", "Windows", "Start Menu", "Programs"), ] for path in paths: if os.path.exists(path): for fd in os.listdir(path): if pref_app == APP_VSCODE and "Visual Studio Code" in fd: f = os.path.join(path, fd) for file in os.listdir(f): if folder: subprocess.run( f'start "code" "{os.path.join(f, file)}" ""{folder}" --goto "{filepath}""', shell=True, ) else: subprocess.run( f'start "code" "{os.path.join(f, file)}" ""{filepath}""', shell=True, ) return if pref_app == APP_MINDSTORMS and "MINDSTORMS" in fd: f = os.path.join(path, fd) for file in os.listdir(f): subprocess.run( f'start "{os.path.join(f, file)}" "{filepath}"', shell=True, ) return if platform.system() == "Windows": subprocess.Popen(["explorer", "/select,", filepath]) elif platform.system() == "Darwin": subprocess.Popen(["open", filepath]) else: subprocess.Popen(["xdg-open", filepath]) return
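
# A hedged sketch of the ordering trick canUpdate() relies on: version strings
# are split into integer components so that list comparison matches semantic
# ordering ("2.10.0" > "2.9.1"), and non-numeric parts are treated as very
# large, mirroring transform_to_list() above so a local development build is
# never prompted to "update" to its own release.
def version_key(version_string):
    parts = []
    for piece in version_string.split("."):
        try:
            parts.append(int(piece))
        except ValueError:
            # Non-numeric piece: assume a development version, sort it high.
            parts.append(9999)
    return parts


assert version_key("2.10.0") > version_key("2.9.1")
assert version_key("1.0.dev") > version_key("1.0.3")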
labels.py
import hashlib import requests import threading import json import sys import traceback import aes import base64 import uwallet from uwallet.plugins import BasePlugin, hook from uwallet.i18n import _ class LabelsPlugin(BasePlugin): def __init__(self, parent, config, name): BasePlugin.__init__(self, parent, config, name) self.target_host = 'sync.bytesized-hosting.com:9090' self.wallets = {} def encode(self, wallet, msg): password, iv, wallet_id = self.wallets[wallet] encrypted = uwallet.bitcoin.aes_encrypt_with_iv(password, iv, msg.encode('utf8')) return base64.b64encode(encrypted) def decode(self, wallet, message): password, iv, wallet_id = self.wallets[wallet] decoded = base64.b64decode(message) decrypted = uwallet.bitcoin.aes_decrypt_with_iv(password, iv, decoded) return decrypted.decode('utf8') def get_nonce(self, wallet): # nonce is the nonce to be used with the next change nonce = wallet.storage.get('wallet_nonce') if nonce is None: nonce = 1 self.set_nonce(wallet, nonce) return nonce def set_nonce(self, wallet, nonce): self.print_error("set", wallet.basename(), "nonce to", nonce) wallet.storage.put("wallet_nonce", nonce) @hook def set_label(self, wallet, item, label): if not wallet in self.wallets: return nonce = self.get_nonce(wallet) wallet_id = self.wallets[wallet][2] bundle = {"walletId": wallet_id, "walletNonce": nonce, "externalId": self.encode(wallet, item), "encryptedLabel": self.encode(wallet, label)} t = threading.Thread(target=self.do_request, args=["POST", "/label", False, bundle]) t.setDaemon(True) t.start() # Caller will write the wallet self.set_nonce(wallet, nonce + 1) def do_request(self, method, url = "/labels", is_batch=False, data=None): url = 'https://' + self.target_host + url kwargs = {'headers': {}} if method == 'GET' and data: kwargs['params'] = data elif method == 'POST' and data: kwargs['data'] = json.dumps(data) kwargs['headers']['Content-Type'] = 'application/json' response = requests.request(method, url, **kwargs) if response.status_code != 200: raise BaseException(response.status_code, response.text) response = response.json() if "error" in response: raise BaseException(response["error"]) return response def push_thread(self, wallet): wallet_id = self.wallets[wallet][2] bundle = {"labels": [], "walletId": wallet_id, "walletNonce": self.get_nonce(wallet)} for key, value in wallet.labels.iteritems(): try: encoded_key = self.encode(wallet, key) encoded_value = self.encode(wallet, value) except: self.print_error('cannot encode', repr(key), repr(value)) continue bundle["labels"].append({'encryptedLabel': encoded_value, 'externalId': encoded_key}) self.do_request("POST", "/labels", True, bundle) def pull_thread(self, wallet, force): wallet_id = self.wallets[wallet][2] nonce = 1 if force else self.get_nonce(wallet) - 1 self.print_error("asking for labels since nonce", nonce) try: response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) )) if response["labels"] is None: self.print_error('no new labels') return result = {} for label in response["labels"]: try: key = self.decode(wallet, label["externalId"]) value = self.decode(wallet, label["encryptedLabel"]) except: continue try: json.dumps(key) json.dumps(value) except: self.print_error('error: no json', key) continue result[key] = value for key, value in result.items(): if force or not wallet.labels.get(key): wallet.labels[key] = value self.print_error("received %d labels" % len(response)) # do not write to disk because we're in a daemon thread wallet.storage.put('labels', 
wallet.labels) self.set_nonce(wallet, response["nonce"] + 1) self.on_pulled(wallet) except Exception as e: traceback.print_exc(file=sys.stderr) self.print_error("could not retrieve labels") def start_wallet(self, wallet): nonce = self.get_nonce(wallet) self.print_error("wallet", wallet.basename(), "nonce is", nonce) mpk = wallet.get_fingerprint() if not mpk: return password = hashlib.sha1(mpk).digest().encode('hex')[:32] iv = hashlib.sha256(password).digest()[:16] wallet_id = hashlib.sha256(mpk).digest().encode('hex') self.wallets[wallet] = (password, iv, wallet_id) # If there is an auth token we can try to actually start syncing t = threading.Thread(target=self.pull_thread, args=(wallet, False)) t.setDaemon(True) t.start() def stop_wallet(self, wallet): self.wallets.pop(wallet, None)
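
# A minimal sketch of the request shape do_request() builds above: a JSON POST
# with an explicit Content-Type header that fails loudly on a non-200 status
# or an "error" field in the body.  The host, path and payload passed in are
# placeholders, not the real sync server or label bundle.
def post_json(host, path, payload):
    import json
    import requests

    response = requests.request(
        "POST",
        "https://" + host + path,
        data=json.dumps(payload),
        headers={"Content-Type": "application/json"},
    )
    if response.status_code != 200:
        raise RuntimeError((response.status_code, response.text))
    body = response.json()
    if "error" in body:
        raise RuntimeError(body["error"])
    return body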
cnn_util_test.py
# coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tf_cnn_benchmarks.cnn_util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading import time import tensorflow.compat.v1 as tf from cnn_quantization.tf_cnn_benchmarks import cnn_util class CnnUtilBarrierTest(tf.test.TestCase): def testBarrier(self): num_tasks = 20 num_waits = 4 barrier = cnn_util.Barrier(num_tasks) threads = [] sync_matrix = [] for i in range(num_tasks): sync_times = [0] * num_waits thread = threading.Thread( target=self._run_task, args=(barrier, sync_times)) thread.start() threads.append(thread) sync_matrix.append(sync_times) for thread in threads: thread.join() for wait_index in range(num_waits - 1): # Max of times at iteration i < min of times at iteration i + 1 self.assertLessEqual( max([sync_matrix[i][wait_index] for i in range(num_tasks)]), min([sync_matrix[i][wait_index + 1] for i in range(num_tasks)])) def _run_task(self, barrier, sync_times): for wait_index in range(len(sync_times)): sync_times[wait_index] = time.time() barrier.wait() def testBarrierAbort(self): num_tasks = 2 num_waits = 1 sync_times = [0] * num_waits barrier = cnn_util.Barrier(num_tasks) thread = threading.Thread( target=self._run_task, args=(barrier, sync_times)) thread.start() barrier.abort() # thread won't be blocked by done barrier. thread.join() class ImageProducerTest(tf.test.TestCase): def _slow_tensorflow_op(self): """Returns a TensorFlow op that takes approximately 0.1s to complete.""" def slow_func(v): time.sleep(0.1) return v return tf.py_func(slow_func, [tf.constant(0.)], tf.float32).op def _test_image_producer(self, batch_group_size, put_slower_than_get): # We use the variable x to simulate a staging area of images. x represents # the number of batches in the staging area. x = tf.Variable(0, dtype=tf.int32) if put_slower_than_get: put_dep = self._slow_tensorflow_op() get_dep = tf.no_op() else: put_dep = tf.no_op() get_dep = self._slow_tensorflow_op() with tf.control_dependencies([put_dep]): put_op = x.assign_add(batch_group_size, use_locking=True) with tf.control_dependencies([get_dep]): get_op = x.assign_sub(1, use_locking=True) with self.test_session() as sess: sess.run(tf.variables_initializer([x])) image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size, use_python32_barrier=False) image_producer.start() for _ in range(5 * batch_group_size): sess.run(get_op) # We assert x is nonnegative, to ensure image_producer never causes # an unstage op to block. We assert x is at most 2 * batch_group_size, # to ensure it doesn't use too much memory by storing too many batches # in the staging area. 
        self.assertGreaterEqual(sess.run(x), 0)
        self.assertLessEqual(sess.run(x), 2 * batch_group_size)
        image_producer.notify_image_consumption()
      self.assertGreaterEqual(sess.run(x), 0)
      self.assertLessEqual(sess.run(x), 2 * batch_group_size)

      image_producer.done()
      time.sleep(0.1)
      self.assertGreaterEqual(sess.run(x), 0)
      self.assertLessEqual(sess.run(x), 2 * batch_group_size)

  def test_image_producer(self):
    self._test_image_producer(1, False)
    self._test_image_producer(1, True)
    self._test_image_producer(2, False)
    self._test_image_producer(2, True)
    self._test_image_producer(3, False)
    self._test_image_producer(3, True)
    self._test_image_producer(8, False)
    self._test_image_producer(8, True)


if __name__ == '__main__':
  tf.test.main()
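
# An analogous sketch using the standard-library threading.Barrier, since
# cnn_util.Barrier is project-specific: no thread can start round i + 1 before
# every thread has recorded its timestamp for round i, which is exactly the
# ordering testBarrier asserts on sync_matrix.  This helper is illustrative
# and is not invoked by the tests above.
def _barrier_sketch(num_tasks=4, num_rounds=3):
  barrier = threading.Barrier(num_tasks)
  sync_matrix = [[0.0] * num_rounds for _ in range(num_tasks)]

  def task(times):
    for i in range(num_rounds):
      times[i] = time.time()
      barrier.wait()  # released only once all tasks reach this round

  threads = [threading.Thread(target=task, args=(row,)) for row in sync_matrix]
  for t in threads:
    t.start()
  for t in threads:
    t.join()

  for i in range(num_rounds - 1):
    assert (max(row[i] for row in sync_matrix)
            <= min(row[i + 1] for row in sync_matrix))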
call_methylation.py
#!/usr/bin/env python """Takes alignments from signalAlign and calls methylation status""" from __future__ import print_function, division import sys sys.path.append("../") from argparse import ArgumentParser from alignmentAnalysisLib import CallMethylation from signalAlignLib import parse_substitution_file, degenerate_enum from variantCallingLib import get_alignments_labels_and_mask, get_reference_sequence from multiprocessing import Process, current_process, Manager def parse_args(): parser = ArgumentParser(description=__doc__) # query files parser.add_argument('--input', '-i', action='store', dest='in_files', required=False, type=str, default=None, help="files, with file-suffix") parser.add_argument('--ref', '-r', required=False, action='store', type=str, dest='ref', default=None, help="path to fasta reference file") parser.add_argument('--positions', '-p', required=False, action='store', type=str, dest='positions', help='positions file') parser.add_argument('--degenerate', '-x', action='store', dest='degenerate', default="variant", help="Specify degenerate nucleotide options: " "variant -> {ACGT}, twoWay -> {CE} threeWay -> {CEO}") parser.add_argument('--kmer_length', '-k', action='store', dest='kmer_length', required=True, type=int, help="Kmer length of model used") parser.add_argument('--threshold', '-t', action='store', dest='threshold', default=0.0, type=float, help='Minimum threshold aligned pairs to consider') parser.add_argument('-n', required=False, action='store', type=int, dest='n', default=100, help='Max number of alignments from each category to look at') parser.add_argument('--jobs', '-j', action='store', dest='nb_jobs', required=False, default=4, type=int, help="number of jobs to run concurrently") parser.add_argument('--out', '-o', action='store', type=str, required=True, dest='out') return parser.parse_args() def run_methyl_caller(work_queue, done_queue): try: for f in iter(work_queue.get, 'STOP'): c = CallMethylation(**f) c.write() except Exception, e: done_queue.put("%s failed with %s" % (current_process().name, e.message)) def main(args): args = parse_args() if args.ref is not None: reference_sequence = get_reference_sequence(args.ref) else: reference_sequence = None alns, forward_mask = get_alignments_labels_and_mask(args.in_files, args.n) out_file = args.out if args.positions is not None: positions = {} f, b = parse_substitution_file(args.positions) positions['forward'] = f[1] positions['backward'] = b[1] else: assert reference_sequence is not None, "Need to provide reference sequence if not providing positions" positions = None workers = args.nb_jobs work_queue = Manager().Queue() done_queue = Manager().Queue() jobs = [] for aln, forward_bool in zip(alns, forward_mask): call_methyl_args = { "sequence": reference_sequence, "alignment_file": aln, "out_file": out_file, "positions": positions, "degenerate_type": degenerate_enum(args.degenerate), "threshold": args.threshold, "kmer_length": args.kmer_length, } #c = CallMethylation(**call_methyl_args) #c.write() work_queue.put(call_methyl_args) for w in xrange(workers): p = Process(target=run_methyl_caller, args=(work_queue, done_queue)) p.start() jobs.append(p) work_queue.put('STOP') for p in jobs: p.join() done_queue.put('STOP') if __name__ == "__main__": sys.exit(main(sys.argv))
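
# A compact sketch of the work-queue pattern main() uses above: jobs go onto a
# Manager queue and each worker process consumes until it reads a 'STOP'
# sentinel.  Here _square_worker stands in for CallMethylation, and one
# sentinel is enqueued per worker so every process terminates cleanly.
from multiprocessing import Manager as _Manager


def _square_worker(work_queue, done_queue):
    for item in iter(work_queue.get, 'STOP'):
        done_queue.put((current_process().name, item * item))


def _work_queue_sketch(n_workers=4, n_jobs=10):
    work_queue = _Manager().Queue()
    done_queue = _Manager().Queue()
    for job in range(n_jobs):
        work_queue.put(job)

    workers = []
    for _ in range(n_workers):
        work_queue.put('STOP')
        p = Process(target=_square_worker, args=(work_queue, done_queue))
        p.start()
        workers.append(p)
    for p in workers:
        p.join()

    results = []
    while not done_queue.empty():
        results.append(done_queue.get())
    return results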
base.py
from __future__ import annotations import logging import threading import typing from concurrent.futures import Future from bioimageio.core.prediction_pipeline import PredictionPipeline from tiktorch.configkeys import TRAINING, VALIDATION from tiktorch.server.session import types from tiktorch.server.session.backend import commands, supervisor from tiktorch.tiktypes import TikTensorBatch logger = logging.getLogger(__name__) class SessionBackend: def __init__(self, pipeline: PredictionPipeline): self._supervisor = supervisor.Supervisor(pipeline) self._supervisor_thread = threading.Thread(target=self._supervisor.run, name="ModelThread") self._supervisor_thread.start() def update_dataset(self, name: str, *, data: TikTensorBatch, labels: TikTensorBatch) -> None: assert name in (TRAINING, VALIDATION), f"{name} not in ({TRAINING}, {VALIDATION})" update_cmd = commands.UpdateDatasetCmd(name, raw_data=data, labels=labels) self._supervisor.send_command(update_cmd) def set_max_num_iterations(self, num: int) -> None: self._supervisor.send_command(commands.SetMaxNumIterations(num)) def forward(self, input_tensors): res = Future() self._supervisor.send_command(commands.ForwardPass(res, input_tensors)) return res def shutdown(self) -> None: logger.debug("Shutting down...") stop_cmd = commands.StopCmd() self._supervisor.send_command(stop_cmd.awaitable) stop_cmd.awaitable.wait() self._supervisor_thread.join() logger.debug("Shutdown complete") def resume_training(self) -> None: resume_cmd = commands.ResumeCmd() self._supervisor.send_command(resume_cmd.awaitable) resume_cmd.awaitable.wait() def pause_training(self) -> None: self._supervisor.send_command(commands.PauseCmd()) def get_idle(self) -> bool: return self._supervisor.state == types.State.Paused def on_idle(self, callback: typing.Callable[[], None]) -> None: self._supervisor.on_idle(callback)
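# Hedged usage sketch (not part of base.py): the intended call sequence for
# SessionBackend, assuming a bioimageio.core PredictionPipeline has already been
# constructed elsewhere. The variable names and the timeout are placeholders,
# not part of the tiktorch API.
def _example_session_roundtrip(pipeline, input_tensors):
    backend = SessionBackend(pipeline)        # starts the supervisor thread
    try:
        fut = backend.forward(input_tensors)  # returns a concurrent.futures.Future
        result = fut.result(timeout=60)       # block until the forward pass completes
    finally:
        backend.shutdown()                    # stops the supervisor and joins its thread
    return result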
util.py
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import functools import itertools import os import platform import re import sys import threading import traceback import types try: from collections.abc import Callable except AttributeError: from collections import Callable from py4j.clientserver import ClientServer __all__ = [] # type: ignore def print_exec(stream): ei = sys.exc_info() traceback.print_exception(ei[0], ei[1], ei[2], None, stream) class VersionUtils(object): """ Provides utility method to determine Spark versions with given input string. """ @staticmethod def majorMinorVersion(sparkVersion: str): """ Given a Spark version string, return the (major version number, minor version number). E.g., for 2.0.1-SNAPSHOT, return (2, 0). Examples -------- >>> sparkVersion = "2.4.0" >>> VersionUtils.majorMinorVersion(sparkVersion) (2, 4) >>> sparkVersion = "2.3.0-SNAPSHOT" >>> VersionUtils.majorMinorVersion(sparkVersion) (2, 3) """ m = re.search(r'^(\d+)\.(\d+)(\..*)?$', sparkVersion) if m is not None: return (int(m.group(1)), int(m.group(2))) else: raise ValueError("Spark tried to parse '%s' as a Spark" % sparkVersion + " version string, but it could not find the major and minor" + " version numbers.") def fail_on_stopiteration(f): """ Wraps the input function to fail on 'StopIteration' by raising a 'RuntimeError' prevents silent loss of data when 'f' is used in a for loop in Spark code """ def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except StopIteration as exc: raise RuntimeError( "Caught StopIteration thrown from user's code; failing the task", exc ) return wrapper def walk_tb(tb): while tb is not None: yield tb tb = tb.tb_next def try_simplify_traceback(tb): """ Simplify the traceback. It removes the tracebacks in the current package, and only shows the traceback that is related to the thirdparty and user-specified codes. Returns ------- TracebackType or None Simplified traceback instance. It returns None if it fails to simplify. Notes ----- This keeps the tracebacks once it sees they are from a different file even though the following tracebacks are from the current package. Examples -------- >>> import importlib >>> import sys >>> import traceback >>> import tempfile >>> with tempfile.TemporaryDirectory() as tmp_dir: ... with open("%s/dummy_module.py" % tmp_dir, "w") as f: ... _ = f.write( ... 'def raise_stop_iteration():\\n' ... ' raise StopIteration()\\n\\n' ... 'def simple_wrapper(f):\\n' ... ' def wrapper(*a, **k):\\n' ... ' return f(*a, **k)\\n' ... ' return wrapper\\n') ... f.flush() ... spec = importlib.util.spec_from_file_location( ... "dummy_module", "%s/dummy_module.py" % tmp_dir) ... dummy_module = importlib.util.module_from_spec(spec) ... 
spec.loader.exec_module(dummy_module) >>> def skip_doctest_traceback(tb): ... import pyspark ... root = os.path.dirname(pyspark.__file__) ... pairs = zip(walk_tb(tb), traceback.extract_tb(tb)) ... for cur_tb, cur_frame in pairs: ... if cur_frame.filename.startswith(root): ... return cur_tb Regular exceptions should show the file name of the current package as below. >>> exc_info = None >>> try: ... fail_on_stopiteration(dummy_module.raise_stop_iteration)() ... except Exception as e: ... tb = sys.exc_info()[-1] ... e.__cause__ = None ... exc_info = "".join( ... traceback.format_exception(type(e), e, tb)) >>> print(exc_info) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS Traceback (most recent call last): File ... ... File "/.../pyspark/util.py", line ... ... RuntimeError: ... >>> "pyspark/util.py" in exc_info True If the traceback is simplified with this method, it hides the current package file name: >>> exc_info = None >>> try: ... fail_on_stopiteration(dummy_module.raise_stop_iteration)() ... except Exception as e: ... tb = try_simplify_traceback(sys.exc_info()[-1]) ... e.__cause__ = None ... exc_info = "".join( ... traceback.format_exception( ... type(e), e, try_simplify_traceback(skip_doctest_traceback(tb)))) >>> print(exc_info) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS RuntimeError: ... >>> "pyspark/util.py" in exc_info False In the case below, the traceback contains the current package in the middle. In this case, it just hides the top occurrence only. >>> exc_info = None >>> try: ... fail_on_stopiteration(dummy_module.simple_wrapper( ... fail_on_stopiteration(dummy_module.raise_stop_iteration)))() ... except Exception as e: ... tb = sys.exc_info()[-1] ... e.__cause__ = None ... exc_info_a = "".join( ... traceback.format_exception(type(e), e, tb)) ... exc_info_b = "".join( ... traceback.format_exception( ... type(e), e, try_simplify_traceback(skip_doctest_traceback(tb)))) >>> exc_info_a.count("pyspark/util.py") 2 >>> exc_info_b.count("pyspark/util.py") 1 """ if "pypy" in platform.python_implementation().lower(): # Traceback modification is not supported with PyPy in PySpark. return None if sys.version_info[:2] < (3, 7): # Traceback creation is not supported Python < 3.7. # See https://bugs.python.org/issue30579. return None import pyspark root = os.path.dirname(pyspark.__file__) tb_next = None new_tb = None pairs = zip(walk_tb(tb), traceback.extract_tb(tb)) last_seen = [] for cur_tb, cur_frame in pairs: if not cur_frame.filename.startswith(root): # Filter the stacktrace from the PySpark source itself. last_seen = [(cur_tb, cur_frame)] break for cur_tb, cur_frame in reversed(list(itertools.chain(last_seen, pairs))): # Once we have seen the file names outside, don't skip. new_tb = types.TracebackType( tb_next=tb_next, tb_frame=cur_tb.tb_frame, tb_lasti=cur_tb.tb_frame.f_lasti, tb_lineno=cur_tb.tb_frame.f_lineno) tb_next = new_tb return new_tb def _print_missing_jar(lib_name, pkg_name, jar_name, spark_version): print(""" ________________________________________________________________________________________________ Spark %(lib_name)s libraries not found in class path. Try one of the following. 1. Include the %(lib_name)s library and its dependencies with in the spark-submit command as $ bin/spark-submit --packages org.apache.spark:spark-%(pkg_name)s:%(spark_version)s ... 2. Download the JAR of the artifact from Maven Central http://search.maven.org/, Group Id = org.apache.spark, Artifact Id = spark-%(jar_name)s, Version = %(spark_version)s. 
Then, include the jar in the spark-submit command as $ bin/spark-submit --jars <spark-%(jar_name)s.jar> ... ________________________________________________________________________________________________ """ % { "lib_name": lib_name, "pkg_name": pkg_name, "jar_name": jar_name, "spark_version": spark_version }) def _parse_memory(s): """ Parse a memory string in the format supported by Java (e.g. 1g, 200m) and return the value in MiB Examples -------- >>> _parse_memory("256m") 256 >>> _parse_memory("2g") 2048 """ units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024} if s[-1].lower() not in units: raise ValueError("invalid format: " + s) return int(float(s[:-1]) * units[s[-1].lower()]) def inheritable_thread_target(f: Callable) -> Callable: """ Return thread target wrapper which is recommended to be used in PySpark when the pinned thread mode is enabled. The wrapper function, before calling original thread target, it inherits the inheritable properties specific to JVM thread such as ``InheritableThreadLocal``. Also, note that pinned thread mode does not close the connection from Python to JVM when the thread is finished in the Python side. With this wrapper, Python garbage-collects the Python thread instance and also closes the connection which finishes JVM thread correctly. When the pinned thread mode is off, it return the original ``f``. .. versionadded:: 3.2.0 Parameters ---------- f : function the original thread target. Notes ----- This API is experimental. It is important to know that it captures the local properties when you decorate it whereas :class:`InheritableThread` captures when the thread is started. Therefore, it is encouraged to decorate it when you want to capture the local properties. For example, the local properties from the current Spark context is captured when you define a function here instead of the invocation: >>> @inheritable_thread_target ... def target_func(): ... pass # your codes. If you have any updates on local properties afterwards, it would not be reflected to the Spark context in ``target_func()``. The example below mimics the behavior of JVM threads as close as possible: >>> Thread(target=inheritable_thread_target(target_func)).start() # doctest: +SKIP """ from pyspark import SparkContext if isinstance(SparkContext._gateway, ClientServer): # type: ignore[attr-defined] # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on. # NOTICE the internal difference vs `InheritableThread`. `InheritableThread` # copies local properties when the thread starts but `inheritable_thread_target` # copies when the function is wrapped. properties = (SparkContext ._active_spark_context # type: ignore[attr-defined] ._jsc.sc() .getLocalProperties().clone()) @functools.wraps(f) def wrapped(*args, **kwargs): try: # Set local properties in child thread. SparkContext._active_spark_context._jsc.sc().setLocalProperties(properties) return f(*args, **kwargs) finally: InheritableThread._clean_py4j_conn_for_current_thread() return wrapped else: return f class InheritableThread(threading.Thread): """ Thread that is recommended to be used in PySpark instead of :class:`threading.Thread` when the pinned thread mode is enabled. The usage of this class is exactly same as :class:`threading.Thread` but correctly inherits the inheritable properties specific to JVM thread such as ``InheritableThreadLocal``. Also, note that pinned thread mode does not close the connection from Python to JVM when the thread is finished in the Python side. 
With this class, Python garbage-collects the Python thread instance and also closes the connection which finishes JVM thread correctly. When the pinned thread mode is off, this works as :class:`threading.Thread`. .. versionadded:: 3.1.0 Notes ----- This API is experimental. """ def __init__(self, target, *args, **kwargs): from pyspark import SparkContext if isinstance(SparkContext._gateway, ClientServer): # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on. def copy_local_properties(*a, **k): # self._props is set before starting the thread to match the behavior with JVM. assert hasattr(self, "_props") SparkContext._active_spark_context._jsc.sc().setLocalProperties(self._props) try: return target(*a, **k) finally: InheritableThread._clean_py4j_conn_for_current_thread() super(InheritableThread, self).__init__( target=copy_local_properties, *args, **kwargs) else: super(InheritableThread, self).__init__(target=target, *args, **kwargs) def start(self, *args, **kwargs): from pyspark import SparkContext if isinstance(SparkContext._gateway, ClientServer): # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on. # Local property copy should happen in Thread.start to mimic JVM's behavior. self._props = SparkContext._active_spark_context._jsc.sc().getLocalProperties().clone() return super(InheritableThread, self).start(*args, **kwargs) @staticmethod def _clean_py4j_conn_for_current_thread(): from pyspark import SparkContext jvm = SparkContext._jvm thread_connection = jvm._gateway_client.get_thread_connection() if thread_connection is not None: try: # Dequeue is shared across other threads but it's thread-safe. # If this function has to be invoked one more time in the same thead # Py4J will create a new connection automatically. jvm._gateway_client.deque.remove(thread_connection) except ValueError: # Should never reach this point return finally: thread_connection.close() if __name__ == "__main__": if "pypy" not in platform.python_implementation().lower() and sys.version_info[:2] >= (3, 7): import doctest import pyspark.util from pyspark.context import SparkContext globs = pyspark.util.__dict__.copy() globs['sc'] = SparkContext('local[4]', 'PythonTest') (failure_count, test_count) = doctest.testmod(pyspark.util, globs=globs) globs['sc'].stop() if failure_count: sys.exit(-1)
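# Hedged usage sketch (not part of this module): how InheritableThread and
# inheritable_thread_target are meant to be used from application code when the
# pinned-thread mode (PYSPARK_PIN_THREAD=true) is enabled. It assumes an active
# SparkContext `sc`; the scheduler-pool property value is a placeholder.
def _example_inheritable_threads(sc):
    import threading
    from pyspark.util import InheritableThread, inheritable_thread_target

    sc.setLocalProperty("spark.scheduler.pool", "demo_pool")   # set in the parent thread

    def job():
        # Visible here because the JVM-side local properties were copied
        # into this thread by the wrapper/class above.
        print(sc.getLocalProperty("spark.scheduler.pool"))

    t = InheritableThread(target=job)   # properties are copied when start() is called
    t.start()
    t.join()

    # The decorator-style helper copies the properties at wrapping time instead.
    t2 = threading.Thread(target=inheritable_thread_target(job))
    t2.start()
    t2.join()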
parallel.py
import sys import socket import struct import time import threading class server_msi( object ): def __recv( self, sckt ): msg = sckt.recv( self.slen ) try: cmd = msg[0:1] who = int( msg[1:4] ) dst = int( msg[4:7] ) siz = int( msg[7:17] ) dat = msg[17:] while( len( dat ) < siz ): dat += sckt.recv( self.slen ) return( cmd, who, dst, dat[0:siz] ) except: return( b"#", -1, -1, b"" ) def __send( self, sckt, msg ): s = len( msg ) j = 0 for i in range( s // self.slen ): sckt.sendall( msg[j:j+self.slen] ) j += self.slen s %= self.slen if( s != 0 ): sckt.sendall( msg[j:] + b"0" * ( self.slen - s ) ) def __serve( self, chld ): cmd, who, dst, dat = self.__recv( chld ) if( cmd == b"#" or who < 0 or who >= self.ncpu ): chld.close() sys.stderr.write( "[server] rejected invalid node(%d) or malformed message...\n"%( who ) ) sys.stderr.flush() return() self.lock.acquire() #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if( cmd == b"B" ): self.barc[who] += 1 if( not( self.barc[who] in self.barb and self.barc[who] in self.bard ) ): self.barb[self.barc[who]] = [ 0 for i in range( self.ncpu ) ] self.bard[self.barc[who]] = True sys.stderr.write( "[server] barrier %d +++ (node: %03d)\n"%( self.barc[who], who ) ) sys.stderr.flush() self.barb[self.barc[who]][who] = 1 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ elif( cmd == b"P" ): if( self.bard[self.barc[who]] ): self.bard[self.barc[who]] = sum( self.barb[self.barc[who]] ) < self.ncpu if( self.bard[self.barc[who]] ): self.__send( chld, b"10" ) else: self.barb[self.barc[who]][who] = 0 self.__send( chld, b"00" ) else: self.barb[self.barc[who]][who] = 0 if( sum( self.barb[self.barc[who]] ) == 0 ): sys.stderr.write( "[server] barrier %d --- (node: %03d)\n"%( self.barc[who], who ) ) sys.stderr.flush() del self.barb[self.barc[who]] del self.bard[self.barc[who]] self.__send( chld, b"00" ) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ elif( cmd == b"W" ): if( self.data[dst][who] == b"" ): self.data[dst][who] = dat self.__send( chld, b"10" ) else: self.__send( chld, b"00" ) elif( cmd == b"R" ): if( self.data[who][dst] != b"" ): self.__send( chld, b"1" + self.data[who][dst] ) self.data[who][dst] = b"" else: self.__send( chld, b"00" ) self.lock.release() chld.close() def __init__( self, ncpu, inet = ( "", 6969 ), unix = None ): self.ncpu = ncpu self.slen = 1024 self.data = [ [ b"" for j in range( self.ncpu ) ] for i in range( self.ncpu ) ] self.barb = {} self.bard = {} self.barc = [ -1 for i in range( self.ncpu ) ] self.lock = threading.Lock() if( unix ): sys.stderr.write( "[server] listening at: %s\n"%( unix ) ) sys.stderr.flush() self.sckt = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM ) self.sckt.bind( unix ) else: if( inet[0] == "" ): inet = ( socket.gethostbyname( socket.gethostname() ), inet[1] ) sys.stderr.write( "[server] listening at: %s\n"%( str( inet ) ) ) sys.stderr.flush() self.sckt = socket.socket( socket.AF_INET, socket.SOCK_STREAM ) self.sckt.bind( inet ) while( True ): self.sckt.listen( ncpu * 10 ) chld, addr = self.sckt.accept() threading.Thread( target = self.__serve, args = ( chld, ) ).start() # self.sckt.close() # sys.stderr.write( "[server] done!\n" ) # sys.stderr.flush() class client_msi( object ): def __send( self, sckt, msg ): s = len( msg ) j = 0 for i in range( s // self.slen ): sckt.sendall( msg[j:j+self.slen] ) j += self.slen s %= self.slen if( s != 0 ): sckt.sendall( msg[j:] + b"0" * ( 
self.slen - s ) ) def __recv( self, sckt, siz ): msg = sckt.recv( self.slen ) if( msg[0:1] == b"1" ): while( len( msg ) <= siz ): msg += sckt.recv( self.slen ) return( True, msg[1:siz+1] ) else: return( False, b"" ) def __init__( self, node, inet = ( "", 6969 ), unix = None ): self.node = node self.slen = 1024 if( unix ): self.kind = socket.AF_UNIX self.addr = unix else: if( inet[0] == "" ): inet = ( socket.gethostbyname( socket.gethostname() ), inet[1] ) self.kind = socket.AF_INET self.addr = inet def barrier( self ): sckt = socket.socket( self.kind, socket.SOCK_STREAM ) sckt.connect( self.addr ) self.__send( sckt, b"B%03d00000"%( self.node ) ) sckt.close() flg = True while( flg ): time.sleep( 0.1 ) sckt = socket.socket( self.kind, socket.SOCK_STREAM ) sckt.connect( self.addr ) self.__send( sckt, b"P%03d00000"%( self.node ) ) flg, dat = self.__recv( sckt, 1 ) sckt.close() def send_r8( self, dst, lst ): msg = b"W%03d%03d%010d"%( self.node, dst, len( lst ) * 8 ) for r in lst: msg += struct.pack( "d", float( r ) ) sckt = socket.socket( self.kind, socket.SOCK_STREAM ) sckt.connect( self.addr ) self.__send( sckt, msg ) flg, tmp = self.__recv( sckt, 1 ) sckt.close() while( not flg ): time.sleep( 0.1 ) sckt = socket.socket( self.kind, socket.SOCK_STREAM ) sckt.connect( self.addr ) self.__send( sckt, msg ) flg, tmp = self.__recv( sckt, 1 ) sckt.close() def recv_r8( self, src, siz ): sckt = socket.socket( self.kind, socket.SOCK_STREAM ) sckt.connect( self.addr ) self.__send( sckt, b"R%03d%03d00"%( self.node, src ) ) flg, msg = self.__recv( sckt, siz * 8 ) sckt.close() while( not flg ): time.sleep( 0.1 ) sckt = socket.socket( self.kind, socket.SOCK_STREAM ) sckt.connect( self.addr ) self.__send( sckt, b"R%03d%03d00"%( self.node, src ) ) flg, msg = self.__recv( sckt, siz * 8 ) sckt.close() return( list( struct.unpack( "%dd"%( siz ), msg ) ) ) def send_i4( self, dst, lst ): msg = b"W%03d%03d%010d"%( self.node, dst, len( lst ) * 4 ) for r in lst: msg += struct.pack( "i", int( r ) ) sckt = socket.socket( self.kind, socket.SOCK_STREAM ) sckt.connect( self.addr ) self.__send( sckt, msg ) flg, tmp = self.__recv( sckt, 1 ) sckt.close() while( not flg ): time.sleep( 0.1 ) sckt = socket.socket( self.kind, socket.SOCK_STREAM ) sckt.connect( self.addr ) self.__send( sckt, msg ) flg, tmp = self.__recv( sckt, 1 ) sckt.close() def recv_i4( self, src, siz ): sckt = socket.socket( self.kind, socket.SOCK_STREAM ) sckt.connect( self.addr ) self.__send( sckt, b"R%03d%03d00"%( self.node, src ) ) flg, msg = self.__recv( sckt, siz * 4 ) sckt.close() while( not flg ): time.sleep( 0.1 ) sckt = socket.socket( self.kind, socket.SOCK_STREAM ) sckt.connect( self.addr ) self.__send( sckt, b"R%03d%03d00"%( self.node, src ) ) flg, msg = self.__recv( sckt, siz * 4 ) sckt.close() return( list( struct.unpack( "%di"%( siz ), msg ) ) ) def stop( self ): pass try: import qm3.utils._mpi class client_mpi( object ): def __init__( self ): self.node, self.ncpu = qm3.utils._mpi.init() def barrier( self ): qm3.utils._mpi.barrier() def stop( self ): qm3.utils._mpi.stop() def send_r8( self, dst, lst ): qm3.utils._mpi.send_r8( dst, lst ) def recv_r8( self, src, siz ): return( qm3.utils._mpi.recv_r8( src, siz ) ) def send_i4( self, dst, lst ): qm3.utils._mpi.send_i4( dst, lst ) def recv_i4( self, src, siz ): return( qm3.utils._mpi.recv_i4( src, siz ) ) except: pass
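# Hedged usage sketch (not part of parallel.py): a two-node exchange over the
# block protocol implemented above (a command byte followed by fixed-width
# source/destination/size fields, padded to 1024-byte blocks). The server runs
# in its own process because server_msi.__init__ enters its accept loop and
# never returns; the loopback address and port are placeholders.
def _example_node(rank, addr):
    cli = client_msi(rank, inet=addr)
    cli.barrier()                            # synchronise both nodes before exchanging data
    if rank == 0:
        cli.send_r8(1, [1.0, 2.0, 3.0])      # node 0 -> node 1
    else:
        print(cli.recv_r8(0, 3))             # node 1 reads three doubles sent by node 0
    cli.barrier()

def _example_two_nodes():
    import time
    import multiprocessing

    addr = ("127.0.0.1", 6969)
    srv = multiprocessing.Process(target=server_msi, args=(2, addr), daemon=True)
    srv.start()
    time.sleep(0.5)                          # give the server time to bind

    nodes = [multiprocessing.Process(target=_example_node, args=(r, addr)) for r in range(2)]
    for p in nodes:
        p.start()
    for p in nodes:
        p.join()
    srv.terminate()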
cat.py
import os import json import pandas import spacy from time import sleep from functools import partial from multiprocessing import Process, Manager, Queue, Pool, Array from medcat.cdb import CDB from medcat.spacy_cat import SpacyCat from medcat.preprocessing.tokenizers import spacy_split_all from medcat.utils.spelling import CustomSpellChecker from medcat.utils.spacy_pipe import SpacyPipe from medcat.preprocessing.cleaners import spacy_tag_punct from medcat.utils.helpers import get_all_from_name, tkn_inds_from_doc from medcat.utils.loggers import basic_logger from medcat.utils.data_utils import make_mc_train_test import time import sys, traceback from tqdm.autonotebook import tqdm log = basic_logger("CAT") class CAT(object): r''' The main MedCAT class used to annotate documents, it is built on top of spaCy and works as a spaCy pipline. Creates an instance of a spaCy pipline that can be used as a spacy nlp model. Args: cdb (medcat.cdb.CDB): The concept database that will be used for NER+L vocab (medcat.utils.vocab.Vocab, optional): Vocabulary used for vector embeddings and spelling. Default: None skip_stopwords (bool): If True the stopwords will be ignored and not detected in the pipeline. Default: True meta_cats (list of medcat.meta_cat.MetaCAT, optional): A list of models that will be applied sequentially on each detected annotation. Attributes (limited): cdb (medcat.cdb.CDB): Concept database used with this CAT instance, please do not assign this value directly. vocab (medcat.utils.vocab.Vocab): The vocabulary object used with this instance, please do not assign this value directly. config - WILL BE REMOVED - TEMPORARY PLACEHOLDER Examples: >>>cat = CAT(cdb, vocab) >>>spacy_doc = cat("Put some text here") >>>print(spacy_doc.ents) # Detected entites ''' def __init__(self, cdb, vocab=None, skip_stopwords=True, meta_cats=[], config={}, tokenizer=None): self.cdb = cdb self.vocab = vocab self.config = config # Build the spacy pipeline self.nlp = SpacyPipe(spacy_split_all) #self.nlp.add_punct_tagger(tagger=spacy_tag_punct) self.nlp.add_punct_tagger(tagger=partial(spacy_tag_punct, skip_stopwords=skip_stopwords, keep_punct=self.config.get("keep_punct", [':', '.']))) # Add spell checker self.spell_checker = CustomSpellChecker(cdb_vocab=self.cdb.vocab, data_vocab=self.vocab) self.nlp.add_spell_checker(spell_checker=self.spell_checker) # Add them cat class that does entity detection self.spacy_cat = SpacyCat(cdb=self.cdb, vocab=self.vocab, tokenizer=tokenizer) self.nlp.add_cat(spacy_cat=self.spacy_cat) # Add meta_annotaiton classes if they exist self._meta_annotations = False for meta_cat in meta_cats: self.nlp.add_meta_cat(meta_cat, meta_cat.category_name) self._meta_annotations = True def __call__(self, text): r''' Push the text through the pipeline. Args: text (string): The text to be annotated Returns: A spacy document with the extracted entities ''' return self.nlp(text) def add_concept_cntx(self, cui, text, tkn_inds, negative=False, lr=None, anneal=None, spacy_doc=None): if spacy_doc is None: spacy_doc = self(text) tkns = [spacy_doc[ind] for ind in range(tkn_inds[0], tkn_inds[-1] + 1)] self.spacy_cat._add_cntx_vec(cui=cui, doc=spacy_doc, tkns=tkns, negative=negative, lr=lr, anneal=anneal) def unlink_concept_name(self, cui, name, full_unlink=True): r''' Unlink a concept name from the CUI (or all CUIs if full_unlink), removes the link from the Concept Database (CDB). 
As a consequence medcat will never again link the `name` to this CUI - meaning the name will not be detected as a concept in the future. Args: cui (str): The CUI from which the `name` will be removed name (str): The span of text to be removed from the linking dictionary full_unlink (boolean): If True, the `name` will not only be removed from the given `cui` but from each concept in the database that is associated with this name. Examples: >>> # To never again link C0020538 to HTN >>> cat.unlink_concept_name('C0020538', 'htn', False) ''' names = [name, name.lower()] # Unlink a concept from a name p_name, tokens, _, _ = get_all_from_name(name=name, source_value=name, nlp=self.nlp, version='clean') # Add the clean version of the name names.append(p_name) # Get the raw version p_name, tokens, _, _ = get_all_from_name(name=name, source_value=name, nlp=self.nlp, version='raw') # Append the raw evrsion names.append(p_name) if tokens[-1].lower() == "s": # Remove last 's' - a stupid bug names.append(p_name[0:-1]) for name in names: cuis = [cui] if full_unlink and name in self.cdb.name2cui: cuis = list(self.cdb.name2cui[name]) for cui in cuis: if cui in self.cdb.cui2names and name in self.cdb.cui2names[cui]: self.cdb.cui2names[cui].remove(name) if len(self.cdb.cui2names[cui]) == 0: del self.cdb.cui2names[cui] if name in self.cdb.name2cui: if cui in self.cdb.name2cui[name]: self.cdb.name2cui[name].remove(cui) if len(self.cdb.name2cui[name]) == 0: del self.cdb.name2cui[name] def _add_name(self, cui, source_val, is_pref_name, only_new=False, desc=None, tui=None): r''' Please do not use directly. This function will add a name to a CUI (existing or new). Args: cui (str): The CUI to which to add the name source_val (str): The `name` or span or source_value that will be linked to the cui is_pref_name (boolean): Is this source_val the prefered `name` for this CUI (concept) only_new (bool): Only add the name if it does not exist in the current CDB and is not linked to any concept (CUI) in the current CDB. desc (str): Description for this concept tui (str): Semenantic Type identifer for this concept, should be a TUI that exisit in the current CDB. Have a look at cdb.tui2names - for a list of all existing TUIs in the current CDB. Examples: Do not use. ''' onto = 'def' all_cuis = [] if cui in self.cdb.cui2ontos and self.cdb.cui2ontos[cui]: onto = list(self.cdb.cui2ontos[cui])[0] # Add the original version of the name just lowercased p_name, tokens, snames, tokens_vocab = get_all_from_name(name=source_val, source_value=source_val, nlp=self.nlp, version='none') if cui not in self.cdb.cui2names or p_name not in self.cdb.cui2names[cui]: if not only_new or p_name not in self.cdb.name2cui: self.cdb.add_concept(cui, p_name, onto, tokens, snames, tokens_vocab=tokens_vocab, original_name=source_val, is_pref_name=False, desc=desc, tui=tui) all_cuis.extend(self.cdb.name2cui[p_name]) p_name, tokens, snames, tokens_vocab = get_all_from_name(name=source_val, source_value=source_val, nlp=self.nlp, version='clean') # This will add a new concept if the cui doesn't exist # or link the name to an existing concept if it exists. 
if cui not in self.cdb.cui2names or p_name not in self.cdb.cui2names[cui]: if not only_new or p_name not in self.cdb.name2cui: self.cdb.add_concept(cui, p_name, onto, tokens, snames, tokens_vocab=tokens_vocab, original_name=source_val, is_pref_name=False, desc=desc, tui=tui) all_cuis.extend(self.cdb.name2cui[p_name]) # Add the raw also if needed p_name, tokens, snames, tokens_vocab = get_all_from_name(name=source_val, source_value=source_val, nlp=self.nlp, version='raw') if cui not in self.cdb.cui2names or p_name not in self.cdb.cui2names[cui] or is_pref_name: if not only_new or p_name not in self.cdb.name2cui: self.cdb.add_concept(cui, p_name, onto, tokens, snames, tokens_vocab=tokens_vocab, original_name=source_val, is_pref_name=is_pref_name, desc=desc, tui=tui) all_cuis.extend(self.cdb.name2cui[p_name]) # Fix for ntkns in cdb if p_name in self.cdb.name2ntkns: if len(tokens) not in self.cdb.name2ntkns[p_name]: self.cdb.name2ntkns[p_name].add(len(tokens)) return list(set(all_cuis)) def add_name(self, cui, source_val, text=None, is_pref_name=False, tkn_inds=None, text_inds=None, spacy_doc=None, lr=None, anneal=None, negative=False, only_new=False, desc=None, tui=None, manually_created=False): r''' This function will add a `name` (source_val) to a CUI (existing or new). It will teach medcat that this source_val is linked to this CUI. Args: cui (str): The CUI to which to add the name source_val (str): The `name` or span or source_value that will be linked to the cui text (str, optional): Text in which an example of this source_val can be found. Used for supervised/online training. This is basically one sample in a dataset for supervised training. is_pref_name (boolean): Is this source_val the prefered `name` for this CUI (concept) tkn_inds (list of ints, optional): Should be in the form: [3, 4, 5, ...]. This should be used only if you are providing a spacy_doc also. It gives the indicies of the tokens in a spacy document where the source_val can be found. text_inds (list, optional): A list that has only two values the start index for this `source_val` in the `text` and the end index. Used if you are not providing a spacy_doc. But are providing a `text` - it is optional and if not provided medcat will try to automatically find the start and end index. spacy_doc () TODO: lr (float): The learning rate that will be used if you are providing the `text` that will be used for supervised/active learning. only_new (bool): Only add the name if it does not exist in the current CDB and is not linked to any concept (CUI) in the current CDB. desc (str): Description for this concept tui (str): Semenantic Type identifer for this concept, should be a TUI that exisit in the current CDB. Have a look at cdb.tui2names - for a list of all existing TUIs in the current CDB. Examples: Do not use. 
''' # First add the name, get bac all cuis that link to this name all_cuis = self._add_name(cui, source_val, is_pref_name, only_new=only_new, desc=desc, tui=tui) # Now add context if text is present if (text is not None and (source_val in text or text_inds)) or \ (spacy_doc is not None and (text_inds or tkn_inds)): if spacy_doc is None: spacy_doc = self(text) if tkn_inds is None: tkn_inds = tkn_inds_from_doc(spacy_doc=spacy_doc, text_inds=text_inds, source_val=source_val) if tkn_inds is not None and len(tkn_inds) > 0: self.add_concept_cntx(cui, text, tkn_inds, spacy_doc=spacy_doc, lr=lr, anneal=anneal, negative=negative) if manually_created: all_cuis.remove(cui) for _cui in all_cuis: self.add_concept_cntx(_cui, text, tkn_inds, spacy_doc=spacy_doc, lr=lr, anneal=anneal, negative=True) def _print_stats(self, data, epoch=0, use_filters=False, use_overlaps=False, use_cui_doc_limit=False, use_groups=False): r''' Print metrics on a dataset (F1, P, R), it will also print the concepts that have the most FP,FN,TP. Args: data (list of dict): The json object that we get from MedCATtrainer on export. epoch (int): Used during training, so we know what epoch is it. use_filters (boolean): Each project in medcattrainer can have filters, do we want to respect those filters when calculating metrics. use_overlaps (boolean): Allow overlapping entites, nearly always False as it is very difficult to annotate overlapping entites. use_cui_doc_limit (boolean): If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words if the document was annotated for that CUI. Useful in very specific situations when during the annotation process the set of CUIs changed. use_groups (boolean): If True concepts that have groups will be combined and stats will be reported on groups. 
Returns: fps (dict): False positives for each CUI fns (dict): False negatives for each CUI tps (dict): True positives for each CUI cui_prec (dict): Precision for each CUI cui_rec (dict): Recall for each CUI cui_f1 (dict): F1 for each CUI cui_counts (dict): Number of occurrence for each CUI ''' tp = 0 fp = 0 fn = 0 fps = {} fns = {} tps = {} cui_prec = {} cui_rec = {} cui_f1 = {} cui_counts = {} examples = {'fp': {}, 'fn': {}, 'tp': {}} fp_docs = set() fn_docs = set() if self.spacy_cat.TUI_FILTER is None: _tui_filter = None else: _tui_filter = list(self.spacy_cat.TUI_FILTER) if self.spacy_cat.CUI_FILTER is None: _cui_filter = None else: _cui_filter = list(self.spacy_cat.CUI_FILTER) for pind, project in tqdm(enumerate(data['projects']), desc="Stats project", total=len(data['projects']), leave=False): cui_filter = None tui_filter = None if use_filters: if 'cuis' in project and len(project['cuis'].strip()) > 0: cui_filter = set([x.strip() for x in project['cuis'].split(",")]) if 'tuis' in project and len(project['tuis'].strip()) > 0: tui_filter = set([x.strip().upper() for x in project['tuis'].split(",")]) self.spacy_cat.TUI_FILTER = tui_filter self.spacy_cat.CUI_FILTER = cui_filter start_time = time.time() for dind, doc in tqdm(enumerate(project['documents']), desc='Stats document', total=len(project['documents']), leave=False): spacy_doc = self(doc['text']) anns = doc['annotations'] if use_overlaps: p_anns = spacy_doc._.ents else: p_anns = spacy_doc.ents anns_norm = [] anns_norm_neg = [] anns_examples = [] anns_norm_cui = [] for ann in anns: if (cui_filter is None and tui_filter is None) or (cui_filter is not None and ann['cui'] in cui_filter) or \ (tui_filter is not None and self.cdb.cui2tui.get(ann['cui'], 'unk') in tui_filter): cui = ann['cui'] if use_groups: cui = self.cdb.cui2info.get(cui, {}).get("group", cui) if ann.get('validated', True) and (not ann.get('killed', False) and not ann.get('deleted', False)): anns_norm.append((ann['start'], cui)) anns_examples.append({"text": doc['text'][max(0, ann['start']-60):ann['end']+60], "cui": cui, "source value": ann['value'], "acc": 1, "project index": pind, "document inedex": dind}) elif ann.get('validated', True) and (ann.get('killed', False) or ann.get('deleted', False)): anns_norm_neg.append((ann['start'], cui)) if ann.get("validated", True): # This is used to test was someone annotating for this CUI in this document anns_norm_cui.append(cui) cui_counts[cui] = cui_counts.get(cui, 0) + 1 p_anns_norm = [] p_anns_examples = [] for ann in p_anns: cui = ann._.cui if use_groups: cui = self.cdb.cui2info.get(cui, {}).get("group", cui) p_anns_norm.append((ann.start_char, cui)) p_anns_examples.append({"text": doc['text'][max(0, ann.start_char-60):ann.end_char+60], "cui": cui, "source value": ann.text, "acc": float(ann._.acc), "project index": pind, "document inedex": dind}) for iann, ann in enumerate(p_anns_norm): if not use_cui_doc_limit or ann[1] in anns_norm_cui: cui = ann[1] if ann in anns_norm: tp += 1 tps[cui] = tps.get(cui, 0) + 1 example = p_anns_examples[iann] examples['tp'][cui] = examples['tp'].get(cui, []) + [example] else: fp += 1 fps[cui] = fps.get(cui, 0) + 1 fp_docs.add(doc['name']) # Add example for this FP prediction example = p_anns_examples[iann] if ann in anns_norm_neg: # Means that it really was annotated as negative example['real_fp'] = True examples['fp'][cui] = examples['fp'].get(cui, []) + [example] for iann, ann in enumerate(anns_norm): if ann not in p_anns_norm: cui = ann[1] fn += 1 fn_docs.add(doc['name']) fns[cui] = 
fns.get(cui, 0) + 1 examples['fn'][cui] = examples['fn'].get(cui, []) + [anns_examples[iann]] try: prec = tp / (tp + fp) rec = tp / (tp + fn) f1 = (prec + rec) / 2 print("Epoch: {}, Prec: {}, Rec: {}, F1: {}\n".format(epoch, prec, rec, f1)) print("Docs with false positives: {}\n".format("; ".join([str(x) for x in list(fp_docs)[0:10]]))) print("Docs with false negatives: {}\n".format("; ".join([str(x) for x in list(fn_docs)[0:10]]))) # Sort fns & prec fps = {k: v for k, v in sorted(fps.items(), key=lambda item: item[1], reverse=True)} fns = {k: v for k, v in sorted(fns.items(), key=lambda item: item[1], reverse=True)} tps = {k: v for k, v in sorted(tps.items(), key=lambda item: item[1], reverse=True)} # F1 per concept for cui in tps.keys(): prec = tps[cui] / (tps.get(cui, 0) + fps.get(cui, 0)) rec = tps[cui] / (tps.get(cui, 0) + fns.get(cui, 0)) f1 = (prec + rec) / 2 cui_prec[cui] = prec cui_rec[cui] = rec cui_f1[cui] = f1 # Get top 10 pr_fps = [(self.cdb.cui2pretty_name.get(cui, list(self.cdb.cui2original_names.get(cui, [cui]))[0]), cui, fps[cui]) for cui in list(fps.keys())[0:10]] pr_fns = [(self.cdb.cui2pretty_name.get(cui, list(self.cdb.cui2original_names.get(cui, [cui]))[0]), cui, fns[cui]) for cui in list(fns.keys())[0:10]] pr_tps = [(self.cdb.cui2pretty_name.get(cui, list(self.cdb.cui2original_names.get(cui, [cui]))[0]), cui, tps[cui]) for cui in list(tps.keys())[0:10]] print("\n\nFalse Positives\n") for one in pr_fps: print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2])) print("\n\nFalse Negatives\n") for one in pr_fns: print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2])) print("\n\nTrue Positives\n") for one in pr_tps: print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2])) print("*"*110 + "\n") except Exception as e: traceback.print_exc() self.spacy_cat.TUI_FILTER = _tui_filter self.spacy_cat.CUI_FILTER = _cui_filter return fps, fns, tps, cui_prec, cui_rec, cui_f1, cui_counts, examples def train_supervised(self, data_path, reset_cdb=False, reset_cui_count=False, nepochs=30, lr=None, anneal=None, print_stats=True, use_filters=False, terminate_last=False, use_overlaps=False, use_cui_doc_limit=False, test_size=0, force_manually_created=False, use_groups=False, never_terminate=False): r''' Run supervised training on a dataset from MedCATtrainer. Please take care that this is more a simiulated online training then supervised. Args: data_path (str): The path to the json file that we get from MedCATtrainer on export. reset_cdb (boolean): This will remove all concepts from the existing CDB and build a new CDB based on the concepts that appear in the training data. It will be impossible to get back the removed concepts. reset_cui_count (boolean): Used for training with weight_decay (annealing). Each concept has a count that is there from the beginning of the CDB, that count is used for annealing. Resetting the count will significantly incrase the training impact. This will reset the count only for concepts that exist in the the training data. nepochs (int): Number of epochs for which to run the training. lr (int): If set it will overwrite the global LR from config. anneal (boolean): If true annealing will be used when training. print_stats (boolean): If true stats will be printed during training (prints stats every 5 epochs). use_filters (boolean): Each project in medcattrainer can have filters, do we want to respect those filters when calculating metrics. 
terminate_last (boolean): If true, concept termination will be done after all training. use_overlaps (boolean): Allow overlapping entites, nearly always False as it is very difficult to annotate overlapping entites. use_cui_doc_limit (boolean): If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words if the document was annotated for that CUI. Useful in very specific situations when during the annotation process the set of CUIs changed. test_size (float): If > 0 the data set will be split into train test based on this ration. Should be between 0 and 1. Usually 0.1 is fine. force_manually_created (float): Check add_name for more details, if true all concepts in the dataset will be treated as manually created. use_groups (boolean): If True concepts that have groups will be combined and stats will be reported on groups. never_terminate (boolean): If True no termination will be applied Returns: fp (dict): False positives for each CUI fn (dict): False negatives for each CUI tp (dict): True positives for each CUI p (dict): Precision for each CUI r (dict): Recall for each CUI f1 (dict): F1 for each CUI cui_counts (dict): Number of occurrence for each CUI examples (dict): FP/FN examples of sentences for each CUI ''' fp = fn = tp = p = r = f1 = cui_counts = examples = {} self.train = False data = json.load(open(data_path)) cui_counts = {} if test_size == 0: test_set = data train_set = data else: train_set, test_set, _, _ = make_mc_train_test(data, self.cdb, test_size=test_size) if print_stats: self._print_stats(test_set, use_filters=use_filters, use_cui_doc_limit=use_cui_doc_limit, use_overlaps=use_overlaps, use_groups=use_groups) if reset_cdb: self.cdb = CDB() self.spacy_cat.cdb = self.cdb self.spacy_cat.cat_ann.cdb = self.cdb if reset_cui_count: # Get all CUIs cuis = [] for project in train_set['projects']: for doc in project['documents']: for ann in doc['annotations']: cuis.append(ann['cui']) for cui in set(cuis): if cui in self.cdb.cui_count: self.cdb.cui_count[cui] = 10 # Remove entites that were terminated if not never_terminate: for project in train_set['projects']: for doc in project['documents']: for ann in doc['annotations']: if ann.get('killed', False): self.unlink_concept_name(ann['cui'], ann['value']) for epoch in tqdm(range(nepochs), desc='Epoch', leave=False): # Print acc before training for project in tqdm(train_set['projects'], desc='Project', leave=False, total=len(train_set['projects'])): for i_doc, doc in tqdm(enumerate(project['documents']), desc='Document', leave=False, total=len(project['documents'])): spacy_doc = self(doc['text']) for ann in doc['annotations']: if not ann.get('killed', False): cui = ann['cui'] start = ann['start'] end = ann['end'] deleted = ann.get('deleted', False) manually_created = False if force_manually_created or ann.get('manually_created', False) or ann.get('alternative', False): manually_created = True self.add_name(cui=cui, source_val=ann['value'], spacy_doc=spacy_doc, text_inds=[start, end], negative=deleted, lr=lr, anneal=anneal, manually_created=manually_created) if terminate_last and not never_terminate: # Remove entites that were terminated, but after all training is done for project in train_set['projects']: for doc in project['documents']: for ann in doc['annotations']: if ann.get('killed', False): self.unlink_concept_name(ann['cui'], ann['value']) if epoch % 5 == 0: if print_stats: fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set, epoch=epoch+1, use_filters=use_filters, 
use_cui_doc_limit=use_cui_doc_limit, use_overlaps=use_overlaps, use_groups=use_groups) return fp, fn, tp, p, r, f1, cui_counts, examples @property def train(self): return self.spacy_cat.train @property def spacy_nlp(self): """ Returns the spacy nlp object utilized by cat""" return self.nlp.nlp @train.setter def train(self, val): self.spacy_cat.train = val def run_training(self, data_iterator, fine_tune=False): """ Runs training on the data data_iterator: Simple iterator over sentences/documents, e.g. a open file or an array or anything else that we can use in a for loop. fine_tune: If False old training will be removed """ self.train = True cnt = 0 if not fine_tune: print("Removing old training data!\n") self.cdb.reset_training() self.cdb.coo_dict = {} self.spacy_cat._train_skip_names = {} for line in data_iterator: if line is not None: try: _ = self(line) except Exception as e: print("LINE: '{}' \t WAS SKIPPED".format(line)) print("BECAUSE OF: " + str(e)) if cnt % 1000 == 0: print("DONE: " + str(cnt)) cnt += 1 self.train = False def get_entities(self, text, cat_filter=None, only_cui=False, skip_info=False): """ Get entities text: text to be annotated return: entities """ doc = self(text) out = [] if cat_filter: cat_filter(doc, self) out_ent = {} # if self.config.get('nested_entities', False): # _ents = doc._.ents # else: # _ents = doc.ents _tags = doc._.tags for ind, tag in enumerate(_tags): cui = str(tag['cui']) if not only_cui: out_ent['pretty_name'] = self.cdb.cui2pretty_name.get(cui, '') out_ent['cui'] = cui out_ent['tui'] = str(tag['tui']) out_ent['type'] = str(self.cdb.tui2name.get(out_ent['tui'], '')) out_ent['source_value'] = str(doc[tag['start']:tag['end']].text) out_ent['acc'] = str(tag['acc']) out_ent['start'] = doc[tag['start']:tag['end']].start_char out_ent['end'] = doc[tag['start']:tag['end']].end_char if not skip_info: out_ent['info'] = self.cdb.cui2info.get(cui, {}) out_ent['id'] = str(tag['id']) out_ent['meta_anns'] = {} if 'meta_anns' in tag: for key in tag['meta_anns'].keys(): one = {'name': key, 'value': tag['meta_anns'][key]} out_ent['meta_anns'][key] = one out.append(dict(out_ent)) else: out.append(cui) return out def get_json(self, text, cat_filter=None, only_cui=False, skip_info=False): """ Get output in json format text: text to be annotated return: json with fields {'entities': <>, 'text': text} """ ents = self.get_entities(text, cat_filter, only_cui, skip_info=skip_info) out = {'entities': ents, 'text': text} return json.dumps(out) def multi_processing(self, in_data, nproc=8, batch_size=100, cat_filter=None, only_cui=False, skip_info=False): """ Run multiprocessing NOT FOR TRAINING in_data: an iterator or array with format: [(id, text), (id, text), ...] nproc: number of processors batch_size: obvious return: an list of tuples: [(id, doc_json), (id, doc_json), ...] 
""" if self._meta_annotations: # Hack for torch using multithreading, which is not good here import torch torch.set_num_threads(1) # Create the input output for MP in_q = Queue(maxsize=4*nproc) manager = Manager() out_dict = manager.dict() out_dict['processed'] = [] # Create processes procs = [] for i in range(nproc): p = Process(target=self._mp_cons, args=(in_q, out_dict, i, cat_filter, only_cui, skip_info)) p.start() procs.append(p) data = [] for id, text in in_data: data.append((id, text)) if len(data) == batch_size: in_q.put(data) data = [] # Put the last batch if it exists if len(data) > 0: in_q.put(data) for _ in range(nproc): # tell workers we're done in_q.put(None) for p in procs: p.join() # Close the queue as it can cause memory leaks in_q.close() out = [] for key in out_dict.keys(): if 'pid' in key: data = out_dict[key] out.extend(data) # Sometimes necessary to free memory out_dict.clear() del out_dict return out def _mp_cons(self, in_q, out_dict, pid=0, cat_filter=None, only_cui=False, skip_info=False): cnt = 0 out = [] while True: if not in_q.empty(): data = in_q.get() if data is None: out_dict['pid: {}'.format(pid)] = out break for id, text in data: try: doc = json.loads(self.get_json(text, cat_filter, only_cui, skip_info)) out.append((id, doc)) except Exception as e: print("Exception in _mp_cons") print(e) sleep(1) def add_cui_to_group(self, cui, group_name, reset_all_groups=False): r''' Ads a CUI to a group, will appear in cdb.cui2info['group'] Args: cui (str): The concept to be added group_name (str): The group to whcih the concept will be added reset_all_groups (boolean): If True it will reset all existing groups and remove them. Examples: >>> cat.add_cui_to_group("S-17", 'pain') ''' # Reset if needed if reset_all_groups: for _cui in self.cdb.cui2info.keys(): _ = self.cdb.cui2info[_cui].pop('group', None) # Add if cui in self.cdb.cui2info: self.cdb.cui2info[cui]['group'] = group_name else: self.cdb.cui2info[cui] = {'group': group_name}
packetQueue.py
import time,threading
from queue import Queue
from mirage.libs.utils import exitMirage

class StoppableThread(threading.Thread):
    '''
    This class is just a simplistic implementation of a stoppable thread.
    The target parameter allows providing a specific function to run continuously in the background.
    If the stop method is called, the thread is interrupted.
    '''
    def __init__(self,target=None):
        super().__init__(target=target)
        self.daemon = True
        self.signal = True

    def run(self):
        try:
            while self.signal:
                self._target(*(self._args))
        except (KeyboardInterrupt,EOFError):
            pass

    def stop(self):
        '''
        This method stops the thread.
        '''
        self.signal = False

class PacketQueue:
    '''
    This class implements a Packet (``mirage.libs.wireless_utils.packets.Packet``) queue, and provides an API to manipulate it.
    The Emitter class (``mirage.libs.wireless.Emitter``) and the Receiver class (``mirage.libs.wireless.Receiver``) inherit from it.

    The private method _task implements a watchdog, allowing packets to be put into or taken from the queue and manipulated.
    This watchdog is called continuously by a StoppableThread (``mirage.libs.wireless_utils.packetQueue.StoppableThread``).

    Some parameters may be passed to the constructor:

    * waitEmpty : indicates whether the queue should wait until it is empty before stopping
    * autoStart : indicates whether the queue should start immediately after the instantiation of the class
    '''
    def __init__(self, waitEmpty = False, autoStart = True):
        self.waitEmpty = waitEmpty
        self.autoStart = autoStart
        self.queue = Queue()
        self.isStarted = False
        if self.isDeviceUp():
            self.device.subscribe(self)
        self.daemonThread = None
        if autoStart:
            self.start()

    def isDeviceUp(self):
        '''
        This method allows checking whether the Device (``mirage.libs.wireless_utils.device.Device``) linked to this Packet Queue is up and running.
        '''
        return hasattr(self,"device") and self.device is not None and self.device.isUp()

    def _createDaemonThread(self):
        self.daemonThread = StoppableThread(target = self._task)

    '''
    def __del__(self):
        self.stop()
    '''

    def start(self):
        '''
        This method starts the associated stoppable thread in order to continuously call the watchdog function (_task).
        '''
        if self.daemonThread is None:
            self._createDaemonThread()
        if not self.isStarted:
            self.daemonThread.start()
            self.isStarted = True

    def stop(self):
        '''
        This method stops the associated stoppable thread.
        '''
        if hasattr(self,"isStarted") and self.isStarted:
            if self.waitEmpty:
                while not self.isEmpty():
                    time.sleep(0.05) # necessary ?
            self.daemonThread.stop()
            self.daemonThread = None
            self.isStarted = False

    def restart(self):
        '''
        This method restarts the associated stoppable thread.
        '''
        self.stop()
        self.start()

    def isBusy(self):
        '''
        This method indicates whether the queue contains data.

        :return: boolean indicating whether the queue contains data
        :rtype: bool
        '''
        return not self.isEmpty()

    def isEmpty(self):
        '''
        This method indicates whether the queue is empty.

        :return: boolean indicating whether the queue is empty
        :rtype: bool
        '''
        return self.queue.empty()

    def _task(self):
        pass

    def __getattr__(self, name):
        if (name != "device" and hasattr(self.device, name) and (name in self.device.__class__.sharedMethods or name == "hasCapabilities")):
            return getattr(self.device,name)
        else:
            raise AttributeError
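# Hedged usage sketch (not part of packetQueue.py): PacketQueue is meant to be
# subclassed with a concrete _task(), which the StoppableThread then calls in a
# loop until stop() is requested. This toy subclass (relying on the classes and
# the `time` import above) has no device attached and simply drains its own queue.
class _ExamplePrinterQueue(PacketQueue):
    def _task(self):
        if not self.isEmpty():
            print("got:", self.queue.get())
        else:
            time.sleep(0.01)   # avoid busy-waiting while the queue is empty

# q = _ExamplePrinterQueue(autoStart=True)   # starts the watchdog thread immediately
# q.queue.put("hello")
# q.stop()                                   # interrupts the watchdog thread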
vec_env.py
import redis import time import subprocess from multiprocessing import Process, Pipe def start_redis(): print('Starting Redis') subprocess.Popen(['redis-server', '--save', '\"\"', '--appendonly', 'no']) time.sleep(1) def start_openie(install_path): print('Starting OpenIE from', install_path) subprocess.Popen(['java', '-mx8g', '-cp', '*', \ 'edu.stanford.nlp.pipeline.StanfordCoreNLPServer', \ '-port', '9001', '-timeout', '15000', '-quiet'], cwd=install_path) time.sleep(1) def worker(remote, parent_remote, env): parent_remote.close() env.create() try: done = False while True: cmd, data = remote.recv() if cmd == 'step': if done: ob, info, graph_info = env.reset() rew = 0 done = False else: ob, rew, done, info, graph_info = env.step(data) remote.send((ob, rew, done, info, graph_info)) elif cmd == 'reset': ob, info, graph_info = env.reset() remote.send((ob, info, graph_info)) elif cmd == 'close': env.close() break else: raise NotImplementedError except KeyboardInterrupt: print('SubprocVecEnv worker: got KeyboardInterrupt') finally: env.close() class VecEnv: def __init__(self, num_envs, env, openie_path): start_redis() start_openie(openie_path) self.conn_valid = redis.Redis(host='localhost', port=6379, db=0) self.closed = False self.total_steps = 0 self.num_envs = num_envs self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(num_envs)]) self.ps = [Process(target=worker, args=(work_remote, remote, env)) for (work_remote, remote) in zip(self.work_remotes, self.remotes)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() def step(self, actions): if self.total_steps % 1024 == 0: self.conn_valid.flushdb() self.total_steps += 1 self._assert_not_closed() assert len(actions) == self.num_envs, "Error: incorrect number of actions." for remote, action in zip(self.remotes, actions): remote.send(('step', action)) results = [remote.recv() for remote in self.remotes] self.waiting = False return zip(*results) def reset(self): self._assert_not_closed() for remote in self.remotes: remote.send(('reset', None)) results = [remote.recv() for remote in self.remotes] return zip(*results) def close_extras(self): self.closed = True for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() def _assert_not_closed(self): assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
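# Hedged usage sketch (not part of vec_env.py): the interface the `env` argument
# of VecEnv is expected to expose, inferred from worker() above. The OpenIE
# install path is a placeholder, and the extra graph_info return value is
# presumably the knowledge-graph data backed by the Redis/OpenIE services.
class _DummyEnv:
    def create(self):
        pass                                 # called once inside each worker process
    def reset(self):
        return "obs", {}, {}                 # (observation, info, graph_info)
    def step(self, action):
        return "obs", 0.0, False, {}, {}     # (obs, reward, done, info, graph_info)
    def close(self):
        pass

# venv = VecEnv(num_envs=4, env=_DummyEnv(), openie_path="/opt/corenlp")  # also spawns redis-server and OpenIE
# obs, infos, graphs = venv.reset()
# obs, rews, dones, infos, graphs = venv.step(["look"] * 4)
# venv.close_extras()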
process.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import tempfile import subprocess import tensorflow as tf import numpy as np import tfimage as im import threading import time import multiprocessing edge_pool = None parser = argparse.ArgumentParser() parser.add_argument("--input_dir", required=True, help="path to folder containing images") parser.add_argument("--output_dir", required=True, help="output path") parser.add_argument("--operation", required=True, choices=["grayscale", "resize", "blank", "combine", "edges"]) parser.add_argument("--workers", type=int, default=1, help="number of workers") # resize parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation") parser.add_argument("--size", type=int, default=256, help="size to use for resize operation") # combine parser.add_argument("--b_dir", type=str, help="path to folder containing B images for combine operation") a = parser.parse_args() def resize(src): height, width, _ = src.shape dst = src if height != width: if a.pad: size = max(height, width) # pad to correct ratio oh = (size - height) // 2 ow = (size - width) // 2 dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size) else: # crop to correct ratio size = min(height, width) oh = (height - size) // 2 ow = (width - size) // 2 dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size) assert(dst.shape[0] == dst.shape[1]) size, _, _ = dst.shape if size > a.size: dst = im.downscale(images=dst, size=[a.size, a.size]) elif size < a.size: dst = im.upscale(images=dst, size=[a.size, a.size]) return dst def blank(src): height, width, _ = src.shape if height != width: raise Exception("non-square image") image_size = width size = int(image_size * 0.3) offset = int(image_size / 2 - size / 2) dst = src dst[offset:offset + size,offset:offset + size,:] = np.ones([size, size, 3]) return dst def combine(src, src_path): if a.b_dir is None: raise Exception("missing b_dir") # find corresponding file in b_dir, could have a different extension basename, _ = os.path.splitext(os.path.basename(src_path)) for ext in [".png", ".jpg"]: sibling_path = os.path.join(a.b_dir, basename + ext) if os.path.exists(sibling_path): sibling = im.load(sibling_path) break else: raise Exception("could not find sibling image for " + src_path) # make sure that dimensions are correct height, width, _ = src.shape if height != sibling.shape[0] or width != sibling.shape[1]: raise Exception("differing sizes") # convert both images to RGB if necessary if src.shape[2] == 1: src = im.grayscale_to_rgb(images=src) if sibling.shape[2] == 1: sibling = im.grayscale_to_rgb(images=sibling) # remove alpha channel if src.shape[2] == 4: src = src[:,:,:3] if sibling.shape[2] == 4: sibling = sibling[:,:,:3] return np.concatenate([src, sibling], axis=1) def grayscale(src): return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src)) net = None def run_caffe(src): # lazy load caffe and create net global net if net is None: # don't require caffe unless we are doing edge detection os.environ["GLOG_minloglevel"] = "2" # disable logging from caffe import caffe # using this requires using the docker image or assembling a bunch of dependencies # and then changing these hardcoded paths net = caffe.Net("/opt/caffe/examples/hed/deploy.prototxt", "/opt/caffe/hed_pretrained_bsds.caffemodel", caffe.TEST) net.blobs["data"].reshape(1, *src.shape) 
net.blobs["data"].data[...] = src net.forward() return net.blobs["sigmoid-fuse"].data[0][0,:,:] def edges(src): # based on https://github.com/phillipi/pix2pix/blob/master/scripts/edges/batch_hed.py # and https://github.com/phillipi/pix2pix/blob/master/scripts/edges/PostprocessHED.m import scipy.io src = src * 255 border = 128 # put a padding around images since edge detection seems to detect edge of image src = src[:,:,:3] # remove alpha channel if present src = np.pad(src, ((border, border), (border, border), (0,0)), "reflect") src = src[:,:,::-1] src -= np.array((104.00698793,116.66876762,122.67891434)) src = src.transpose((2, 0, 1)) # [height, width, channels] => [batch, channel, height, width] fuse = edge_pool.apply(run_caffe, [src]) fuse = fuse[border:-border, border:-border] with tempfile.NamedTemporaryFile(suffix=".png") as png_file, tempfile.NamedTemporaryFile(suffix=".mat") as mat_file: scipy.io.savemat(mat_file.name, {"input": fuse}) octave_code = r""" E = 1-load(input_path).input; E = imresize(E, [image_width,image_width]); E = 1 - E; E = single(E); [Ox, Oy] = gradient(convTri(E, 4), 1); [Oxx, ~] = gradient(Ox, 1); [Oxy, Oyy] = gradient(Oy, 1); O = mod(atan(Oyy .* sign(-Oxy) ./ (Oxx + 1e-5)), pi); E = edgesNmsMex(E, O, 1, 5, 1.01, 1); E = double(E >= max(eps, threshold)); E = bwmorph(E, 'thin', inf); E = bwareaopen(E, small_edge); E = 1 - E; E = uint8(E * 255); imwrite(E, output_path); """ config = dict( input_path="'%s'" % mat_file.name, output_path="'%s'" % png_file.name, image_width=256, threshold=25.0/255.0, small_edge=5, ) args = ["octave"] for k, v in config.items(): args.extend(["--eval", "%s=%s;" % (k, v)]) args.extend(["--eval", octave_code]) try: subprocess.check_output(args, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print("octave failed") print("returncode:", e.returncode) print("output:", e.output) raise return im.load(png_file.name) def process(src_path, dst_path): src = im.load(src_path) if a.operation == "grayscale": dst = grayscale(src) elif a.operation == "resize": dst = resize(src) elif a.operation == "blank": dst = blank(src) elif a.operation == "combine": dst = combine(src, src_path) elif a.operation == "edges": dst = edges(src) else: raise Exception("invalid operation") im.save(dst, dst_path) complete_lock = threading.Lock() start = None num_complete = 0 total = 0 def complete(): global num_complete, rate, last_complete with complete_lock: num_complete += 1 now = time.time() elapsed = now - start rate = num_complete / elapsed if rate > 0: remaining = (total - num_complete) / rate else: remaining = 0 print("%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60)) last_complete = now def main(): if not os.path.exists(a.output_dir): os.makedirs(a.output_dir) src_paths = [] dst_paths = [] skipped = 0 for src_path in im.find(a.input_dir): name, _ = os.path.splitext(os.path.basename(src_path)) dst_path = os.path.join(a.output_dir, name + ".png") if os.path.exists(dst_path): skipped += 1 else: src_paths.append(src_path) dst_paths.append(dst_path) print("skipping %d files that already exist" % skipped) global total total = len(src_paths) print("processing %d files" % total) global start start = time.time() if a.operation == "edges": # use a multiprocessing pool for this operation so it can use multiple CPUs # create the pool before we launch processing threads global edge_pool edge_pool = multiprocessing.Pool(a.workers) if a.workers == 1: 
with tf.compat.v1.Session() as sess: for src_path, dst_path in zip(src_paths, dst_paths): process(src_path, dst_path) complete() else: queue = tf.train.input_producer(zip(src_paths, dst_paths), shuffle=False, num_epochs=1) dequeue_op = queue.dequeue() def worker(coord): with sess.as_default(): while not coord.should_stop(): try: src_path, dst_path = sess.run(dequeue_op) except tf.errors.OutOfRangeError: coord.request_stop() break process(src_path, dst_path) complete() # init epoch counter for the queue local_init_op = tf.local_variables_initializer() with tf.compat.v1.Session() as sess: sess.run(local_init_op) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) for i in range(a.workers): t = threading.Thread(target=worker, args=(coord,)) t.start() threads.append(t) try: coord.join(threads) except KeyboardInterrupt: coord.request_stop() coord.join(threads) main()
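The multi-worker path in process.py leans on the old tf.train.input_producer queue machinery. As a point of comparison, here is a minimal standard-library sketch of the same fan-out over (src, dst) path pairs using ThreadPoolExecutor; process_fn stands in for the process() function above and is not part of the original file.

from concurrent.futures import ThreadPoolExecutor


def run_workers(src_paths, dst_paths, process_fn, workers=4):
    # Dispatch each (src, dst) pair to a pool of worker threads and
    # surface any exception raised inside a worker.
    with ThreadPoolExecutor(max_workers=workers) as pool:
        futures = [pool.submit(process_fn, s, d) for s, d in zip(src_paths, dst_paths)]
        for fut in futures:
            fut.result()


if __name__ == "__main__":
    # toy stand-in for the real image-processing function
    run_workers(["a.png", "b.png"], ["out/a.png", "out/b.png"],
                lambda s, d: print(s, "->", d), workers=2)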
test_integration_using_router.py
import json import time from http import HTTPStatus from threading import Thread from typing import Union, List from unittest import TestCase from uuid import uuid4 import requests from requests.auth import HTTPBasicAuth from openbrokerapi import api, errors from openbrokerapi.catalog import ServicePlan from openbrokerapi.service_broker import ( ServiceBroker, Service, ProvisionDetails, ProvisionedServiceSpec, ProvisionState, DeprovisionDetails, DeprovisionServiceSpec, BindDetails, Binding, BindState, UnbindDetails, UnbindSpec, ) class FullRouterTestCase(TestCase): def setUp(self) -> None: broker_username = str(uuid4()) broker_passsword = str(uuid4()) self.request_ads = { "auth": HTTPBasicAuth(broker_username, broker_passsword), "headers": { "X-Broker-Api-Version": "2.15", "Content-Type": "application/json", }, } self.broker_1 = InMemoryBroker(str(uuid4()), str(uuid4())) self.broker_2 = InMemoryBroker(str(uuid4()), str(uuid4())) def run_server(): api.serve_multiple( [self.broker_1, self.broker_2], api.BrokerCredentials(broker_username, broker_passsword), host="127.0.0.1", port=5002, debug=True, ) self.server = Thread(target=run_server) self.server.setDaemon(True) self.server.start() time.sleep(2) def test_lifecycle(self): # GIVEN org_guid = str(uuid4()) space_guid = str(uuid4()) instace_guid = str(uuid4()) binding_guid = str(uuid4()) # CATALOG self.check_catalog(self.broker_1.service_guid, self.broker_1.plan_guid) self.check_catalog(self.broker_2.service_guid, self.broker_2.plan_guid) # PROVISION self.check_provision( instace_guid, org_guid, space_guid, self.broker_1.service_guid, self.broker_1.plan_guid, ) # BIND self.check_bind(binding_guid, instace_guid) # UNBIND self.check_unbind(binding_guid, instace_guid) # DEPROVISION self.check_deprovision(instace_guid) # DEPROVISION TWICE self.check_deprovision_after_deprovision_done(instace_guid) def check_instance_retrievable(self, instace_guid): response = requests.get( "http://localhost:5002/v2/service_instances/{}".format(instace_guid), **self.request_ads ) self.assertEqual(HTTPStatus.OK, response.status_code) self.assertEqual(self.broker_1.service_guid, response.json()["service_id"]) self.assertEqual(self.broker_1.plan_guid, response.json()["plan_id"]) def check_unbind(self, binding_guid, instace_guid): response = requests.delete( "http://localhost:5002/v2/service_instances/{}/service_bindings/{}".format( instace_guid, binding_guid ), params={ "service_id": self.broker_1.service_guid, "plan_id": self.broker_1.plan_guid, "accepts_incomplete": "false", }, **self.request_ads ) self.assertEqual(HTTPStatus.OK, response.status_code) def check_bind(self, binding_guid, instace_guid): response = requests.put( "http://localhost:5002/v2/service_instances/{}/service_bindings/{}?accepts_incomplete=false".format( instace_guid, binding_guid ), data=json.dumps( { "service_id": self.broker_1.service_guid, "plan_id": self.broker_1.plan_guid, } ), **self.request_ads ) self.assertEqual(HTTPStatus.CREATED, response.status_code) def check_deprovision_after_deprovision_done(self, instace_guid): response = requests.delete( "http://localhost:5002/v2/service_instances/{}".format(instace_guid), params={ "service_id": self.broker_1.service_guid, "plan_id": self.broker_1.plan_guid, "accepts_incomplete": "false", }, **self.request_ads ) self.assertEqual(HTTPStatus.GONE, response.status_code) def check_deprovision(self, instace_guid): response = requests.delete( "http://localhost:5002/v2/service_instances/{}".format(instace_guid), params={ "service_id": 
self.broker_1.service_guid, "plan_id": self.broker_1.plan_guid, "accepts_incomplete": "false", }, **self.request_ads ) self.assertEqual(HTTPStatus.OK, response.status_code) def check_provision( self, instace_guid, org_guid, space_guid, service_guid, plan_guid ): response = requests.put( "http://localhost:5002/v2/service_instances/{}?accepts_incomplete=false".format( instace_guid ), data=json.dumps( { "organization_guid": org_guid, "space_guid": space_guid, "service_id": service_guid, "plan_id": plan_guid, # "context": { # "organization_guid": "org-guid-here", # "space_guid": "space-guid-here", # } } ), **self.request_ads ) self.assertEqual(HTTPStatus.CREATED, response.status_code) def check_catalog(self, service_guid, plan_guid): response = requests.get("http://localhost:5002/v2/catalog", **self.request_ads) catalog = response.json() self.assertEqual(HTTPStatus.OK, response.status_code) # find service for service in catalog["services"]: if service["id"] == service_guid: break else: service = None self.assertIsNotNone(service) self.assertFalse(service.get("instances_retrievable")) self.assertFalse(service.get("bindings_retrievable")) # find plan for plan in service["plans"]: if plan["name"] == "standard": break else: plan = None self.assertIsNotNone(plan) self.assertEqual(plan_guid, plan.get("id")) class InMemoryBroker(ServiceBroker): CREATED = "CREATED" BOUND = "BOUND" DELETING = "DELETING" def __init__(self, service_guid, plan_guid): self.service_guid = service_guid self.plan_guid = plan_guid self.service_instances = dict() def catalog(self) -> Union[Service, List[Service]]: return Service( id=self.service_guid, name="InMemService", description="InMemService", bindable=True, plans=[ ServicePlan( id=self.plan_guid, name="standard", description="standard plan", free=False, ) ], instances_retrievable=False, bindings_retrievable=False, ) def provision( self, instance_id: str, details: ProvisionDetails, async_allowed: bool, **kwargs ) -> ProvisionedServiceSpec: self.service_instances[instance_id] = { "provision_details": details, "state": self.CREATED, } return ProvisionedServiceSpec(state=ProvisionState.SUCCESSFUL_CREATED) def bind( self, instance_id: str, binding_id: str, details: BindDetails, async_allowed: bool, **kwargs ) -> Binding: instance = self.service_instances.get(instance_id, {}) if instance and instance.get("state") == self.CREATED: instance["state"] = self.BOUND return Binding(BindState.SUCCESSFUL_BOUND) def unbind( self, instance_id: str, binding_id: str, details: UnbindDetails, async_allowed: bool, **kwargs ) -> UnbindSpec: instance = self.service_instances.get(instance_id, {}) if instance and instance.get("state") == self.BOUND: instance["state"] = self.CREATED return UnbindSpec(False) def deprovision( self, instance_id: str, details: DeprovisionDetails, async_allowed: bool, **kwargs ) -> DeprovisionServiceSpec: instance = self.service_instances.get(instance_id) if instance is None: raise errors.ErrInstanceDoesNotExist() if instance.get("state") == self.CREATED: del self.service_instances[instance_id] return DeprovisionServiceSpec(False)
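The setUp above starts the broker server in a daemon thread and then sleeps a fixed two seconds. A less flaky alternative, sketched here with only the standard library, is to poll the port until it accepts connections; the host and port mirror the values used in the test and the helper name is hypothetical.

import socket
import time


def wait_for_port(host, port, timeout=10.0):
    # Poll until a TCP server is accepting connections, instead of a fixed sleep.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=0.5):
                return True
        except OSError:
            time.sleep(0.1)
    raise TimeoutError("server on %s:%d never came up" % (host, port))


# e.g. in setUp(), after self.server.start():
# wait_for_port("127.0.0.1", 5002)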
web_service_4.py
import cv2 from PIL import Image import argparse from pathlib import Path from multiprocessing import Process, Pipe,Value,Array import torch from config import get_config from mtcnn import MTCNN from Learner import face_learner from utils import load_facebank, draw_box_name, prepare_facebank ################################################################################### from flask import Flask, request, jsonify import datetime import time import base64 import numpy as np import pickle ############################################################## from threading import Thread class arcface_NN(): def __init__(self, mtcnn_ins, learner_ins): # starting NN initialize captures with None self.cap_01 = None # cv captures self.cap_02 = None self.cap_01_grab = None # cv grabs self.cap_02_grab = None self.cap_01_buffer = None self.cap_02_buffer = None self.mtcnn = mtcnn_ins self.learner = learner_ins self.grabs_loop_activated = False # flag var for loop def set_capturess(self, cap_01_address, cap_02_address): # TO DO # self.cap_01 = cv2.VideoCapture(cap_01_address) self.cap_02 = cv2.VideoCapture(cap_02_address) print("captures", self.cap_01.isOpened(), self.cap_02.isOpened()) # # # # # def grabs_loop(self): while self.grabs_loop_activated: self.cap_01_grab = self.cap_01.grab() self.cap_02_grab = self.cap_02.grab() def grabs_loop_start(self): self.grab_loop_activated = True self.grab_thread = Thread(target=self.grabs_loop, name="grabs_loop_thread") self.grab_thread.start() print("grab thread started") def grabs_loop_stop(self): self.grab_loop_activated = False self.grab_thread.join() print("grab thread joined") self.cap_01.release() self.cap_02.release() print("captures relised") def get_current_frame_nn_results(self): try: ret_01, self.cap_01_buffer = self.cap_01.retrieve(self.cap_01_grab) ret_02, self.cap_02_buffer = self.cap_02.retrieve(self.cap_02_grab) print("retrives done ", ret_01, ret_02) print(self.cap_01_buffer.shape) print(self.cap_02_buffer.shape) cv2.imshow("cap_01", self.cap_01_buffer) cv2.imshow("cap_02", self.cap_02_buffer) cam_01_ans = self.image_to_nn_data(self.cap_01_buffer) cam_02_ans = self.image_to_nn_data(self.cap_02_buffer) return {"chanel_01":cam_01_ans, "chanel_02":cam_02_ans} except: return {"status":"something goes wrong"} def image_to_nn_data(self, cv2_image): try: print("into image to NN") img = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB) im_pil = Image.fromarray(img) print("PIL done") bboxes, faces = self.mtcnn.align_multi(im_pil, conf.face_limit, conf.min_face_size) #print(bboxes, faces) bboxes = bboxes[:,:-1] #shape:[10,4],only keep 10 highest possibiity faces bboxes = bboxes.astype(int) bboxes = bboxes + [-1,-1,1,1] # personal choice print(bboxes) # start_time = time.time() start_time = time.time() results = self.learner.infer_embs(conf, faces, args.tta) results_np = results.cpu().detach().numpy() print("infer_time", time.time()-start_time) return {"status":"done", "coords":bboxes, "embs":results_np} except: print('detect error') return {"status":"detect error"} ############################################################## # # app = Flask(__name__) @app.route('/') def hello_world(): return "these aren't the droids you're looking for" @app.route('/start', methods=['POST']) def start(): if request.method == 'POST': arcface_ins.set_capturess(0, "http://77.243.103.105:8081/mjpg/video.mjpg") time.sleep(5) arcface_ins.grabs_loop_start() return jsonify({"message":"arcface initialized"}) @app.route('/get', methods=['GET']) def get_frame_data(): if request.method == 'GET': 
ret = arcface_ins.get_current_frame_nn_results() return jsonify(ret) if __name__ == '__main__': parser = argparse.ArgumentParser(description='for face verification') parser.add_argument('-th','--threshold',help='threshold to decide identical faces',default=1.00, type=float) parser.add_argument("-u", "--update", help="whether perform update the facebank",action="store_true") parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true") parser.add_argument("-c", "--score", help="whether show the confidence score",action="store_true") parser.add_argument("-p", "--port", help="port", default=5000, type=int) args = parser.parse_args() conf = get_config(False) mtcnn = MTCNN() print('mtcnn loaded') learner = face_learner(conf, True) learner.threshold = args.threshold if conf.device.type == 'cpu': learner.load_state(conf, 'cpu_final.pth', True, True) print("work on CPU") else: learner.load_state(conf, 'final.pth', True, True) print("work on GPU") learner.model.eval() print('learner loaded') arcface_ins = arcface_NN(mtcnn, learner) # if args.update: # targets, names = prepare_facebank(conf, learner.model, mtcnn, tta = args.tta) # print('facebank updated') # else: # targets, names = load_facebank(conf) # print('facebank loaded') # cap = cv2.VideoCapture(0) # cap.set(3,640) # cap.set(4,480) app.run(host="0.0.0.0", port=args.port)
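A dependency-free sketch of the grab-loop start/stop pattern used by arcface_NN, driven by a single threading.Event so that start, loop, and stop all agree on one flag; the grab function here is a stand-in for the cv2 capture grabs, and the class name is hypothetical.

import threading
import time


class GrabLoop:
    def __init__(self, grab_fn):
        self._grab_fn = grab_fn           # called repeatedly; e.g. cap.grab()
        self._running = threading.Event()
        self._thread = None
        self.last_grab = None

    def start(self):
        self._running.set()
        self._thread = threading.Thread(target=self._loop, name="grabs_loop_thread")
        self._thread.start()

    def _loop(self):
        while self._running.is_set():
            self.last_grab = self._grab_fn()

    def stop(self):
        self._running.clear()
        self._thread.join()


if __name__ == "__main__":
    loop = GrabLoop(time.time)   # stand-in "grab": just record the current time
    loop.start()
    time.sleep(0.1)
    loop.stop()
    print("last grab:", loop.last_grab)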
system.test.py
# Copyright 2019 Canonical, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from rate_publishers import RatePublishers, TimeoutManager import os import unittest import sys import launch from launch.actions.execute_process import ExecuteProcess import launch_ros.actions import launch_testing import time import threading from rclpy.executors import MultiThreadedExecutor import rclpy from std_msgs.msg import Bool from geometry_msgs.msg import Twist sys.path.append(os.path.abspath(os.path.dirname(os.path.realpath(__file__)))) def generate_test_description(): # Necessary to get real-time stdout from python processes: proc_env = os.environ.copy() proc_env['PYTHONUNBUFFERED'] = '1' dir_path = os.path.dirname(os.path.realpath(__file__)) parameters_file = os.path.join( dir_path, 'system_config.yaml' ) twist_mux = launch_ros.actions.Node( package='twist_mux', executable='twist_mux', parameters=[parameters_file], env=proc_env) publisher = ExecuteProcess( cmd=['ros2 topic pub /lock_1 std_msgs/Bool "data: False" -r 20'], shell=True, env=proc_env ) # system_blackbox = launch_ros.actions.Node( # package='twist_mux', node_executable='system_blackbox.py', env=proc_env) return launch.LaunchDescription([ twist_mux, publisher, # system_blackbox, # Start tests right away - no need to wait for anything launch_testing.actions.ReadyToTest(), ]) def twist(x=0.0, r=0.0): """Return a Twist for the given linear and rotation speed.""" t = Twist() t.linear.x = x t.angular.z = r return t class TestTwistMux(unittest.TestCase): # Maximum time (in seconds) that it may take for a message # to be received by the target node. MESSAGE_TIMEOUT = 0.3 # Value (in seconds) >= the highest topic/lock timeout. 
TOPIC_TIMEOUT = 1.0 @classmethod def setUpClass(cls): cls.context = rclpy.Context() rclpy.init(context=cls.context) cls.node = rclpy.create_node( 'node', namespace='ns', context=cls.context) # Aim at emulating a 'wait_for_msg' cls._subscription = cls.node.create_subscription( Twist, 'cmd_vel_out', cls._cb, 1) cls._msg = None cls.executor = MultiThreadedExecutor( context=cls.context, num_threads=2) cls.executor.add_node(cls.node) cls._publishers = RatePublishers(cls.context) cls._vel1 = cls._publishers.add_topic('vel_1', Twist) cls._vel2 = cls._publishers.add_topic('vel_2', Twist) cls._vel3 = cls._publishers.add_topic('vel_3', Twist) cls._lock1 = cls._publishers.add_topic('lock_1', Bool) cls._lock2 = cls._publishers.add_topic('lock_2', Bool) cls.executor.add_node(cls._vel1) cls.executor.add_node(cls._vel2) cls.executor.add_node(cls._vel3) cls.executor.add_node(cls._lock1) cls.executor.add_node(cls._lock2) cls._timeout_manager = TimeoutManager() cls._timeout_manager.add(cls._publishers) cls._timeout_manager.spin_thread() cls.exec_thread = threading.Thread(target=cls.executor.spin) cls.exec_thread.start() def _cb(self, msg): self._msg = msg def _wait(self, timeout): start = self.node.get_clock().now() self._msg = None while (timeout > ((self.node.get_clock().now() - start).nanoseconds / 1e9)): if self._msg is not None: return self._msg time.sleep(0.01) return self._msg def tearDown(self): # Reset all topics. twist_msg = twist(0.0, 0.0) unlock = Bool() unlock.data = False self._vel1.pub(twist_msg) self._vel2.pub(twist_msg) self._vel3.pub(twist_msg) self._lock1.pub(unlock) self._lock2.pub(unlock) # Wait for previously published messages to time out, # since we aren't restarting twist_mux. # # This sleeping time must be higher than any of the # timeouts in system_test_config.yaml. # # TODO(artivis) use rate once available time.sleep(self.MESSAGE_TIMEOUT + self.TOPIC_TIMEOUT) self.node.destroy_node() rclpy.shutdown(context=self.context) @classmethod def _vel_cmd(cls): # TODO(artivis) use rate once available time.sleep(cls.MESSAGE_TIMEOUT) # TODO wait_for_msg-like functionnality not yet available # https://github.com/ros2/rclcpp/issues/520 return cls._wait(cls, cls.MESSAGE_TIMEOUT) def test_empty(self): try: self._vel_cmd() self.fail('twist_mux should not be publishing without any input') except Exception: e = sys.exc_info()[0] print(e) pass def test_basic(self): t = twist(2.0) self._vel1.pub(t, rate=5) self.assertEqual(t, self._vel_cmd()) # def test_basic_with_priorities(self): # t1 = twist(2.0) # t2 = twist(0.0, 1.0) # # Publish twist from input1 @ 3Hz, it should be used. # self._vel1.pub(t1, rate=5) # self.assertEqual(t1, self._vel_cmd()) # # Publish twist from input3, it should have priority # # over the one from input1. # # self._vel3.pub(t2, rate=10) # self.assertEqual(t2, self._vel_cmd()) # # Stop publishing input 3 and wait for it to timeout. # # Speed should fall back to input 1. # # self._vel3.stop() # time.sleep(0.5) # input is 0.3 in .yaml file # self.assertEqual(t1, self._vel_cmd()) @launch_testing.post_shutdown_test() class TestProcessOutput(unittest.TestCase): def test_exit_code(self): # Check that all processes in the launch exit with code 0 launch_testing.asserts.assertExitCodes(self.proc_info)
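The test class above emulates a 'wait_for_msg' by storing the last callback value and polling it. A stripped-down, ROS-free sketch of that idea with a threading.Event; the class name is hypothetical and the Timer merely simulates a subscription callback firing later.

import threading


class MessageWaiter:
    # Small helper emulating a 'wait_for_msg': a subscription callback stores
    # the message and wakes up whoever is blocked in wait().
    def __init__(self):
        self._event = threading.Event()
        self._msg = None

    def callback(self, msg):
        self._msg = msg
        self._event.set()

    def wait(self, timeout):
        self._event.clear()          # discard any stale message before waiting
        if self._event.wait(timeout):
            return self._msg
        return None


if __name__ == "__main__":
    w = MessageWaiter()
    threading.Timer(0.05, w.callback, args=("hello",)).start()
    print(w.wait(1.0))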
cathread_tests.py
"""This script tests using EPICS CA and Python threads together Based on code from Friedrich Schotte, NIH modified by Matt Newville 19-Apr-2010 modified MN, 22-April-2011 (1 year later!) to support new context-switching modes """ import time import epics import sys from threading import Thread from epics.ca import CAThread, withInitialContext from pvnames import updating_pvlist write = sys.stdout.write flush = sys.stdout.flush epics.ca.PREEMPTIVE_CALLBACK=True def wait_for_changes(pvnames, runtime, runname): """basic test procedure called by other tests """ def onChanges(pvname=None, value=None, char_value=None, **kw): write(' %s= %s (%s)\n' % (pvname, char_value, runname)) flush() t0 = time.time() pvs = [] for pvn in pvnames: p = epics.PV(pvn) p.get() p.add_callback(onChanges) pvs.append(p) while time.time()-t0 < runtime: try: time.sleep(0.01) except: sys.exit() for p in pvs: p.clear_callbacks() def test_initcontext(pvnames, runtime, run_name): write(' -> force inital ca context: thread=%s will run for %.3f sec\n' % (run_name, runtime)) epics.ca.use_initial_context() wait_for_changes(pvnames, runtime, run_name) write( 'Done with Thread %s\n' % ( run_name)) @withInitialContext def test_decorator(pvnames, runtime, run_name): write(' -> use withInitialContext decorator: thread=%s will run for %.3f sec\n' % (run_name, runtime)) wait_for_changes(pvnames, runtime, run_name) write( 'Done with Thread %s\n' % ( run_name)) def test_CAThread(pvnames, runtime, run_name): write(' -> used with CAThread: thread=%s will run for %.3f sec\n' % (run_name, runtime)) wait_for_changes(pvnames, runtime, run_name) write( 'Done with Thread %s\n' % ( run_name)) def run_threads(threadlist): for th in threadlist: th.start() time.sleep(0.01) for th in threadlist: th.join() time.sleep(0.01) # MAIN write("Connecting to PVs\n") pvs_b = [] names_b = [] for pvname in updating_pvlist: ###pvs_b.append(epics.PV(pvname)) # pvs_b.append(pvname) names_b.append(pvname) names_a = names_b[1:] pvs_a = pvs_b[1:] epics.ca.create_context() styles = ('decorator', 'init', 'cathread') style = styles[2] if style == 'init': write( 'Test use plain threading.Thread, force use of initial CA Context \n') th1 = Thread(target=test_initcontext, args=(names_a, 2, 'A')) th2 = Thread(target=test_initcontext, args=(names_b, 3, 'B')) run_threads((th1, th2)) elif style == 'decorator': write( 'Test use plain threading.Thread, withInitialContext decorator\n') th1 = Thread(target=test_decorator, args=(names_a, 3, 'A')) th2 = Thread(target=test_decorator, args=(names_b, 5, 'B')) run_threads((th1, th2)) elif style == 'cathread': write( 'Test use CAThread\n') th1 = CAThread(target=test_CAThread, args=(names_a, 3, 'A')) th2 = CAThread(target=test_CAThread, args=(names_b, 5, 'B')) run_threads((th1, th2)) write('Test Done\n---------------------\n')
main.py
import os import threading import pandas as pd from datetime import datetime from pm4py.algo.analysis.woflan import algorithm as woflan from pm4py.algo.evaluation.generalization import evaluator as generalization_evaluator from pm4py.algo.evaluation.precision import evaluator as precision_evaluator from pm4py.algo.evaluation.replay_fitness import evaluator as replay_fitness_evaluator from pm4py.algo.evaluation.simplicity import evaluator as simplicity_evaluator from pm4py.objects.log.importer.xes import importer as xes_importer from pm4py.objects.petri.importer import importer as pnml_importer log_path = "resource/log/pre-processed" petrinet_path = "resource/model" results_path = "resource/results" columns = ["name", "fitness", "precision", "f-score", "generalization", "simplicity", "time (sec)"] def import_log(log_name): return xes_importer.apply(os.path.join(log_path, log_name)) def import_petrinet(petrinet_name): return pnml_importer.apply(os.path.join(petrinet_path, petrinet_name)) def check_petrinet_approach_already_analyzed(process_name, petrinet_approach_name): dataframe_name = os.path.join(results_path, f"{process_name}.csv") create_csv_if_not_exists(dataframe_name) df = pd.read_csv(dataframe_name) if (df['name'] == petrinet_approach_name).any(): raise Exception(f"{petrinet_approach_name} in {process_name} already analyzed") def check_sound(petrinet_name, net, im, fm): is_sound = woflan.apply(net, im, fm, parameters={woflan.Parameters.RETURN_ASAP_WHEN_NOT_SOUND: True, woflan.Parameters.PRINT_DIAGNOSTICS: False, woflan.Parameters.RETURN_DIAGNOSTICS: False}) if not is_sound: raise Exception(f"{petrinet_name} is not sound") def calculate_fscore(fitness, precision): return 2 * (fitness * precision) / (fitness + precision) def calculate_metrics(petrinet_approach_name, log, net, im, fm): start_time = datetime.now() fitness = replay_fitness_evaluator.apply(log, net, im, fm, variant=replay_fitness_evaluator.Variants.ALIGNMENT_BASED)[ 'averageFitness'] precision = precision_evaluator.apply(log, net, im, fm, variant=precision_evaluator.Variants.ALIGN_ETCONFORMANCE) generalization = generalization_evaluator.apply(log, net, im, fm) simplicity = simplicity_evaluator.apply(net) fscore = calculate_fscore(fitness, precision) time = "{:.3f}".format((datetime.now() - start_time).total_seconds()) results = {"name": petrinet_approach_name, "fitness": fitness, "precision": precision, "f-score": fscore, "generalization": generalization, "simplicity": simplicity, "time (sec)": time} return results def create_csv_if_not_exists(dataframe_name): if not os.path.isfile(dataframe_name): df = pd.DataFrame(columns=columns) df.to_csv(dataframe_name, index=False) def save_results(process_name, results): dataframe_name = os.path.join(results_path, f"{process_name}.csv") create_csv_if_not_exists(dataframe_name) df = pd.read_csv(dataframe_name) df = df.append(results, ignore_index=True) df.to_csv(dataframe_name, columns=columns, index=False) def analyze_petrinet_approach(process_name, log, petrinet_approach_name): try: check_petrinet_approach_already_analyzed(process_name, petrinet_approach_name) petrinet_name = os.path.join(petrinet_approach_name, f"{process_name}.pnml") net, im, fm = import_petrinet(petrinet_name) check_sound(petrinet_name, net, im, fm) print(f"Start calculate metrics for approach {petrinet_approach_name} and process {process_name}") results = calculate_metrics(petrinet_approach_name, log, net, im, fm) save_results(process_name, results) except Exception as e: print(str(e)) def 
handle_log_analysis(process_name, log): for petrinet_approach_name in sorted(os.listdir(petrinet_path)): threading.Thread(target=analyze_petrinet_approach, args=(process_name, log, petrinet_approach_name)).start() def make_analysis(): for log_name in sorted(os.listdir(log_path)): print(f"Start log analysis {log_name}") process_name = log_name.split(os.extsep)[0] log = import_log(log_name) handle_log_analysis(process_name, log) make_analysis() ######### # Análise ######### # Fitness está dando igual # Precision (e f-score) está diferente # Está considerando mais elementos como unsound # Time é o tempo de execução dos testes com threads (não o tempo de descoberta) # Falta Sound em todos ETM
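handle_log_analysis starts one thread per approach with no upper bound and never joins them. A sketch of the same fan-out with a bounded pool that waits for completion; analyze stands in for analyze_petrinet_approach and the function name is hypothetical.

from concurrent.futures import ThreadPoolExecutor, as_completed


def handle_log_analysis_bounded(process_name, log, approach_names, analyze, max_workers=4):
    # Run at most `max_workers` analyses at a time and wait for all of them,
    # so the caller only moves on once a log is fully processed.
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = {pool.submit(analyze, process_name, log, name): name
                   for name in sorted(approach_names)}
        for fut in as_completed(futures):
            fut.result()  # re-raise any exception from the worker


if __name__ == "__main__":
    handle_log_analysis_bounded("demo", object(), ["alpha", "inductive"],
                                lambda p, l, n: print(p, n))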
restarter.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Reload python flask server by function / API endpoint
# References:
# https://docs.python.org/3/library/multiprocessing.html
# http://stackoverflow.com/questions/27723287/reload-python-flask-server-by-function

import os
import sys
import time
import subprocess

from flask import Flask, request, jsonify
from multiprocessing import Process, Queue

some_queue = None

app = Flask(__name__)
# CORS(app)


@app.after_request
def after_request(response):
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers', 'Access-Control-Allow-Headers, Origin, Accept, X-Requested-With, Content-Type, Access-Control-Request-Method, Access-Control-Request-Headers')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
    return response


@app.route('/')
def routes():
    return jsonify(items={'error': 'YOU SHOULD NOT BE HERE!'})


@app.route('/restart')
def restart():
    q = Queue()
    # p = Process(target=start_flaskapp, args=[q,])
    # p.start()
    # p.terminate()  # 'p' is only created in the commented-out lines above, so calling it live raises NameError
    # terminate flaskapp and then restart the app on subprocess
    args = [sys.executable] + [sys.argv[0]]
    subprocess.call(args)
    try:
        some_queue.put("something")
        print("Restarted successfully")
        return "Quit"
    except Exception:
        print("Failed in restart")
        return "Failed"


def start_flaskapp(queue):
    global some_queue
    some_queue = queue
    app.run()
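The queue that start_flaskapp stores only does something if a supervising process is watching it, as in the Stack Overflow pattern referenced above. A Flask-free sketch of that supervisor loop; worker stands in for start_flaskapp and both names are hypothetical.

import time
from multiprocessing import Process, Queue


def worker(queue):
    # Stand-in for start_flaskapp(queue): do some work, then ask to be restarted.
    time.sleep(0.2)
    queue.put("restart me")


def supervise(restarts=2):
    for generation in range(restarts):
        q = Queue()
        p = Process(target=worker, args=(q,))
        p.start()
        q.get()          # block until the worker asks for a restart
        p.terminate()
        p.join()
        print("restarted worker, generation", generation + 1)


if __name__ == "__main__":
    supervise()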
exportservice.py
#!/usr/bin/env python2 '''A library and a command line tool to interact with the LOCKSS daemon export service via its Web Services API.''' __copyright__ = '''\ Copyright (c) 2000-2019 Board of Trustees of Leland Stanford Jr. University, all rights reserved. ''' __license__ = '''\ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Except as contained in this notice, the name of Stanford University shall not be used in advertising or otherwise to promote the sale, use or other dealings in this Software without prior written authorization from Stanford University. ''' __version__ = '0.1' import getpass import itertools from multiprocessing.dummy import Pool as ThreadPool import optparse import os.path import sys from threading import Thread try: import ZSI except ImportError: sys.exit('The Python ZSI module must be installed (or on the PYTHONPATH)') import ExportServiceImplService_client from wsutil import datems, datetimems, durationms, zsiauth # # Library # def create_export_files(host, auth, auid): '''Performs a createExportFiles operation on the given host for the given AUID, and returns a record with the files. Parameters: - host (string): a host:port pair - auth (ZSI authentication object): an authentication object - auid (string): an AUID ''' req = ExportServiceImplService_client.createExportFiles() req.Arg0 = req.new_arg0() req.Arg0.Auid = auid req.Arg0.Compress = True req.Arg0.ExcludeDirNodes = True #req.Arg0.FilePrefix = "SomePrefix" #req.Arg0.FileType = "ZIP" req.Arg0.MaxSize = 1000 req.Arg0.MaxVersions = -1 #req.Arg0.XlateFilenames = "None" try: ret = _ws_port(host, auth).createExportFiles(req) return ret.Return except ZSI.FaultException as e: if str(e).startswith('No Archival Unit with provided identifier'): return None raise def _ws_port(host, auth, tracefile=None): url = 'http://%s/ws/ExportService' % (host,) locator = ExportServiceImplService_client.ExportServiceImplServiceLocator() if tracefile is None: return locator.getExportServiceImplPort(url=url, auth=auth) else: return locator.getExportServiceImplPort(url=url, auth=auth, tracefile=tracefile) # # Command line tool # class _ExportServiceOptions(object): @staticmethod def make_parser(): usage = '%prog {--host=HOST|--hosts=HFILE}... 
[OPTIONS]' parser = optparse.OptionParser(version=__version__, description=__doc__, usage=usage) # Hosts group = optparse.OptionGroup(parser, 'Target hosts') group.add_option('--host', action='append', default=list(), help='add host:port pair to list of target hosts') group.add_option('--hosts', action='append', default=list(), metavar='HFILE', help='add host:port pairs in HFILE to list of target hosts') group.add_option('--password', metavar='PASS', help='UI password (default: interactive prompt)') group.add_option('--username', metavar='USER', help='UI username (default: interactive prompt)') parser.add_option_group(group) # AUIDs group = optparse.OptionGroup(parser, 'Target AUIDs') group.add_option('--auid', action='append', default=list(), help='add AUID to list of target AUIDs') group.add_option('--auids', action='append', default=list(), metavar='AFILE', help='add AUIDs in AFILE to list of target AUIDs') parser.add_option_group(group) # AUID operations group = optparse.OptionGroup(parser, 'AU operations') group.add_option('--create-export-files', action='store_true', help='output export files of target AUIDs') parser.add_option_group(group) # Other options group = optparse.OptionGroup(parser, 'Other options') group.add_option('--group-by-field', action='store_true', help='group results by field instead of host') group.add_option('--no-special-output', action='store_true', help='no special output format for a single target host') group.add_option('--select', metavar='FIELDS', help='comma-separated list of fields for narrower output') group.add_option('--threads', type='int', help='max parallel jobs allowed (default: no limit)') group.add_option('--where', help='optional WHERE clause for query operations') parser.add_option_group(group) return parser def __init__(self, parser, opts, args): super(_ExportServiceOptions, self).__init__() if len(args) > 0: parser.error('extraneous arguments: %s' % (' '.join(args))) if len(filter(None, [opts.create_export_files])) != 1: parser.error('exactly one of --create-export-files is required') if len(opts.auid) + len(opts.auids) > 0 and not any([opts.create_export_files]): parser.error('--auid, --auids can only be applied to --create-export-files') # hosts self.hosts = opts.host[:] for f in opts.hosts: self.hosts.extend(_file_lines(f)) if len(self.hosts) == 0: parser.error('at least one target host is required') # auids self.auids = opts.auid[:] for f in opts.auids: self.auids.extend(_file_lines(f)) # create_export_files self.create_export_files = opts.create_export_files if self.create_export_files: if len(self.auids) == 0: parser.error('at least one target AUID is required with --create-export-files') self.select = ''#self.__init_select(parser, opts, _AU_STATUS) # threads self.threads = opts.threads or len(self.hosts) # auth u = opts.username or getpass.getpass('UI username: ') p = opts.password or getpass.getpass('UI password: ') self.auth = zsiauth(u, p) # Last modified 2018-03-19 for unicode support and boolean False when boolean is None def _output_record(options, lst): print '\t'.join([x.encode('utf-8') if type(x) is unicode else str(x or False) if type(x)==type(True) else str(x or '') for x in lst]) # Last modified 2015-08-05 def _output_table(options, data, rowheaders, lstcolkeys): colkeys = [x for x in itertools.product(*lstcolkeys)] for j in xrange(len(lstcolkeys)): if j < len(lstcolkeys) - 1: rowpart = [''] * len(rowheaders) else: rowpart = rowheaders _output_record(options, rowpart + [x[j] for x in colkeys]) for rowkey in 
sorted(set([k[0] for k in data])): _output_record(options, list(rowkey) + [data.get((rowkey, colkey)) for colkey in colkeys]) # Last modified 2015-08-31 def _file_lines(fstr): with open(os.path.expanduser(fstr)) as f: ret = filter(lambda y: len(y) > 0, [x.partition('#')[0].strip() for x in f]) if len(ret) == 0: sys.exit('Error: %s contains no meaningful lines' % (fstr,)) return ret _AU_STATUS = { } def _do_create_export_files(options): headlamb = [_AU_STATUS[x] for x in options.select] data = dict() for host, auid, result in ThreadPool(options.threads).imap_unordered( \ lambda _tup: (_tup[1], _tup[0], create_export_files(_tup[1], options.auth, _tup[0])), \ itertools.product(options.auids, options.hosts)): if result is not None: for head, lamb in headlamb: if options.group_by_field: colkey = (head, host) else: colkey = (host, head) data[((auid,), colkey)] = lamb(result) _output_table(options, data, ['AUID'], [[x[0] for x in headlamb], sorted(options.hosts)] if options.group_by_field else [sorted(options.hosts), [x[0] for x in headlamb]]) def _dispatch(options): if options.create_export_files: _do_create_export_files(options) else: raise RuntimeError, 'Unreachable' def _main(): '''Main method.''' # Parse command line parser = _ExportServiceOptions.make_parser() (opts, args) = parser.parse_args() options = _ExportServiceOptions(parser, opts, args) # Dispatch t = Thread(target=_dispatch, args=(options,)) t.daemon = True t.start() while True: t.join(1.5) if not t.is_alive(): break if __name__ == '__main__': _main()
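_do_create_export_files fans one job per (auid, host) pair out over a multiprocessing.dummy ThreadPool and consumes results as they finish. The same shape, reduced to a runnable sketch with a stand-in for create_export_files; the names are hypothetical.

import itertools
from multiprocessing.dummy import Pool as ThreadPool


def call_export(auid, host):
    # Stand-in for create_export_files(host, auth, auid).
    return "%s@%s" % (auid, host)


def fan_out(auids, hosts, threads=4):
    # One job per (auid, host) pair, results consumed as they complete.
    with ThreadPool(threads) as pool:
        jobs = itertools.product(auids, hosts)
        for result in pool.imap_unordered(lambda pair: call_export(*pair), jobs):
            print(result)


if __name__ == "__main__":
    fan_out(["auid1", "auid2"], ["host-a:8081", "host-b:8081"])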
redis_used_memory.py
import redis from multiprocessing import Process import psutil import time import matplotlib.pyplot as plt import csv write_process=[] DICT={} CPU_PERCENT_LIST=[] MEMORY_PERCENT_LIST=[] def draw_picture(colunm): plt.plot(range(total_users),colunm,label="memory_percent",linewidth=3,color='r') #plt.plot(range(total_users),MEMORY_PERCENT_LIST,label="memory_percent",linewidth=1,color='b') plt.ylabel("redis-server memory percent") plt.title("users insert hash data into redis") plt.legend() plt.show() def read_csv(): with open("redis_data.csv","rb") as file: reader=csv.reader(file) memory=[float(row[0]) for row in reader] print len(memory) draw_picture(memory) def cal_cpu_memory_percent(name,proce_id): process=psutil.Process(proce_id) memory=process.memory_percent() cpu=process.cpu_percent() #print "%0.7f,%0.7f" %(memory,cpu) with open("redis_data.csv","ab+") as file: writer=csv.writer(file) writer.writerow([memory]) #global CPU_PERCENT_LIST #global MEMORY_PERCENT_LIST #print "%s cpu percent is %.2f %%" % (name, #CPU_PERCENT_LIST.append(process.cpu_percent()) #print MEMORY_PERCENT_LIST #MEMORY_PERCENT_LIST.append(process.memory_percent()) #print "%s memory percent is %.2f %%" % (name, def produce_data(length_of_key_value): for i in range(length_of_key_value): DICT[str(i)]='a' def writing_data(virtualID): #global redis_server_process_id r=redis.StrictRedis(host='127.0.0.1',port=6379) pipe=r.pipeline() #r.hmset(virtualID,DICT) for i in range(5): pipe.set(str(virtualID)+str(i),virtualID) pipe.execute() cal_cpu_memory_percent("child process %d" %virtualID,redis_server_process_id) #pipe.hset(virtualID,) def write_redis(process_count): for i in range(process_count): p=Process(target=writing_data,args=(i,)) p.start() write_process.append(p) for write in write_process: write.join() total_users=300 #input("please input total users count:") redis_server_process_id=2496 if __name__ == '__main__': produce_data(100) start_time=time.time() #cal_cpu_memory_percent("start_redis_server",redis_server_process_id) write_redis(total_users) end_time=time.time() print "total time is %f" %(end_time-start_time) read_csv() #print len(CPU_PERCENT_LIST) #print len(MEMORY_PERCENT_LIST) #draw_picture()
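The measuring side of the script above (psutil reading memory_percent of a target process and appending rows to a CSV) can be isolated into a small sampler. A sketch assuming psutil is installed; the function name, file name, and use of the script's own PID are illustrative only.

import csv
import os
import time

import psutil  # third-party; pip install psutil


def sample_memory(pid, path, samples=5, interval=0.5):
    # Append one memory_percent reading per row so the CSV can be plotted later.
    proc = psutil.Process(pid)
    with open(path, "a", newline="") as f:
        writer = csv.writer(f)
        for _ in range(samples):
            writer.writerow([proc.memory_percent()])
            time.sleep(interval)


if __name__ == "__main__":
    sample_memory(os.getpid(), "memory_samples.csv", samples=3)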
helpers.py
from django.conf import settings from mapproxy.seed.seeder import seed from mapproxy.seed.config import SeedingConfiguration, SeedConfigurationError, ConfigurationError from mapproxy.seed.spec import validate_seed_conf from mapproxy.config.loader import ProxyConfiguration from mapproxy.config.spec import validate_mapproxy_conf from mapproxy.seed import seeder from mapproxy.seed import util from datetime import datetime import base64 import yaml import os import errno import time from dateutil import parser import multiprocessing import psutil def generate_confs(tileset, ignore_warnings=True, renderd=False): """ Takes a Tileset object and returns mapproxy and seed config files """ mapproxy_conf_json = """ { "services":{ "wms":{ "on_source_errors":"raise", "image_formats": ["image/png"] } }, "layers":[ { "name":"", "title":"", "sources":[ "tileset_cache" ] } ], "caches":{ "tileset_cache":{ "grids":[ "webmercator" ], "sources":[ "tileset_source" ], "cache":{ "type":"mbtiles", "filename": "/provide/valid/path/to/file.mbtiles" } } }, "sources":{ "tileset_source":{ } }, "grids":{ "webmercator":{ "base":"GLOBAL_MERCATOR" } }, "globals": { "image": { "paletted": false } } } """ seed_conf_json = """ { "coverages": { "tileset_geom": { "bbox": [-77.47, 38.72, -76.72, 39.08], "datasource": "path/to/geom/file.xxx", "srs": "EPSG:4326" } }, "seeds": { "tileset_seed": { "refresh_before": { "minutes": 0 }, "caches": [ "tileset_cache" ], "levels": { "from": 0, "to": 2 }, "coverages": ["tileset_geom"] } } } """ seed_conf = yaml.safe_load(seed_conf_json) mapproxy_conf = yaml.safe_load(mapproxy_conf_json) print '---- mbtiles file to generate: {}'.format(get_tileset_filename(tileset.name)) mapproxy_conf['sources']['tileset_source']['type'] = u_to_str(tileset.server_service_type) if u_to_str(tileset.server_service_type) == 'wms': """ "req":{ "url":"http://admin:geoserver@192.168.99.100/geoserver/wms?", "layers":"geonode:ne_50m_admin_0_countries" }, """ mapproxy_conf['sources']['tileset_source']['req'] = {} mapproxy_conf['sources']['tileset_source']['req']['url'] = u_to_str(tileset.server_url) mapproxy_conf['sources']['tileset_source']['req']['layers'] = u_to_str(tileset.layer_name) elif u_to_str(tileset.server_service_type) == 'tile': """ "url": "http://a.tile.openstreetmap.org/%(z)s/%(x)s/%(y)s.png", """ mapproxy_conf['sources']['tileset_source']['url'] = u_to_str(tileset.server_url) mapproxy_conf['layers'][0]['name'] = u_to_str(tileset.layer_name) mapproxy_conf['layers'][0]['title'] = u_to_str(tileset.layer_name) mapproxy_conf['caches']['tileset_cache']['cache']['filename'] = get_tileset_filename(tileset.name, 'generating') if tileset.layer_zoom_start > tileset.layer_zoom_stop: raise ConfigurationError('invalid configuration - zoom start is greater than zoom stop') seed_conf['seeds']['tileset_seed']['levels']['from'] = tileset.layer_zoom_start seed_conf['seeds']['tileset_seed']['levels']['to'] = tileset.layer_zoom_stop # any specified refresh before for mbtiles will result in regeneration of the tile set seed_conf['seeds']['tileset_seed']['refresh_before']['minutes'] = 0 if tileset.geom: geom_type = 'other' if tileset.geom.startswith('{"'): geom_type = 'geojson' elif tileset.geom.lower().startswith('polygon') or tileset.geom.lower().startswith('multipolygon'): geom_type = 'txt' elif tileset.geom.startswith('['): geom_type = 'bbox' if geom_type in ['geojson', 'txt']: geom_dir = '{}/geoms'.format(get_tileset_dir()) if not os.path.exists(geom_dir): os.makedirs(geom_dir) # TODO: remove geom files when done or pair 
them up with the actual tileset files? geom_filename = '{}/geoms/{}.{}'.format(get_tileset_dir(), tileset.name, geom_type) with open(geom_filename, 'w+') as geom_file: geom_file.write(tileset.geom) seed_conf['coverages']['tileset_geom']['datasource'] = geom_filename seed_conf['coverages']['tileset_geom'].pop('bbox', None) elif geom_type is 'bbox': seed_conf['coverages']['tileset_geom']['bbox'] = yaml.safe_load(tileset.geom) seed_conf['coverages']['tileset_geom'].pop('datasource', None) else: # if not bbox or file, just set it as is to the datasource since mapproxy can handle other datastores # and they should work as is seed_conf['coverages']['tileset_geom']['datasource'] = yaml.safe_load(tileset.geom) print '---- tileset geom_type: {}, geom: {}'.format(geom_type, tileset.geom) else: # if a geom is not specified, remove the coverages key from tileset_seed seed_conf['seeds']['tileset_seed'].pop('coverages', None) seed_conf['coverages']['tileset_geom'].pop('datasource', None) seed_conf['coverages']['tileset_geom'].pop('bbox', None) print '--[ mapproxy_conf: ' print yaml.dump(mapproxy_conf) print '--[ seed_conf: ' print yaml.dump(seed_conf) if tileset.server_username and tileset.server_password: """ "http":{ "headers":{ "Authorization":"Basic YWRtaW46Z2Vvc2VydmVy" } } """ encoded = base64.b64encode('{}:{}'.format(tileset.server_username, tileset.server_password)) mapproxy_conf['sources']['tileset_source']['http'] = {} mapproxy_conf['sources']['tileset_source']['http']['headers'] = {} mapproxy_conf['sources']['tileset_source']['http']['headers']['Authorization'] = 'Basic {}'.format(encoded) errors, informal_only = validate_mapproxy_conf(mapproxy_conf) for error in errors: print error if not informal_only or (errors and not ignore_warnings): raise ConfigurationError('invalid configuration - {}'.format(', '.join(errors))) cf = ProxyConfiguration(mapproxy_conf, conf_base_dir=get_tileset_dir(), seed=seed, renderd=renderd) errors, informal_only = validate_seed_conf(seed_conf) for error in errors: print error if not informal_only: raise SeedConfigurationError('invalid seed configuration - {}'.format(', '.join(errors))) seed_cf = SeedingConfiguration(seed_conf, mapproxy_conf=cf) return cf, seed_cf """ example settings file TILEBUNDLER_CONFIG = { 'tileset_dir': '/var/lib/mbtiles' } """ def get_tileset_dir(): conf = getattr(settings, 'TILEBUNDLER_CONFIG', {}) return conf.get('tileset_dir', './') def get_tileset_filename(tileset_name, extension='mbtiles'): return '{}/{}.{}'.format(get_tileset_dir(), tileset_name, extension) def get_lock_filename(tileset_id): return '{}/generate_tileset_{}.lck'.format(get_tileset_dir(), tileset_id) def update_tileset_stats(tileset): tileset_filename = get_tileset_filename(tileset.name) if os.path.isfile(tileset_filename): stat = os.stat(tileset_filename) tileset.created_at = datetime.fromtimestamp(stat.st_ctime) tileset.filesize = stat.st_size tileset.save() def u_to_str(string): return string.encode('ascii', 'ignore') def is_int_str(v): v = str(v).strip() return v == '0' or (v if v.find('..') > -1 else v.lstrip('-+').rstrip('0').rstrip('.')).isdigit() def add_tileset_file_attribs(target_object, tileset, extension='mbtiles'): tileset_filename = get_tileset_filename(tileset.name, extension) if os.path.isfile(tileset_filename): stat = os.stat(tileset_filename) if stat: target_object['file_size'] = stat.st_size target_object['file_updated'] = datetime.fromtimestamp(stat.st_ctime) def get_status(tileset): res = { 'current': { 'status': 'unknown' }, 'pending': { 'status': 
'not in progress' } } # generate status for already existing tileset # if there is a .mbtiles file on disk, get the size and time last updated tileset_filename = get_tileset_filename(tileset.name) if os.path.isfile(tileset_filename): res['current']['status'] = 'ready' # get the size and time last updated for the tileset add_tileset_file_attribs(res['current'], tileset) else: res['current']['status'] = 'not generated' # get the size and time last updated for the 'pending' tileset add_tileset_file_attribs(res['pending'], tileset, 'generating') pid = get_pid_from_lock_file(tileset.id) if pid: process = get_is_process_running(pid) if process: # if tileset generation is in progress res['pending']['status'] = 'in progress' progress_log_filename = get_tileset_filename(tileset.name, 'progress_log') if os.path.isfile(progress_log_filename): with open(progress_log_filename, 'r') as f: lines = f.read().replace('\r', '\n') lines = lines.split('\n') # an actual progress step update which looks like: # "[15:11:11] 4 50.00% 0.00000, 672645.84891, 18432942.24503, 18831637.78456 (112 tiles) ETA: 2015-07-07-15:11:12"\n latest_step = None # a progress update on the current step which looks like: # "[15:11:16] 87.50% 0000 ETA: 2015-07-07-15:11:17'\r latest_progress = None if len(lines) > 0: for line in lines[::-1]: tokens = line.split() if len(tokens) > 2: if is_int_str(tokens[1]): latest_step = tokens break elif tokens[1].endswith('%'): if latest_progress is None: # keep going, don't break latest_progress = tokens continue if latest_step: # if we have a step %, up date the progress % if latest_progress: latest_step[2] = latest_progress[1] res['pending']['progress'] = latest_step[2][0:-1] res['pending']['current_zoom_level'] = latest_step[1] # get the eta but pass if date is cannot be parsed. try: iso_date = parser.parse(latest_step[len(latest_step) - 1]).isoformat() res['pending']['estimated_completion_time'] = iso_date except ValueError: pass else: res['pending']['status'] = 'in progress, but log not found' else: res['pending']['status'] = 'stopped' return res # when using uwsgi, several processes each with their own interpreter are often launched. This means that the typical # multiprocessing sync mechanisms such as Lock and Manager cannot be used. comments about issues to know about uwsgi: # http://uwsgi-docs.readthedocs.org/en/latest/ThingsToKnow.html note that enable-threads, close-on-exec, and # close-on-exec2 were not effective and even if they were, other deployments will need to match uwsgi setting which is # inconvenient especially since the problems caused can be misleading. The implementation here uses lock files to check # if a tileset is being generated and which process to kill when generate needs to be stopped by a user. Using celery # for multiprocessing poses another problem: since it generates its worker pool processes as proc.daemon = True, # each celery process cannot invoke the mapproxy.seed function which in turn wants to launch other processes. This # can be fixed in celery but it requires a patch. celery project reopened this 'bug' a few days ago as of 7-22-2015: # https://github.com/celery/celery/issues/1709 if it is fixed, we can switch to using celery without any immediate # gain for the current use case. Instead of using daemon processes, they should use another mechanism to track/kill # child processes so that each celery task can launch other processes. 
def seed_process_spawn(tileset): mapproxy_conf, seed_conf = generate_confs(tileset) # if there is an old _generating one around, back it up backup_millis = int(round(time.time() * 1000)) if os.path.isfile(get_tileset_filename(tileset.name, 'generating')): os.rename(get_tileset_filename(tileset.name, 'generating'), '{}_{}'.format(get_tileset_filename(tileset.name, 'generating'), backup_millis)) # if there is an old progress_log around, back it up if os.path.isfile(get_tileset_filename(tileset.name, 'progress_log')): os.rename(get_tileset_filename(tileset.name, 'progress_log'), '{}_{}'.format(get_tileset_filename(tileset.name, 'generating'), backup_millis)) # generate the new mbtiles as name.generating file progress_log_filename = get_tileset_filename(tileset.name, 'progress_log') out = open(progress_log_filename, 'w+') progress_logger = util.ProgressLog(out=out, verbose=True, silent=False) tasks = seed_conf.seeds(['tileset_seed']) # launch the task using another process process = multiprocessing.Process(target=seed_process_target, args=(tileset.id, tileset.name, tasks, progress_logger)) pid = None if 'preparing_to_start' == get_pid_from_lock_file(tileset.id): process.start() pid = process.pid else: print '---- Not starting process. cancel was requested. ' return pid def seed_process_target(tileset_id, tileset_name, tasks, progress_logger): print '----[ start seeding. tileset {}'.format(tileset_id) seeder.seed(tasks=tasks, progress_logger=progress_logger) # now that we have generated the new mbtiles file, backup the last one, then rename # the _generating one to the main name if os.path.isfile(get_tileset_filename(tileset_name)): millis = int(round(time.time() * 1000)) os.rename(get_tileset_filename(tileset_name), '{}_{}'.format(get_tileset_filename(tileset_name), millis)) os.rename(get_tileset_filename(tileset_name, 'generating'), get_tileset_filename(tileset_name)) remove_lock_file(tileset_id) def get_lock_file(tileset_id): flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY lock_file = None try: file_handle = os.open(get_lock_filename(tileset_id), flags) except OSError as e: if e.errno == errno.EEXIST: # Failed, file already exists. pass else: # Something unexpected went wrong so re-raise the exception. raise else: # No exception, so the file must have been created successfully. lock_file = os.fdopen(file_handle, 'w') lock_file.write('preparing_to_start\n') lock_file.flush() return lock_file def remove_lock_file(tileset_id): try: os.remove(get_lock_filename(tileset_id)) except OSError as e: # Error removing lock file print '--- There was a problem removing the lock file. Something: {}'.format(e.errno) pass def get_pid_from_lock_file(tileset_id): pid = None name = get_lock_filename(tileset_id) if os.path.isfile(name): with open(name, 'r') as lock_file: lines = lock_file.readlines() if len(lines) > 0: if lines[-1]: pid = lines[-1].rstrip() return pid def get_process_from_pid(pid): process = None if is_int_str(pid): try: process = psutil.Process(pid=int(pid)) except (psutil.NoSuchProcess, psutil.ZombieProcess): print ' -- PROCESS HAS BEEN TERMINATED {}'.format(int(pid)) pass return process def get_is_process_running(pid): process = None if is_int_str(pid): try: process = psutil.Process(pid=int(pid)) exitCode = process.wait(0) print ' -- waited for process.. exitCode: {}'.format(exitCode) except (psutil.NoSuchProcess, psutil.ZombieProcess, psutil.TimeoutExpired): pass return process
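The lock-file handshake used above (create the file exclusively, write a status or PID, read it back, remove it) is easy to isolate. A minimal sketch using a hypothetical path; the function names are illustrative, not the module's API.

import errno
import os

LOCK_PATH = "/tmp/generate_tileset_demo.lck"  # hypothetical path


def acquire_lock(pid):
    # O_CREAT | O_EXCL makes creation atomic: only one process gets the file.
    try:
        fd = os.open(LOCK_PATH, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except OSError as e:
        if e.errno == errno.EEXIST:
            return False  # someone else is already generating
        raise
    with os.fdopen(fd, "w") as f:
        f.write("%d\n" % pid)
    return True


def read_lock_pid():
    if not os.path.isfile(LOCK_PATH):
        return None
    with open(LOCK_PATH) as f:
        lines = f.readlines()
    return lines[-1].strip() if lines else None


def release_lock():
    try:
        os.remove(LOCK_PATH)
    except OSError:
        pass


if __name__ == "__main__":
    print("acquired:", acquire_lock(os.getpid()))
    print("owner pid:", read_lock_pid())
    release_lock()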
decorators.py
"""Used for async mail calls"""
from threading import Thread


def async(f):
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
    return wrapper
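Because async became a reserved keyword in Python 3.7, the decorator above cannot keep that name on current interpreters. A sketch of the same fire-and-forget pattern under a different, hypothetical name, with a dummy send_email target as the usage example.

from functools import wraps
from threading import Thread


def run_async(f):  # hypothetical name; `async` itself is a keyword in Python 3.7+
    @wraps(f)
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr  # returning the thread lets callers join() if they need to
    return wrapper


@run_async
def send_email(recipient):  # stand-in for a real mail call
    print("sending mail to", recipient)


if __name__ == "__main__":
    t = send_email("user@example.com")
    t.join()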
internal_api.py
import os
from flask import Flask, request, jsonify
from modules.geocode_module import reverse_geocode
from modules.db_module import district_db, location_db
from modules.clustering_algo import clustering
from ml_model.trash_detection import scan_and_call
from time import sleep
from multiprocessing import Process

ddb = district_db()
ldb = location_db()
app = Flask(__name__)


@app.route('/api/postData', methods=['POST'])
def write_to_db():
    data = request.get_json()
    coordinates = data['coordinates']
    address = reverse_geocode(coordinates)
    postcode = address.split(',')[-1].strip()
    coordinates = clustering(postcode, coordinates)
    params = {'coordinates': coordinates,
              'address': address,
              'image': data['image'],
              'last_seen': data['timestamp'],
              'amount': data['amount'],
              'status': data['status']}
    ddb.add_location(postcode, params)
    location_id = ddb.get_location_id(postcode, coordinates)
    ldb.add_sighting(location_id, {'time': data['timestamp'], 'amount': data['amount']})
    return 'OK', 200


@app.route('/api/getData', methods=['GET'])
def read_from_db():
    args = request.args
    coordinates = args['query']
    address = reverse_geocode(coordinates)
    postcode = address.split(',')[-1].strip()
    location_id = ddb.get_location_id(postcode, coordinates)
    return jsonify(ldb.get_location_data(location_id))


def process_1():
    app.run(host='localhost', port='4000', debug=False)


def process_2():
    while True:
        scan_and_call()
        sleep(120)


if __name__ == '__main__':
    p1 = Process(target=process_1)
    p2 = Process(target=process_2)
    p1.start()
    p2.start()
    p1.join()
    p2.join()
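The __main__ block above runs the web server in one process and the periodic scanner in another. The same two-process layout, reduced to a runnable skeleton with stand-in bodies so it needs no Flask or model code; all names here are illustrative.

import time
from multiprocessing import Process


def serve():
    # Stand-in for app.run(...): pretend to serve briefly.
    time.sleep(0.3)


def periodic_scan(interval=0.1, rounds=3):
    # Stand-in for the scan_and_call() / sleep(120) loop, bounded so it exits.
    for _ in range(rounds):
        print("scanning...")
        time.sleep(interval)


if __name__ == "__main__":
    p1 = Process(target=serve)
    p2 = Process(target=periodic_scan)
    p1.start()
    p2.start()
    p1.join()
    p2.join()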
model.py
from functools import wraps from threading import Thread import polka def threaded(func): @wraps(func) def async_func(*args, **kwargs): thread = Thread(target=func, args=args, kwargs=kwargs) thread.start() return thread return async_func class Model: """Access to the Polka API""" def __init__(self): self._books = [] self._lists = [] self._experts = [] self._podcasts = [] self._blogs = [] self.is_loaded = False @property def books(self): """Gets list of books. Each book is an instance of polka.Book class.""" if not self._books: self._books = polka.books() return self._books @property def lists(self): """Gets list of compilations. Each compilations is an instance of polka.Compilation class.""" if not self._lists: self._lists = polka.lists() return self._lists @property def experts(self): """Gets list of experts. Each expert is an instance of polka.Pundit class.""" if not self._experts: self._experts = polka.pundits() return self._experts def search(self, query): """Search for the `query`. Each expert is a tuple `(title, description, object)`.""" return polka.search(query) @property def podcasts(self): """Gets list of podcasts. Each podcast is an instance od polka.Podcast class.""" if not self._podcasts: self._podcasts = polka.podcasts() return self._podcasts @property def blogs(self): """Gets list of blog articles. Each article is an instance od polka.Blog class.""" if not self._blogs: self._blogs = polka.blogs() return self._blogs def is_book(self, obj): return isinstance(obj, polka.Book) def is_list(self, obj): return isinstance(obj, polka.Compilation) def is_expert(self, obj): return isinstance(obj, polka.Pundit) def is_podcast(self, obj): return isinstance(obj, polka.Podcast) def is_blog(self, obj): return isinstance(obj, polka.Blog) def book_has_article(self, book: polka.Book): return book.has_article @threaded def get_all(self): """Preload all data""" _ = self.books _ = self.lists _ = self.experts _ = self.podcasts _ = self.blogs self.is_loaded = True
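A compact usage sketch of the @threaded preloader pattern above, with a dummy model in place of the polka client; DummyModel and its sleep are stand-ins, not part of the original module.

import time
from functools import wraps
from threading import Thread


def threaded(func):
    @wraps(func)
    def async_func(*args, **kwargs):
        thread = Thread(target=func, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return async_func


class DummyModel:
    def __init__(self):
        self.is_loaded = False

    @threaded
    def get_all(self):
        time.sleep(0.1)  # stand-in for the slow polka.* calls
        self.is_loaded = True


if __name__ == "__main__":
    m = DummyModel()
    loader = m.get_all()     # returns immediately with the running Thread
    print("loaded yet?", m.is_loaded)
    loader.join()
    print("loaded now?", m.is_loaded)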
TCPServer.py
#!/bin/python2
import socket
import threading

bind_ip = "0.0.0.0"
bind_port = 9999

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)

print "[*] Listening on %s:%d" % (bind_ip, bind_port)

# client handling thread
def handle_client(client_socket):
    # print out things sent by client
    request = client_socket.recv(1024)
    print "[*] Received: %s" % request

    # send back a packet
    client_socket.send("ACK!")
    client_socket.close()

while True:
    client, addr = server.accept()
    print "[*] Accepted connection from: %s:%d" % (addr[0], addr[1])  # ip and port

    # spin up our client thread to handle incoming data
    client_handler = threading.Thread(target=handle_client, args=(client,))
    client_handler.start()
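# --- Illustrative companion client (not part of TCPServer.py) ---
# A matching client for the server above, kept in the same Python 2 style as
# the script it accompanies; the host, port and message are placeholders.
import socket

target_host = "127.0.0.1"
target_port = 9999

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((target_host, target_port))
client.send("Hello from client")
response = client.recv(4096)
print "[*] Server replied: %s" % response
client.close()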
asyn.py
import asyncio import asyncio.events import functools import inspect import os import re import sys import threading from contextlib import contextmanager from glob import has_magic from .callbacks import _DEFAULT_CALLBACK from .exceptions import FSTimeoutError from .spec import AbstractFileSystem from .utils import is_exception, other_paths private = re.compile("_[^_]") async def _runner(event, coro, result, timeout=None): timeout = timeout if timeout else None # convert 0 or 0.0 to None if timeout is not None: coro = asyncio.wait_for(coro, timeout=timeout) try: result[0] = await coro except Exception as ex: result[0] = ex finally: event.set() def sync(loop, func, *args, timeout=None, **kwargs): """ Make loop run coroutine until it returns. Runs in other thread """ timeout = timeout if timeout else None # convert 0 or 0.0 to None # NB: if the loop is not running *yet*, it is OK to submit work # and we will wait for it if loop is None or loop.is_closed(): raise RuntimeError("Loop is not running") try: loop0 = asyncio.events.get_running_loop() if loop0 is loop: raise NotImplementedError("Calling sync() from within a running loop") except RuntimeError: pass coro = func(*args, **kwargs) result = [None] event = threading.Event() asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop) while True: # this loops allows thread to get interrupted if event.wait(1): break if timeout is not None: timeout -= 1 if timeout < 0: raise FSTimeoutError return_result = result[0] if isinstance(return_result, asyncio.TimeoutError): # suppress asyncio.TimeoutError, raise FSTimeoutError raise FSTimeoutError from return_result elif isinstance(return_result, BaseException): raise return_result else: return return_result iothread = [None] # dedicated fsspec IO thread loop = [None] # global event loop for any non-async instance lock = threading.Lock() # for setting exactly one thread def sync_wrapper(func, obj=None): """Given a function, make so can be called in async or blocking contexts Leave obj=None if defining within a class. Pass the instance if attaching as an attribute of the instance. """ @functools.wraps(func) def wrapper(*args, **kwargs): self = obj or args[0] return sync(self.loop, func, *args, **kwargs) return wrapper @contextmanager def _selector_policy(): original_policy = asyncio.get_event_loop_policy() try: if ( sys.version_info >= (3, 8) and os.name == "nt" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy") ): asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) yield finally: asyncio.set_event_loop_policy(original_policy) def get_running_loop(): if hasattr(asyncio, "get_running_loop"): return asyncio.get_running_loop() else: loop = asyncio._get_running_loop() if loop is None: raise RuntimeError("no running event loop") else: return loop def get_loop(): """Create or return the default fsspec IO loop The loop will be running on a separate thread. """ if loop[0] is None: with lock: # repeat the check just in case the loop got filled between the # previous two calls from another thread if loop[0] is None: with _selector_policy(): loop[0] = asyncio.new_event_loop() th = threading.Thread(target=loop[0].run_forever, name="fsspecIO") th.daemon = True th.start() iothread[0] = th return loop[0] @contextmanager def fsspec_loop(): """Temporarily switch the current event loop to the fsspec's own loop, and then revert it back after the context gets terinated. 
""" try: original_loop = get_running_loop() except RuntimeError: original_loop = None fsspec_loop = get_loop() try: asyncio._set_running_loop(fsspec_loop) yield fsspec_loop finally: asyncio._set_running_loop(original_loop) try: import resource except ImportError: resource = None ResourceError = OSError else: ResourceEror = resource.error _DEFAULT_BATCH_SIZE = 128 _NOFILES_DEFAULT_BATCH_SIZE = 1280 def _get_batch_size(nofiles=False): from fsspec.config import conf if nofiles: if "nofiles_gather_batch_size" in conf: return conf["nofiles_gather_batch_size"] else: if "gather_batch_size" in conf: return conf["gather_batch_size"] if nofiles: return _NOFILES_DEFAULT_BATCH_SIZE if resource is None: return _DEFAULT_BATCH_SIZE try: soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE) except (ImportError, ValueError, ResourceError): return _DEFAULT_BATCH_SIZE if soft_limit == resource.RLIM_INFINITY: return -1 else: return soft_limit // 8 async def _run_coros_in_chunks( coros, batch_size=None, callback=_DEFAULT_CALLBACK, timeout=None, return_exceptions=False, nofiles=False, ): """Run the given coroutines in chunks. Parameters ---------- coros: list of coroutines to run batch_size: int or None Number of coroutines to submit/wait on simultaneously. If -1, then it will not be any throttling. If None, it will be inferred from _get_batch_size() callback: fsspec.callbacks.Callback instance Gets a relative_update when each coroutine completes timeout: number or None If given, each coroutine times out after this time. Note that, since there are multiple batches, the total run time of this function will in general be longer return_exceptions: bool Same meaning as in asyncio.gather nofiles: bool If inferring the batch_size, does this operation involve local files? If yes, you normally expect smaller batches. """ if batch_size is None: batch_size = _get_batch_size(nofiles=nofiles) if batch_size == -1: batch_size = len(coros) assert batch_size > 0 results = [] for start in range(0, len(coros), batch_size): chunk = [ asyncio.Task(asyncio.wait_for(c, timeout=timeout)) for c in coros[start : start + batch_size] ] if callback is not _DEFAULT_CALLBACK: [ t.add_done_callback(lambda *_, **__: callback.relative_update(1)) for t in chunk ] results.extend( await asyncio.gather(*chunk, return_exceptions=return_exceptions), ) return results # these methods should be implemented as async by any async-able backend async_methods = [ "_ls", "_cat_file", "_get_file", "_put_file", "_rm_file", "_cp_file", "_pipe_file", "_expand_path", "_info", "_isfile", "_isdir", "_exists", "_walk", "_glob", "_find", "_du", "_size", "_mkdir", "_makedirs", ] class AsyncFileSystem(AbstractFileSystem): """Async file operations, default implementations Passes bulk operations to asyncio.gather for concurrent operation. Implementations that have concurrent batch operations and/or async methods should inherit from this class instead of AbstractFileSystem. Docstrings are copied from the un-underscored method in AbstractFileSystem, if not given. """ # note that methods do not have docstring here; they will be copied # for _* methods and inferred for overridden methods. 
async_impl = True disable_throttling = False def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs): self.asynchronous = asynchronous self._pid = os.getpid() if not asynchronous: self._loop = loop or get_loop() else: self._loop = None self.batch_size = batch_size super().__init__(*args, **kwargs) @property def loop(self): if self._pid != os.getpid(): raise RuntimeError("This class is not fork-safe") return self._loop async def _rm_file(self, path, **kwargs): raise NotImplementedError async def _rm(self, path, recursive=False, batch_size=None, **kwargs): # TODO: implement on_error batch_size = batch_size or self.batch_size path = await self._expand_path(path, recursive=recursive) return await _run_coros_in_chunks( [self._rm_file(p, **kwargs) for p in path], batch_size=batch_size, nofiles=True, ) async def _cp_file(self, path1, path2, **kwargs): raise NotImplementedError async def _copy( self, path1, path2, recursive=False, on_error=None, maxdepth=None, batch_size=None, **kwargs, ): if on_error is None and recursive: on_error = "ignore" elif on_error is None: on_error = "raise" paths = await self._expand_path(path1, maxdepth=maxdepth, recursive=recursive) path2 = other_paths(paths, path2) batch_size = batch_size or self.batch_size coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths, path2)] result = await _run_coros_in_chunks( coros, batch_size=batch_size, return_exceptions=True, nofiles=True ) for ex in filter(is_exception, result): if on_error == "ignore" and isinstance(ex, FileNotFoundError): continue raise ex async def _pipe(self, path, value=None, batch_size=None, **kwargs): if isinstance(path, str): path = {path: value} batch_size = batch_size or self.batch_size return await _run_coros_in_chunks( [self._pipe_file(k, v, **kwargs) for k, v in path.items()], batch_size=batch_size, nofiles=True, ) async def _process_limits(self, url, start, end): """Helper for "Range"-based _cat_file""" size = None suff = False if start is not None and start < 0: # if start is negative and end None, end is the "suffix length" if end is None: end = -start start = "" suff = True else: size = size or (await self._info(url))["size"] start = size + start elif start is None: start = 0 if not suff: if end is not None and end < 0: if start is not None: size = size or (await self._info(url))["size"] end = size + end elif end is None: end = "" if isinstance(end, int): end -= 1 # bytes range is inclusive return "bytes=%s-%s" % (start, end) async def _cat_file(self, path, start=None, end=None, **kwargs): raise NotImplementedError async def _cat( self, path, recursive=False, on_error="raise", batch_size=None, **kwargs ): paths = await self._expand_path(path, recursive=recursive) coros = [self._cat_file(path, **kwargs) for path in paths] batch_size = batch_size or self.batch_size out = await _run_coros_in_chunks( coros, batch_size=batch_size, nofiles=True, return_exceptions=True ) if on_error == "raise": ex = next(filter(is_exception, out), False) if ex: raise ex if ( len(paths) > 1 or isinstance(path, list) or paths[0] != self._strip_protocol(path) ): return { k: v for k, v in zip(paths, out) if on_error != "omit" or not is_exception(v) } else: return out[0] async def _cat_ranges( self, paths, starts, ends, max_gap=None, batch_size=None, **kwargs ): # TODO: on_error if max_gap is not None: # use utils.merge_offset_ranges raise NotImplementedError if not isinstance(paths, list): raise TypeError if not isinstance(starts, list): starts = [starts] * len(paths) if not 
isinstance(ends, list): ends = [starts] * len(paths) if len(starts) != len(paths) or len(ends) != len(paths): raise ValueError coros = [ self._cat_file(p, start=s, end=e, **kwargs) for p, s, e in zip(paths, starts, ends) ] batch_size = batch_size or self.batch_size return await _run_coros_in_chunks(coros, batch_size=batch_size, nofiles=True) async def _put_file(self, lpath, rpath, **kwargs): raise NotImplementedError async def _put( self, lpath, rpath, recursive=False, callback=_DEFAULT_CALLBACK, batch_size=None, **kwargs, ): """Copy file(s) from local. Copies a specific file or tree of files (if recursive=True). If rpath ends with a "/", it will be assumed to be a directory, and target files will go within. The put_file method will be called concurrently on a batch of files. The batch_size option can configure the amount of futures that can be executed at the same time. If it is -1, then all the files will be uploaded concurrently. The default can be set for this instance by passing "batch_size" in the constructor, or for all instances by setting the "gather_batch_size" key in ``fsspec.config.conf``, falling back to 1/8th of the system limit . """ from .implementations.local import LocalFileSystem, make_path_posix rpath = self._strip_protocol(rpath) if isinstance(lpath, str): lpath = make_path_posix(lpath) fs = LocalFileSystem() lpaths = fs.expand_path(lpath, recursive=recursive) rpaths = other_paths( lpaths, rpath, exists=isinstance(rpath, str) and await self._isdir(rpath) ) is_dir = {l: os.path.isdir(l) for l in lpaths} rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]] file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]] await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs]) batch_size = batch_size or self.batch_size coros = [] callback.set_size(len(file_pairs)) for lfile, rfile in file_pairs: callback.branch(lfile, rfile, kwargs) coros.append(self._put_file(lfile, rfile, **kwargs)) return await _run_coros_in_chunks( coros, batch_size=batch_size, callback=callback ) async def _get_file(self, rpath, lpath, **kwargs): raise NotImplementedError async def _get( self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs ): """Copy file(s) to local. Copies a specific file or tree of files (if recursive=True). If lpath ends with a "/", it will be assumed to be a directory, and target files will go within. Can submit a list of paths, which may be glob-patterns and will be expanded. The get_file method will be called concurrently on a batch of files. The batch_size option can configure the amount of futures that can be executed at the same time. If it is -1, then all the files will be uploaded concurrently. The default can be set for this instance by passing "batch_size" in the constructor, or for all instances by setting the "gather_batch_size" key in ``fsspec.config.conf``, falling back to 1/8th of the system limit . 
""" from fsspec.implementations.local import make_path_posix rpath = self._strip_protocol(rpath) lpath = make_path_posix(lpath) rpaths = await self._expand_path(rpath, recursive=recursive) lpaths = other_paths(rpaths, lpath) [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths] batch_size = kwargs.pop("batch_size", self.batch_size) coros = [] callback.set_size(len(lpaths)) for lpath, rpath in zip(lpaths, rpaths): callback.branch(rpath, lpath, kwargs) coros.append(self._get_file(rpath, lpath, **kwargs)) return await _run_coros_in_chunks( coros, batch_size=batch_size, callback=callback ) async def _isfile(self, path): try: return (await self._info(path))["type"] == "file" except: # noqa: E722 return False async def _isdir(self, path): try: return (await self._info(path))["type"] == "directory" except IOError: return False async def _size(self, path): return (await self._info(path)).get("size", None) async def _sizes(self, paths, batch_size=None): batch_size = batch_size or self.batch_size return await _run_coros_in_chunks( [self._size(p) for p in paths], batch_size=batch_size ) async def _exists(self, path): try: await self._info(path) return True except FileNotFoundError: return False async def _info(self, path, **kwargs): raise NotImplementedError async def _ls(self, path, detail=True, **kwargs): raise NotImplementedError async def _walk(self, path, maxdepth=None, **kwargs): path = self._strip_protocol(path) full_dirs = {} dirs = {} files = {} detail = kwargs.pop("detail", False) try: listing = await self._ls(path, detail=True, **kwargs) except (FileNotFoundError, IOError): if detail: yield path, {}, {} else: yield path, [], [] return for info in listing: # each info name must be at least [path]/part , but here # we check also for names like [path]/part/ pathname = info["name"].rstrip("/") name = pathname.rsplit("/", 1)[-1] if info["type"] == "directory" and pathname != path: # do not include "self" path full_dirs[pathname] = info dirs[name] = info elif pathname == path: # file-like with same name as give path files[""] = info else: files[name] = info if detail: yield path, dirs, files else: yield path, list(dirs), list(files) if maxdepth is not None: maxdepth -= 1 if maxdepth < 1: return for d in full_dirs: async for _ in self._walk(d, maxdepth=maxdepth, detail=detail, **kwargs): yield _ async def _glob(self, path, **kwargs): import re ends = path.endswith("/") path = self._strip_protocol(path) indstar = path.find("*") if path.find("*") >= 0 else len(path) indques = path.find("?") if path.find("?") >= 0 else len(path) indbrace = path.find("[") if path.find("[") >= 0 else len(path) ind = min(indstar, indques, indbrace) detail = kwargs.pop("detail", False) if not has_magic(path): root = path depth = 1 if ends: path += "/*" elif await self._exists(path): if not detail: return [path] else: return {path: await self._info(path)} else: if not detail: return [] # glob of non-existent returns empty else: return {} elif "/" in path[:ind]: ind2 = path[:ind].rindex("/") root = path[: ind2 + 1] depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1 else: root = "" depth = None if "**" in path else path[ind + 1 :].count("/") + 1 allpaths = await self._find( root, maxdepth=depth, withdirs=True, detail=True, **kwargs ) # Escape characters special to python regex, leaving our supported # special characters in place. # See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html # for shell globbing details. 
pattern = ( "^" + ( path.replace("\\", r"\\") .replace(".", r"\.") .replace("+", r"\+") .replace("//", "/") .replace("(", r"\(") .replace(")", r"\)") .replace("|", r"\|") .replace("^", r"\^") .replace("$", r"\$") .replace("{", r"\{") .replace("}", r"\}") .rstrip("/") .replace("?", ".") ) + "$" ) pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern) pattern = re.sub("[*]", "[^/]*", pattern) pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*")) out = { p: allpaths[p] for p in sorted(allpaths) if pattern.match(p.replace("//", "/").rstrip("/")) } if detail: return out else: return list(out) async def _du(self, path, total=True, maxdepth=None, **kwargs): sizes = {} # async for? for f in await self._find(path, maxdepth=maxdepth, **kwargs): info = await self._info(f) sizes[info["name"]] = info["size"] if total: return sum(sizes.values()) else: return sizes async def _find(self, path, maxdepth=None, withdirs=False, **kwargs): path = self._strip_protocol(path) out = dict() detail = kwargs.pop("detail", False) # async for? async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs): if withdirs: files.update(dirs) out.update({info["name"]: info for name, info in files.items()}) if not out and (await self._isfile(path)): # walk works on directories, but find should also return [path] # when path happens to be a file out[path] = {} names = sorted(out) if not detail: return names else: return {name: out[name] for name in names} async def _expand_path(self, path, recursive=False, maxdepth=None): if isinstance(path, str): out = await self._expand_path([path], recursive, maxdepth) else: # reduce depth on each recursion level unless None or 0 maxdepth = maxdepth if not maxdepth else maxdepth - 1 out = set() path = [self._strip_protocol(p) for p in path] for p in path: # can gather here if has_magic(p): bit = set(await self._glob(p)) out |= bit if recursive: out |= set( await self._expand_path( list(bit), recursive=recursive, maxdepth=maxdepth ) ) continue elif recursive: rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True)) out |= rec if p not in out and (recursive is False or (await self._exists(p))): # should only check once, for the root out.add(p) if not out: raise FileNotFoundError(path) return list(sorted(out)) async def _mkdir(self, path, create_parents=True, **kwargs): pass # not necessary to implement, may not have directories async def _makedirs(self, path, exist_ok=False): pass # not necessary to implement, may not have directories def mirror_sync_methods(obj): """Populate sync and async methods for obj For each method will create a sync version if the name refers to an async method (coroutine) and there is no override in the child class; will create an async method for the corresponding sync method if there is no implementation. 
Uses the methods specified in - async_methods: the set that an implementation is expected to provide - default_async_methods: that can be derived from their sync version in AbstractFileSystem - AsyncFileSystem: async-specific default coroutines """ from fsspec import AbstractFileSystem for method in async_methods + dir(AsyncFileSystem): if not method.startswith("_"): continue smethod = method[1:] if private.match(method): isco = inspect.iscoroutinefunction(getattr(obj, method, None)) unsync = getattr(getattr(obj, smethod, False), "__func__", None) is_default = unsync is getattr(AbstractFileSystem, smethod, "") if isco and is_default: mth = sync_wrapper(getattr(obj, method), obj=obj) setattr(obj, smethod, mth) if not mth.__doc__: mth.__doc__ = getattr( getattr(AbstractFileSystem, smethod, None), "__doc__", "" ) class FSSpecCoroutineCancel(Exception): pass def _dump_running_tasks( printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False ): import traceback tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()] if printout: [task.print_stack() for task in tasks] out = [ { "locals": task._coro.cr_frame.f_locals, "file": task._coro.cr_frame.f_code.co_filename, "firstline": task._coro.cr_frame.f_code.co_firstlineno, "linelo": task._coro.cr_frame.f_lineno, "stack": traceback.format_stack(task._coro.cr_frame), "task": task if with_task else None, } for task in tasks ] if cancel: for t in tasks: cbs = t._callbacks t.cancel() asyncio.futures.Future.set_exception(t, exc) asyncio.futures.Future.cancel(t) [cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures try: t._coro.throw(exc) # exits coro, unless explicitly handled except exc: pass return out
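# --- Illustrative usage sketch (not part of asyn.py) ---
# How the sync()/get_loop() pair defined above is typically used to run a
# coroutine from blocking code on the dedicated fsspec IO thread. The coroutine
# here is a stand-in; only sync() and get_loop() come from the module above
# (importable as fsspec.asyn.sync / fsspec.asyn.get_loop).
import asyncio
from fsspec.asyn import get_loop, sync

async def fetch_lengths(paths):
    # stand-in work; a real caller would await AsyncFileSystem methods here
    await asyncio.sleep(0)
    return {p: len(p) for p in paths}

io_loop = get_loop()   # lazily starts the background "fsspecIO" thread
result = sync(io_loop, fetch_lengths, ["a.txt", "dir/b.txt"], timeout=10)
print(result)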
Ui.py
# -*- coding: utf-8 -*- import os import re from cefpython3 import cefpython as cef import base64 import platform import sys import threading import subprocess import queue import re import pickle from .Version import getFullVersion from .Client import Client as _C from .Parser import Parser as _P from . import Defs as _D from . import HTMLRes as _H APP_NAME = _D.APP_NAME _HTML = _H._HTML_code _DEBUG = _H._DEBUG TEAM = _H.TEAM VERSION = getFullVersion() KBINDS = _H.KBINDS _MESSAGE = _H._MESSAGE ico_pdf = _H.ico_pdf ico_failed = _H.ico_failed ico_splash = _H.ico_splash parser = _P() c = _C() #parser.fromTexttoXML('./a/Lin_2004_Rouge.txt') #parser.fromTexttoTXT('./a/Lin_2004_Rouge.txt') #parser.fromPDFtoXML('./a/Lin_2004_Rouge.pdf') #parser.fromPDFtoTXT('./a/Lin_2004_Rouge.pdf') wd = parser.getWD() outF = parser.getOutF() gl = c.ls(wd) files = [] FCNT = len(gl) def html_to_data_uri(html, js_callback=None): html = html.encode("utf-8", "replace") b64 = base64.b64encode(html).decode("utf-8", "replace") ret = "data:text/html;base64,{data}".format(data=b64) if js_callback: pass else: return ret def js_print(browser, lang, event, msg): browser.ExecuteFunction("js_print", lang, event, msg) def file_name(g): f = ''.join(g.split('/')[-1:]) if('_' in f): _f = f.split('_') f0 = _f[0] if len(_f)>0 else '' try: m_year = re.match(r'.*([1-3][0-9]{3})', f) f1 = m_year.group(1) except: f1 = '?' f2 = _f[2] if len(_f)>2 else '' else: if('-' in f): _f = f.split('-') f0 = _f[0] if len(_f)>0 else '' f1 = _f[1] if len(_f)>1 else '' f2 = _f[2] if len(_f)>2 else '' else: f0 = '?' try: m_year = re.match(r'.*([1-2][0-9]{3})', f) f1 = m_year.group(1) except: f1 = '?' f2 = '' return [f, f0, f1, f2] def _make_html(s=""): s = _HTML[0] s += _HTML[1].format(FCNT) s += _HTML[2] if len(gl)<1: s += _HTML[3].format(ico_splash, _MESSAGE[2]) else: for g in gl: g_f = file_name(g) s += _HTML[4].format(g, g_f[0], g_f[1], g_f[2], g_f[3]) s += _HTML[5].format(wd, outF) s += _HTML[6] s += _HTML[7].format(wd, outF) for i in KBINDS: s += _HTML[8].format(i, KBINDS[i]) DO_TAGS = parser.conf.getDoTags() XML_TAGS = parser.conf.getXMLTags() TXT_TAGS = parser.conf.getTXTTags() s += _HTML[9] for tag in DO_TAGS: if tag in XML_TAGS and DO_TAGS[tag]: s += _HTML[10].format(XML_TAGS[tag]) s += _HTML[11] for tag in DO_TAGS: if tag in TXT_TAGS and DO_TAGS[tag]: if(tag != '_HEADER'): s += _HTML[12].format(TXT_TAGS[tag], tag[1:].lower()) else: s += _HTML[13].format(TXT_TAGS[tag].strip()) s += _HTML[14] s += _HTML[15].format(VERSION,'.'.join(['{}'.format(i) for i in sys.version_info]), cef.GetVersion()['chrome_version'], pickle.format_version, cef.GetVersion()['cef_version']) for i in TEAM: s += _HTML[16].format(i) s += _HTML[17] return s def check_versions(): ver = cef.GetVersion() def main(): print("main") check_versions() parser.loadConfig() wd = parser.getWD() if(not _DEBUG): sys.excepthook = cef.ExceptHook settings = { "context_menu": { "enabled": _DEBUG, "navigation": False, "print": False, "view_source": False, "external_browser": False, "devtools": True, }, # "product_version": "MyProduct/10.00", # "user_agent": "MyAgent/20.00 MyProduct/10.00", } cef.Initialize(settings=settings) browser = cef.CreateBrowserSync(url=html_to_data_uri(_make_html()), window_title=APP_NAME) set_client_handlers(browser) set_javascript_bindings(browser) cef.MessageLoop() cef.Shutdown() def set_javascript_bindings(browser): external = External(browser) bindings = cef.JavascriptBindings( bindToFrames=False, bindToPopups=False) bindings.SetProperty("python_property", "This 
property was set in Python") bindings.SetProperty("cefpython_version", cef.GetVersion()) bindings.SetFunction("html_to_data_uri", html_to_data_uri) bindings.SetFunction("lol", lol) bindings.SetFunction("add_file", add_file) bindings.SetFunction("switch_file", switch_file) bindings.SetFunction("remove_file", remove_file) bindings.SetFunction("parse", _parse) bindings.SetFunction("setWD", setWD) bindings.SetFunction("setOut", setOut) bindings.SetFunction("set", set) bindings.SetFunction("refresh", _refresh) bindings.SetObject("external", external) browser.SetJavascriptBindings(bindings) def add_file(f): if f not in files: files.append(f) def switch_file(f): if f not in files: files.append(f) else: files.remove(f) def remove_file(f): if f in files: files.remove(f) def clear_files(): files.clear() def setWD(w=False): if w != False: files.clear() r = parser.setWD(w) if r:wd = w return r else: return False def setOut(outf=False): if outf != False: return parser.setOut(outf) else: return False def set(js_callback=None, w='', outf=''): #if js_callback: # js_print(browser, "Python", "set", "{} {}".format(wd, outf)) if w != '': if not parser.setWD(w): if js_callback: browser = js_callback.GetFrame().GetBrowser() js_print(browser, "Python", "set", "Invalid path : {}".format(w)) else: if js_callback: _refresh(js_callback) if outf != '': if not parser.setOut(outf): if js_callback: browser = js_callback.GetFrame().GetBrowser() js_print(browser, "Python", "set", "Out folder invalid ({}).".format(outf)) def _refresh(js_callback=None): wd = parser.getWD() gl = c.ls(wd) FCNT = len(gl) clear_files() parser.saveConfig() #parser.loadConfig() if js_callback: browser = js_callback.GetFrame().GetBrowser() file_count(browser, FCNT) html = '<li class="fhead"><div class="tb-e tb1"><span>Convert</span></div><div class="tb-e"><span>Author</span></div><div class="tb-e"><span>Year</span></div><div class="tb-e tb-g"><span>File</span></div></li>' fls_set(browser, html) if FCNT<1: html = """ <div class="splash">{}<span>{}</span></div> """.format(ico_splash, _MESSAGE[2]) fls_add(browser, html) else: for g in gl: g_f = file_name(g) #TODO: PARSE if g in files: html = """<li class="file animated"><div class="tb-e tb1"><input class="checkb" type="checkbox" name="pdfs" value="{0}" onclick="addFile('{0}')" checked></div> <div class="tb-e"><span>{2}</span></div> <div class="tb-e"><span>{3}</span></div> <div class="tb-e tb-g"><span>{1}</span></div> </li>""".format(g, g_f[0], g_f[1], g_f[2], g_f[3]) else: html = """<li class="file animated"><div class="tb-e tb1"><input class="checkb" type="checkbox" name="pdfs" value="{0}" onclick="addFile('{0}')"></div> <div class="tb-e"><span>{2}</span></div> <div class="tb-e"><span>{3}</span></div> <div class="tb-e tb-g"><span>{1}</span></div> </li>""".format(g, g_f[0], g_f[1], g_f[2], g_f[3]) fls_add(browser, html) def _parse(js_callback=None, xml=True): outF = "" if js_callback: html = '<li class="fhead"><div class="tb-e tb1"><span>Status</span></div><div class="tb-e"><span>Name</span></div></li>' browser = js_callback.GetFrame().GetBrowser() fls_set(browser, html) for g in files: q = queue.Queue() try: if xml: t = threading.Thread(target=parser.fromTexttoXML, args=['{}'.format(g), q]) else: t = threading.Thread(target=parser.fromTexttoTXT, args=['{}'.format(g), q]) t.start() outF = q.get() if(outF == ''): html = '<li class="file animated"><div class="tb-e tb1">{}</div><div class="tb-e"><span>{}</span></div></li>'.format(ico_failed, ''.join(outF.split('/')[-1:])) else: html = '<li class="file 
animated"><div class="tb-e tb1">{}</div><div class="tb-e"><span>{}</span></div></li>'.format(ico_pdf, ''.join(outF.split('/')[-1:])) # js_print(js_callback.GetFrame().GetBrowser(), # "Parser", "file_load", # "> {}".format(g)) except: html = '<li class="file animated"><div class="tb-e tb1">{}</div><div class="tb-e"><span>{}</span></div></li>'.format(ico_failed, ''.join(outF.split('/')[-1:])) args = [browser, html] threading.Timer(0.5, fls_add, args).start() def lol(str, js_callback=None): #subprocess.Popen("gnome-terminal") print(str) def fls_add(browser, html): browser.ExecuteFunction("flsAdd", html); def fls_set(browser, html): browser.ExecuteFunction("flsSet", html); def file_count(browser, val): browser.ExecuteFunction("setCount", val); def js_print(browser, lang, event, msg): browser.ExecuteFunction("js_print", lang, event, msg) def set_client_handlers(browser): # client_handlers = [LoadHandler(), DisplayHandler()] # for handler in client_handlers: # browser.SetClientHandler(handler) pass class LoadHandler(object): def OnLoadingStateChange(self, browser, is_loading, **_): """Called when the loading state has changed.""" if not is_loading: # Loading is complete. DOM is ready. # js_print(browser, "Python", "OnLoadingStateChange", # "Loading is complete") pass class External(object): def __init__(self, browser): self.browser = browser def test_multiple_callbacks(self, js_callback): """Test both javascript and python callbacks.""" # js_print(self.browser, "Python", "test_multiple_callbacks", # "Called from Javascript. Will call Javascript callback now.") pass def py_callback(msg_from_js): js_print(self.browser, "Python", "py_callback", msg_from_js) js_callback.Call("String sent from Python", py_callback)
GrabImage.py
# -- coding: utf-8 -- import sys import threading import msvcrt from ctypes import * sys.path.append("../MvImport") from MvCameraControl_class import * g_bExit = False # 为线程定义一个函数 def work_thread(cam=0, pData=0, nDataSize=0): stFrameInfo = MV_FRAME_OUT_INFO_EX() memset(byref(stFrameInfo), 0, sizeof(stFrameInfo)) while True: ret = cam.MV_CC_GetOneFrameTimeout(pData, nDataSize, stFrameInfo, 1000) if ret == 0: print ("get one frame: Width[%d], Height[%d], nFrameNum[%d]" % (stFrameInfo.nWidth, stFrameInfo.nHeight, stFrameInfo.nFrameNum)) else: print ("no data[0x%x]" % ret) if g_bExit == True: break if __name__ == "__main__": deviceList = MV_CC_DEVICE_INFO_LIST() tlayerType = MV_GIGE_DEVICE | MV_USB_DEVICE # ch:枚举设备 | en:Enum device ret = MvCamera.MV_CC_EnumDevices(tlayerType, deviceList) if ret != 0: print ("enum devices fail! ret[0x%x]" % ret) sys.exit() if deviceList.nDeviceNum == 0: print ("find no device!") sys.exit() print ("Find %d devices!" % deviceList.nDeviceNum) for i in range(0, deviceList.nDeviceNum): mvcc_dev_info = cast(deviceList.pDeviceInfo[i], POINTER(MV_CC_DEVICE_INFO)).contents if mvcc_dev_info.nTLayerType == MV_GIGE_DEVICE: print ("\ngige device: [%d]" % i) strModeName = "" for per in mvcc_dev_info.SpecialInfo.stGigEInfo.chModelName: strModeName = strModeName + chr(per) print ("device model name: %s" % strModeName) nip1 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0xff000000) >> 24) nip2 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x00ff0000) >> 16) nip3 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x0000ff00) >> 8) nip4 = (mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x000000ff) print ("current ip: %d.%d.%d.%d\n" % (nip1, nip2, nip3, nip4)) elif mvcc_dev_info.nTLayerType == MV_USB_DEVICE: print ("\nu3v device: [%d]" % i) strModeName = "" for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chModelName: if per == 0: break strModeName = strModeName + chr(per) print ("device model name: %s" % strModeName) strSerialNumber = "" for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chSerialNumber: if per == 0: break strSerialNumber = strSerialNumber + chr(per) print ("user serial number: %s" % strSerialNumber) nConnectionNum = input("please input the number of the device to connect:") if int(nConnectionNum) >= deviceList.nDeviceNum: print ("intput error!") sys.exit() # ch:创建相机实例 | en:Creat Camera Object cam = MvCamera() # ch:选择设备并创建句柄 | en:Select device and create handle stDeviceList = cast(deviceList.pDeviceInfo[int(nConnectionNum)], POINTER(MV_CC_DEVICE_INFO)).contents ret = cam.MV_CC_CreateHandle(stDeviceList) if ret != 0: print ("create handle fail! ret[0x%x]" % ret) sys.exit() # ch:打开设备 | en:Open device ret = cam.MV_CC_OpenDevice(MV_ACCESS_Exclusive, 0) if ret != 0: print ("open device fail! ret[0x%x]" % ret) sys.exit() # ch:探测网络最佳包大小(只对GigE相机有效) | en:Detection network optimal package size(It only works for the GigE camera) if stDeviceList.nTLayerType == MV_GIGE_DEVICE: nPacketSize = cam.MV_CC_GetOptimalPacketSize() if int(nPacketSize) > 0: ret = cam.MV_CC_SetIntValue("GevSCPSPacketSize",nPacketSize) if ret != 0: print ("Warning: Set Packet Size fail! ret[0x%x]" % ret) else: print ("Warning: Get Packet Size fail! ret[0x%x]" % nPacketSize) # ch:设置触发模式为off | en:Set trigger mode as off ret = cam.MV_CC_SetEnumValue("TriggerMode", MV_TRIGGER_MODE_OFF) if ret != 0: print ("set trigger mode fail! 
ret[0x%x]" % ret) sys.exit() # ch:获取数据包大小 | en:Get payload size stParam = MVCC_INTVALUE() memset(byref(stParam), 0, sizeof(MVCC_INTVALUE)) ret = cam.MV_CC_GetIntValue("PayloadSize", stParam) if ret != 0: print ("get payload size fail! ret[0x%x]" % ret) sys.exit() nPayloadSize = stParam.nCurValue # ch:开始取流 | en:Start grab image ret = cam.MV_CC_StartGrabbing() if ret != 0: print ("start grabbing fail! ret[0x%x]" % ret) sys.exit() data_buf = (c_ubyte * nPayloadSize)() try: hThreadHandle = threading.Thread(target=work_thread, args=(cam, byref(data_buf), nPayloadSize)) hThreadHandle.start() except: print ("error: unable to start thread") print ("press a key to stop grabbing.") msvcrt.getch() g_bExit = True hThreadHandle.join() # ch:停止取流 | en:Stop grab image ret = cam.MV_CC_StopGrabbing() if ret != 0: print ("stop grabbing fail! ret[0x%x]" % ret) del data_buf sys.exit() # ch:关闭设备 | Close device ret = cam.MV_CC_CloseDevice() if ret != 0: print ("close deivce fail! ret[0x%x]" % ret) del data_buf sys.exit() # ch:销毁句柄 | Destroy handle ret = cam.MV_CC_DestroyHandle() if ret != 0: print ("destroy handle fail! ret[0x%x]" % ret) del data_buf sys.exit() del data_buf
run-spec-test.py
#!/usr/bin/env python3 # Author: Volodymyr Shymanskyy # Usage: # ./run-spec-test.py # ./run-spec-test.py ./core/i32.json # ./run-spec-test.py ./core/float_exprs.json --line 2070 # ./run-spec-test.py ./proposals/tail-call/*.json # ./run-spec-test.py --exec ../build-custom/wasm3 # ./run-spec-test.py --engine "wasmer run" --exec ../build-wasi/wasm3.wasm # ./run-spec-test.py --engine "wasmer run --backend=llvm" --exec ../build-wasi/wasm3.wasm # # TODO # - Get more tests from: https://github.com/microsoft/ChakraCore/tree/master/test/WasmSpec # - Fix "Empty Stack" check # - Check Canonical NaN and Arithmetic NaN separately # - Fix imports.wast import argparse import os, sys, glob, time import subprocess import json import re import struct import math import pathlib scriptDir = os.path.dirname(os.path.abspath(sys.argv[0])) sys.path.append(os.path.join(scriptDir, '..', 'extra')) from testutils import * from pprint import pprint # # Args handling # parser = argparse.ArgumentParser() parser.add_argument("--exec", metavar="<interpreter>", default="../build/wasm3") parser.add_argument("--engine", metavar="<engine>") parser.add_argument("--timeout", type=int, default=30) parser.add_argument("--line", metavar="<source line>", type=int) parser.add_argument("--all", action="store_true") parser.add_argument("--show-logs", action="store_true") parser.add_argument("--format", choices=["raw", "hex", "fp"], default="fp") parser.add_argument("-v", "--verbose", action="store_true") parser.add_argument("-s", "--silent", action="store_true") parser.add_argument("file", nargs='*') args = parser.parse_args() if args.line: args.show_logs = True # # Utilities # log = open("spec-test.log","w+") log.write("======================\n") def warning(msg): log.write("Warning: " + msg + "\n") log.flush() if args.verbose: print(f"{ansi.WARNING}Warning:{ansi.ENDC} {msg}") def fatal(msg): log.write("Fatal: " + msg + "\n") log.flush() print(f"{ansi.FAIL}Fatal:{ansi.ENDC} {msg}") sys.exit(1) def binaryToFloat(num, t): if t == "f32": return struct.unpack('!f', struct.pack('!L', int(num)))[0] elif t == "f64": return struct.unpack('!d', struct.pack('!Q', int(num)))[0] else: fatal(f"Unknown type '{t}'") def escape(s): c = ord(s) if c < 128 and s.isprintable() and not s in " \n\r\t\\": return s if c <= 0xff: return r'\x{0:02x}'.format(c) elif c <= 0xffff: return r'\u{0:04x}'.format(c) else: return r'\U{0:08x}'.format(c) def escape_str(s): if s == "": return r'\x00' return ''.join(escape(c) for c in s) # # Value format options # def formatValueRaw(num, t): return str(num) def formatValueHex(num, t): if t == "f32" or t == "i32": return "{0:#0{1}x}".format(int(num), 8+2) elif t == "f64" or t == "i64": return "{0:#0{1}x}".format(int(num), 16+2) else: return str(num) def formatValueFloat(num, t): if t == "f32": s = 6 elif t == "f64": s = 10 else: return str(num) result = "{0:.{1}f}".format(binaryToFloat(num, t), s).rstrip('0') if result.endswith('.'): result = result + '0' if len(result) > s*2: result = "{0:.{1}e}".format(binaryToFloat(num, t), s) return result formaters = { 'raw': formatValueRaw, 'hex': formatValueHex, 'fp': formatValueFloat, } formatValue = formaters[args.format] if args.format == "fp": print("When using fp display format, values are compared loosely (some tests may produce false positives)") # # Spec tests preparation # if not (os.path.isdir("./core") and os.path.isdir("./proposals")): from io import BytesIO from zipfile import ZipFile from urllib.request import urlopen officialSpec = 
"https://github.com/wasm3/wasm-core-testsuite/archive/master.zip" print(f"Downloading {officialSpec}") resp = urlopen(officialSpec) with ZipFile(BytesIO(resp.read())) as zipFile: for zipInfo in zipFile.infolist(): if re.match(r".*-master/.*/.*(\.wasm|\.json)", zipInfo.filename): parts = pathlib.Path(zipInfo.filename).parts newpath = str(pathlib.Path(*parts[1:-1])) newfn = str(pathlib.Path(*parts[-1:])) ensure_path(newpath) newpath = newpath + "/" + newfn zipInfo.filename = newpath zipFile.extract(zipInfo) # # Wasm3 REPL # from subprocess import Popen, STDOUT, PIPE from threading import Thread from queue import Queue, Empty import shlex def get_engine_cmd(engine, exe): if engine: cmd = shlex.split(engine) if "wasirun" in engine or "wasm3" in engine: return cmd + [exe, "--repl"] elif "wasmer" in engine: return cmd + ["--dir=.", exe, "--", "--repl"] elif "wasmtime" in engine: return cmd + ["--dir=.", exe, "--", "--repl"] elif "iwasm" in engine: return cmd + ["--dir=.", exe, "--repl"] elif "wavm" in engine: return cmd + ["--mount-root", ".", exe, "--repl"] # TODO, fix path else: fatal(f"Don't know how to run engine {engine}") else: if exe.endswith(".wasm"): fatal(f"Need engine to execute wasm") return shlex.split(exe) + ["--repl"] class Wasm3(): def __init__(self, exe, engine=None): self.exe = exe self.engine = engine self.p = None self.loaded = None self.timeout = args.timeout self.autorestart = True self.run() def run(self): if self.p: self.terminate() cmd = get_engine_cmd(self.engine, self.exe) #print(f"wasm3: Starting {' '.join(cmd)}") self.q = Queue() self.p = Popen(cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=STDOUT) def _read_output(out, queue): for data in iter(lambda: out.read(1024), b''): queue.put(data) queue.put(None) self.t = Thread(target=_read_output, args=(self.p.stdout, self.q)) self.t.daemon = True self.t.start() try: self._read_until("wasm3> ") except Exception as e: print(f"wasm3: Could not start: {e}") def restart(self): print(f"wasm3: Restarting") for i in range(10): try: self.run() try: if self.loaded: self.load(self.loaded) except Exception as e: pass break except Exception as e: print(f"wasm3: {e} => retry") time.sleep(0.1) def init(self): return self._run_cmd(f":init\n") def version(self): return self._run_cmd(f":version\n") def load(self, fn): self.loaded = None res = self._run_cmd(f":load {fn}\n") self.loaded = fn return res def invoke(self, cmd): return self._run_cmd(" ".join(map(str, cmd)) + "\n") def _run_cmd(self, cmd): if self.autorestart and not self._is_running(): self.restart() self._flush_input() #print(f"wasm3: {cmd.strip()}") self._write(cmd) return self._read_until("wasm3> ") def _read_until(self, token): buff = "" tout = time.time() + self.timeout error = None while time.time() < tout: try: data = self.q.get(timeout=0.1) if data == None: error = "Crashed" break buff = buff + data.decode("utf-8") idx = buff.rfind(token) if idx >= 0: return buff[0:idx].strip() except Empty: pass else: error = "Timeout" self.terminate() raise Exception(error) def _write(self, data): self.p.stdin.write(data.encode("utf-8")) self.p.stdin.flush() def _is_running(self): return self.p and (self.p.poll() == None) def _flush_input(self): while not self.q.empty(): self.q.get() def terminate(self): self.p.stdin.close() self.p.terminate() self.p.wait(timeout=1.0) self.p = None # # Actual test # wasm3 = Wasm3(args.exec, args.engine) print("Version: " + wasm3.version()) blacklist = Blacklist([ "float_exprs.wast:* f32.nonarithmetic_nan_bitpattern*", "imports.wast:*", "names.wast:630 
*", # name that starts with '\0' ]) stats = dotdict(total_run=0, skipped=0, failed=0, crashed=0, timeout=0, success=0, missing=0) # Convert some trap names from the original spec trapmap = { "unreachable": "unreachable executed" } def runInvoke(test): test.cmd = [test.action.field] displayArgs = [] for arg in test.action.args: test.cmd.append(arg['value']) displayArgs.append(formatValue(arg['value'], arg['type'])) test_id = f"{test.source} {test.wasm} {test.cmd[0]}({', '.join(test.cmd[1:])})" if test_id in blacklist and not args.all: warning(f"Skipped {test_id} (blacklisted)") stats.skipped += 1 return if args.verbose: print(f"Running {test_id}") stats.total_run += 1 output = "" actual = None actual_val = None force_fail = False try: output = wasm3.invoke(test.cmd) except Exception as e: actual = f"<{e}>" force_fail = True # Parse the actual output if not actual: result = re.findall(r'Result: (.*?)$', "\n" + output + "\n", re.MULTILINE) if len(result) > 0: actual = "result " + result[-1] actual_val = result[0] if not actual: result = re.findall(r'Error: \[trap\] (.*?) \(', "\n" + output + "\n", re.MULTILINE) if len(result) > 0: actual = "trap " + result[-1] if not actual: result = re.findall(r'Error: (.*?)$', "\n" + output + "\n", re.MULTILINE) if len(result) > 0: actual = "error " + result[-1] if not actual: actual = "<No Result>" force_fail = True if actual == "error no operation ()": actual = "<Not Implemented>" stats.missing += 1 force_fail = True elif actual == "<Crashed>": stats.crashed += 1 force_fail = True elif actual == "<Timeout>": stats.timeout += 1 force_fail = True # Prepare the expected result expect = None if "expected" in test: if len(test.expected) == 0: expect = "result <Empty Stack>" elif len(test.expected) == 1: t = test.expected[0]['type'] value = str(test.expected[0]['value']) expect = "result " + value if actual_val != None: if (t == "f32" or t == "f64") and (value == "<Canonical NaN>" or value == "<Arithmetic NaN>"): val = binaryToFloat(actual_val, t) #warning(f"{actual_val} => {val}") if math.isnan(val): actual = "<Some NaN>" expect = "<Some NaN>" else: expect = "result " + formatValue(value, t) actual = "result " + formatValue(actual_val, t) else: warning(f"Test {test.source} specifies multiple results") expect = "result <Multiple>" elif "expected_trap" in test: if test.expected_trap in trapmap: test.expected_trap = trapmap[test.expected_trap] expect = "trap " + str(test.expected_trap) elif "expected_anything" in test: expect = "<Anything>" else: expect = "<Unknown>" def showTestResult(): print(" ----------------------") print(f"Test: {ansi.HEADER}{test_id}{ansi.ENDC}") print(f"Args: {', '.join(displayArgs)}") print(f"Expected: {ansi.OKGREEN}{expect}{ansi.ENDC}") print(f"Actual: {ansi.WARNING}{actual}{ansi.ENDC}") if args.show_logs and len(output): print(f"Log:") print(output) log.write(f"{test.source}\t|\t{test.wasm} {test.action.field}({', '.join(displayArgs)})\t=>\t\t") if actual == expect or (expect == "<Anything>" and not force_fail): stats.success += 1 log.write(f"OK: {actual}\n") if args.line: showTestResult() else: stats.failed += 1 log.write(f"FAIL: {actual}, should be: {expect}\n") if args.silent: return showTestResult() #sys.exit(1) if args.file: jsonFiles = args.file else: jsonFiles = glob.glob(os.path.join(".", "core", "*.json")) #jsonFiles = list(map(lambda x: os.path.relpath(x, curDir), jsonFiles)) jsonFiles.sort() for fn in jsonFiles: with open(fn) as f: data = json.load(f) wast_source = filename(data["source_filename"]) wasm_module = "" 
print(f"Running {fn}") wasm3.init() for cmd in data["commands"]: test = dotdict() test.line = int(cmd["line"]) test.source = wast_source + ":" + str(test.line) test.wasm = wasm_module test.type = cmd["type"] if test.type == "module": wasm_module = cmd["filename"] if args.verbose: print(f"Loading {wasm_module}") try: wasm_fn = os.path.join(pathname(fn), wasm_module) wasm3.load(wasm_fn) except Exception as e: pass #fatal(str(e)) elif ( test.type == "action" or test.type == "assert_return" or test.type == "assert_trap" or test.type == "assert_exhaustion" or test.type == "assert_return_canonical_nan" or test.type == "assert_return_arithmetic_nan"): if args.line and test.line != args.line: continue if test.type == "action": test.expected_anything = True elif test.type == "assert_return": test.expected = cmd["expected"] elif test.type == "assert_return_canonical_nan": test.expected = cmd["expected"] test.expected[0]["value"] = "<Canonical NaN>" elif test.type == "assert_return_arithmetic_nan": test.expected = cmd["expected"] test.expected[0]["value"] = "<Arithmetic NaN>" elif test.type == "assert_trap": test.expected_trap = cmd["text"] elif test.type == "assert_exhaustion": test.expected_trap = "stack overflow" else: stats.skipped += 1 warning(f"Skipped {test.source} ({test.type} not implemented)") continue test.action = dotdict(cmd["action"]) if test.action.type == "invoke": # TODO: invoking in modules not implemented if test.action.module: stats.skipped += 1 warning(f"Skipped {test.source} (invoke in module)") continue test.action.field = escape_str(test.action.field) runInvoke(test) else: stats.skipped += 1 warning(f"Skipped {test.source} (unknown action type '{test.action.type}')") # These are irrelevant elif (test.type == "assert_invalid" or test.type == "assert_malformed" or test.type == "assert_uninstantiable"): pass # Others - report as skipped else: stats.skipped += 1 warning(f"Skipped {test.source} ('{test.type}' not implemented)") if (stats.failed + stats.success) != stats.total_run: warning("Statistics summary invalid") pprint(stats) if stats.failed > 0: failed = (stats.failed*100)/stats.total_run print(f"{ansi.FAIL}=======================") print(f" FAILED: {failed:.2f}%") if stats.crashed > 0: print(f" Crashed: {stats.crashed}") print(f"======================={ansi.ENDC}") sys.exit(1) elif stats.success > 0: print(f"{ansi.OKGREEN}=======================") print(f" {stats.success}/{stats.total_run} tests OK") if stats.skipped > 0: print(f"{ansi.WARNING} ({stats.skipped} tests skipped){ansi.OKGREEN}") print(f"======================={ansi.ENDC}")
pyshell.py
#! /usr/bin/env python3 import sys if __name__ == "__main__": sys.modules['idlelib.pyshell'] = sys.modules['__main__'] try: from tkinter import * except ImportError: print("** IDLE can't import Tkinter.\n" "Your Python may not be configured for Tk. **", file=sys.__stderr__) raise SystemExit(1) # Valid arguments for the ...Awareness call below are defined in the following. # https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx if sys.platform == 'win32': try: import ctypes PROCESS_SYSTEM_DPI_AWARE = 1 # Int required. ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE) except (ImportError, AttributeError, OSError): pass from tkinter import messagebox from code import InteractiveInterpreter import itertools import linecache import os import os.path from platform import python_version import re import socket import subprocess from textwrap import TextWrapper import threading import time import tokenize import warnings from idlelib.colorizer import ColorDelegator from idlelib.config import idleConf from idlelib.delegator import Delegator from idlelib import debugger from idlelib import debugger_r from idlelib.editor import EditorWindow, fixwordbreaks from idlelib.filelist import FileList from idlelib.outwin import OutputWindow from idlelib import replace from idlelib import rpc from idlelib.run import idle_formatwarning, StdInputFile, StdOutputFile from idlelib.undo import UndoDelegator # Default for testing; defaults to True in main() for running. use_subprocess = False HOST = '127.0.0.1' # python execution server on localhost loopback PORT = 0 # someday pass in host, port for remote debug capability try: # In case IDLE started with -n. eof = 'Ctrl-D (end-of-file)' exit.eof = eof quit.eof = eof except NameError: # In case python started with -S. pass # Override warnings module to write to warning_stream. Initialize to send IDLE # internal warnings to the console. ScriptBinding.check_syntax() will # temporarily redirect the stream to the shell window to display warnings when # checking user's code. warning_stream = sys.__stderr__ # None, at least on Windows, if no console. def idle_showwarning( message, category, filename, lineno, file=None, line=None): """Show Idle-format warning (after replacing warnings.showwarning). The differences are the formatter called, the file=None replacement, which can be None, the capture of the consequence AttributeError, and the output of a hard-coded prompt. """ if file is None: file = warning_stream try: file.write(idle_formatwarning( message, category, filename, lineno, line=line)) file.write(">>> ") except (AttributeError, OSError): pass # if file (probably __stderr__) is invalid, skip warning. _warnings_showwarning = None def capture_warnings(capture): "Replace warning.showwarning with idle_showwarning, or reverse." global _warnings_showwarning if capture: if _warnings_showwarning is None: _warnings_showwarning = warnings.showwarning warnings.showwarning = idle_showwarning else: if _warnings_showwarning is not None: warnings.showwarning = _warnings_showwarning _warnings_showwarning = None capture_warnings(True) def extended_linecache_checkcache(filename=None, orig_checkcache=linecache.checkcache): """Extend linecache.checkcache to preserve the <pyshell#...> entries Rather than repeating the linecache code, patch it to save the <pyshell#...> entries, call the original linecache.checkcache() (skipping them), and then restore the saved entries. 
orig_checkcache is bound at definition time to the original method, allowing it to be patched. """ cache = linecache.cache save = {} for key in list(cache): if key[:1] + key[-1:] == '<>': save[key] = cache.pop(key) orig_checkcache(filename) cache.update(save) # Patch linecache.checkcache(): linecache.checkcache = extended_linecache_checkcache class PyShellEditorWindow(EditorWindow): "Regular text edit window in IDLE, supports breakpoints" def __init__(self, *args): self.breakpoints = [] EditorWindow.__init__(self, *args) self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here) self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here) self.text.bind("<<open-python-shell>>", self.flist.open_shell) #TODO: don't read/write this from/to .idlerc when testing self.breakpointPath = os.path.join( idleConf.userdir, 'breakpoints.lst') # whenever a file is changed, restore breakpoints def filename_changed_hook(old_hook=self.io.filename_change_hook, self=self): self.restore_file_breaks() old_hook() self.io.set_filename_change_hook(filename_changed_hook) if self.io.filename: self.restore_file_breaks() self.color_breakpoint_text() rmenu_specs = [ ("Cut", "<<cut>>", "rmenu_check_cut"), ("Copy", "<<copy>>", "rmenu_check_copy"), ("Paste", "<<paste>>", "rmenu_check_paste"), (None, None, None), ("Set Breakpoint", "<<set-breakpoint-here>>", None), ("Clear Breakpoint", "<<clear-breakpoint-here>>", None) ] def color_breakpoint_text(self, color=True): "Turn colorizing of breakpoint text on or off" if self.io is None: # possible due to update in restore_file_breaks return if color: theme = idleConf.CurrentTheme() cfg = idleConf.GetHighlight(theme, "break") else: cfg = {'foreground': '', 'background': ''} self.text.tag_config('BREAK', cfg) def set_breakpoint(self, lineno): text = self.text filename = self.io.filename text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1)) try: self.breakpoints.index(lineno) except ValueError: # only add if missing, i.e. do once self.breakpoints.append(lineno) try: # update the subprocess debugger debug = self.flist.pyshell.interp.debugger debug.set_breakpoint_here(filename, lineno) except: # but debugger may not be active right now.... pass def set_breakpoint_here(self, event=None): text = self.text filename = self.io.filename if not filename: text.bell() return lineno = int(float(text.index("insert"))) self.set_breakpoint(lineno) def clear_breakpoint_here(self, event=None): text = self.text filename = self.io.filename if not filename: text.bell() return lineno = int(float(text.index("insert"))) try: self.breakpoints.remove(lineno) except: pass text.tag_remove("BREAK", "insert linestart",\ "insert lineend +1char") try: debug = self.flist.pyshell.interp.debugger debug.clear_breakpoint_here(filename, lineno) except: pass def clear_file_breaks(self): if self.breakpoints: text = self.text filename = self.io.filename if not filename: text.bell() return self.breakpoints = [] text.tag_remove("BREAK", "1.0", END) try: debug = self.flist.pyshell.interp.debugger debug.clear_file_breaks(filename) except: pass def store_file_breaks(self): "Save breakpoints when file is saved" # XXX 13 Dec 2002 KBK Currently the file must be saved before it can # be run. The breaks are saved at that time. If we introduce # a temporary file save feature the save breaks functionality # needs to be re-verified, since the breaks at the time the # temp file is created may differ from the breaks at the last # permanent save of the file. 
Currently, a break introduced # after a save will be effective, but not persistent. # This is necessary to keep the saved breaks synched with the # saved file. # # Breakpoints are set as tagged ranges in the text. # Since a modified file has to be saved before it is # run, and since self.breakpoints (from which the subprocess # debugger is loaded) is updated during the save, the visible # breaks stay synched with the subprocess even if one of these # unexpected breakpoint deletions occurs. breaks = self.breakpoints filename = self.io.filename try: with open(self.breakpointPath, "r") as fp: lines = fp.readlines() except OSError: lines = [] try: with open(self.breakpointPath, "w") as new_file: for line in lines: if not line.startswith(filename + '='): new_file.write(line) self.update_breakpoints() breaks = self.breakpoints if breaks: new_file.write(filename + '=' + str(breaks) + '\n') except OSError as err: if not getattr(self.root, "breakpoint_error_displayed", False): self.root.breakpoint_error_displayed = True messagebox.showerror(title='IDLE Error', message='Unable to update breakpoint list:\n%s' % str(err), parent=self.text) def restore_file_breaks(self): self.text.update() # this enables setting "BREAK" tags to be visible if self.io is None: # can happen if IDLE closes due to the .update() call return filename = self.io.filename if filename is None: return if os.path.isfile(self.breakpointPath): with open(self.breakpointPath, "r") as fp: lines = fp.readlines() for line in lines: if line.startswith(filename + '='): breakpoint_linenumbers = eval(line[len(filename)+1:]) for breakpoint_linenumber in breakpoint_linenumbers: self.set_breakpoint(breakpoint_linenumber) def update_breakpoints(self): "Retrieves all the breakpoints in the current window" text = self.text ranges = text.tag_ranges("BREAK") linenumber_list = self.ranges_to_linenumbers(ranges) self.breakpoints = linenumber_list def ranges_to_linenumbers(self, ranges): lines = [] for index in range(0, len(ranges), 2): lineno = int(float(ranges[index].string)) end = int(float(ranges[index+1].string)) while lineno < end: lines.append(lineno) lineno += 1 return lines # XXX 13 Dec 2002 KBK Not used currently # def saved_change_hook(self): # "Extend base method - clear breaks if module is modified" # if not self.get_saved(): # self.clear_file_breaks() # EditorWindow.saved_change_hook(self) def _close(self): "Extend base method - clear breaks when module is closed" self.clear_file_breaks() EditorWindow._close(self) class PyShellFileList(FileList): "Extend base class: IDLE supports a shell and breakpoints" # override FileList's class variable, instances return PyShellEditorWindow # instead of EditorWindow when new edit windows are created. 
EditorWindow = PyShellEditorWindow pyshell = None def open_shell(self, event=None): if self.pyshell: self.pyshell.top.wakeup() else: self.pyshell = PyShell(self) if self.pyshell: if not self.pyshell.begin(): return None return self.pyshell class ModifiedColorDelegator(ColorDelegator): "Extend base class: colorizer for the shell window itself" def recolorize_main(self): self.tag_remove("TODO", "1.0", "iomark") self.tag_add("SYNC", "1.0", "iomark") ColorDelegator.recolorize_main(self) def removecolors(self): # Don't remove shell color tags before "iomark" for tag in self.tagdefs: self.tag_remove(tag, "iomark", "end") class ModifiedUndoDelegator(UndoDelegator): "Extend base class: forbid insert/delete before the I/O mark" def insert(self, index, chars, tags=None): try: if self.delegate.compare(index, "<", "iomark"): self.delegate.bell() return except TclError: pass UndoDelegator.insert(self, index, chars, tags) def delete(self, index1, index2=None): try: if self.delegate.compare(index1, "<", "iomark"): self.delegate.bell() return except TclError: pass UndoDelegator.delete(self, index1, index2) def undo_event(self, event): # Temporarily monkey-patch the delegate's .insert() method to # always use the "stdin" tag. This is needed for undo-ing # deletions to preserve the "stdin" tag, because UndoDelegator # doesn't preserve tags for deleted text. orig_insert = self.delegate.insert self.delegate.insert = \ lambda index, chars: orig_insert(index, chars, "stdin") try: super().undo_event(event) finally: self.delegate.insert = orig_insert class UserInputTaggingDelegator(Delegator): """Delegator used to tag user input with "stdin".""" def insert(self, index, chars, tags=None): if tags is None: tags = "stdin" self.delegate.insert(index, chars, tags) class MyRPCClient(rpc.RPCClient): def handle_EOF(self): "Override the base class - just re-raise EOFError" raise EOFError def restart_line(width, filename): # See bpo-38141. """Return width long restart line formatted with filename. Fill line with balanced '='s, with any extras and at least one at the beginning. Do not end with a trailing space. """ tag = f"= RESTART: {filename or 'Shell'} =" if width >= len(tag): div, mod = divmod((width -len(tag)), 2) return f"{(div+mod)*'='}{tag}{div*'='}" else: return tag[:-2] # Remove ' ='. class ModifiedInterpreter(InteractiveInterpreter): def __init__(self, tkconsole): self.tkconsole = tkconsole locals = sys.modules['__main__'].__dict__ InteractiveInterpreter.__init__(self, locals=locals) self.restarting = False self.subprocess_arglist = None self.port = PORT self.original_compiler_flags = self.compile.compiler.flags _afterid = None rpcclt = None rpcsubproc = None def spawn_subprocess(self): if self.subprocess_arglist is None: self.subprocess_arglist = self.build_subprocess_arglist() self.rpcsubproc = subprocess.Popen(self.subprocess_arglist) def build_subprocess_arglist(self): assert (self.port!=0), ( "Socket should have been assigned a port number.") w = ['-W' + s for s in sys.warnoptions] # Maybe IDLE is installed and is being accessed via sys.path, # or maybe it's not installed and the idle.py script is being # run from the IDLE source directory. 
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc', default=False, type='bool') command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,) return [sys.executable] + w + ["-c", command, str(self.port)] def start_subprocess(self): addr = (HOST, self.port) # GUI makes several attempts to acquire socket, listens for connection for i in range(3): time.sleep(i) try: self.rpcclt = MyRPCClient(addr) break except OSError: pass else: self.display_port_binding_error() return None # if PORT was 0, system will assign an 'ephemeral' port. Find it out: self.port = self.rpcclt.listening_sock.getsockname()[1] # if PORT was not 0, probably working with a remote execution server if PORT != 0: # To allow reconnection within the 2MSL wait (cf. Stevens TCP # V1, 18.6), set SO_REUSEADDR. Note that this can be problematic # on Windows since the implementation allows two active sockets on # the same address! self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.spawn_subprocess() #time.sleep(20) # test to simulate GUI not accepting connection # Accept the connection from the Python execution server self.rpcclt.listening_sock.settimeout(10) try: self.rpcclt.accept() except TimeoutError: self.display_no_subprocess_error() return None self.rpcclt.register("console", self.tkconsole) self.rpcclt.register("stdin", self.tkconsole.stdin) self.rpcclt.register("stdout", self.tkconsole.stdout) self.rpcclt.register("stderr", self.tkconsole.stderr) self.rpcclt.register("flist", self.tkconsole.flist) self.rpcclt.register("linecache", linecache) self.rpcclt.register("interp", self) self.transfer_path(with_cwd=True) self.poll_subprocess() return self.rpcclt def restart_subprocess(self, with_cwd=False, filename=''): if self.restarting: return self.rpcclt self.restarting = True # close only the subprocess debugger debug = self.getdebugger() if debug: try: # Only close subprocess debugger, don't unregister gui_adap! debugger_r.close_subprocess_debugger(self.rpcclt) except: pass # Kill subprocess, spawn a new one, accept connection. 
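        # Steps below: close the old RPC client and kill its subprocess, spawn a
        # fresh one and accept its connection, re-send sys.path, clear any pending
        # shell input, write the "= RESTART: ... =" banner at the "restart" mark,
        # then re-attach an active debugger and restore the compiler flags.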
self.rpcclt.close() self.terminate_subprocess() console = self.tkconsole was_executing = console.executing console.executing = False self.spawn_subprocess() try: self.rpcclt.accept() except TimeoutError: self.display_no_subprocess_error() return None self.transfer_path(with_cwd=with_cwd) console.stop_readline() # annotate restart in shell window and mark it console.text.delete("iomark", "end-1c") console.write('\n') console.write(restart_line(console.width, filename)) console.text.mark_set("restart", "end-1c") console.text.mark_gravity("restart", "left") if not filename: console.showprompt() # restart subprocess debugger if debug: # Restarted debugger connects to current instance of debug GUI debugger_r.restart_subprocess_debugger(self.rpcclt) # reload remote debugger breakpoints for all PyShellEditWindows debug.load_breakpoints() self.compile.compiler.flags = self.original_compiler_flags self.restarting = False return self.rpcclt def __request_interrupt(self): self.rpcclt.remotecall("exec", "interrupt_the_server", (), {}) def interrupt_subprocess(self): threading.Thread(target=self.__request_interrupt).start() def kill_subprocess(self): if self._afterid is not None: self.tkconsole.text.after_cancel(self._afterid) try: self.rpcclt.listening_sock.close() except AttributeError: # no socket pass try: self.rpcclt.close() except AttributeError: # no socket pass self.terminate_subprocess() self.tkconsole.executing = False self.rpcclt = None def terminate_subprocess(self): "Make sure subprocess is terminated" try: self.rpcsubproc.kill() except OSError: # process already terminated return else: try: self.rpcsubproc.wait() except OSError: return def transfer_path(self, with_cwd=False): if with_cwd: # Issue 13506 path = [''] # include Current Working Directory path.extend(sys.path) else: path = sys.path self.runcommand("""if 1: import sys as _sys _sys.path = %r del _sys \n""" % (path,)) active_seq = None def poll_subprocess(self): clt = self.rpcclt if clt is None: return try: response = clt.pollresponse(self.active_seq, wait=0.05) except (EOFError, OSError, KeyboardInterrupt): # lost connection or subprocess terminated itself, restart # [the KBI is from rpc.SocketIO.handle_EOF()] if self.tkconsole.closing: return response = None self.restart_subprocess() if response: self.tkconsole.resetoutput() self.active_seq = None how, what = response console = self.tkconsole.console if how == "OK": if what is not None: print(repr(what), file=console) elif how == "EXCEPTION": if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"): self.remote_stack_viewer() elif how == "ERROR": errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n" print(errmsg, what, file=sys.__stderr__) print(errmsg, what, file=console) # we received a response to the currently active seq number: try: self.tkconsole.endexecuting() except AttributeError: # shell may have closed pass # Reschedule myself if not self.tkconsole.closing: self._afterid = self.tkconsole.text.after( self.tkconsole.pollinterval, self.poll_subprocess) debugger = None def setdebugger(self, debugger): self.debugger = debugger def getdebugger(self): return self.debugger def open_remote_stack_viewer(self): """Initiate the remote stack viewer from a separate thread. This method is called from the subprocess, and by returning from this method we allow the subprocess to unblock. After a bit the shell requests the subprocess to open the remote stack viewer which returns a static object looking at the last exception. It is queried through the RPC mechanism. 
""" self.tkconsole.text.after(300, self.remote_stack_viewer) return def remote_stack_viewer(self): from idlelib import debugobj_r oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {}) if oid is None: self.tkconsole.root.bell() return item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid) from idlelib.tree import ScrolledCanvas, TreeNode top = Toplevel(self.tkconsole.root) theme = idleConf.CurrentTheme() background = idleConf.GetHighlight(theme, 'normal')['background'] sc = ScrolledCanvas(top, bg=background, highlightthickness=0) sc.frame.pack(expand=1, fill="both") node = TreeNode(sc.canvas, None, item) node.expand() # XXX Should GC the remote tree when closing the window gid = 0 def execsource(self, source): "Like runsource() but assumes complete exec source" filename = self.stuffsource(source) self.execfile(filename, source) def execfile(self, filename, source=None): "Execute an existing file" if source is None: with tokenize.open(filename) as fp: source = fp.read() if use_subprocess: source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n" + source + "\ndel __file__") try: code = compile(source, filename, "exec") except (OverflowError, SyntaxError): self.tkconsole.resetoutput() print('*** Error in script or command!\n' 'Traceback (most recent call last):', file=self.tkconsole.stderr) InteractiveInterpreter.showsyntaxerror(self, filename) self.tkconsole.showprompt() else: self.runcode(code) def runsource(self, source): "Extend base class method: Stuff the source in the line cache first" filename = self.stuffsource(source) # at the moment, InteractiveInterpreter expects str assert isinstance(source, str) # InteractiveInterpreter.runsource() calls its runcode() method, # which is overridden (see below) return InteractiveInterpreter.runsource(self, source, filename) def stuffsource(self, source): "Stuff source in the filename cache" filename = "<pyshell#%d>" % self.gid self.gid = self.gid + 1 lines = source.split("\n") linecache.cache[filename] = len(source)+1, 0, lines, filename return filename def prepend_syspath(self, filename): "Prepend sys.path with file's directory if not already included" self.runcommand("""if 1: _filename = %r import sys as _sys from os.path import dirname as _dirname _dir = _dirname(_filename) if not _dir in _sys.path: _sys.path.insert(0, _dir) del _filename, _sys, _dirname, _dir \n""" % (filename,)) def showsyntaxerror(self, filename=None): """Override Interactive Interpreter method: Use Colorizing Color the offending position instead of printing it and pointing at it with a caret. 
""" tkconsole = self.tkconsole text = tkconsole.text text.tag_remove("ERROR", "1.0", "end") type, value, tb = sys.exc_info() msg = getattr(value, 'msg', '') or value or "<no detail available>" lineno = getattr(value, 'lineno', '') or 1 offset = getattr(value, 'offset', '') or 0 if offset == 0: lineno += 1 #mark end of offending line if lineno == 1: pos = "iomark + %d chars" % (offset-1) else: pos = "iomark linestart + %d lines + %d chars" % \ (lineno-1, offset-1) tkconsole.colorize_syntax_error(text, pos) tkconsole.resetoutput() self.write("SyntaxError: %s\n" % msg) tkconsole.showprompt() def showtraceback(self): "Extend base class method to reset output properly" self.tkconsole.resetoutput() self.checklinecache() InteractiveInterpreter.showtraceback(self) if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"): self.tkconsole.open_stack_viewer() def checklinecache(self): c = linecache.cache for key in list(c.keys()): if key[:1] + key[-1:] != "<>": del c[key] def runcommand(self, code): "Run the code without invoking the debugger" # The code better not raise an exception! if self.tkconsole.executing: self.display_executing_dialog() return 0 if self.rpcclt: self.rpcclt.remotequeue("exec", "runcode", (code,), {}) else: exec(code, self.locals) return 1 def runcode(self, code): "Override base class method" if self.tkconsole.executing: self.restart_subprocess() self.checklinecache() debugger = self.debugger try: self.tkconsole.beginexecuting() if not debugger and self.rpcclt is not None: self.active_seq = self.rpcclt.asyncqueue("exec", "runcode", (code,), {}) elif debugger: debugger.run(code, self.locals) else: exec(code, self.locals) except SystemExit: if not self.tkconsole.closing: if messagebox.askyesno( "Exit?", "Do you want to exit altogether?", default="yes", parent=self.tkconsole.text): raise else: self.showtraceback() else: raise except: if use_subprocess: print("IDLE internal error in runcode()", file=self.tkconsole.stderr) self.showtraceback() self.tkconsole.endexecuting() else: if self.tkconsole.canceled: self.tkconsole.canceled = False print("KeyboardInterrupt", file=self.tkconsole.stderr) else: self.showtraceback() finally: if not use_subprocess: try: self.tkconsole.endexecuting() except AttributeError: # shell may have closed pass def write(self, s): "Override base class method" return self.tkconsole.stderr.write(s) def display_port_binding_error(self): messagebox.showerror( "Port Binding Error", "IDLE can't bind to a TCP/IP port, which is necessary to " "communicate with its Python execution server. This might be " "because no networking is installed on this computer. 
" "Run IDLE with the -n command line switch to start without a " "subprocess and refer to Help/IDLE Help 'Running without a " "subprocess' for further details.", parent=self.tkconsole.text) def display_no_subprocess_error(self): messagebox.showerror( "Subprocess Connection Error", "IDLE's subprocess didn't make connection.\n" "See the 'Startup failure' section of the IDLE doc, online at\n" "https://docs.python.org/3/library/idle.html#startup-failure", parent=self.tkconsole.text) def display_executing_dialog(self): messagebox.showerror( "Already executing", "The Python Shell window is already executing a command; " "please wait until it is finished.", parent=self.tkconsole.text) class PyShell(OutputWindow): from idlelib.squeezer import Squeezer shell_title = "IDLE Shell " + python_version() # Override classes ColorDelegator = ModifiedColorDelegator UndoDelegator = ModifiedUndoDelegator # Override menus menu_specs = [ ("file", "_File"), ("edit", "_Edit"), ("debug", "_Debug"), ("options", "_Options"), ("window", "_Window"), ("help", "_Help"), ] # Extend right-click context menu rmenu_specs = OutputWindow.rmenu_specs + [ ("Squeeze", "<<squeeze-current-text>>"), ] _idx = 1 + len(list(itertools.takewhile( lambda rmenu_item: rmenu_item[0] != "Copy", rmenu_specs) )) rmenu_specs.insert(_idx, ("Copy with prompts", "<<copy-with-prompts>>", "rmenu_check_copy")) del _idx allow_line_numbers = False user_input_insert_tags = "stdin" # New classes from idlelib.history import History from idlelib.sidebar import ShellSidebar def __init__(self, flist=None): if use_subprocess: ms = self.menu_specs if ms[2][0] != "shell": ms.insert(2, ("shell", "She_ll")) self.interp = ModifiedInterpreter(self) if flist is None: root = Tk() fixwordbreaks(root) root.withdraw() flist = PyShellFileList(root) self.shell_sidebar = None # initialized below OutputWindow.__init__(self, flist, None, None) self.usetabs = False # indentwidth must be 8 when using tabs. See note in EditorWindow: self.indentwidth = 4 self.sys_ps1 = sys.ps1 if hasattr(sys, 'ps1') else '>>>\n' self.prompt_last_line = self.sys_ps1.split('\n')[-1] self.prompt = self.sys_ps1 # Changes when debug active text = self.text text.configure(wrap="char") text.bind("<<newline-and-indent>>", self.enter_callback) text.bind("<<plain-newline-and-indent>>", self.linefeed_callback) text.bind("<<interrupt-execution>>", self.cancel_callback) text.bind("<<end-of-file>>", self.eof_callback) text.bind("<<open-stack-viewer>>", self.open_stack_viewer) text.bind("<<toggle-debugger>>", self.toggle_debugger) text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer) text.bind("<<copy-with-prompts>>", self.copy_with_prompts_callback) if use_subprocess: text.bind("<<view-restart>>", self.view_restart_mark) text.bind("<<restart-shell>>", self.restart_shell) self.squeezer = self.Squeezer(self) text.bind("<<squeeze-current-text>>", self.squeeze_current_text_event) self.save_stdout = sys.stdout self.save_stderr = sys.stderr self.save_stdin = sys.stdin from idlelib import iomenu self.stdin = StdInputFile(self, "stdin", iomenu.encoding, iomenu.errors) self.stdout = StdOutputFile(self, "stdout", iomenu.encoding, iomenu.errors) self.stderr = StdOutputFile(self, "stderr", iomenu.encoding, "backslashreplace") self.console = StdOutputFile(self, "console", iomenu.encoding, iomenu.errors) if not use_subprocess: sys.stdout = self.stdout sys.stderr = self.stderr sys.stdin = self.stdin try: # page help() text to shell. import pydoc # import must be done here to capture i/o rebinding. 
# XXX KBK 27Dec07 use text viewer someday, but must work w/o subproc pydoc.pager = pydoc.plainpager except: sys.stderr = sys.__stderr__ raise # self.history = self.History(self.text) # self.pollinterval = 50 # millisec self.shell_sidebar = self.ShellSidebar(self) # Insert UserInputTaggingDelegator at the top of the percolator, # but make calls to text.insert() skip it. This causes only insert # events generated in Tcl/Tk to go through this delegator. self.text.insert = self.per.top.insert self.per.insertfilter(UserInputTaggingDelegator()) def ResetFont(self): super().ResetFont() if self.shell_sidebar is not None: self.shell_sidebar.update_font() def ResetColorizer(self): super().ResetColorizer() theme = idleConf.CurrentTheme() tag_colors = { "stdin": {'background': None, 'foreground': None}, "stdout": idleConf.GetHighlight(theme, "stdout"), "stderr": idleConf.GetHighlight(theme, "stderr"), "console": idleConf.GetHighlight(theme, "normal"), } for tag, tag_colors_config in tag_colors.items(): self.text.tag_configure(tag, **tag_colors_config) if self.shell_sidebar is not None: self.shell_sidebar.update_colors() def replace_event(self, event): replace.replace(self.text, insert_tags="stdin") return "break" def get_standard_extension_names(self): return idleConf.GetExtensions(shell_only=True) def copy_with_prompts_callback(self, event=None): """Copy selected lines to the clipboard, with prompts. This makes the copied text useful for doc-tests and interactive shell code examples. This always copies entire lines, even if only part of the first and/or last lines is selected. """ text = self.text selection_indexes = ( self.text.index("sel.first linestart"), self.text.index("sel.last +1line linestart"), ) if selection_indexes[0] is None: # There is no selection, so do nothing. 
return selected_text = self.text.get(*selection_indexes) selection_lineno_range = range( int(float(selection_indexes[0])), int(float(selection_indexes[1])) ) prompts = [ self.shell_sidebar.line_prompts.get(lineno) for lineno in selection_lineno_range ] selected_text_with_prompts = "\n".join( line if prompt is None else f"{prompt} {line}" for prompt, line in zip(prompts, selected_text.splitlines()) ) + "\n" text.clipboard_clear() text.clipboard_append(selected_text_with_prompts) reading = False executing = False canceled = False endoffile = False closing = False _stop_readline_flag = False def set_warning_stream(self, stream): global warning_stream warning_stream = stream def get_warning_stream(self): return warning_stream def toggle_debugger(self, event=None): if self.executing: messagebox.showerror("Don't debug now", "You can only toggle the debugger when idle", parent=self.text) self.set_debugger_indicator() return "break" else: db = self.interp.getdebugger() if db: self.close_debugger() else: self.open_debugger() def set_debugger_indicator(self): db = self.interp.getdebugger() self.setvar("<<toggle-debugger>>", not not db) def toggle_jit_stack_viewer(self, event=None): pass # All we need is the variable def close_debugger(self): db = self.interp.getdebugger() if db: self.interp.setdebugger(None) db.close() if self.interp.rpcclt: debugger_r.close_remote_debugger(self.interp.rpcclt) self.resetoutput() self.console.write("[DEBUG OFF]\n") self.prompt = self.sys_ps1 self.showprompt() self.set_debugger_indicator() def open_debugger(self): if self.interp.rpcclt: dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt, self) else: dbg_gui = debugger.Debugger(self) self.interp.setdebugger(dbg_gui) dbg_gui.load_breakpoints() self.prompt = "[DEBUG ON]\n" + self.sys_ps1 self.showprompt() self.set_debugger_indicator() def debug_menu_postcommand(self): state = 'disabled' if self.executing else 'normal' self.update_menu_state('debug', '*tack*iewer', state) def beginexecuting(self): "Helper for ModifiedInterpreter" self.resetoutput() self.executing = True def endexecuting(self): "Helper for ModifiedInterpreter" self.executing = False self.canceled = False self.showprompt() def close(self): "Extend EditorWindow.close()" if self.executing: response = messagebox.askokcancel( "Kill?", "Your program is still running!\n Do you want to kill it?", default="ok", parent=self.text) if response is False: return "cancel" self.stop_readline() self.canceled = True self.closing = True return EditorWindow.close(self) def _close(self): "Extend EditorWindow._close(), shut down debugger and execution server" self.close_debugger() if use_subprocess: self.interp.kill_subprocess() # Restore std streams sys.stdout = self.save_stdout sys.stderr = self.save_stderr sys.stdin = self.save_stdin # Break cycles self.interp = None self.console = None self.flist.pyshell = None self.history = None EditorWindow._close(self) def ispythonsource(self, filename): "Override EditorWindow method: never remove the colorizer" return True def short_title(self): return self.shell_title COPYRIGHT = \ 'Type "help", "copyright", "credits" or "license()" for more information.' def begin(self): self.text.mark_set("iomark", "insert") self.resetoutput() if use_subprocess: nosub = '' client = self.interp.start_subprocess() if not client: self.close() return False else: nosub = ("==== No Subprocess ====\n\n" + "WARNING: Running IDLE without a Subprocess is deprecated\n" + "and will be removed in a later version. 
See Help/IDLE Help\n" + "for details.\n\n") sys.displayhook = rpc.displayhook self.write("Python %s on %s\n%s\n%s" % (sys.version, sys.platform, self.COPYRIGHT, nosub)) self.text.focus_force() self.showprompt() # User code should use separate default Tk root window import tkinter tkinter._support_default_root = True tkinter._default_root = None return True def stop_readline(self): if not self.reading: # no nested mainloop to exit. return self._stop_readline_flag = True self.top.quit() def readline(self): save = self.reading try: self.reading = True self.top.mainloop() # nested mainloop() finally: self.reading = save if self._stop_readline_flag: self._stop_readline_flag = False return "" line = self.text.get("iomark", "end-1c") if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C line = "\n" self.resetoutput() if self.canceled: self.canceled = False if not use_subprocess: raise KeyboardInterrupt if self.endoffile: self.endoffile = False line = "" return line def isatty(self): return True def cancel_callback(self, event=None): try: if self.text.compare("sel.first", "!=", "sel.last"): return # Active selection -- always use default binding except: pass if not (self.executing or self.reading): self.resetoutput() self.interp.write("KeyboardInterrupt\n") self.showprompt() return "break" self.endoffile = False self.canceled = True if (self.executing and self.interp.rpcclt): if self.interp.getdebugger(): self.interp.restart_subprocess() else: self.interp.interrupt_subprocess() if self.reading: self.top.quit() # exit the nested mainloop() in readline() return "break" def eof_callback(self, event): if self.executing and not self.reading: return # Let the default binding (delete next char) take over if not (self.text.compare("iomark", "==", "insert") and self.text.compare("insert", "==", "end-1c")): return # Let the default binding (delete next char) take over if not self.executing: self.resetoutput() self.close() else: self.canceled = False self.endoffile = True self.top.quit() return "break" def linefeed_callback(self, event): # Insert a linefeed without entering anything (still autoindented) if self.reading: self.text.insert("insert", "\n") self.text.see("insert") else: self.newline_and_indent_event(event) return "break" def enter_callback(self, event): if self.executing and not self.reading: return # Let the default binding (insert '\n') take over # If some text is selected, recall the selection # (but only if this before the I/O mark) try: sel = self.text.get("sel.first", "sel.last") if sel: if self.text.compare("sel.last", "<=", "iomark"): self.recall(sel, event) return "break" except: pass # If we're strictly before the line containing iomark, recall # the current line, less a leading prompt, less leading or # trailing whitespace if self.text.compare("insert", "<", "iomark linestart"): # Check if there's a relevant stdin range -- if so, use it. # Note: "stdin" blocks may include several successive statements, # so look for "console" tags on the newline before each statement # (and possibly on prompts). prev = self.text.tag_prevrange("stdin", "insert") if ( prev and self.text.compare("insert", "<", prev[1]) and # The following is needed to handle empty statements. 
"console" not in self.text.tag_names("insert") ): prev_cons = self.text.tag_prevrange("console", "insert") if prev_cons and self.text.compare(prev_cons[1], ">=", prev[0]): prev = (prev_cons[1], prev[1]) next_cons = self.text.tag_nextrange("console", "insert") if next_cons and self.text.compare(next_cons[0], "<", prev[1]): prev = (prev[0], self.text.index(next_cons[0] + "+1c")) self.recall(self.text.get(prev[0], prev[1]), event) return "break" next = self.text.tag_nextrange("stdin", "insert") if next and self.text.compare("insert lineend", ">=", next[0]): next_cons = self.text.tag_nextrange("console", "insert lineend") if next_cons and self.text.compare(next_cons[0], "<", next[1]): next = (next[0], self.text.index(next_cons[0] + "+1c")) self.recall(self.text.get(next[0], next[1]), event) return "break" # No stdin mark -- just get the current line, less any prompt indices = self.text.tag_nextrange("console", "insert linestart") if indices and \ self.text.compare(indices[0], "<=", "insert linestart"): self.recall(self.text.get(indices[1], "insert lineend"), event) else: self.recall(self.text.get("insert linestart", "insert lineend"), event) return "break" # If we're between the beginning of the line and the iomark, i.e. # in the prompt area, move to the end of the prompt if self.text.compare("insert", "<", "iomark"): self.text.mark_set("insert", "iomark") # If we're in the current input and there's only whitespace # beyond the cursor, erase that whitespace first s = self.text.get("insert", "end-1c") if s and not s.strip(): self.text.delete("insert", "end-1c") # If we're in the current input before its last line, # insert a newline right at the insert point if self.text.compare("insert", "<", "end-1c linestart"): self.newline_and_indent_event(event) return "break" # We're in the last line; append a newline and submit it self.text.mark_set("insert", "end-1c") if self.reading: self.text.insert("insert", "\n") self.text.see("insert") else: self.newline_and_indent_event(event) self.text.update_idletasks() if self.reading: self.top.quit() # Break out of recursive mainloop() else: self.runit() return "break" def recall(self, s, event): # remove leading and trailing empty or whitespace lines s = re.sub(r'^\s*\n', '', s) s = re.sub(r'\n\s*$', '', s) lines = s.split('\n') self.text.undo_block_start() try: self.text.tag_remove("sel", "1.0", "end") self.text.mark_set("insert", "end-1c") prefix = self.text.get("insert linestart", "insert") if prefix.rstrip().endswith(':'): self.newline_and_indent_event(event) prefix = self.text.get("insert linestart", "insert") self.text.insert("insert", lines[0].strip(), self.user_input_insert_tags) if len(lines) > 1: orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0) new_base_indent = re.search(r'^([ \t]*)', prefix).group(0) for line in lines[1:]: if line.startswith(orig_base_indent): # replace orig base indentation with new indentation line = new_base_indent + line[len(orig_base_indent):] self.text.insert('insert', '\n' + line.rstrip(), self.user_input_insert_tags) finally: self.text.see("insert") self.text.undo_block_stop() _last_newline_re = re.compile(r"[ \t]*(\n[ \t]*)?\Z") def runit(self): index_before = self.text.index("end-2c") line = self.text.get("iomark", "end-1c") # Strip off last newline and surrounding whitespace. # (To allow you to hit return twice to end a statement.) 
line = self._last_newline_re.sub("", line) input_is_complete = self.interp.runsource(line) if not input_is_complete: if self.text.get(index_before) == '\n': self.text.tag_remove(self.user_input_insert_tags, index_before) self.shell_sidebar.update_sidebar() def open_stack_viewer(self, event=None): if self.interp.rpcclt: return self.interp.remote_stack_viewer() try: sys.last_traceback except: messagebox.showerror("No stack trace", "There is no stack trace yet.\n" "(sys.last_traceback is not defined)", parent=self.text) return from idlelib.stackviewer import StackBrowser StackBrowser(self.root, self.flist) def view_restart_mark(self, event=None): self.text.see("iomark") self.text.see("restart") def restart_shell(self, event=None): "Callback for Run/Restart Shell Cntl-F6" self.interp.restart_subprocess(with_cwd=True) def showprompt(self): self.resetoutput() prompt = self.prompt if self.sys_ps1 and prompt.endswith(self.sys_ps1): prompt = prompt[:-len(self.sys_ps1)] self.text.tag_add("console", "iomark-1c") self.console.write(prompt) self.shell_sidebar.update_sidebar() self.text.mark_set("insert", "end-1c") self.set_line_and_column() self.io.reset_undo() def show_warning(self, msg): width = self.interp.tkconsole.width wrapper = TextWrapper(width=width, tabsize=8, expand_tabs=True) wrapped_msg = '\n'.join(wrapper.wrap(msg)) if not wrapped_msg.endswith('\n'): wrapped_msg += '\n' self.per.bottom.insert("iomark linestart", wrapped_msg, "stderr") def resetoutput(self): source = self.text.get("iomark", "end-1c") if self.history: self.history.store(source) if self.text.get("end-2c") != "\n": self.text.insert("end-1c", "\n") self.text.mark_set("iomark", "end-1c") self.set_line_and_column() self.ctip.remove_calltip_window() def write(self, s, tags=()): try: self.text.mark_gravity("iomark", "right") count = OutputWindow.write(self, s, tags, "iomark") self.text.mark_gravity("iomark", "left") except: raise ###pass # ### 11Aug07 KBK if we are expecting exceptions # let's find out what they are and be specific. if self.canceled: self.canceled = False if not use_subprocess: raise KeyboardInterrupt return count def rmenu_check_cut(self): try: if self.text.compare('sel.first', '<', 'iomark'): return 'disabled' except TclError: # no selection, so the index 'sel.first' doesn't exist return 'disabled' return super().rmenu_check_cut() def rmenu_check_paste(self): if self.text.compare('insert','<','iomark'): return 'disabled' return super().rmenu_check_paste() def squeeze_current_text_event(self, event=None): self.squeezer.squeeze_current_text() self.shell_sidebar.update_sidebar() def on_squeezed_expand(self, index, text, tags): self.shell_sidebar.update_sidebar() def fix_x11_paste(root): "Make paste replace selection on x11. See issue #5124." 
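    # On X11, Tk's default <<Paste>> inserts at the cursor without replacing the
    # current selection, so prepend a Tcl command that deletes sel.first..sel.last
    # (the catch ignores the error when nothing is selected) before each class's
    # original <<Paste>> binding runs.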
if root._windowingsystem == 'x11': for cls in 'Text', 'Entry', 'Spinbox': root.bind_class( cls, '<<Paste>>', 'catch {%W delete sel.first sel.last}\n' + root.bind_class(cls, '<<Paste>>')) usage_msg = """\ USAGE: idle [-deins] [-t title] [file]* idle [-dns] [-t title] (-c cmd | -r file) [arg]* idle [-dns] [-t title] - [arg]* -h print this help message and exit -n run IDLE without a subprocess (DEPRECATED, see Help/IDLE Help for details) The following options will override the IDLE 'settings' configuration: -e open an edit window -i open a shell window The following options imply -i and will open a shell: -c cmd run the command in a shell, or -r file run script from file -d enable the debugger -s run $IDLESTARTUP or $PYTHONSTARTUP before anything else -t title set title of shell window A default edit window will be bypassed when -c, -r, or - are used. [arg]* are passed to the command (-c) or script (-r) in sys.argv[1:]. Examples: idle Open an edit window or shell depending on IDLE's configuration. idle foo.py foobar.py Edit the files, also open a shell if configured to start with shell. idle -est "Baz" foo.py Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell window with the title "Baz". idle -c "import sys; print(sys.argv)" "foo" Open a shell window and run the command, passing "-c" in sys.argv[0] and "foo" in sys.argv[1]. idle -d -s -r foo.py "Hello World" Open a shell window, run a startup script, enable the debugger, and run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in sys.argv[1]. echo "import sys; print(sys.argv)" | idle - "foobar" Open a shell window, run the script piped in, passing '' in sys.argv[0] and "foobar" in sys.argv[1]. """ def main(): import getopt from platform import system from idlelib import testing # bool value from idlelib import macosx global flist, root, use_subprocess capture_warnings(True) use_subprocess = True enable_shell = False enable_edit = False debug = False cmd = None script = None startup = False try: opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:") except getopt.error as msg: print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr) sys.exit(2) for o, a in opts: if o == '-c': cmd = a enable_shell = True if o == '-d': debug = True enable_shell = True if o == '-e': enable_edit = True if o == '-h': sys.stdout.write(usage_msg) sys.exit() if o == '-i': enable_shell = True if o == '-n': print(" Warning: running IDLE without a subprocess is deprecated.", file=sys.stderr) use_subprocess = False if o == '-r': script = a if os.path.isfile(script): pass else: print("No script file: ", script) sys.exit() enable_shell = True if o == '-s': startup = True enable_shell = True if o == '-t': PyShell.shell_title = a enable_shell = True if args and args[0] == '-': cmd = sys.stdin.read() enable_shell = True # process sys.argv and sys.path: for i in range(len(sys.path)): sys.path[i] = os.path.abspath(sys.path[i]) if args and args[0] == '-': sys.argv = [''] + args[1:] elif cmd: sys.argv = ['-c'] + args elif script: sys.argv = [script] + args elif args: enable_edit = True pathx = [] for filename in args: pathx.append(os.path.dirname(filename)) for dir in pathx: dir = os.path.abspath(dir) if not dir in sys.path: sys.path.insert(0, dir) else: dir = os.getcwd() if dir not in sys.path: sys.path.insert(0, dir) # check the IDLE settings configuration (but command line overrides) edit_start = idleConf.GetOption('main', 'General', 'editor-on-startup', type='bool') enable_edit = enable_edit or edit_start enable_shell = enable_shell or not 
enable_edit # Setup root. Don't break user code run in IDLE process. # Don't change environment when testing. if use_subprocess and not testing: NoDefaultRoot() root = Tk(className="Idle") root.withdraw() from idlelib.run import fix_scaling fix_scaling(root) # set application icon icondir = os.path.join(os.path.dirname(__file__), 'Icons') if system() == 'Windows': iconfile = os.path.join(icondir, 'idle.ico') root.wm_iconbitmap(default=iconfile) elif not macosx.isAquaTk(): if TkVersion >= 8.6: ext = '.png' sizes = (16, 32, 48, 256) else: ext = '.gif' sizes = (16, 32, 48) iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext)) for size in sizes] icons = [PhotoImage(master=root, file=iconfile) for iconfile in iconfiles] root.wm_iconphoto(True, *icons) # start editor and/or shell windows: fixwordbreaks(root) fix_x11_paste(root) flist = PyShellFileList(root) macosx.setupApp(root, flist) if enable_edit: if not (cmd or script): for filename in args[:]: if flist.open(filename) is None: # filename is a directory actually, disconsider it args.remove(filename) if not args: flist.new() if enable_shell: shell = flist.open_shell() if not shell: return # couldn't open shell if macosx.isAquaTk() and flist.dict: # On OSX: when the user has double-clicked on a file that causes # IDLE to be launched the shell window will open just in front of # the file she wants to see. Lower the interpreter window when # there are open files. shell.top.lower() else: shell = flist.pyshell # Handle remaining options. If any of these are set, enable_shell # was set also, so shell must be true to reach here. if debug: shell.open_debugger() if startup: filename = os.environ.get("IDLESTARTUP") or \ os.environ.get("PYTHONSTARTUP") if filename and os.path.isfile(filename): shell.interp.execfile(filename) if cmd or script: shell.interp.runcommand("""if 1: import sys as _sys _sys.argv = %r del _sys \n""" % (sys.argv,)) if cmd: shell.interp.execsource(cmd) elif script: shell.interp.prepend_syspath(script) shell.interp.execfile(script) elif shell: # If there is a shell window and no cmd or script in progress, # check for problematic issues and print warning message(s) in # the IDLE shell window; this is less intrusive than always # opening a separate window. # Warn if the "Prefer tabs when opening documents" system # preference is set to "Always". prefer_tabs_preference_warning = macosx.preferTabsPreferenceWarning() if prefer_tabs_preference_warning: shell.show_warning(prefer_tabs_preference_warning) while flist.inversedict: # keep IDLE running while files are open. root.mainloop() root.destroy() capture_warnings(False) if __name__ == "__main__": main() capture_warnings(False) # Make sure turned off; see issue 18081
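# --------------------------------------------------------------------------
# A minimal standalone sketch (not part of pyshell.py) of the banner formatting
# implemented by the module-level restart_line() above: the tag is centred in a
# row of '='s, any odd leftover '=' goes on the left, and the trailing " =" is
# dropped when the window is narrower than the tag. The helper name
# _restart_line_demo is invented for this illustration.

def _restart_line_demo(width, filename):
    # Same logic as restart_line() earlier in this file.
    tag = f"= RESTART: {filename or 'Shell'} ="
    if width >= len(tag):
        div, mod = divmod(width - len(tag), 2)
        return f"{(div + mod) * '='}{tag}{div * '='}"
    return tag[:-2]  # Remove the trailing ' ='.

# Example results:
#   _restart_line_demo(40, "")          -> 11*'=' + "= RESTART: Shell =" + 11*'='  (40 chars)
#   _restart_line_demo(10, "script.py") -> "= RESTART: script.py"  (trailing " =" dropped)
# --------------------------------------------------------------------------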
test_collection.py
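# The tests below appear to target the pre-1.0 `milvus` Python client, in which a
# collection is described by a plain parameter dict and most calls return a
# (status, result) pair checked with status.OK(). The dict shape used throughout
# this module (values mirror its dim/index_file_size constants; the collection
# name here is only illustrative) is, for example:
#
#     param = {'collection_name': 'example_collection', 'dimension': 128,
#              'index_file_size': 10, 'metric_type': MetricType.L2}
#     status = connect.create_collection(param)
#     assert status.OK()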
import pdb import pytest import logging import itertools from time import sleep from multiprocessing import Process from milvus import IndexType, MetricType from utils import * dim = 128 drop_collection_interval_time = 3 index_file_size = 10 vectors = gen_vectors(100, dim) class TestCollection: """ ****************************************************************** The following cases are used to test `create_collection` function ****************************************************************** """ def test_create_collection(self, connect): ''' target: test create normal collection method: create collection with corrent params expected: create status return ok ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} status = connect.create_collection(param) assert status.OK() def test_create_collection_ip(self, connect): ''' target: test create normal collection method: create collection with corrent params expected: create status return ok ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.IP} status = connect.create_collection(param) assert status.OK() def test_create_collection_jaccard(self, connect): ''' target: test create normal collection method: create collection with corrent params expected: create status return ok ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.JACCARD} status = connect.create_collection(param) assert status.OK() def test_create_collection_hamming(self, connect): ''' target: test create normal collection method: create collection with corrent params expected: create status return ok ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.HAMMING} status = connect.create_collection(param) assert status.OK() def test_create_collection_substructure(self, connect): ''' target: test create normal collection method: create collection with corrent params expected: create status return ok ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.SUBSTRUCTURE} status = connect.create_collection(param) assert status.OK() def test_create_collection_superstructure(self, connect): ''' target: test create normal collection method: create collection with corrent params expected: create status return ok ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.SUPERSTRUCTURE} status = connect.create_collection(param) assert status.OK() def test_create_collection_auto_flush_disabled(self, connect): ''' target: test create normal collection, with large auto_flush_interval method: create collection with corrent params expected: create status return ok ''' disable_flush(connect) collection_name = gen_unique_str("test_collection") try: param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.SUPERSTRUCTURE} status = connect.create_collection(param) assert status.OK() 
status = connect.drop_collection(collection_name,) assert status.OK() time.sleep(2) ## recreate collection status = connect.create_collection(param) assert status.OK() except Exception as e: pass finally: enable_flush(connect) @pytest.mark.level(2) def test_create_collection_without_connection(self, dis_connect): ''' target: test create collection, without connection method: create collection with correct params, with a disconnected instance expected: create raise exception ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} with pytest.raises(Exception) as e: status = dis_connect.create_collection(param) def test_create_collection_existed(self, connect): ''' target: test create collection but the collection name have already existed method: create collection with the same collection_name expected: create status return not ok ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} status = connect.create_collection(param) status = connect.create_collection(param) assert not status.OK() @pytest.mark.level(2) def test_create_collection_existed_ip(self, connect): ''' target: test create collection but the collection name have already existed method: create collection with the same collection_name expected: create status return not ok ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.IP} status = connect.create_collection(param) status = connect.create_collection(param) assert not status.OK() def test_create_collection_None(self, connect): ''' target: test create collection but the collection name is None method: create collection, param collection_name is None expected: create raise error ''' param = {'collection_name': None, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} with pytest.raises(Exception) as e: status = connect.create_collection(param) def test_create_collection_no_dimension(self, connect): ''' target: test create collection with no dimension params method: create collection with corrent params expected: create status return ok ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} with pytest.raises(Exception) as e: status = connect.create_collection(param) def test_create_collection_no_file_size(self, connect): ''' target: test create collection with no index_file_size params method: create collection with corrent params expected: create status return ok, use default 1024 ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'metric_type': MetricType.L2} status = connect.create_collection(param) logging.getLogger().info(status) status, result = connect.get_collection_info(collection_name) logging.getLogger().info(result) assert result.index_file_size == 1024 def test_create_collection_max_file_size(self, connect): ''' target: test create collection with no index_file_size params method: create collection with corrent params expected: create status return ok, use default 1024 ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 
'dimension': dim, 'index_file_size': 1024 * 129, 'metric_type': MetricType.L2} status = connect.create_collection(param) assert not status.OK() def test_create_collection_no_metric_type(self, connect): ''' target: test create collection with no metric_type params method: create collection with corrent params expected: create status return ok, use default L2 ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size} status = connect.create_collection(param) status, result = connect.get_collection_info(collection_name) logging.getLogger().info(result) assert result.metric_type == MetricType.L2 """ ****************************************************************** The following cases are used to test `get_collection_info` function ****************************************************************** """ def test_collection_describe_result(self, connect): ''' target: test describe collection created with correct params method: create collection, assert the value returned by describe method expected: collection_name equals with the collection name created ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} connect.create_collection(param) status, res = connect.get_collection_info(collection_name) assert res.collection_name == collection_name assert res.metric_type == MetricType.L2 @pytest.mark.level(2) def test_collection_get_collection_info_name_ip(self, connect): ''' target: test describe collection created with correct params method: create collection, assert the value returned by describe method expected: collection_name equals with the collection name created ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.IP} connect.create_collection(param) status, res = connect.get_collection_info(collection_name) assert res.collection_name == collection_name assert res.metric_type == MetricType.IP @pytest.mark.level(2) def test_collection_get_collection_info_name_jaccard(self, connect): ''' target: test describe collection created with correct params method: create collection, assert the value returned by describe method expected: collection_name equals with the collection name created ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.JACCARD} connect.create_collection(param) status, res = connect.get_collection_info(collection_name) assert res.collection_name == collection_name assert res.metric_type == MetricType.JACCARD @pytest.mark.level(2) def test_collection_get_collection_info_name_hamming(self, connect): ''' target: test describe collection created with correct params method: create collection, assert the value returned by describe method expected: collection_name equals with the collection name created ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.HAMMING} connect.create_collection(param) status, res = connect.get_collection_info(collection_name) assert res.collection_name == collection_name assert res.metric_type == MetricType.HAMMING def 
test_collection_get_collection_info_name_substructure(self, connect): ''' target: test describe collection created with correct params method: create collection, assert the value returned by describe method expected: collection_name equals with the collection name created ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.SUBSTRUCTURE} connect.create_collection(param) status, res = connect.get_collection_info(collection_name) assert res.collection_name == collection_name assert res.metric_type == MetricType.SUBSTRUCTURE def test_collection_get_collection_info_name_superstructure(self, connect): ''' target: test describe collection created with correct params method: create collection, assert the value returned by describe method expected: collection_name equals with the collection name created ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.SUPERSTRUCTURE} connect.create_collection(param) status, res = connect.get_collection_info(collection_name) assert res.collection_name == collection_name assert res.metric_type == MetricType.SUPERSTRUCTURE # TODO: enable @pytest.mark.level(2) def _test_collection_get_collection_info_name_multiprocessing(self, connect, args): ''' target: test describe collection created with multiprocess method: create collection, assert the value returned by describe method expected: collection_name equals with the collection name created ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} connect.create_collection(param) def describecollection(milvus): status, res = milvus.get_collection_info(collection_name) assert res.collection_name == collection_name process_num = 4 processes = [] for i in range(process_num): milvus = get_milvus(args["ip"], args["port"], handler=args["handler"]) p = Process(target=describecollection, args=(milvus,)) processes.append(p) p.start() for p in processes: p.join() # @pytest.mark.level(2) # def test_collection_describe_without_connection(self, collection, dis_connect): # ''' # target: test describe collection, without connection # method: describe collection with correct params, with a disconnected instance # expected: describe raise exception # ''' # with pytest.raises(Exception) as e: # status = dis_connect.get_collection_info(collection) def test_collection_describe_dimension(self, connect): ''' target: test describe collection created with correct params method: create collection, assert the dimention value returned by describe method expected: dimention equals with dimention when created ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim+1, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} connect.create_collection(param) status, res = connect.get_collection_info(collection_name) assert res.dimension == dim+1 """ ****************************************************************** The following cases are used to test `drop_collection` function ****************************************************************** """ def test_drop_collection(self, connect, collection): ''' target: test delete collection created with correct params method: create collection and then 
delete, assert the value returned by delete method expected: status ok, and no collection in collections ''' status = connect.drop_collection(collection) assert not assert_has_collection(connect, collection) @pytest.mark.level(2) def test_drop_collection_ip(self, connect, ip_collection): ''' target: test delete collection created with correct params method: create collection and then delete, assert the value returned by delete method expected: status ok, and no collection in collections ''' status = connect.drop_collection(ip_collection) assert not assert_has_collection(connect, ip_collection) @pytest.mark.level(2) def test_drop_collection_jaccard(self, connect, jac_collection): ''' target: test delete collection created with correct params method: create collection and then delete, assert the value returned by delete method expected: status ok, and no collection in collections ''' status = connect.drop_collection(jac_collection) assert not assert_has_collection(connect, jac_collection) @pytest.mark.level(2) def test_drop_collection_hamming(self, connect, ham_collection): ''' target: test delete collection created with correct params method: create collection and then delete, assert the value returned by delete method expected: status ok, and no collection in collections ''' status = connect.drop_collection(ham_collection) assert not assert_has_collection(connect, ham_collection) # @pytest.mark.level(2) # def test_collection_delete_without_connection(self, collection, dis_connect): # ''' # target: test describe collection, without connection # method: describe collection with correct params, with a disconnected instance # expected: describe raise exception # ''' # with pytest.raises(Exception) as e: # status = dis_connect.drop_collection(collection) def test_drop_collection_not_existed(self, connect): ''' target: test delete collection not in index method: delete all collections, and delete collection again, assert the value returned by delete method expected: status not ok ''' collection_name = gen_unique_str("test_collection") status = connect.drop_collection(collection_name) assert not status.OK() def test_delete_create_collection_repeatedly(self, connect): ''' target: test delete and create the same collection repeatedly method: try to create the same collection and delete repeatedly, assert the value returned by delete method expected: create ok and delete ok ''' loops = 2 timeout = 5 for i in range(loops): collection_name = "test_collection" param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} connect.create_collection(param) status = None while i < timeout: status = connect.drop_collection(collection_name) time.sleep(1) i += 1 if status.OK(): break if i > timeout: assert False # TODO: enable @pytest.mark.level(2) def _test_drop_collection_multiprocessing(self, args): ''' target: test delete collection with multiprocess method: create collection and then delete, assert the value returned by delete method expected: status ok, and no collection in collections ''' process_num = 6 processes = [] def deletecollection(milvus): status = milvus.drop_collection(collection) # assert not status.code==0 assert assert_has_collection(milvus, collection) assert status.OK() for i in range(process_num): milvus = get_milvus(args["ip"], args["port"], handler=args["handler"]) p = Process(target=deletecollection, args=(milvus,)) processes.append(p) p.start() for p in processes: p.join() # TODO: enable @pytest.mark.level(2) def 
_test_drop_collection_multiprocessing_multicollection(self, connect): ''' target: test delete collection with multiprocess method: create collection and then delete, assert the value returned by delete method expected: status ok, and no collection in collections ''' process_num = 5 loop_num = 2 processes = [] collection = [] j = 0 while j < (process_num*loop_num): collection_name = gen_unique_str("test_drop_collection_with_multiprocessing") collection.append(collection_name) param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} connect.create_collection(param) j = j + 1 def delete(connect,ids): i = 0 while i < loop_num: status = connect.drop_collection(collection[ids*process_num+i]) time.sleep(2) assert status.OK() assert not assert_has_collection(connect, collection[ids*process_num+i]) i = i + 1 for i in range(process_num): ids = i p = Process(target=delete, args=(connect,ids)) processes.append(p) p.start() for p in processes: p.join() """ ****************************************************************** The following cases are used to test `has_collection` function ****************************************************************** """ def test_has_collection(self, connect): ''' target: test if the created collection existed method: create collection, assert the value returned by has_collection method expected: True ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} connect.create_collection(param) assert assert_has_collection(connect, collection_name) def test_has_collection_ip(self, connect): ''' target: test if the created collection existed method: create collection, assert the value returned by has_collection method expected: True ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.IP} connect.create_collection(param) assert assert_has_collection(connect, collection_name) def test_has_collection_jaccard(self, connect): ''' target: test if the created collection existed method: create collection, assert the value returned by has_collection method expected: True ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.JACCARD} connect.create_collection(param) assert assert_has_collection(connect, collection_name) def test_has_collection_hamming(self, connect): ''' target: test if the created collection existed method: create collection, assert the value returned by has_collection method expected: True ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.HAMMING} connect.create_collection(param) assert assert_has_collection(connect, collection_name) # @pytest.mark.level(2) # def test_has_collection_without_connection(self, collection, dis_connect): # ''' # target: test has collection, without connection # method: calling has collection with correct params, with a disconnected instance # expected: has collection raise exception # ''' # with pytest.raises(Exception) as e: # assert_has_collection(dis_connect, collection) def test_has_collection_not_existed(self, connect): ''' target: test if collection not 
created method: random a collection name, which not existed in db, assert the value returned by has_collection method expected: False ''' collection_name = gen_unique_str("test_collection") assert not assert_has_collection(connect, collection_name) """ ****************************************************************** The following cases are used to test `list_collections` function ****************************************************************** """ def test_list_collections(self, connect): ''' target: test show collections is correct or not, if collection created method: create collection, assert the value returned by list_collections method is equal to 0 expected: collection_name in show collections ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} connect.create_collection(param) status, result = connect.list_collections() assert status.OK() assert collection_name in result def test_list_collections_ip(self, connect): ''' target: test show collections is correct or not, if collection created method: create collection, assert the value returned by list_collections method is equal to 0 expected: collection_name in show collections ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.IP} connect.create_collection(param) status, result = connect.list_collections() assert status.OK() assert collection_name in result def test_list_collections_jaccard(self, connect): ''' target: test show collections is correct or not, if collection created method: create collection, assert the value returned by list_collections method is equal to 0 expected: collection_name in show collections ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.JACCARD} connect.create_collection(param) status, result = connect.list_collections() assert status.OK() assert collection_name in result def test_list_collections_hamming(self, connect): ''' target: test show collections is correct or not, if collection created method: create collection, assert the value returned by list_collections method is equal to 0 expected: collection_name in show collections ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.HAMMING} connect.create_collection(param) status, result = connect.list_collections() assert status.OK() assert collection_name in result def test_list_collections_substructure(self, connect): ''' target: test show collections is correct or not, if collection created method: create collection, assert the value returned by list_collections method is equal to 0 expected: collection_name in show collections ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.SUBSTRUCTURE} connect.create_collection(param) status, result = connect.list_collections() assert status.OK() assert collection_name in result def test_list_collections_superstructure(self, connect): ''' target: test show collections is correct or not, if collection created method: create collection, assert the value returned by 
list_collections method is equal to 0 expected: collection_name in show collections ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.SUPERSTRUCTURE} connect.create_collection(param) status, result = connect.list_collections() assert status.OK() assert collection_name in result # @pytest.mark.level(2) # def test_list_collections_without_connection(self, dis_connect): # ''' # target: test list_collections, without connection # method: calling list_collections with correct params, with a disconnected instance # expected: list_collections raise exception # ''' # with pytest.raises(Exception) as e: # status = dis_connect.list_collections() @pytest.mark.level(2) def test_list_collections_no_collection(self, connect): ''' target: test show collections is correct or not, if no collection in db method: delete all collections, assert the value returned by list_collections method is equal to [] expected: the status is ok, and the result is equal to [] ''' status, result = connect.list_collections() if result: for collection_name in result: connect.drop_collection(collection_name) time.sleep(drop_collection_interval_time) status, result = connect.list_collections() assert status.OK() assert len(result) == 0 # TODO: enable @pytest.mark.level(2) def _test_list_collections_multiprocessing(self, connect, args): ''' target: test show collections is correct or not with processes method: create collection, assert the value returned by list_collections method is equal to 0 expected: collection_name in show collections ''' collection_name = gen_unique_str("test_collection") param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} connect.create_collection(param) def showcollections(milvus): status, result = milvus.list_collections() assert status.OK() assert collection_name in result process_num = 8 processes = [] for i in range(process_num): milvus = get_milvus(args["ip"], args["port"], handler=args["handler"]) p = Process(target=showcollections, args=(milvus,)) processes.append(p) p.start() for p in processes: p.join() """ ****************************************************************** The following cases are used to test `load_collection` function ****************************************************************** """ """ generate valid create_index params """ @pytest.fixture( scope="function", params=gen_simple_index() ) def get_simple_index(self, request, connect): if str(connect._cmd("mode")[1]) == "CPU": if request.param["index_type"] == IndexType.IVF_SQ8H: pytest.skip("sq8h not support in cpu mode") if request.param["index_type"] == IndexType.IVF_PQ: pytest.skip("Skip PQ Temporary") return request.param @pytest.mark.level(1) def test_load_collection(self, connect, collection, get_simple_index): index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] status, ids = connect.insert(collection, vectors) status = connect.create_index(collection, index_type, index_param) status = connect.load_collection(collection) assert status.OK() @pytest.mark.level(1) def test_load_collection_ip(self, connect, ip_collection, get_simple_index): index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] status, ids = connect.insert(ip_collection, vectors) status = connect.create_index(ip_collection, index_type, index_param) status = 
connect.load_collection(ip_collection) assert status.OK() @pytest.mark.level(1) def test_load_collection_jaccard(self, connect, jac_collection, get_simple_index): index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] status, ids = connect.insert(jac_collection, vectors) status = connect.create_index(jac_collection, index_type, index_param) status = connect.load_collection(jac_collection) assert status.OK() @pytest.mark.level(1) def test_load_collection_hamming(self, connect, ham_collection, get_simple_index): index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] status, ids = connect.insert(ham_collection, vectors) status = connect.create_index(ham_collection, index_type, index_param) status = connect.load_collection(ham_collection) assert status.OK() @pytest.mark.level(2) def test_load_collection_not_existed(self, connect, collection, get_simple_index): index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] collection_name = gen_unique_str() status, ids = connect.insert(collection, vectors) status = connect.create_index(collection, index_type, index_param) status = connect.load_collection(collection_name) assert not status.OK() @pytest.mark.level(1) def test_load_collection_partition(self, connect, collection): partition_name = gen_unique_str() status, ids = connect.insert(collection, vectors) assert status.OK() status = connect.create_partition(collection, partition_name) status = connect.load_collection(collection, partition_tags=[partition_name]) assert status.OK() @pytest.mark.level(1) def test_load_release_collection_partition(self, connect, collection): partition_name = gen_unique_str() status = connect.create_partition(collection, partition_name) assert status.OK() status, ids = connect.insert(collection, vectors, partition_tag=partition_name) assert status.OK() status = connect.load_collection(collection, partition_tags=[partition_name]) assert status.OK() status = connect.release_collection(collection, partition_tags=[partition_name]) assert status.OK() @pytest.mark.level(1) def test_load_release_twice_collection_partition(self, connect, collection): partition_name = gen_unique_str() status = connect.create_partition(collection, partition_name) assert status.OK() status, ids = connect.insert(collection, vectors, partition_tag=partition_name) assert status.OK() status = connect.load_collection(collection, partition_tags=[partition_name]) assert status.OK() for i in range(2): status = connect.release_collection(collection, partition_tags=[partition_name]) assert status.OK() @pytest.mark.level(1) def test_load_release_load_collection_partition(self, connect, collection): partition_name = gen_unique_str() status = connect.create_partition(collection, partition_name) assert status.OK() status, ids = connect.insert(collection, vectors, partition_tag=partition_name) assert status.OK() status = connect.load_collection(collection, partition_tags=[partition_name]) assert status.OK() status = connect.release_collection(collection, partition_tags=[partition_name]) assert status.OK() status = connect.load_collection(collection, partition_tags=[partition_name]) assert status.OK() @pytest.mark.level(1) def test_not_load_release_collection_partition(self, connect, collection): partition_name = gen_unique_str() status = connect.create_partition(collection, partition_name) assert status.OK() status, ids = connect.insert(collection, vectors, partition_tag=partition_name) assert status.OK() 
status = connect.release_collection(collection, partition_tags=[partition_name]) assert status.OK() @pytest.mark.level(1) def test_load_collection_partitions(self, connect, collection): partition_names = [] for i in range(2): name = gen_unique_str() partition_names.append(name) status = connect.create_partition(collection, name) assert status.OK() status, ids = connect.insert(collection, vectors) status = connect.load_collection(collection, partition_tags=partition_names) assert status.OK() @pytest.mark.level(1) def test_load_collection_partition_not_existed(self, connect, collection): partition_name = gen_unique_str() status, ids = connect.insert(collection, vectors) assert status.OK() status = connect.load_collection(collection, partition_tags=[partition_name]) assert not status.OK() @pytest.mark.level(1) def test_load_collection_partition_invalid_string(self, connect, collection): partition_name = "invalid string" status, ids = connect.insert(collection, vectors) assert status.OK() status = connect.load_collection(collection, partition_tags=[partition_name]) assert not status.OK() @pytest.mark.level(1) def test_load_collection_partition_None(self, connect, collection): status = connect.load_collection(collection, partition_tags=None) assert status.OK() @pytest.mark.level(2) def test_load_collection_not_existed_ip(self, connect, ip_collection, get_simple_index): index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] collection_name = gen_unique_str() status, ids = connect.insert(ip_collection, vectors) status = connect.create_index(ip_collection, index_type, index_param) status = connect.load_collection(collection_name) assert not status.OK() @pytest.mark.level(1) def test_load_collection_no_vectors(self, connect, collection): status = connect.load_collection(collection) assert status.OK() @pytest.mark.level(2) def test_load_collection_no_vectors_ip(self, connect, ip_collection): status = connect.load_collection(ip_collection) assert status.OK() # TODO: psutils get memory usage @pytest.mark.level(1) def test_load_collection_memory_usage(self, connect, collection): pass class TestCollectionInvalid(object): """ Test creating collection with invalid collection names """ @pytest.fixture( scope="function", params=gen_invalid_collection_names() ) def get_collection_name(self, request): yield request.param @pytest.mark.level(2) def test_create_collection_with_invalid_collectionname(self, connect, get_collection_name): collection_name = get_collection_name param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} status = connect.create_collection(param) assert not status.OK() def test_create_collection_with_empty_collectionname(self, connect): collection_name = '' param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} with pytest.raises(Exception) as e: status = connect.create_collection(param) def test_load_collection_with_invalid_collectionname(self, connect): collection_name = '' with pytest.raises(Exception) as e: status = connect.load_collection(collection_name) class TestCreateCollectionDimInvalid(object): """ Test creating collection with invalid dimension """ @pytest.fixture( scope="function", params=gen_invalid_dims() ) def get_dim(self, request): yield request.param @pytest.mark.level(2) @pytest.mark.timeout(5) def test_create_collection_with_invalid_dimension(self, connect, get_dim): dimension = get_dim 
collection = gen_unique_str("test_create_collection_with_invalid_dimension") param = {'collection_name': collection, 'dimension': dimension, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} if isinstance(dimension, int): status = connect.create_collection(param) assert not status.OK() else: with pytest.raises(Exception) as e: status = connect.create_collection(param) # TODO: max / min index file size class TestCreateCollectionIndexSizeInvalid(object): """ Test creating collections with invalid index_file_size """ @pytest.fixture( scope="function", params=gen_invalid_file_sizes() ) def get_file_size(self, request): yield request.param @pytest.mark.level(2) def test_create_collection_with_invalid_file_size(self, connect, collection, get_file_size): file_size = get_file_size param = {'collection_name': collection, 'dimension': dim, 'index_file_size': file_size, 'metric_type': MetricType.L2} if isinstance(file_size, int): status = connect.create_collection(param) assert not status.OK() else: with pytest.raises(Exception) as e: status = connect.create_collection(param) class TestCreateMetricTypeInvalid(object): """ Test creating collections with invalid metric_type """ @pytest.fixture( scope="function", params=gen_invalid_metric_types() ) def get_metric_type(self, request): yield request.param @pytest.mark.level(2) def test_create_collection_with_invalid_file_size(self, connect, collection, get_metric_type): metric_type = get_metric_type param = {'collection_name': collection, 'dimension': dim, 'index_file_size': 10, 'metric_type': metric_type} with pytest.raises(Exception) as e: status = connect.create_collection(param) def create_collection(connect, **params): param = {'collection_name': params["collection_name"], 'dimension': params["dimension"], 'index_file_size': index_file_size, 'metric_type': MetricType.L2} status = connect.create_collection(param) return status def search_collection(connect, **params): status, result = connect.search( params["collection_name"], params["top_k"], params["query_vectors"], params={"nprobe": params["nprobe"]}) return status def load_collection(connect, **params): status = connect.load_collection(params["collection_name"]) return status def has(connect, **params): status, result = connect.has_collection(params["collection_name"]) return status def show(connect, **params): status, result = connect.list_collections() return status def delete(connect, **params): status = connect.drop_collection(params["collection_name"]) return status def describe(connect, **params): status, result = connect.get_collection_info(params["collection_name"]) return status def rowcount(connect, **params): status, result = connect.count_entities(params["collection_name"]) return status def create_index(connect, **params): status = connect.create_index(params["collection_name"], params["index_type"], params["index_param"]) return status func_map = { # 0:has, 1:show, 10:create_collection, 11:describe, 12:rowcount, 13:search_collection, 14:load_collection, 15:create_index, 30:delete } def gen_sequence(): raw_seq = func_map.keys() result = itertools.permutations(raw_seq) for x in result: yield x class TestCollectionLogic(object): @pytest.mark.parametrize("logic_seq", gen_sequence()) @pytest.mark.level(2) def _test_logic(self, connect, logic_seq, args): if args["handler"] == "HTTP": pytest.skip("Skip in http mode") if self.is_right(logic_seq): self.execute(logic_seq, connect) else: self.execute_with_error(logic_seq, connect) self.tear_down(connect) def is_right(self, seq): 
        if list(seq) == sorted(seq):
            return True
        not_created = True
        has_deleted = False
        for i in range(len(seq)):
            if seq[i] > 10 and not_created:
                return False
            elif seq[i] > 10 and has_deleted:
                return False
            elif seq[i] == 10:
                not_created = False
            elif seq[i] == 30:
                has_deleted = True
        return True

    def execute(self, logic_seq, connect):
        basic_params = self.gen_params()
        for i in range(len(logic_seq)):
            # logging.getLogger().info(logic_seq[i])
            f = func_map[logic_seq[i]]
            status = f(connect, **basic_params)
            assert status.OK()

    def execute_with_error(self, logic_seq, connect):
        basic_params = self.gen_params()
        error_flag = False
        for i in range(len(logic_seq)):
            f = func_map[logic_seq[i]]
            status = f(connect, **basic_params)
            if not status.OK():
                # logging.getLogger().info(logic_seq[i])
                error_flag = True
                break
        assert error_flag

    def tear_down(self, connect):
        names = connect.list_collections()[1]
        for name in names:
            connect.drop_collection(name)

    def gen_params(self):
        collection_name = gen_unique_str("test_collection")
        top_k = 1
        vectors = gen_vectors(2, dim)
        param = {'collection_name': collection_name,
                 'dimension': dim,
                 'metric_type': MetricType.L2,
                 'nprobe': 1,
                 'top_k': top_k,
                 'index_type': IndexType.IVF_SQ8,
                 'index_param': {
                     'nlist': 16384
                 },
                 'query_vectors': vectors}
        return param
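
# --- Illustrative sketch (not part of the original test suite) ---
# TestCollectionLogic.is_right encodes a simple ordering rule for the permuted call
# sequence: every operation keyed > 10 must come after create_collection (key 10) and
# must not come after drop_collection (key 30). A minimal standalone version of that
# rule, assuming the same func_map keys, could look like this:
def _sequence_is_valid(seq, create_key=10, delete_key=30):
    created = False
    deleted = False
    for op in seq:
        if op > create_key and (not created or deleted):
            return False
        if op == create_key:
            created = True
        elif op == delete_key:
            deleted = True
    return True

# e.g. _sequence_is_valid((0, 1, 10, 11, 30)) -> True; _sequence_is_valid((11, 10, 30)) -> False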
my_git.py
import io
import re

import git
from git import Repo
from threading import Thread
from PyQt5.QtCore import pyqtSignal, QObject


class PipeIO(io.BytesIO):
    """Byte stream handed to git filter-branch; parses its "(n/m)" progress counter."""

    def __init__(self, updater_cb):
        io.BytesIO.__init__(self)
        self.updater_cb = updater_cb

    def write(self, b):
        # Parse the most recent "(n/m)" progress marker from what has been written so far.
        buf = io.BytesIO.getbuffer(self).tobytes()
        progress = re.findall(r"(?<=\()\d+/\d+(?=\))", buf.decode("utf-8"))
        try:
            self.updater_cb(progress[-1])
        except Exception:
            # No progress marker yet (or the callback failed); keep writing regardless.
            pass
        return io.BytesIO.write(self, b)


class MyGit(QObject):
    updated = pyqtSignal(str)
    finished = pyqtSignal()

    def __init__(self, path):
        QObject.__init__(self)
        try:
            self.repo = Repo(path)
        except (git.exc.NoSuchPathError, git.exc.InvalidGitRepositoryError):
            self.repo = None

    def is_valid(self):
        return self.repo is not None

    def is_dirty(self):
        return self.repo.is_dirty()

    def updater_cb(self, data):
        self.updated.emit(data)

    def rewrite_dates(self, commits):
        # Build one env-filter snippet per commit and run git filter-branch in a thread.
        tpl = """
            if [ "$GIT_COMMIT" == "%s" ]; then
                export GIT_AUTHOR_DATE="%s"
                export GIT_COMMITTER_DATE="%s"
            fi
        """
        stdout = PipeIO(self.updater_cb)
        s = ""
        for commit in commits:
            s += tpl % (
                commit["hash"],
                commit["newdatetime"].replace(tzinfo=None),
                commit["newdatetime"].replace(tzinfo=None)
            )
        thread = Thread(target=self.repo.git.filter_branch,
                        args=("-f", "--env-filter", s),
                        kwargs={"output_stream": stdout})  # , max_chunk_size=30)
        thread.start()
        thread.join()
        self.finished.emit()
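
# --- Illustrative usage sketch (assumption, not part of the original module) ---
# MyGit is meant to be driven from a Qt application: connect the `updated`/`finished`
# signals before calling rewrite_dates(). The repository path and commit data below are
# hypothetical placeholders; each commit dict needs a "hash" and a tz-aware "newdatetime".
if __name__ == "__main__":
    from datetime import datetime, timezone

    g = MyGit("/path/to/some/repo")  # hypothetical path; is_valid() is False if it is not a repo
    if g.is_valid() and not g.is_dirty():
        g.updated.connect(lambda progress: print("progress:", progress))
        g.finished.connect(lambda: print("filter-branch finished"))
        g.rewrite_dates([
            {"hash": "abc123", "newdatetime": datetime(2020, 1, 1, tzinfo=timezone.utc)},
        ])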
execute.py
#!/usr/bin/env python
import sys
import time
import socket
from bandwidth_monitor import BandwidthMonitor
from tcpclient import TCPClient
import tcpserver
import threading
from udpclient import UDPClient
from udpserver import Server
from config_helper import read_config_map, update_key_value
import plotly.plotly as py
import plotly.graph_objs as go
from random import randint
import plotly
import globals


def generate_plotly_graph(x_axis, y_axis, protocol):
    """
    Plotly - make charts and dashboards online (https://plot.ly/).
    This function creates a Plotly graph that visualizes the bandwidth measured
    during the last run.
    :param x_axis: x axis values (seconds)
    :param y_axis: y axis values (packet size)
    :param protocol: protocol (udp/tcp)
    :return: opens chrome / default browser with the Plotly graph view
    """
    data = [go.Scatter(x=x_axis, y=y_axis)]
    layout = go.Layout(title=protocol + " Bandwidth summary",
                       xaxis=dict(title="Seconds",
                                  titlefont=dict(
                                      family="Courier New, monospace",
                                      size=18,
                                      color="#7f7f7f"
                                  )),
                       yaxis=dict(title="Packet Size",
                                  titlefont=dict(
                                      family="Courier New, monospace",
                                      size=18,
                                      color="#7f7f7f"
                                  )))
    fig = go.Figure(data=data, layout=layout)
    file_name = "sysrun-" + str(randint(1000, 9999))
    plot_url = py.plot(fig, filename=file_name)
    print "Connection to Plotly has been enabled. Please visit your account"
    print "The current run graph summary is under %s file" % file_name


if __name__ == "__main__":
    plotly.tools.set_credentials_file(username='oramranov', api_key='zf86oqsk6c')
    host = ""
    port = ""
    type = ""
    debug = ""
    visualization = ""
    globals.init()
    running_requests = 0
    total_bandwidth = 0
    max_bandwidth = 0
    monitor = BandwidthMonitor()
    time_test = int(read_config_map("runtime_globals")["test_time"])
    y_axis = []
    x_axis = []
    try:
        host = sys.argv[1]
        port = sys.argv[2]
        protocol = sys.argv[3]
        type = sys.argv[4]
        debug = sys.argv[5]
        visualization = sys.argv[6]
        globals.logger.debug("script parameters transferred successfully.")
    except IndexError:
        host = read_config_map("runtime_globals")["host"][1:-1]
        port = int(read_config_map("runtime_globals")["port"])
        protocol = read_config_map("runtime_globals")["protocol"][1:-1]
        type = read_config_map("runtime_globals")["type"][1:-1]
        debug = read_config_map("runtime_globals")["debug"]
        update_key_value("runtime_globals", "debug", debug)
        visualization = read_config_map("runtime_globals")["visualization"]
        globals.logger.debug("script parameters set to default via the config file.")

    if type == "s":
        if protocol == 'tcp':
            try:
                globals.logger.debug("script has been enabled as a tcp server type.")
                server_obj = tcpserver.TCPServer(int(port))
                thread_manager = threading.Thread(target=server_obj.run)
            except socket.error, e:
                globals.logger.warning("an error has occurred. Message info: " + str(e))
                sys.exit(1)
        elif protocol == 'udp':
            try:
                globals.logger.debug("script has been enabled as a udp server type.")
                server_obj = Server(int(port))
                print "Hit any key to terminate server connection. "
                thread_manager = threading.Thread(target=server_obj.run)
            except socket.error, e:
                globals.logger.warning("an error has occurred. Message info: " + str(e))
                sys.exit(1)
        thread_manager.setDaemon(False)
        thread_manager.start()
        print "Starting Bandwidth monitor"
        while running_requests <= time_test:
            monitor.initialization()
            time.sleep(1)
            monitor.terminate()
            speed = monitor.get_bandwidth() / (1000 * 1000)
            if float(speed) > 0:
                running_requests += 1
                y_axis.append(float(speed))
                x_axis.append(running_requests)
                if float(speed) > float(max_bandwidth):
                    max_bandwidth = speed
                print str(speed) + " Mbytes/Second"
                total_bandwidth = str(float(total_bandwidth) + float(speed))
        print "process finished."
        print "%d seconds summary: " % time_test
        print "====================="
        print "Total bandwidth: " + total_bandwidth
        print "Maximum bandwidth request: " + str(max_bandwidth)
        print "====================="
        # data = [go.Scatter(
        #     x=['2013-10-04 22:23:00', '2013-11-04 22:23:00', '2013-12-04 22:23:00'],
        #     y=[1, 3, 6])]
        monitor.terminate()
        if len(y_axis) > 0 and len(x_axis) > 0:
            generate_plotly_graph(x_axis, y_axis, protocol)
    elif type == "c":
        if protocol == 'tcp':
            globals.logger.debug("script has been enabled as a tcp client type")
            print "Starting TCP client"
            client_obj = TCPClient(host, int(port))
            client_obj.open_conn()
            thread_manager = threading.Thread(target=client_obj.spam)
        elif protocol == 'udp':
            globals.logger.debug("script has been enabled as a udp client type")
            print "Starting UDP client"
            client_obj = UDPClient(host, int(port))
            client_obj.open_conn()
            thread_manager = threading.Thread(target=client_obj.spam)
        thread_manager.setDaemon(False)
        thread_manager.start()
        for i in xrange(time_test):
            monitor.initialization()
            time.sleep(1)
            monitor.terminate()
            curr_payload = monitor.get_bandwidth() / (1000 * 1000)
            print str(curr_payload) + "MBytes/s payload sent."
        print "Data has been transmitted successfully."
        monitor.terminate()
comm.py
from __future__ import division, print_function import sys from sys import version_info from time import sleep from threading import Thread, Event from getports import Serial,ports if version_info[0] == 3: # Python 3 def tobytes(x): return x.encode('latin1') def asbyte(x): return bytes([x]) def asint(x): return x[0] def bytesum(x): return sum(x) def tostr(x): return x.decode('latin1') def toints(x): return [a for a in x] else: # Python 2 def tobytes(x): return x def asbyte(x): return chr(x) def asint(x): return ord(x[0]) def bytesum(x): return sum(bytearray(x)) def tostr(x): return x def toints(bs): return [ord(c) for c in bs] class CommPort(object): BAUDRATE = 1000000 # may NOT be 1200, must match other end # 1Mbaud seems to be fastes reliable Arduino UART speed # BAUDRATE is irrelevant for true USB communication # (used by Leonardo and KL25Z) def __init__(self, port, data_call_on_packet, call_when_connected, call_on_error=lambda x:None): self.portname = port self._respavail = Event() # set when a full command # response has been read. self._data_call_on_packet = data_call_on_packet self._call_when_connected = call_when_connected self._call_on_error = call_on_error def connect(self): """Initiate a connection with the serial port specified upon instantiation. Non-blocking: when connected, calls call_when_connected with no args.""" t1 = Thread(target=self._connect) t1.daemon = True self._readthread = t2 = Thread(target=self._readin) t2.daemon = True t1.start() def _connect(self): """Open the port (resetting the DAQ board), then start a thread reading from it. Call the _call_when_connected callback function to indicate success. """ # print('DEBUG: enter comm._connect',file=sys.stderr) self._reset() self._do_readin = True # set False to kill _readthread self._readthread.start() # print('DEBUG:about to handshake', file=sys.stderr) self._call_when_connected() def _reset(self): """Opens a serial port, makes sure it isn't a Leonardo bootloader, by writing an "E" to it and waiting to see if a new port appears (resetting self.portname, if there is a new port). Sets self.ser to the serial port. """ p = self.portname # Check to see if there is a Leonardo, using self._enum_found = None # None or single NEW port found # by enum thread self._enum_active = True # Turn off to end enum thread. t = Thread(target=self._enum) # enum thread for new ports appearing t.daemon = True # let program stop even if enum thread still running t.start() # print("DEBUG: about to open s1 Serial(",repr(p),",",self.BAUDRATE,")", file=sys.stderr) s1 = Serial(p, baudrate=self.BAUDRATE, timeout=1) #open port # print("DEBUG: back from attempt to open s1 Serial", file=sys.stderr) sleep(0.1) s1.write(b'E') # end leonardo bootloader sleep(1) if self._enum_found is None: # no new ports, so not the leonardo bootloader # reopen port to get clean reset self.ser = s1 sleep(0.6) # 1.5 seconds may be enough, 1.6 seems ok for Uno return s1.close() # close the existing port self._enum_active = False # kill the enum thread # open Leonardo port now that bootloader has stopped p = self.portname = self._enum_found self.ser = Serial(p, baudrate=self.BAUDRATE, timeout=1) sleep(0.1) return def command(self, c, d=b''): """send command c (a single-character string) together with data d (a byte string) and return the data from the response (a byte string). """ mbase = b'!' 
+ c + asbyte(len(d)) + d msg = mbase + asbyte(-bytesum(mbase) % 256) while True: # print("DEBUG: sending message", msg[:2], # " ".join(map(hex, toints(msg[2:]))), file=sys.stderr) self.ser.write(msg) self._respavail.wait(timeout=5) if not self._respavail.is_set(): print('Warning: Command timeout for command {}'.format(c),file=sys.stderr) if c=='H': print('Try killing port-select window, and rerunning after unplugging and replugging the board into the USB port', file=sys.stderr) continue self._respavail.clear() cm, res = self._cmresp if cm == ord(c): return res print('Warning: Invalid command response: sent {} command, response is {} {}'.format(c, chr(cm), res), file=sys.stderr) if version_info[0] == 3: # Python 3 def _readin(self): """Read and process data from the serial port. If it forms a command response, store in _cmresp and set _respavail. _cmresp is tuple( integer command, bytes response) If it forms a data record, call _datacallback with it. """ rd = self.ser.read int_star = b'*'[0] int_bang = b'!'[0] int_E = b'E'[0] # print('DEBUG: readin begin on self.ser=', self.ser, file=sys.stderr) while self._do_readin: first_two=b'' while not first_two: first_two = rd(2) c,cm = first_two # print("DEBUG: c=", c,"clock=", clock(), file=sys.stderr) if c == int_bang: ln = rd(1)[0] data = rd(ln) chk = rd(1) if len(chk)>0 and (c+cm+ln + sum(data) + chk[0]) % 256 == 0: if cm == int_E: self._call_on_error(data) else: self._cmresp = cm, data self._respavail.set() else: print('Warning: Checksum error on',cm,'packet',file=sys.stderr) elif c == int_star: ln = cm data = rd(ln) chk = rd(1) # print('DEBUG: data=', data, 'clock=', clock(), file=sys.stderr) if len(chk)>0 and (c + ln + sum(data) + chk[0]) % 256 == 0: self._data_call_on_packet(data) else: print('Warning: Checksum error on data packet.',file=sys.stderr) elif c: print('Warning: packet frame missing: expecting "!" or "*", but got', hex(c), file=sys.stderr) else: # Python 2 def _readin(self): """Read and process data from the serial port. If it forms a command response, store in _cmresp and set _respavail. If it forms a data record, call _datacallback with it. """ rd = self.ser.read int_star = ord(b'*') int_bang = ord(b'!') int_E = ord(b'E') # print('DEBUG: readin begin on self.ser=', self.ser, file=sys.stderr) while self._do_readin: first_two=b'' while not first_two: first_two = rd(2) c=ord(first_two[0]) cm = ord(first_two[1]) # print("DEBUG: c=", c,"clock=", clock(), file=sys.stderr) if c == int_bang: ln = ord(rd(1)) data = rd(ln) chk = rd(1) if len(chk)>0 and (c+cm+ln + sum(bytearray(data)) + ord(chk)) % 256 == 0: if cm == int_E: self._call_on_error(data) else: self._cmresp = cm, data self._respavail.set() else: print('Warning: Checksum error on',cm,'packet',file=sys.stderr) elif c == int_star: ln = cm data = rd(ln) chk = rd(1) # print('DEBUG: data=', data, 'clock=', clock(), file=sys.stderr) if len(chk)>0 and (c + ln + sum(bytearray(data)) + ord(chk)) % 256 == 0: self._data_call_on_packet(data) else: print('Warning: Checksum error on data packet.',file=sys.stderr) elif c: print('Warning: packet frame missing: expecting "!" or "*", but got', hex(c), file=sys.stderr) def _enum(self): """Keep track of the number of serial ports available. When a new one appears, puts the port name in _enum_found. To stop, set _enum_active to false. """ p1 = ports() while self._enum_active: p2 = ports() if len(p2) > len(p1): self._enum_found = (set(p2) - set(p1)).pop()[1] p1 = p2 sleep(0.1)
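
# --- Illustrative sketch (assumption, not part of the original module) ---
# Both frame types handled by CommPort share one rule: the trailing checksum byte is
# chosen so that the sum of every byte in the frame is 0 modulo 256 (see command() and
# _readin() above). A small standalone helper that builds and verifies such frames,
# assuming Python 3 bytes, could look like this:
def build_frame(marker, payload, command=b''):
    """marker is b'!' for command frames (a command byte follows) or b'*' for data frames."""
    body = marker + command + bytes([len(payload)]) + payload
    return body + bytes([-sum(body) % 256])


def frame_is_valid(frame):
    """A frame is accepted when all of its bytes sum to 0 modulo 256."""
    return sum(frame) % 256 == 0

# e.g. frame_is_valid(build_frame(b'!', b'\x01\x02', command=b'G')) -> True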
wsdump.py
#!/home/topicos/Apps/FacebookBot/facebook-echobot/bin/python2
import argparse
import code
import six
import sys
import threading
import time
import websocket
from six.moves.urllib.parse import urlparse

try:
    import readline
except ImportError:
    pass


def get_encoding():
    encoding = getattr(sys.stdin, "encoding", "")
    if not encoding:
        return "utf-8"
    else:
        return encoding.lower()


OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()


class VAction(argparse.Action):

    def __call__(self, parser, args, values, option_string=None):
        if values is None:
            values = "1"
        try:
            values = int(values)
        except ValueError:
            values = values.count("v") + 1
        setattr(args, self.dest, values)


def parse_args():
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")

    return parser.parse_args()


class RawInput():

    def raw_input(self, prompt):
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)

        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")

        return line


class InteractiveConsole(RawInput, code.InteractiveConsole):

    def write(self, data):
        sys.stdout.write("\033[2K\033[E")
        # sys.stdout.write("\n")
        sys.stdout.write("\033[34m< " + data + "\033[39m")
        sys.stdout.write("\n> ")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("> ")


class NonInteractive(RawInput):

    def write(self, data):
        sys.stdout.write(data)
        sys.stdout.write("\n")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")


def main():
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        opts = {"cert_reqs": websocket.ssl.CERT_NONE, "check_hostname": False}
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return (websocket.ABNF.OPCODE_CLOSE, None)
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return (frame.opcode, frame.data)
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return (frame.opcode, None)
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data

        return frame.opcode, frame.data

    def recv_ws():
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)

            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)

            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()

    if args.text:
        ws.send(args.text)

    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        print(e)
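
# --- Usage note (sketch, not part of the original script) ---
# Typical invocations; the second endpoint is a hypothetical example:
#
#   wsdump.py ws://echo.websocket.org/                # interactive session
#   wsdump.py ws://echo.websocket.org/ -t "hello" -v  # send initial text, show opcodes
#   wsdump.py wss://example.org/socket -n --timings   # ignore bad certs, print timings
#
# -v/-vv controls verbosity (show opcodes / trace the websocket module), -r switches to
# raw, non-interactive output, and -p sets an http proxy such as http://127.0.0.1:8080.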
test_credentials.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import uuid import threading import os import math import time import tempfile import shutil from datetime import datetime, timedelta import sys from dateutil.tz import tzlocal from botocore.exceptions import CredentialRetrievalError from tests import mock, unittest, IntegerRefresher, BaseEnvVar, random_chars from tests import temporary_file, StubbedSession, SessionHTTPStubber from botocore import UNSIGNED from botocore.credentials import EnvProvider, ContainerProvider from botocore.credentials import InstanceMetadataProvider from botocore.credentials import Credentials, ReadOnlyCredentials from botocore.credentials import AssumeRoleProvider, ProfileProviderBuilder from botocore.credentials import CanonicalNameCredentialSourcer from botocore.credentials import DeferredRefreshableCredentials from botocore.credentials import create_credential_resolver from botocore.credentials import JSONFileCache from botocore.credentials import SSOProvider from botocore.config import Config from botocore.session import Session from botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError from botocore.stub import Stubber from botocore.utils import datetime2timestamp class TestCredentialRefreshRaces(unittest.TestCase): def assert_consistent_credentials_seen(self, creds, func): collected = [] self._run_threads(20, func, collected) for creds in collected: # During testing, the refresher uses it's current # refresh count as the values for the access, secret, and # token value. This means that at any given point in time, # the credentials should be something like: # # ReadOnlyCredentials('1', '1', '1') # ReadOnlyCredentials('2', '2', '2') # ... # ReadOnlyCredentials('30', '30', '30') # # This makes it really easy to verify we see a consistent # set of credentials from the same time period. We just # check if all the credential values are the same. If # we ever see something like: # # ReadOnlyCredentials('1', '2', '1') # # We fail. This is because we're using the access_key # from the first refresh ('1'), the secret key from # the second refresh ('2'), and the token from the # first refresh ('1'). 
self.assertTrue(creds[0] == creds[1] == creds[2], creds) def assert_non_none_retrieved_credentials(self, func): collected = [] self._run_threads(50, func, collected) for cred in collected: self.assertIsNotNone(cred) def _run_threads(self, num_threads, func, collected): threads = [] for _ in range(num_threads): threads.append(threading.Thread(target=func, args=(collected,))) for thread in threads: thread.start() for thread in threads: thread.join() def test_has_no_race_conditions(self): creds = IntegerRefresher( creds_last_for=2, advisory_refresh=1, mandatory_refresh=0 ) def _run_in_thread(collected): for _ in range(4000): frozen = creds.get_frozen_credentials() collected.append((frozen.access_key, frozen.secret_key, frozen.token)) start = time.time() self.assert_consistent_credentials_seen(creds, _run_in_thread) end = time.time() # creds_last_for = 2 seconds (from above) # So, for example, if execution time took 6.1 seconds, then # we should see a maximum number of refreshes being (6 / 2.0) + 1 = 4 max_calls_allowed = math.ceil((end - start) / 2.0) + 1 self.assertTrue(creds.refresh_counter <= max_calls_allowed, "Too many cred refreshes, max: %s, actual: %s, " "time_delta: %.4f" % (max_calls_allowed, creds.refresh_counter, (end - start))) def test_no_race_for_immediate_advisory_expiration(self): creds = IntegerRefresher( creds_last_for=1, advisory_refresh=1, mandatory_refresh=0 ) def _run_in_thread(collected): for _ in range(100): frozen = creds.get_frozen_credentials() collected.append((frozen.access_key, frozen.secret_key, frozen.token)) self.assert_consistent_credentials_seen(creds, _run_in_thread) def test_no_race_for_initial_refresh_of_deferred_refreshable(self): def get_credentials(): expiry_time = ( datetime.now(tzlocal()) + timedelta(hours=24)).isoformat() return { 'access_key': 'my-access-key', 'secret_key': 'my-secret-key', 'token': 'my-token', 'expiry_time': expiry_time } deferred_creds = DeferredRefreshableCredentials( get_credentials, 'fixed') def _run_in_thread(collected): frozen = deferred_creds.get_frozen_credentials() collected.append(frozen) self.assert_non_none_retrieved_credentials(_run_in_thread) class BaseAssumeRoleTest(BaseEnvVar): def setUp(self): super(BaseAssumeRoleTest, self).setUp() self.tempdir = tempfile.mkdtemp() self.config_file = os.path.join(self.tempdir, 'config') self.environ['AWS_CONFIG_FILE'] = self.config_file self.environ['AWS_SHARED_CREDENTIALS_FILE'] = str(uuid.uuid4()) def tearDown(self): shutil.rmtree(self.tempdir) super(BaseAssumeRoleTest, self).tearDown() def some_future_time(self): timeobj = datetime.now(tzlocal()) return timeobj + timedelta(hours=24) def create_assume_role_response(self, credentials, expiration=None): if expiration is None: expiration = self.some_future_time() response = { 'Credentials': { 'AccessKeyId': credentials.access_key, 'SecretAccessKey': credentials.secret_key, 'SessionToken': credentials.token, 'Expiration': expiration }, 'AssumedRoleUser': { 'AssumedRoleId': 'myroleid', 'Arn': 'arn:aws:iam::1234567890:user/myuser' } } return response def create_random_credentials(self): return Credentials( 'fake-%s' % random_chars(15), 'fake-%s' % random_chars(35), 'fake-%s' % random_chars(45) ) def assert_creds_equal(self, c1, c2): c1_frozen = c1 if not isinstance(c1_frozen, ReadOnlyCredentials): c1_frozen = c1.get_frozen_credentials() c2_frozen = c2 if not isinstance(c2_frozen, ReadOnlyCredentials): c2_frozen = c2.get_frozen_credentials() self.assertEqual(c1_frozen, c2_frozen) def write_config(self, config): with 
open(self.config_file, 'w') as f: f.write(config) class TestAssumeRole(BaseAssumeRoleTest): def setUp(self): super(TestAssumeRole, self).setUp() self.environ['AWS_ACCESS_KEY_ID'] = 'access_key' self.environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key' self.metadata_provider = self.mock_provider(InstanceMetadataProvider) self.env_provider = self.mock_provider(EnvProvider) self.container_provider = self.mock_provider(ContainerProvider) self.mock_client_creator = mock.Mock(spec=Session.create_client) self.actual_client_region = None current_dir = os.path.dirname(os.path.abspath(__file__)) credential_process = os.path.join( current_dir, 'utils', 'credentialprocess.py' ) self.credential_process = '%s %s' % ( sys.executable, credential_process ) def mock_provider(self, provider_cls): mock_instance = mock.Mock(spec=provider_cls) mock_instance.load.return_value = None mock_instance.METHOD = provider_cls.METHOD mock_instance.CANONICAL_NAME = provider_cls.CANONICAL_NAME return mock_instance def create_session(self, profile=None): session = StubbedSession(profile=profile) # We have to set bogus credentials here or otherwise we'll trigger # an early credential chain resolution. sts = session.create_client( 'sts', aws_access_key_id='spam', aws_secret_access_key='eggs', ) self.mock_client_creator.return_value = sts assume_role_provider = AssumeRoleProvider( load_config=lambda: session.full_config, client_creator=self.mock_client_creator, cache={}, profile_name=profile, credential_sourcer=CanonicalNameCredentialSourcer([ self.env_provider, self.container_provider, self.metadata_provider ]), profile_provider_builder=ProfileProviderBuilder( session, sso_token_cache=JSONFileCache(self.tempdir), ), ) stubber = session.stub('sts') stubber.activate() component_name = 'credential_provider' resolver = session.get_component(component_name) available_methods = [p.METHOD for p in resolver.providers] replacements = { 'env': self.env_provider, 'iam-role': self.metadata_provider, 'container-role': self.container_provider, 'assume-role': assume_role_provider } for name, provider in replacements.items(): try: index = available_methods.index(name) except ValueError: # The provider isn't in the session continue resolver.providers[index] = provider session.register_component( 'credential_provider', resolver ) return session, stubber def test_assume_role(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n\n' '[profile B]\n' 'aws_access_key_id = abc123\n' 'aws_secret_access_key = def456\n' ) self.write_config(config) expected_creds = self.create_random_credentials() response = self.create_assume_role_response(expected_creds) session, stubber = self.create_session(profile='A') stubber.add_response('assume_role', response) actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, expected_creds) stubber.assert_no_pending_responses() def test_environment_credential_source(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'credential_source = Environment\n' ) self.write_config(config) environment_creds = self.create_random_credentials() self.env_provider.load.return_value = environment_creds expected_creds = self.create_random_credentials() response = self.create_assume_role_response(expected_creds) session, stubber = self.create_session(profile='A') stubber.add_response('assume_role', response) actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, expected_creds) stubber.assert_no_pending_responses() 
self.assertEqual(self.env_provider.load.call_count, 1) def test_instance_metadata_credential_source(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'credential_source = Ec2InstanceMetadata\n' ) self.write_config(config) metadata_creds = self.create_random_credentials() self.metadata_provider.load.return_value = metadata_creds expected_creds = self.create_random_credentials() response = self.create_assume_role_response(expected_creds) session, stubber = self.create_session(profile='A') stubber.add_response('assume_role', response) actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, expected_creds) stubber.assert_no_pending_responses() self.assertEqual(self.metadata_provider.load.call_count, 1) def test_container_credential_source(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'credential_source = EcsContainer\n' ) self.write_config(config) container_creds = self.create_random_credentials() self.container_provider.load.return_value = container_creds expected_creds = self.create_random_credentials() response = self.create_assume_role_response(expected_creds) session, stubber = self.create_session(profile='A') stubber.add_response('assume_role', response) actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, expected_creds) stubber.assert_no_pending_responses() self.assertEqual(self.container_provider.load.call_count, 1) def test_invalid_credential_source(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'credential_source = CustomInvalidProvider\n' ) self.write_config(config) with self.assertRaises(InvalidConfigError): session, _ = self.create_session(profile='A') session.get_credentials() def test_misconfigured_source_profile(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n' '[profile B]\n' 'region = us-west-2\n' ) self.write_config(config) with self.assertRaises(InvalidConfigError): session, _ = self.create_session(profile='A') session.get_credentials().get_frozen_credentials() def test_recursive_assume_role(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n\n' '[profile B]\n' 'role_arn = arn:aws:iam::123456789:role/RoleB\n' 'source_profile = C\n\n' '[profile C]\n' 'aws_access_key_id = abc123\n' 'aws_secret_access_key = def456\n' ) self.write_config(config) profile_b_creds = self.create_random_credentials() profile_b_response = self.create_assume_role_response(profile_b_creds) profile_a_creds = self.create_random_credentials() profile_a_response = self.create_assume_role_response(profile_a_creds) session, stubber = self.create_session(profile='A') stubber.add_response('assume_role', profile_b_response) stubber.add_response('assume_role', profile_a_response) actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, profile_a_creds) stubber.assert_no_pending_responses() def test_recursive_assume_role_stops_at_static_creds(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n\n' '[profile B]\n' 'aws_access_key_id = abc123\n' 'aws_secret_access_key = def456\n' 'role_arn = arn:aws:iam::123456789:role/RoleB\n' 'source_profile = C\n\n' '[profile C]\n' 'aws_access_key_id = abc123\n' 'aws_secret_access_key = def456\n' ) self.write_config(config) profile_a_creds = self.create_random_credentials() profile_a_response = self.create_assume_role_response(profile_a_creds) 
session, stubber = self.create_session(profile='A') stubber.add_response('assume_role', profile_a_response) actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, profile_a_creds) stubber.assert_no_pending_responses() def test_infinitely_recursive_assume_role(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = A\n' ) self.write_config(config) with self.assertRaises(InfiniteLoopConfigError): session, _ = self.create_session(profile='A') session.get_credentials() def test_process_source_profile(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n' '[profile B]\n' 'credential_process = %s\n' % self.credential_process ) self.write_config(config) expected_creds = self.create_random_credentials() response = self.create_assume_role_response(expected_creds) session, stubber = self.create_session(profile='A') stubber.add_response('assume_role', response) actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, expected_creds) stubber.assert_no_pending_responses() # Assert that the client was created with the credentials from the # credential process. self.assertEqual(self.mock_client_creator.call_count, 1) _, kwargs = self.mock_client_creator.call_args_list[0] expected_kwargs = { 'aws_access_key_id': 'spam', 'aws_secret_access_key': 'eggs', 'aws_session_token': None, } self.assertEqual(kwargs, expected_kwargs) def test_web_identity_source_profile(self): token_path = os.path.join(self.tempdir, 'token') with open(token_path, 'w') as token_file: token_file.write('a.token') config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n' '[profile B]\n' 'role_arn = arn:aws:iam::123456789:role/RoleB\n' 'web_identity_token_file = %s\n' % token_path ) self.write_config(config) session, stubber = self.create_session(profile='A') identity_creds = self.create_random_credentials() identity_response = self.create_assume_role_response(identity_creds) stubber.add_response( 'assume_role_with_web_identity', identity_response, ) expected_creds = self.create_random_credentials() assume_role_response = self.create_assume_role_response(expected_creds) stubber.add_response('assume_role', assume_role_response) actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, expected_creds) stubber.assert_no_pending_responses() # Assert that the client was created with the credentials from the # assume role with web identity call. self.assertEqual(self.mock_client_creator.call_count, 1) _, kwargs = self.mock_client_creator.call_args_list[0] expected_kwargs = { 'aws_access_key_id': identity_creds.access_key, 'aws_secret_access_key': identity_creds.secret_key, 'aws_session_token': identity_creds.token, } self.assertEqual(kwargs, expected_kwargs) def test_web_identity_source_profile_ignores_env_vars(self): token_path = os.path.join(self.tempdir, 'token') with open(token_path, 'w') as token_file: token_file.write('a.token') self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB' config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n' '[profile B]\n' 'web_identity_token_file = %s\n' % token_path ) self.write_config(config) session, _ = self.create_session(profile='A') # The config is split between the profile and the env, we # should only be looking at the profile so this should raise # a configuration error. 
with self.assertRaises(InvalidConfigError): session.get_credentials() def test_sso_source_profile(self): token_cache_key = 'f395038c92f1828cbb3991d2d6152d326b895606' cached_token = { 'accessToken': 'a.token', 'expiresAt': self.some_future_time(), } temp_cache = JSONFileCache(self.tempdir) temp_cache[token_cache_key] = cached_token config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n' '[profile B]\n' 'sso_region = us-east-1\n' 'sso_start_url = https://test.url/start\n' 'sso_role_name = SSORole\n' 'sso_account_id = 1234567890\n' ) self.write_config(config) session, sts_stubber = self.create_session(profile='A') client_config = Config( region_name='us-east-1', signature_version=UNSIGNED, ) sso_stubber = session.stub('sso', config=client_config) sso_stubber.activate() # The expiration needs to be in milliseconds expiration = datetime2timestamp(self.some_future_time()) * 1000 sso_role_creds = self.create_random_credentials() sso_role_response = { 'roleCredentials': { 'accessKeyId': sso_role_creds.access_key, 'secretAccessKey': sso_role_creds.secret_key, 'sessionToken': sso_role_creds.token, 'expiration': int(expiration), } } sso_stubber.add_response('get_role_credentials', sso_role_response) expected_creds = self.create_random_credentials() assume_role_response = self.create_assume_role_response(expected_creds) sts_stubber.add_response('assume_role', assume_role_response) actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, expected_creds) sts_stubber.assert_no_pending_responses() # Assert that the client was created with the credentials from the # SSO get role credentials response self.assertEqual(self.mock_client_creator.call_count, 1) _, kwargs = self.mock_client_creator.call_args_list[0] expected_kwargs = { 'aws_access_key_id': sso_role_creds.access_key, 'aws_secret_access_key': sso_role_creds.secret_key, 'aws_session_token': sso_role_creds.token, } self.assertEqual(kwargs, expected_kwargs) def test_web_identity_credential_source_ignores_env_vars(self): token_path = os.path.join(self.tempdir, 'token') with open(token_path, 'w') as token_file: token_file.write('a.token') self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB' self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = token_path config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'credential_source = Environment\n' ) self.write_config(config) session, _ = self.create_session(profile='A') # We should not get credentials from web-identity configured in the # environment when the Environment credential_source is set. # There are no Environment credentials, so this should raise a # retrieval error. 
with self.assertRaises(CredentialRetrievalError): session.get_credentials() def test_self_referential_profile(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = A\n' 'aws_access_key_id = abc123\n' 'aws_secret_access_key = def456\n' ) self.write_config(config) expected_creds = self.create_random_credentials() response = self.create_assume_role_response(expected_creds) session, stubber = self.create_session(profile='A') stubber.add_response('assume_role', response) actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, expected_creds) stubber.assert_no_pending_responses() def create_stubbed_sts_client(self, session): expected_creds = self.create_random_credentials() _original_create_client = session.create_client def create_client_sts_stub(service, *args, **kwargs): client = _original_create_client(service, *args, **kwargs) stub = Stubber(client) response = self.create_assume_role_response(expected_creds) self.actual_client_region = client.meta.region_name stub.add_response('assume_role', response) stub.activate() return client return create_client_sts_stub, expected_creds def test_assume_role_uses_correct_region(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n\n' '[profile B]\n' 'aws_access_key_id = abc123\n' 'aws_secret_access_key = def456\n' ) self.write_config(config) session = Session(profile='A') # Verify that when we configure the session with a specific region # that we use that region when creating the sts client. session.set_config_variable('region', 'cn-north-1') create_client, expected_creds = self.create_stubbed_sts_client(session) session.create_client = create_client resolver = create_credential_resolver(session) provider = resolver.get_provider('assume-role') creds = provider.load() self.assert_creds_equal(creds, expected_creds) self.assertEqual(self.actual_client_region, 'cn-north-1') class TestAssumeRoleWithWebIdentity(BaseAssumeRoleTest): def setUp(self): super(TestAssumeRoleWithWebIdentity, self).setUp() self.token_file = os.path.join(self.tempdir, 'token.jwt') self.write_token('totally.a.token') def write_token(self, token, path=None): if path is None: path = self.token_file with open(path, 'w') as f: f.write(token) def assert_session_credentials(self, expected_params, **kwargs): expected_creds = self.create_random_credentials() response = self.create_assume_role_response(expected_creds) session = StubbedSession(**kwargs) stubber = session.stub('sts') stubber.add_response( 'assume_role_with_web_identity', response, expected_params ) stubber.activate() actual_creds = session.get_credentials() self.assert_creds_equal(actual_creds, expected_creds) stubber.assert_no_pending_responses() def test_assume_role(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'role_session_name = sname\n' 'web_identity_token_file = %s\n' ) % self.token_file self.write_config(config) expected_params = { 'RoleArn': 'arn:aws:iam::123456789:role/RoleA', 'RoleSessionName': 'sname', 'WebIdentityToken': 'totally.a.token', } self.assert_session_credentials(expected_params, profile='A') def test_assume_role_env_vars(self): config = ( '[profile B]\n' 'region = us-west-2\n' ) self.write_config(config) self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB' self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = self.token_file self.environ['AWS_ROLE_SESSION_NAME'] = 'bname' expected_params = { 'RoleArn': 'arn:aws:iam::123456789:role/RoleB', 
'RoleSessionName': 'bname', 'WebIdentityToken': 'totally.a.token', } self.assert_session_credentials(expected_params) def test_assume_role_env_vars_do_not_take_precedence(self): config = ( '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'role_session_name = aname\n' 'web_identity_token_file = %s\n' ) % self.token_file self.write_config(config) different_token = os.path.join(self.tempdir, str(uuid.uuid4())) self.write_token('totally.different.token', path=different_token) self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleC' self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = different_token self.environ['AWS_ROLE_SESSION_NAME'] = 'cname' expected_params = { 'RoleArn': 'arn:aws:iam::123456789:role/RoleA', 'RoleSessionName': 'aname', 'WebIdentityToken': 'totally.a.token', } self.assert_session_credentials(expected_params, profile='A') class TestProcessProvider(unittest.TestCase): def setUp(self): current_dir = os.path.dirname(os.path.abspath(__file__)) credential_process = os.path.join( current_dir, 'utils', 'credentialprocess.py' ) self.credential_process = '%s %s' % ( sys.executable, credential_process ) self.environ = os.environ.copy() self.environ_patch = mock.patch('os.environ', self.environ) self.environ_patch.start() def tearDown(self): self.environ_patch.stop() def test_credential_process(self): config = ( '[profile processcreds]\n' 'credential_process = %s\n' ) config = config % self.credential_process with temporary_file('w') as f: f.write(config) f.flush() self.environ['AWS_CONFIG_FILE'] = f.name credentials = Session(profile='processcreds').get_credentials() self.assertEqual(credentials.access_key, 'spam') self.assertEqual(credentials.secret_key, 'eggs') def test_credential_process_returns_error(self): config = ( '[profile processcreds]\n' 'credential_process = %s --raise-error\n' ) config = config % self.credential_process with temporary_file('w') as f: f.write(config) f.flush() self.environ['AWS_CONFIG_FILE'] = f.name session = Session(profile='processcreds') # This regex validates that there is no substring: b' # The reason why we want to validate that is that we want to # make sure that stderr is actually decoded so that in # exceptional cases the error is properly formatted. # As for how the regex works: # `(?!b').` is a negative lookahead, meaning that it will only # match if it is not followed by the pattern `b'`. Since it is # followed by a `.` it will match any character not followed by # that pattern. `((?!hede).)*` does that zero or more times. The # final pattern adds `^` and `$` to anchor the beginning and end # of the string so we can know the whole string is consumed. # Finally `(?s)` at the beginning makes dots match newlines so # we can handle a multi-line string. 
reg = r"(?s)^((?!b').)*$" with self.assertRaisesRegex(CredentialRetrievalError, reg): session.get_credentials() class TestSTSRegional(BaseAssumeRoleTest): def add_assume_role_http_response(self, stubber): stubber.add_response( body=self._get_assume_role_body('AssumeRole')) def add_assume_role_with_web_identity_http_response(self, stubber): stubber.add_response( body=self._get_assume_role_body('AssumeRoleWithWebIdentity')) def _get_assume_role_body(self, method_name): expiration = self.some_future_time() body = ( '<{method_name}Response>' ' <{method_name}Result>' ' <AssumedRoleUser>' ' <Arn>arn:aws:sts::0123456:user</Arn>' ' <AssumedRoleId>AKID:mysession-1567020004</AssumedRoleId>' ' </AssumedRoleUser>' ' <Credentials>' ' <AccessKeyId>AccessKey</AccessKeyId>' ' <SecretAccessKey>SecretKey</SecretAccessKey>' ' <SessionToken>SessionToken</SessionToken>' ' <Expiration>{expiration}</Expiration>' ' </Credentials>' ' </{method_name}Result>' '</{method_name}Response>' ).format(method_name=method_name, expiration=expiration) return body.encode('utf-8') def make_stubbed_client_call_to_region(self, session, stubber, region): ec2 = session.create_client('ec2', region_name=region) stubber.add_response(body=b'<DescribeRegionsResponse/>') ec2.describe_regions() def test_assume_role_uses_same_region_as_client(self): config = ( '[profile A]\n' 'sts_regional_endpoints = regional\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n\n' '[profile B]\n' 'aws_access_key_id = abc123\n' 'aws_secret_access_key = def456\n' ) self.write_config(config) session = Session(profile='A') with SessionHTTPStubber(session) as stubber: self.add_assume_role_http_response(stubber) # Make an arbitrary client and API call as we are really only # looking to make sure the STS assume role call uses the correct # endpoint. self.make_stubbed_client_call_to_region( session, stubber, 'us-west-2') self.assertEqual( stubber.requests[0].url, 'https://sts.us-west-2.amazonaws.com/' ) def test_assume_role_web_identity_uses_same_region_as_client(self): token_file = os.path.join(self.tempdir, 'token.jwt') with open(token_file, 'w') as f: f.write('some-token') config = ( '[profile A]\n' 'sts_regional_endpoints = regional\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'web_identity_token_file = %s\n' 'source_profile = B\n\n' '[profile B]\n' 'aws_access_key_id = abc123\n' 'aws_secret_access_key = def456\n' % token_file ) self.write_config(config) # Make an arbitrary client and API call as we are really only # looking to make sure the STS assume role call uses the correct # endpoint. session = Session(profile='A') with SessionHTTPStubber(session) as stubber: self.add_assume_role_with_web_identity_http_response(stubber) # Make an arbitrary client and API call as we are really only # looking to make sure the STS assume role call uses the correct # endpoint. self.make_stubbed_client_call_to_region( session, stubber, 'us-west-2') self.assertEqual( stubber.requests[0].url, 'https://sts.us-west-2.amazonaws.com/' )
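# Illustrative, self-contained sketch (not part of the test suite above) of the
# credential_process mechanism that TestProcessProvider exercises. It assumes
# botocore is installed; the profile name, file paths and key values below are
# made up for the example.
import json
import os
import sys
import tempfile

import botocore.session


def demo_credential_process():
    workdir = tempfile.mkdtemp()

    # A trivial credential process: it prints the JSON document botocore
    # expects ({"Version": 1, "AccessKeyId": ..., "SecretAccessKey": ...}).
    helper = os.path.join(workdir, 'creds_helper.py')
    with open(helper, 'w') as f:
        f.write(
            'import json\n'
            'print(json.dumps({"Version": 1, '
            '"AccessKeyId": "spam", "SecretAccessKey": "eggs"}))\n'
        )

    config_file = os.path.join(workdir, 'config')
    with open(config_file, 'w') as f:
        f.write(
            '[profile processcreds]\n'
            'credential_process = %s %s\n' % (sys.executable, helper)
        )

    os.environ['AWS_CONFIG_FILE'] = config_file
    creds = botocore.session.Session(profile='processcreds').get_credentials()
    print(creds.access_key, creds.secret_key)  # spam eggs


if __name__ == '__main__':
    demo_credential_process()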
dataengine-service_configure.py
#!/usr/bin/python3 # ***************************************************************************** # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # ****************************************************************************** import argparse import datalab.actions_lib import datalab.fab import datalab.meta_lib import json import logging import multiprocessing import os import sys import traceback from datalab.common_lib import manage_pkg from fabric import * import subprocess parser = argparse.ArgumentParser() parser.add_argument('--uuid', type=str, default='') args = parser.parse_args() def configure_dataengine_service(instance, emr_conf): emr_conf['instance_ip'] = instance.get('PrivateIpAddress') try: logging.info('[CREATING DATALAB SSH USER ON DATAENGINE SERVICE]') print('[CREATING DATALAB SSH USER ON DATAENGINE SERVICE]') params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \ (emr_conf['instance_ip'], emr_conf['key_path'], emr_conf['initial_user'], emr_conf['os_user'], emr_conf['sudo_group']) try: subprocess.run("~/scripts/{}.py {}".format('create_ssh_user', params), shell=True, check=True) except: traceback.print_exc() raise Exception except Exception as err: datalab.fab.append_result("Failed to create DataLab ssh user.", str(err)) datalab.actions_lib.terminate_emr(emr_conf['cluster_id']) sys.exit(1) # configuring proxy on Data Engine service try: logging.info('[CONFIGURE PROXY ON DATAENGINE SERVICE]') print('[CONFIGURE PROXY ON DATAENGINE SERVICE]') additional_config = {"proxy_host": emr_conf['edge_instance_hostname'], "proxy_port": "3128"} params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \ .format(emr_conf['instance_ip'], emr_conf['cluster_name'], emr_conf['key_path'], json.dumps(additional_config), emr_conf['os_user']) try: subprocess.run("~/scripts/{}.py {}".format('common_configure_proxy', params), shell=True, check=True) except: traceback.print_exc() raise Exception except Exception as err: datalab.fab.append_result("Failed to configure proxy.", str(err)) datalab.actions_lib.terminate_emr(emr_conf['cluster_id']) sys.exit(1) try: logging.info('[CONFIGURE DATAENGINE SERVICE]') print('[CONFIGURE DATAENGINE SERVICE]') try: datalab.fab.configure_data_engine_service_pip(emr_conf['instance_ip'], emr_conf['os_user'], emr_conf['key_path'], True) global conn conn = datalab.fab.init_datalab_connection(emr_conf['instance_ip'], emr_conf['os_user'], emr_conf['key_path']) conn.sudo('''bash -c 'echo "[main]" > /etc/yum/pluginconf.d/priorities.conf ; echo "enabled = 0" >> /etc/yum/pluginconf.d/priorities.conf' ''') manage_pkg('-y install', 'remote', 'R-devel') conn.close() except: traceback.print_exc() raise Exception except Exception as err: datalab.fab.append_result("Failed to configure 
dataengine service.", str(err)) datalab.actions_lib.terminate_emr(emr_conf['cluster_id']) sys.exit(1) try: print('[SETUP EDGE REVERSE PROXY TEMPLATE]') logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]') cluster_master_instances = emr_conf['cluster_master_instances'] slaves = [] for idx, instance in enumerate(emr_conf['cluster_core_instances']): slave = { 'name': 'datanode{}'.format(idx + 1), 'ip': instance.get('PrivateIpAddress'), 'dns': instance.get('PrivateDnsName') } slaves.append(slave) additional_info = { "computational_name": emr_conf['computational_name'], "master_ip": cluster_master_instances[0].get('PrivateIpAddress'), "master_dns": cluster_master_instances[0].get('PrivateDnsName'), "slaves": slaves, "tensor": False } params = "--edge_hostname {} " \ "--keyfile {} " \ "--os_user {} " \ "--type {} " \ "--exploratory_name {} " \ "--additional_info '{}'"\ .format(emr_conf['edge_instance_hostname'], emr_conf['key_path'], emr_conf['os_user'], 'dataengine-service', emr_conf['exploratory_name'], json.dumps(additional_info)) try: subprocess.run("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params), shell=True, check=True) except: datalab.fab.append_result("Failed edge reverse proxy template") raise Exception except Exception as err: datalab.fab.append_result("Failed edge reverse proxy template", str(err)) datalab.actions_lib.terminate_emr(emr_conf['cluster_id']) sys.exit(1) try: print('[INSTALLING USERs KEY]') logging.info('[INSTALLING USERs KEY]') additional_config = {"user_keyname": emr_conf['user_keyname'], "user_keydir": os.environ['conf_key_dir']} params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format( emr_conf['instance_ip'], emr_conf['key_path'], json.dumps(additional_config), emr_conf['os_user']) try: subprocess.run("~/scripts/{}.py {}".format('install_user_key', params), shell=True, check=True) except: traceback.print_exc() raise Exception except Exception as err: datalab.fab.append_result("Failed installing users key", str(err)) datalab.actions_lib.terminate_emr(emr_conf['cluster_id']) sys.exit(1) if __name__ == "__main__": local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id']) local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.INFO, filename=local_log_filepath) try: datalab.actions_lib.create_aws_config_files() print('Generating infrastructure names and tags') emr_conf = dict() if 'exploratory_name' in os.environ: emr_conf['exploratory_name'] = os.environ['exploratory_name'] else: emr_conf['exploratory_name'] = '' if 'computational_name' in os.environ: emr_conf['computational_name'] = os.environ['computational_name'] else: emr_conf['computational_name'] = '' emr_conf['apps'] = 'Hadoop Hive Hue Spark Livy' emr_conf['service_base_name'] = os.environ['conf_service_base_name'] emr_conf['project_name'] = os.environ['project_name'] emr_conf['endpoint_name'] = os.environ['endpoint_name'] emr_conf['tag_name'] = emr_conf['service_base_name'] + '-tag' emr_conf['key_name'] = os.environ['conf_key_name'] emr_conf['region'] = os.environ['aws_region'] emr_conf['release_label'] = os.environ['emr_version'] emr_conf['master_instance_type'] = os.environ['emr_master_instance_type'] emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type'] emr_conf['instance_count'] = os.environ['emr_instance_count'] emr_conf['notebook_ip'] = 
datalab.meta_lib.get_instance_ip_address( emr_conf['tag_name'], os.environ['notebook_instance_name']).get('Private') emr_conf['network_type'] = os.environ['conf_network_type'] emr_conf['role_service_name'] = os.environ['emr_service_role'] emr_conf['role_ec2_name'] = os.environ['emr_ec2_role'] emr_conf['tags'] = "Name={0}-{1}-{2}-des-{3}-{4}," \ "{0}-tag={0}-{1}-{2}-des-{3}-{4}," \ "Notebook={5}," \ "State=not-configured," \ "Endpoint_tag={2}".format( emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'], emr_conf['exploratory_name'], args.uuid, os.environ['notebook_instance_name']) emr_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}-{4}' \ .format(emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'], emr_conf['computational_name'], args.uuid) emr_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name']).lower().replace('_', '-') tag = {"Key": "{}-tag".format(emr_conf['service_base_name']), "Value": "{}-{}-{}-subnet".format( emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'])} emr_conf['subnet_cidr'] = datalab.meta_lib.get_subnet_by_tag(tag) emr_conf['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']) emr_conf['all_ip_cidr'] = '0.0.0.0/0' emr_conf['additional_emr_sg_name'] = '{}-{}-{}-de-se-additional-sg'.format(emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name']) emr_conf['vpc_id'] = os.environ['aws_vpc_id'] emr_conf['cluster_id'] = datalab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name']) emr_conf['cluster_instances'] = datalab.meta_lib.get_emr_instances_list(emr_conf['cluster_id']) emr_conf['cluster_master_instances'] = datalab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'], 'MASTER') emr_conf['cluster_core_instances'] = datalab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'], 'CORE') emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name']) emr_conf['edge_instance_hostname'] = datalab.meta_lib.get_instance_private_ip_address( emr_conf['tag_name'], emr_conf['edge_instance_name']) emr_conf['edge_instance_hostname'] = datalab.meta_lib.get_instance_hostname(emr_conf['tag_name'], emr_conf['edge_instance_name']) emr_conf['user_keyname'] = emr_conf['project_name'] emr_conf['os_user'] = os.environ['conf_os_user'] emr_conf['initial_user'] = 'ec2-user' emr_conf['sudo_group'] = 'wheel' except Exception as err: datalab.fab.append_result("Failed to generate variables dictionary", str(err)) datalab.actions_lib.terminate_emr(emr_conf['cluster_id']) sys.exit(1) try: jobs = [] for instance in emr_conf['cluster_instances']: p = multiprocessing.Process(target=configure_dataengine_service, args=(instance, emr_conf)) jobs.append(p) p.start() for job in jobs: job.join() for job in jobs: if job.exitcode != 0: raise Exception except: traceback.print_exc() raise Exception try: logging.info('[SUMMARY]') ip_address = emr_conf['cluster_master_instances'][0].get('PrivateIpAddress') emr_master_url = "http://" + ip_address + ":8088" emr_master_acces_url = "https://{}/{}_{}/".format(emr_conf['edge_instance_hostname'], emr_conf['exploratory_name'], emr_conf['computational_name']) logging.info('[SUMMARY]') print('[SUMMARY]') print("Service base name: {}".format(emr_conf['service_base_name'])) print("Cluster name: {}".format(emr_conf['cluster_name'])) print("Cluster id: 
{}".format(datalab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name']))) print("Key name: {}".format(emr_conf['key_name'])) print("Region: {}".format(emr_conf['region'])) print("EMR version: {}".format(emr_conf['release_label'])) print("EMR master node shape: {}".format(emr_conf['master_instance_type'])) print("EMR slave node shape: {}".format(emr_conf['slave_instance_type'])) print("Instance count: {}".format(emr_conf['instance_count'])) print("Notebook IP address: {}".format(emr_conf['notebook_ip'])) print("Bucket name: {}".format(emr_conf['bucket_name'])) with open("/root/result.json", 'w') as result: res = {"hostname": emr_conf['cluster_name'], "instance_id": datalab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name']), "key_name": emr_conf['key_name'], "user_own_bucket_name": emr_conf['bucket_name'], "Action": "Create new EMR cluster", "computational_url": [ {"description": "EMR Master", "url": emr_master_acces_url}, # {"description": "EMR Master (via tunnl)", # "url": emr_master_url} ]} print(json.dumps(res)) result.write(json.dumps(res)) except Exception as err: datalab.fab.append_result("Error with writing results", str(err)) datalab.actions_lib.terminate_emr(emr_conf['cluster_id']) sys.exit(1)
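# Illustrative sketch (separate from the deployment script above) of its
# fan-out pattern: one multiprocessing.Process per cluster instance, join them
# all, then fail the run if any child exited non-zero. The worker body and the
# instance list are placeholders, not the real configure_dataengine_service().
import multiprocessing
import sys


def configure_instance(instance):
    # Stand-in for the per-node work; a real worker would connect to the node,
    # run the configuration scripts and call sys.exit(1) on failure.
    print('configuring', instance)


def configure_all(instances):
    jobs = [multiprocessing.Process(target=configure_instance, args=(inst,))
            for inst in instances]
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()
    if any(job.exitcode != 0 for job in jobs):
        sys.exit(1)


# Example: configure_all(['10.0.0.11', '10.0.0.12'])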
orchestrator.py
# Copyright 2016, 2017 Matteo Franchin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Module responsible of prioritising and carrying out the creation of thumbnails. The work is carried out by the Orchestrator object, which collects requests for thumbnail generation and sends them to a separate process. The work is prioritised such that the last thumbnail request is carried out first. ''' import os from threading import Thread from multiprocessing import Process, Queue from collections import namedtuple try: from Queue import Empty except ImportError: from queue import Empty from .backcaller import BackCaller from .thumbnailers import build_image_thumbnail, build_directory_thumbnail def comment(s): pass # State returned after a thumbnail request. (THUMBNAIL_LOADING, THUMBNAIL_DAMAGED, THUMBNAIL_DONE) = range(3) class Thumbnail(object): def __init__(self, file_name, size, state, request_id, data=None): self.file_name = file_name self.size = size self.state = state self.request_id = request_id self.data = data def match(self, size): # TODO: For now we tolerate slight errors in the resize. return size[0] == self.size[0] or size[1] == self.size[1] class Worker(Process): def __init__(self, cmd_queue, out_queue): super(Worker, self).__init__() # Queues used to coordinate work with other threads. self.cmd_queue = cmd_queue self.out_queue = out_queue # Private datastructures always accessed from the same thread. self.local_queue = [] self.idx_from_req = {} self.current_request_id = None def run(self): '''The main worker loop.''' while True: # Move all items from cmd_queue to local queue. # Block during get if the local queue is empty. comment('Move work to local queue') if self.move_work_to_local_queue(): self.out_queue.put(('STOP',)) return # Now process at most one command from the local queue. if len(self.local_queue) > 0: self.process_item_from_local_queue() def move_work_to_local_queue(self, blocking=True): '''Move all commands to the local queue (in reverse order). Also handle the queue management commands (CLEARQ, ...). Return whether the worker process should terminate as a result of a STOP command. ''' local_queue = self.local_queue idx_from_req = self.idx_from_req while True: # If the local queue is empty, then block the get() on cmd_queue, # as there is no work we could do anyway. queue_empty = (len(local_queue) == 0) try: args = self.cmd_queue.get(blocking and queue_empty) except Empty: return False else: cmd = args[0] if cmd == 'MAKETHUMB': # Queue the MAKETHUMB command in the local queue. The # order is reversed (most recent request are dealt with # first). request_id = args[1] idx_from_req[request_id] = len(local_queue) local_queue.append(args) elif cmd == 'CLEARQ': # Clear all jobs queued before this command. self.local_queue = local_queue = [] self.idx_from_req = idx_from_req = {} self.current_request_id = None elif cmd == 'CANCEL': # Cancel a MAKETHUMB command in the queue. 
request_id = args[1] idx = idx_from_req.get(request_id) if idx is not None: local_queue[idx] = ('NOP',) else: comment('CANCEL: Cannot find request {}' .format(request_id)) # Cancel the current thumb creation, if necessary. if self.current_request_id == request_id: self.current_request_id = None else: # Exit the worker process. assert cmd == 'STOP', 'Unknown command {}'.format(cmd) return True def check_thumb_cancelled(self): '''Return whether the current thumbnail creation should be cancelled. ''' self.move_work_to_local_queue(blocking=False) return (self.current_request_id is None) def process_item_from_local_queue(self): '''Process one item in the local queue and remove it.''' # Remove the top command from local_queue and idx_from_req map. # NOP commands are removed and ignored. while len(self.local_queue) > 0: args = self.local_queue.pop() if args[0] != 'NOP': assert args[0] == 'MAKETHUMB' request_id, file_name, size = args[1:] self.idx_from_req.pop(request_id) comment('Received MAKETHUMB command with ID {}' .format(request_id)) self.current_request_id = request_id state, data = \ self.make_thumb(file_name, size, check_cancelled=self.check_thumb_cancelled) if self.check_thumb_cancelled(): return comment('MAKETHUMB processed: sending result') self.out_queue.put(('MAKETHUMB', file_name, size, request_id, state, data)) return def make_thumb(self, file_name, size, **kwargs): if os.path.isdir(file_name): arr = build_directory_thumbnail(file_name, size, **kwargs) else: arr = build_image_thumbnail(file_name, size) state = (THUMBNAIL_DONE if arr is not None else THUMBNAIL_DAMAGED) return (state, arr) def listener_main(orchestrator): '''Listener thread main loop. The listener thread is responsible for taking the thumbnails produced by the worker process and putting them back into the orchestrator thumbnail cache (by calling Orchestrator.thumbnail_read()). ''' while True: args = orchestrator.out_queue.get() out_item = args[0] if out_item == 'STOP': return if out_item == 'MAKETHUMB': orchestrator.thumbnail_ready(*args[1:]) else: raise ValueError('Listener received unknown output: {}' .format(out_item)) class Orchestrator(BackCaller): def __init__(self, soft_limit=500, hard_limit=550): super(Orchestrator, self).__init__(thumbnail_available=None) self.thumbnails = {} self.request_id = 0 self.thumbnail_soft_limit = soft_limit self.thumbnail_hard_limit = hard_limit self.cmd_queue = Queue() self.out_queue = Queue() # Separate process doing all the hard work. self.worker = Worker(self.cmd_queue, self.out_queue) self.worker.daemon = True self.worker.start() # Thread listening to out_queue and calling back when items are # available. self.out_listener = t = Thread(target=listener_main, args=(self,)) t.daemon = True t.start() def request_thumbnail(self, file_name, size): '''Request a thumbnail with the given size in a non-blocking way.''' comment('Request thumbnail {} with size {}'.format(file_name, size)) tn = self.thumbnails.get(file_name) if tn is not None: # A thumbnail already exists. Unless it has the wrong size return # this. if tn.state == THUMBNAIL_DAMAGED or tn.match(size): # (tn.state == THUMBNAIL_DONE and tn.size == size)): comment('Returning cached thumbnail') return tn # If tn.state == THUMBNAIL_LOADING we re-submit the command to # ensure it gets a higher priority over the other requests. # This is important to ensure we render first the area of the # screen the user is looking at. 
self.cmd_queue.put(('CANCEL', tn.request_id)) comment('Replacing thumbnail {} != {}'.format(size, tn.size)) else: if len(self.thumbnails) >= self.thumbnail_hard_limit: comment('Too many thumbnails ({}): removing old thumbnails...' .format(len(self.thumbnails))) all_tns = [(tn.file_name, tn.request_id) for tn in self.thumbnails.itervalues()] all_tns.sort(lambda a, b: cmp(b[1], a[1])) for file_name_to_remove, _ in \ all_tns[self.thumbnail_soft_limit:]: comment('Rm thumb {}'.format(file_name_to_remove)) self.thumbnails.pop(file_name_to_remove) # Create or replace the thumbnail. comment('Storing LOADING-thumbnail for {}'.format(file_name)) request_id = self.request_id self.request_id += 1 self.thumbnails[file_name] = tn = \ Thumbnail(file_name, size, THUMBNAIL_LOADING, request_id) # Send a request for the thumbnail. comment('Queuing MAKETHUMB command, request {}'.format(request_id)) self.cmd_queue.put(('MAKETHUMB', request_id, file_name, size)) return tn def thumbnail_ready(self, file_name, size, request_id, state, data=None): '''Internal. Used to provide thumbnail data, once it ready.''' tn = self.thumbnails.get(file_name) if tn is None: comment('Received thumbnail for unknown request {}' .format(request_id)) return if request_id != tn.request_id: comment('request_id {} != {} for {}' .format(request_id, tn.request_id, file_name)) if size != tn.size: comment('Size mismatch: discarding thumbnail') return comment('Accepting thumbnail for {}'.format(file_name)) tn.state = state tn.data = data # Alert that a new thumbnail is now available. self.call('thumbnail_available', file_name, size, state, data) def clear_queue(self): '''Abort all the work in progress and clear the queue.''' # Need to remove LOADING-thumbnails from the cache. tn_to_remove = [tn.file_name for tn in self.thumbnails.itervalues() if tn.state == THUMBNAIL_LOADING] for tn in tn_to_remove: self.thumbnails.pop(tn) self.cmd_queue.put(('CLEARQ',)) if __name__ == '__main__': import time def got_it(*args): print('Got it ' + ', '.join(map(str, args))) t = Orchestrator() t.set_callback('thumbnail_available', got_it) t.request_thumbnail('/tmp/image00.jpg', (100, 100)) t.request_thumbnail('/tmp/image01.jpg', (100, 100)) t.request_thumbnail('/tmp/image02.jpg', (100, 100)) t.request_thumbnail('/tmp/image00.jpg', (100, 100)) time.sleep(10)
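# A stripped-down, standard-library-only sketch (separate from the classes
# above) of the Worker prioritisation idea: drain every queued command into a
# local list, blocking only when there is no local work left, then handle the
# most recent request first. Names here are illustrative, not the module API.
import multiprocessing
import queue
import time


def drain(cmd_queue, local):
    # Sweep whatever is currently in cmd_queue into `local`; block on get()
    # only when the local list is empty.
    while True:
        try:
            local.append(cmd_queue.get(block=not local))
        except queue.Empty:
            return


if __name__ == '__main__':
    q = multiprocessing.Queue()
    for name in ('a.jpg', 'b.jpg', 'c.jpg'):
        q.put(('MAKETHUMB', name))
    time.sleep(0.2)  # give the queue's feeder thread time to flush the puts
    pending = []
    drain(q, pending)
    while pending:
        print('processing', pending.pop())  # c.jpg first: newest request wins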
byhand.py
import datetime
import json
import traceback
import os
import platform
import logging.config
from threading import Thread, Event

from slavem import Reporter
import time
import arrow

from wash import Washer
from aggregatebar import AggregateBar
from contracter import Contracter
from hiscontract import HisContracter

settingFile = 'conf/kwarg.json'
loggingConfigFile = 'conf/logging.conf'
# serverChanFile = 'conf/serverChan.json'

if __debug__:
    settingFile = 'tmp/kwarg.json'
    loggingConfigFile = 'tmp/logging.conf'

# with open(serverChanFile, 'r') as f:
#     serverChanUrls = json.load(f)['serverChanSlaveUrls']

with open(settingFile, 'r') as f:
    kwargs = json.load(f)

# with open(loggingConfigFile, 'r') as f:
#     loggingConfig = json.load(f)

# Load the logging configuration.
logging.config.fileConfig(loggingConfigFile)
logger = logging.getLogger()

# Reporting.
stopped = Event()
slavemReport = Reporter(**kwargs.pop('slavemConf'))


# Heartbeat thread: report every 30 seconds until `stopped` is set.
def heartBeat():
    while not stopped.wait(30):
        slavemReport.heartBeat()


beat = Thread(target=heartBeat, daemon=True)
beat.start()

startDate = arrow.get('2019-05-23 00:00:00+08:00').datetime
endDate = arrow.get('2019-05-24 00:00:00+08:00').datetime

tradingDay = startDate

try:
    while tradingDay <= endDate:
        # Wash the raw data.
        w = Washer(startDate=tradingDay, **kwargs)
        w.start()

        # Aggregate daily bar data.
        a = AggregateBar(startDate=tradingDay, **kwargs)
        a.start()

        # Update the start/end dates of each contract.
        h = HisContracter(startDate=tradingDay, **kwargs)
        h.start()

        # Generate dominant contract data.
        c = Contracter(startDate=tradingDay, **kwargs)
        c.start()
        tradingDay += datetime.timedelta(days=1)
    os.system('say "data washing finished"')
except:
    e = traceback.format_exc()
    logger.critical(e)
    time.sleep(3)
    if platform.system() == "Darwin":
        os.system('say "failed, failed, failed"')
finally:
    # Stop the heartbeat thread cleanly.
    stopped.set()
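# Generic sketch of the heartbeat pattern used above, with the slavem Reporter
# replaced by a plain print so it runs standalone: Event.wait(interval) serves
# as both the sleep and the shutdown signal. Names here are illustrative.
import threading


def run_with_heartbeat(work, interval=30):
    stop = threading.Event()

    def heartbeat():
        while not stop.wait(interval):
            print('heartbeat')

    t = threading.Thread(target=heartbeat, daemon=True)
    t.start()
    try:
        work()
    finally:
        stop.set()  # stop the heartbeat promptly instead of relying on daemon exit
        t.join()


# Example: run_with_heartbeat(lambda: time.sleep(90), interval=30)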
workerpool.py
import os import sys import signal import subprocess import multiprocessing from openquake.baselib import zeromq as z, general, parallel try: from setproctitle import setproctitle except ImportError: def setproctitle(title): "Do nothing" def streamer(host, task_in_port, task_out_port): """ A streamer for zmq workers. :param host: name or IP of the controller node :param task_in_port: port where to send the tasks :param task_out_port: port from where to receive the tasks """ try: z.zmq.proxy(z.bind('tcp://%s:%s' % (host, task_in_port), z.zmq.PULL), z.bind('tcp://%s:%s' % (host, task_out_port), z.zmq.PUSH)) except (KeyboardInterrupt, z.zmq.ZMQError): pass # killed cleanly by SIGINT/SIGTERM class WorkerMaster(object): """ :param master_host: hostname or IP of the master node :param task_in_port: port where to send the tasks :param task_out_port: port from where to read the tasks :param ctrl_port: port on which the worker pools listen :param host_cores: names of the remote hosts and number of cores to use :param remote_python: path of the Python executable on the remote hosts """ def __init__(self, master_host, task_in_port, task_out_port, ctrl_port, host_cores, remote_python=None, receiver_ports=None): # receiver_ports is not used self.task_in_port = task_in_port self.task_out_url = 'tcp://%s:%s' % (master_host, task_out_port) self.ctrl_port = int(ctrl_port) self.host_cores = [hc.split() for hc in host_cores.split(',')] self.remote_python = remote_python or sys.executable def status(self, host=None): """ :returns: a list of pairs (hostname, 'running'|'not-running') """ if host is None: host_cores = self.host_cores else: host_cores = [hc for hc in self.host_cores if hc[0] == host] lst = [] for host, _ in host_cores: ready = general.socket_ready((host, self.ctrl_port)) lst.append((host, 'running' if ready else 'not-running')) return lst def start(self): """ Start multiple workerpools, possibly on remote servers via ssh """ starting = [] for host, cores in self.host_cores: if self.status(host)[0][1] == 'running': print('%s:%s already running' % (host, self.ctrl_port)) continue ctrl_url = 'tcp://%s:%s' % (host, self.ctrl_port) if host == '127.0.0.1': # localhost args = [sys.executable] else: args = ['ssh', host, self.remote_python] args += ['-m', 'openquake.baselib.workerpool', ctrl_url, self.task_out_url, cores] starting.append(' '.join(args)) subprocess.Popen(args) return 'starting %s' % starting def stop(self): """ Send a "stop" command to all worker pools """ stopped = [] for host, _ in self.host_cores: if self.status(host)[0][1] == 'not-running': print('%s not running' % host) continue ctrl_url = 'tcp://%s:%s' % (host, self.ctrl_port) with z.Socket(ctrl_url, z.zmq.REQ, 'connect') as sock: sock.send('stop') stopped.append(host) return 'stopped %s' % stopped def kill(self): """ Send a "kill" command to all worker pools """ killed = [] for host, _ in self.host_cores: if self.status(host)[0][1] == 'not-running': print('%s not running' % host) continue ctrl_url = 'tcp://%s:%s' % (host, self.ctrl_port) with z.Socket(ctrl_url, z.zmq.REQ, 'connect') as sock: sock.send('kill') killed.append(host) return 'killed %s' % killed def restart(self): """ Stop and start again """ self.stop() self.start() return 'restarted' class WorkerPool(object): """ A pool of workers accepting the command 'stop' and 'kill' and reading tasks to perform from the task_out_port. 
:param ctrl_url: zmq address of the control socket :param task_out_port: zmq address of the task streamer :param num_workers: a string with the number of workers (or '-1') """ def __init__(self, ctrl_url, task_out_port, num_workers='-1'): self.ctrl_url = ctrl_url self.task_out_port = task_out_port self.num_workers = (multiprocessing.cpu_count() if num_workers == '-1' else int(num_workers)) self.pid = os.getpid() def worker(self, sock): """ :param sock: a zeromq.Socket of kind PULL receiving (cmd, args) """ setproctitle('oq-zworker') with sock: for cmd, args in sock: parallel.safely_call(cmd, args) def start(self): """ Start worker processes and a control loop """ setproctitle('oq-zworkerpool %s' % self.ctrl_url[6:]) # strip tcp:// # start workers self.workers = [] for _ in range(self.num_workers): sock = z.Socket(self.task_out_port, z.zmq.PULL, 'connect') proc = multiprocessing.Process(target=self.worker, args=(sock,)) proc.start() sock.pid = proc.pid self.workers.append(sock) # start control loop accepting the commands stop and kill with z.Socket(self.ctrl_url, z.zmq.REP, 'bind') as ctrlsock: for cmd in ctrlsock: if cmd in ('stop', 'kill'): msg = getattr(self, cmd)() ctrlsock.send(msg) break elif cmd == 'getpid': ctrlsock.send(self.pid) elif cmd == 'get_num_workers': ctrlsock.send(self.num_workers) def stop(self): """ Send a SIGTERM to all worker processes """ for sock in self.workers: os.kill(sock.pid, signal.SIGTERM) return 'WorkerPool %s stopped' % self.ctrl_url def kill(self): """ Send a SIGKILL to all worker processes """ for sock in self.workers: os.kill(sock.pid, signal.SIGKILL) return 'WorkerPool %s killed' % self.ctrl_url if __name__ == '__main__': ctrl_url, task_out_port, num_workers = sys.argv[1:] WorkerPool(ctrl_url, task_out_port, num_workers).start()
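# Standard-library-only sketch of the check behind WorkerMaster.status():
# a pool counts as "running" if its control port accepts a TCP connection.
# general.socket_ready presumably does something similar; this helper and the
# port number in the example are illustrative only.
import socket


def port_is_open(host, port, timeout=1.0):
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False


# Example: port_is_open('127.0.0.1', 1908) -> False unless a pool is listening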
ircclient.py
# -*- coding: utf-8 -*-
"""Starts an IRC client."""

import sys
import threading

from . import exceptions
from . import ircsocket


LINESEP = "\r\n"
"""IRC likes a \r\n, though most accept a \n."""


def connect_to_irc(host, port, error, vlog):
    """Connect to an IRC server.

    Args:

        host
            The host to connect to.

        port
            The port to use.

        error
            A logger to pass error messages to.

        vlog
            A verbose logger to pass messages to.

    Return:
        An open socket connected to the server,
        or the program exits if it cannot connect.

    """
    try:
        sock = ircsocket.connect(host, port, vlog)
    except exceptions.CouldNotConnect as connect_error:
        error(str(connect_error))
        sys.exit()
    return sock


def login(sock, vlog):
    """Login to an IRC server.

    Args:

        sock
            An open socket to login over.

        vlog
            A verbose logger to pass messages to.

    """
    msg = "PASS to-wong-foo" + LINESEP
    vlog(msg)
    ircsocket.send(sock, msg, vlog)
    msg = "NICK jt2222" + LINESEP
    vlog(msg)
    ircsocket.send(sock, msg, vlog)
    msg = "USER paul 8 * : Paul Muttonchops" + LINESEP
    vlog(msg)
    ircsocket.send(sock, msg, vlog)


def parse_input(sock, data, vlog):
    """Convert input into IRC commands and send them to the server.

    Args:

        sock
            An open socket to login over.

        data
            The data entered by the user.

        vlog
            A verbose logger to pass messages to.

    Return:
        True if it's okay to continue; False if not.

    """
    words = data.split()
    cmd = words[0]
    cmd = cmd.upper()
    msg = cmd + " " + " ".join(words[1:]) + LINESEP
    vlog("SENDING: " + msg)
    ircsocket.send(sock, msg, vlog)
    return cmd != "QUIT"


def parse_output(data, echo):
    """Parse output received from the server.

    Args:

        data
            Data received from the server.

        echo
            A logger to pass the parsed data to.

    """
    # Break up the data into words.
    words = data.split()

    # If there's a prefix, the first word starts with a colon
    prefix = None
    if words[0].startswith(":"):
        prefix = words[0][1:]
        words.pop(0)

    # The command is the next word.
    command = None
    if words:
        command = words[0]
        words.pop(0)

    # The params are everything up to the next colon.
    params = []
    is_param = True
    while is_param:
        if words and not words[0].startswith(":"):
            params.append(words[0])
            words.pop(0)
        else:
            is_param = False

    # The tail is the rest.
    tail = []
    if words:
        tail.extend(words)

    output = command + " " + " ".join(tail)
    # echo(output)
    echo(data)


def start(host, port, echo, error, vlog):
    """Start an IRC client.

    Args:

        host
            The host to connect to.

        port
            The port to use.

        echo
            A logger to pass output messages to.

        error
            A logger to pass error messages to.

        vlog
            A verbose logger to pass messages to.

    """
    sock = connect_to_irc(host, port, error, vlog)
    login(sock, vlog)

    # Start listening on the socket in a separate thread.
    thread_should_stop = threading.Event()
    args = (sock, thread_should_stop, lambda x: parse_output(x, echo), vlog)
    thread = threading.Thread(target=ircsocket.receive, args=args)
    thread.start()

    # Listen for input from the user in this thread.
    keep_alive = True
    try:
        while keep_alive:
            raw_entered_data = input()
            entered_data = raw_entered_data.strip()
            if entered_data:
                vlog("STDIN: " + str(entered_data))
                keep_alive = parse_input(sock, entered_data, vlog)

    # If the user exits, or the system exits, disconnect first.
    except (KeyboardInterrupt, SystemExit):
        vlog("")
        vlog("Caught exit signal...")
        ircsocket.disconnect(sock, thread, thread_should_stop, vlog)
        vlog("Re-raising exit signal...")
        raise

    # Disconnect before stopping.
    vlog("")
    vlog("Nothing left to do.")
    ircsocket.disconnect(sock, thread, thread_should_stop, vlog)
    echo("Goodbye.")
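# Stand-alone sketch of IRC message splitting along the lines of parse_output()
# above: ":prefix COMMAND arg1 arg2 :trailing text". It is not wired into the
# client; the function name and the sample line are illustrative.
def split_irc_line(line):
    prefix = None
    if line.startswith(":"):
        prefix, line = line[1:].split(" ", 1)
    trailing = None
    if " :" in line:
        line, trailing = line.split(" :", 1)
    parts = line.split()
    command, params = parts[0], parts[1:]
    return prefix, command, params, trailing


# Example:
#   split_irc_line(":irc.example.net 001 jt2222 :Welcome to IRC")
#   -> ('irc.example.net', '001', ['jt2222'], 'Welcome to IRC')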
_kubeless.py
#!/usr/bin/env python import importlib import io import os import queue import sys import bottle import prometheus_client as prom # The reason this file has an underscore prefix in its name is to avoid a # name collision with the user-defined module. current_mod = os.path.basename(__file__).split('.')[0] if os.getenv('MOD_NAME') == current_mod: raise ValueError(f'Module cannot be named {current_mod}') sys.path.append('/kubeless') mod = importlib.import_module(os.getenv('MOD_NAME')) func = getattr(mod, os.getenv('FUNC_HANDLER')) func_port = os.getenv('FUNC_PORT', 8080) timeout = float(os.getenv('FUNC_TIMEOUT', 180)) memfile_max = int(os.getenv('FUNC_MEMFILE_MAX', 100*1024*1024)) bottle.BaseRequest.MEMFILE_MAX = memfile_max app = application = bottle.app() function_context = { 'function-name': func.__name__, 'timeout': timeout, 'runtime': os.getenv('FUNC_RUNTIME'), 'memory-limit': os.getenv('FUNC_MEMORY_LIMIT'), } class PicklableBottleRequest(bottle.BaseRequest): '''Bottle request that can be pickled (serialized). `bottle.BaseRequest` is not picklable and therefore cannot be passed directly to a python multiprocessing `Process` when using the forkserver or spawn multiprocessing contexts. So, we selectively delete components that are not picklable. ''' def __init__(self, data, *args, **kwargs): super().__init__(*args, **kwargs) # Bottle uses either `io.BytesIO` or `tempfile.TemporaryFile` to store the # request body depending on whether the length of the body is less than # `MEMFILE_MAX` or not, but `tempfile.TemporaryFile` is not picklable. # So, we override it to always store the body as `io.BytesIO`. self.environ['bottle.request.body'] = io.BytesIO(data) def __getstate__(self): env = self.environ.copy() # File-like objects are not picklable. del env['wsgi.errors'] del env['wsgi.input'] # bottle.ConfigDict is not picklable because it contains a lambda function. 
del env['bottle.app'] del env['bottle.route'] del env['route.handle'] return env def __setstate__(self, env): setattr(self, 'environ', env) def funcWrap(q, event, c): try: q.put(func(event, c)) except Exception as inst: q.put(inst) @app.get('/healthz') def healthz(): return 'OK' @app.get('/metrics') def metrics(): bottle.response.content_type = prom.CONTENT_TYPE_LATEST return prom.generate_latest(prom.REGISTRY) @app.route('/<:re:.*>', method=['GET', 'POST', 'PATCH', 'DELETE']) def handler(): req = bottle.request data = req.body.read() picklable_req = PicklableBottleRequest(data, req.environ.copy()) if req.get_header('content-type') == 'application/json': data = req.json event = { 'data': data, 'event-id': req.get_header('event-id'), 'event-type': req.get_header('event-type'), 'event-time': req.get_header('event-time'), 'event-namespace': req.get_header('event-namespace'), 'extensions': {'request': picklable_req} } method = req.method func_calls.labels(method).inc() with func_errors.labels(method).count_exceptions(): with func_hist.labels(method).time(): q = ctx.Queue() p = ctx.Process(target=funcWrap, args=(q, event, function_context)) p.start() try: res = q.get(block=True, timeout=timeout) except queue.Empty: p.terminate() p.join() return bottle.HTTPError(408, "Timeout while processing the function") else: p.join() if isinstance(res, Exception) and not isinstance(res, bottle.HTTPResponse): logging.error("Function returned an exception: %s", res) raise res return res def preload(): """This is a no-op function used to start the forkserver.""" pass if __name__ == '__main__': import logging import multiprocessing as mp import requestlogger mp_context = os.getenv('MP_CONTEXT', 'forkserver') if mp_context == "fork": raise ValueError( '"fork" multiprocessing context is not supported because cherrypy is a ' 'multithreaded server and safely forking a multithreaded process is ' 'problematic' ) if mp_context not in ["forkserver", "spawn"]: raise ValueError( f'"{mp_context}" is an invalid multiprocessing context. Possible values ' 'are "forkserver" and "spawn"' ) try: ctx = mp.get_context(mp_context) if ctx.get_start_method() == 'forkserver': # Preload the current module and consequently also the user-defined module # so that all the child processes forked from the forkserver in response to # a request immediately have access to the global data in the user-defined # module without having to load it for every request. ctx.set_forkserver_preload([current_mod]) # Start the forkserver before we start accepting requests. d = ctx.Process(target=preload) d.start() d.join() except ValueError: # Default to 'spawn' if 'forkserver' is unavailable. ctx = mp.get_context('spawn') logging.warn( f'"{mp_context}" multiprocessing context is unavailable. Using "spawn"' ) func_hist = prom.Histogram( 'function_duration_seconds', 'Duration of user function in seconds', ['method'] ) func_calls = prom.Counter( 'function_calls_total', 'Number of calls to user function', ['method'] ) func_errors = prom.Counter( 'function_failures_total', 'Number of exceptions in user function', ['method'] ) loggedapp = requestlogger.WSGILogger( app, [logging.StreamHandler(stream=sys.stdout)], requestlogger.ApacheFormatter(), ) bottle.run( loggedapp, server='cherrypy', host='0.0.0.0', port=func_port, # Number of requests that can be handled in parallel (default = 10). numthreads=os.getenv('CHERRYPY_NUMTHREADS', 10), )
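# Minimal standard-library sketch (separate from the handler above) of the same
# timeout pattern: run a function in a child process, wait on a queue for its
# result, and terminate the child if it does not answer in time. It uses the
# 'spawn' context; call_with_timeout and slow_add are illustrative names.
import multiprocessing as mp
import queue


def _invoke(q, func, args):
    # Runs in the child process; ship the result (or the exception) back.
    try:
        q.put(func(*args))
    except Exception as exc:
        q.put(exc)


def call_with_timeout(func, args, timeout):
    ctx = mp.get_context('spawn')
    q = ctx.Queue()
    p = ctx.Process(target=_invoke, args=(q, func, args))
    p.start()
    try:
        result = q.get(block=True, timeout=timeout)
    except queue.Empty:
        p.terminate()
        p.join()
        raise TimeoutError('no result within %s seconds' % timeout)
    p.join()
    if isinstance(result, Exception):
        raise result
    return result


def slow_add(a, b):
    return a + b


# Example (func and args must be picklable under 'spawn'):
#   call_with_timeout(slow_add, (1, 2), timeout=5) -> 3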
inception_v1.py
# Copyright 2019 Xilinx Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ctypes import * import cv2 import numpy as np import runner import os import input_fn import math import threading import time import sys from vai.dpuv1.rt.vitis.python.dpu.runner import Runner ''' Calculate softmax data: data to be calculated size: data size return: softamx result ''' def CPUCalcSoftmax(data,size): sum=0.0 result = [0 for i in range(size)] for i in range(size): result[i] = math.exp(data[i]) sum +=result[i] for i in range(size): result[i] /=sum return result def get_script_directory(): path = os.getcwd() return path ''' Get topk results according to its probability datain: data result of softmax filePath: filePath in witch that records the infotmation of kinds ''' def TopK(datain, size, filePath, labels_offset=0): cnt = [i for i in range(size) ] pair = zip(datain, cnt) pair = sorted(pair, reverse=True) softmax_new, cnt_new = zip(*pair) fp = open(filePath, "r") data1 = fp.readlines() fp.close() print ("") for i in range(5): flag = labels_offset for line in data1: if flag == cnt_new[i]: print("Top[%d] %f %s" % (i, (softmax_new[i]), (line.strip)("\n"))) flag = flag+1 l = threading.Lock() SCRIPT_DIR = get_script_directory() calib_image_dir = "./image" label_file = "./words.txt" IMAGE_WIDTH = 224 IMAGE_HEIGHT = 224 batchSize = 4 global threadnum threadnum = 0 global runTotall runRotal = 0 ''' run inception_v1 with batch dpu: dpu runner img: imagelist to be run cnt: threadnum ''' def runInceptionV1(dpu, img, cnt): """get tensor""" inputTensors = dpu.get_input_tensors() outputTensors = dpu.get_output_tensors() tensorformat = dpu.get_tensor_format() if tensorformat == dpu.TensorFormat.NCHW: outputHeight = outputTensors[0].dims[2] outputWidth = outputTensors[0].dims[3] outputChannel = outputTensors[0].dims[1] elif tensorformat == dpu.TensorFormat.NHWC: outputHeight = outputTensors[0].dims[1] outputWidth = outputTensors[0].dims[2] outputChannel = outputTensors[0].dims[3] else: exit("Format error") outputSize = outputHeight * outputWidth * outputChannel softmax = np.empty(outputSize) global runTotall count = cnt while count < runTotall: l.acquire() if (runTotall < (count+batchSize)): runSize = runTotall - count else: runSize = batchSize l.release() shapeIn = (runSize,) + tuple([inputTensors[0].dims[i] for i in range(inputTensors[0].ndims)][1:]) """prepare batch input/output """ outputData = [] inputData = [] outputData.append(np.empty((runSize, outputSize), dtype = np.float32, order = 'C')) inputData.append(np.empty((shapeIn), dtype = np.float32, order = 'C')) #imgT = np.transpose(img, (0, 3, 1, 2)) """init input image to input buffer """ for j in range(runSize): imageRun = inputData[0] imageRun[j,...] 
= img[count+j] """run with batch """ job_id = dpu.execute_async(inputData, outputData) dpu.wait(job_id) """softmax calculate with batch """ label_offset = 1 # For imagenet tensorflow slim for j in range(runSize): softmax = CPUCalcSoftmax(outputData[0][j], outputSize) TopK (softmax, outputSize, label_file, label_offset) l.acquire() count = count + threadnum*runSize l.release() def main(argv): global threadnum """create runner """ dpu = Runner(argv[2]) listimage=os.listdir(calib_image_dir) threadAll = [] threadnum = int(argv[1]) i = 0 global runTotall runTotall = len(listimage) """ Image list to be run """ img = [] for i in range(runTotall): path = os.path.join(calib_image_dir, listimage[i]) image = cv2.imread(path) img.append(input_fn.preprocess_fn(image)) imgT = np.transpose(img, (0, 3, 1, 2)) """run with batch """ time1 = time.time() for i in range(int(threadnum)): t1 = threading.Thread(target=runInceptionV1, args=(dpu, imgT, i*batchSize)) threadAll.append(t1) for x in threadAll: x.start() for x in threadAll: x.join() time2 = time.time() timetotal = time2 - time1 fps = float(runTotall / timetotal) print("%.2f FPS" %fps) del dpu if __name__ == "__main__": if len(sys.argv) != 3: print("please input thread number and json file path.") else : main(sys.argv)
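# Vectorised sketch of the two post-processing helpers above (CPUCalcSoftmax
# and TopK) using numpy only; the logits and label names in the example are
# made up.
import numpy as np


def softmax_np(logits):
    e = np.exp(logits - np.max(logits))  # subtract the max for numerical stability
    return e / e.sum()


def top_k(probs, labels, k=5):
    order = np.argsort(probs)[::-1][:k]
    return [(labels[i], float(probs[i])) for i in order]


# Example:
#   top_k(softmax_np(np.array([2.0, 1.0, 0.1, 3.5])),
#         ['cat', 'dog', 'toaster', 'goldfish'])
#   -> goldfish first, then cat, dog, toaster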
lockfile.py
""" lockfile.py - Platform-independent advisory file locks. Requires Python 2.5 unless you apply 2.4.diff Locking is done on a per-thread basis instead of a per-process basis. Usage: >>> lock = FileLock(_testfile()) >>> try: ... lock.acquire() ... except AlreadyLocked: ... print _testfile(), 'is locked already.' ... except LockFailed: ... print _testfile(), 'can\\'t be locked.' ... else: ... print 'got lock' got lock >>> print lock.is_locked() True >>> lock.release() >>> lock = FileLock(_testfile()) >>> print lock.is_locked() False >>> with lock: ... print lock.is_locked() True >>> print lock.is_locked() False >>> # It is okay to lock twice from the same thread... >>> with lock: ... lock.acquire() ... >>> # Though no counter is kept, so you can't unlock multiple times... >>> print lock.is_locked() False Exceptions: Error - base class for other exceptions LockError - base class for all locking exceptions AlreadyLocked - Another thread or process already holds the lock LockFailed - Lock failed for some other reason UnlockError - base class for all unlocking exceptions AlreadyUnlocked - File was not locked. NotMyLock - File was locked but not by the current thread/process To do: * Write more test cases - verify that all lines of code are executed * Describe on-disk file structures in the documentation. """ from __future__ import division, with_statement import socket import os import threading import time import errno import thread class Error(Exception): """ Base class for other exceptions. >>> try: ... raise Error ... except Exception: ... pass """ pass class LockError(Error): """ Base class for error arising from attempts to acquire the lock. >>> try: ... raise LockError ... except Error: ... pass """ pass class LockTimeout(LockError): """Raised when lock creation fails within a user-defined period of time. >>> try: ... raise LockTimeout ... except LockError: ... pass """ pass class AlreadyLocked(LockError): """Some other thread/process is locking the file. >>> try: ... raise AlreadyLocked ... except LockError: ... pass """ pass class LockFailed(LockError): """Lock file creation failed for some other reason. >>> try: ... raise LockFailed ... except LockError: ... pass """ pass class UnlockError(Error): """ Base class for errors arising from attempts to release the lock. >>> try: ... raise UnlockError ... except Error: ... pass """ pass class NotLocked(UnlockError): """Raised when an attempt is made to unlock an unlocked file. >>> try: ... raise NotLocked ... except UnlockError: ... pass """ pass class NotMyLock(UnlockError): """Raised when an attempt is made to unlock a file someone else locked. >>> try: ... raise NotMyLock ... except UnlockError: ... pass """ pass class LockBase: """Base class for platform-specific lock classes.""" def __init__(self, path, threaded=True): """ >>> lock = LockBase(_testfile()) """ self.path = path self.lock_file = os.path.abspath(path) + ".lock" self.hostname = socket.gethostname() self.pid = os.getpid() if threaded: tname = "%x-" % thread.get_ident() else: tname = "" dirname = os.path.dirname(self.lock_file) self.unique_name = os.path.join(dirname, "%s.%s%s" % (self.hostname, tname, self.pid)) def acquire(self, timeout=None): """ Acquire the lock. * If timeout is omitted (or None), wait forever trying to lock the file. * If timeout > 0, try to acquire the lock for that many seconds. If the lock period expires and the file is still locked, raise LockTimeout. * If timeout <= 0, raise AlreadyLocked immediately if the file is already locked. 
>>> # As simple as it gets. >>> lock = FileLock(_testfile()) >>> lock.acquire() >>> lock.release() >>> # No timeout test >>> e1, e2 = threading.Event(), threading.Event() >>> t = _in_thread(_lock_wait_unlock, e1, e2) >>> e1.wait() # wait for thread t to acquire lock >>> lock2 = FileLock(_testfile()) >>> lock2.is_locked() True >>> lock2.i_am_locking() False >>> try: ... lock2.acquire(timeout=-1) ... except AlreadyLocked: ... pass ... except Exception as e: ... print 'unexpected exception', repr(e) ... else: ... print 'thread', threading.currentThread().getName(), ... print 'erroneously locked an already locked file.' ... lock2.release() ... >>> e2.set() # tell thread t to release lock >>> t.join() >>> # Timeout test >>> e1, e2 = threading.Event(), threading.Event() >>> t = _in_thread(_lock_wait_unlock, e1, e2) >>> e1.wait() # wait for thread t to acquire filelock >>> lock2 = FileLock(_testfile()) >>> lock2.is_locked() True >>> try: ... lock2.acquire(timeout=0.1) ... except LockTimeout: ... pass ... except Exception as e: ... print 'unexpected exception', repr(e) ... else: ... lock2.release() ... print 'thread', threading.currentThread().getName(), ... print 'erroneously locked an already locked file.' ... >>> e2.set() >>> t.join() """ pass def release(self): """ Release the lock. If the file is not locked, raise NotLocked. >>> lock = FileLock(_testfile()) >>> lock.acquire() >>> lock.release() >>> lock.is_locked() False >>> lock.i_am_locking() False >>> try: ... lock.release() ... except NotLocked: ... pass ... except NotMyLock: ... print 'unexpected exception', NotMyLock ... except Exception as e: ... print 'unexpected exception', repr(e) ... else: ... print 'erroneously unlocked file' >>> e1, e2 = threading.Event(), threading.Event() >>> t = _in_thread(_lock_wait_unlock, e1, e2) >>> e1.wait() >>> lock2 = FileLock(_testfile()) >>> lock2.is_locked() True >>> lock2.i_am_locking() False >>> try: ... lock2.release() ... except NotMyLock: ... pass ... except Exception as e: ... print 'unexpected exception', repr(e) ... else: ... print 'erroneously unlocked a file locked by another thread.' ... >>> e2.set() >>> t.join() """ pass def is_locked(self): """ Tell whether or not the file is locked. >>> lock = FileLock(_testfile()) >>> lock.acquire() >>> lock.is_locked() True >>> lock.release() >>> lock.is_locked() False """ pass def i_am_locking(self): """Return True if this object is locking the file. >>> lock1 = FileLock(_testfile(), threaded=False) >>> lock1.acquire() >>> lock2 = FileLock(_testfile()) >>> lock1.i_am_locking() True >>> lock2.i_am_locking() False >>> try: ... lock2.acquire(timeout=2) ... except LockTimeout: ... lock2.break_lock() ... lock2.is_locked() ... lock1.is_locked() ... lock2.acquire() ... else: ... print 'expected LockTimeout...' ... False False >>> lock1.i_am_locking() False >>> lock2.i_am_locking() True >>> lock2.release() """ pass def break_lock(self): """Remove a lock. Useful if a locking thread failed to unlock. >>> lock = FileLock(_testfile()) >>> lock.acquire() >>> lock2 = FileLock(_testfile()) >>> lock2.is_locked() True >>> lock2.break_lock() >>> lock2.is_locked() False >>> try: ... lock.release() ... except NotLocked: ... pass ... except Exception as e: ... print 'unexpected exception', repr(e) ... else: ... print 'break lock failed' """ pass def __enter__(self): """Context manager support. >>> lock = FileLock(_testfile()) >>> with lock: ... lock.is_locked() ... 
True >>> lock.is_locked() False """ self.acquire() return self def __exit__(self, *_exc): """Context manager support. >>> 'tested in __enter__' 'tested in __enter__' """ self.release() class LinkFileLock(LockBase): """Lock access to a file using atomic property of link(2).""" def acquire(self, timeout=None): """ >>> d = _testfile() >>> os.mkdir(d) >>> os.chmod(d, 0444) >>> try: ... lock = LinkFileLock(os.path.join(d, 'test')) ... try: ... lock.acquire() ... except LockFailed: ... pass ... else: ... lock.release() ... print 'erroneously locked', os.path.join(d, 'test') ... finally: ... os.chmod(d, 0664) ... os.rmdir(d) """ try: open(self.unique_name, "wb").close() except IOError: raise LockFailed end_time = time.time() if timeout is not None and timeout > 0: end_time += timeout while True: # Try and create a hard link to it. try: os.link(self.unique_name, self.lock_file) except OSError: # Link creation failed. Maybe we've double-locked? nlinks = os.stat(self.unique_name).st_nlink if nlinks == 2: # The original link plus the one I created == 2. We're # good to go. return else: # Otherwise the lock creation failed. if timeout is not None and time.time() > end_time: os.unlink(self.unique_name) if timeout > 0: raise LockTimeout else: raise AlreadyLocked time.sleep(timeout is not None and timeout/10 or 0.1) else: # Link creation succeeded. We're good to go. return def release(self): if not self.is_locked(): raise NotLocked elif not os.path.exists(self.unique_name): raise NotMyLock os.unlink(self.unique_name) os.unlink(self.lock_file) def is_locked(self): return os.path.exists(self.lock_file) def i_am_locking(self): return (self.is_locked() and os.path.exists(self.unique_name) and os.stat(self.unique_name).st_nlink == 2) def break_lock(self): if os.path.exists(self.lock_file): os.unlink(self.lock_file) class MkdirFileLock(LockBase): """Lock file by creating a directory.""" def __init__(self, path, threaded=True): """ >>> lock = MkdirFileLock(_testfile()) """ LockBase.__init__(self, path) if threaded: tname = "%x-" % thread.get_ident() else: tname = "" # Lock file itself is a directory. Place the unique file name into # it. self.unique_name = os.path.join(self.lock_file, "%s.%s%s" % (self.hostname, tname, self.pid)) def acquire(self, timeout=None): end_time = time.time() if timeout is not None and timeout > 0: end_time += timeout if timeout is None: wait = 0.1 else: wait = max(0, timeout / 10) while True: try: os.mkdir(self.lock_file) except OSError, err: if err.errno == errno.EEXIST: # Already locked. if os.path.exists(self.unique_name): # Already locked by me. return if timeout is not None and time.time() > end_time: if timeout > 0: raise LockTimeout else: # Someone else has the lock. raise AlreadyLocked time.sleep(wait) else: # Couldn't create the lock for some other reason raise LockFailed else: open(self.unique_name, "wb").close() return def release(self): if not self.is_locked(): raise NotLocked elif not os.path.exists(self.unique_name): raise NotMyLock os.unlink(self.unique_name) os.rmdir(self.lock_file) def is_locked(self): return os.path.exists(self.lock_file) def i_am_locking(self): return (self.is_locked() and os.path.exists(self.unique_name)) def break_lock(self): if os.path.exists(self.lock_file): for name in os.listdir(self.lock_file): os.unlink(os.path.join(self.lock_file, name)) os.rmdir(self.lock_file) class SQLiteFileLock(LockBase): "Demonstration of using same SQL-based locking." 
import tempfile _fd, testdb = tempfile.mkstemp() os.close(_fd) os.unlink(testdb) del _fd, tempfile def __init__(self, path, threaded=True): LockBase.__init__(self, path, threaded) self.lock_file = unicode(self.lock_file) self.unique_name = unicode(self.unique_name) import sqlite3 self.connection = sqlite3.connect(SQLiteFileLock.testdb) c = self.connection.cursor() try: c.execute("create table locks" "(" " lock_file varchar(32)," " unique_name varchar(32)" ")") except sqlite3.OperationalError: pass else: self.connection.commit() import atexit atexit.register(os.unlink, SQLiteFileLock.testdb) def acquire(self, timeout=None): end_time = time.time() if timeout is not None and timeout > 0: end_time += timeout if timeout is None: wait = 0.1 elif timeout <= 0: wait = 0 else: wait = timeout / 10 cursor = self.connection.cursor() while True: if not self.is_locked(): # Not locked. Try to lock it. cursor.execute("insert into locks" " (lock_file, unique_name)" " values" " (?, ?)", (self.lock_file, self.unique_name)) self.connection.commit() # Check to see if we are the only lock holder. cursor.execute("select * from locks" " where unique_name = ?", (self.unique_name,)) rows = cursor.fetchall() if len(rows) > 1: # Nope. Someone else got there. Remove our lock. cursor.execute("delete from locks" " where unique_name = ?", (self.unique_name,)) self.connection.commit() else: # Yup. We're done, so go home. return else: # Check to see if we are the only lock holder. cursor.execute("select * from locks" " where unique_name = ?", (self.unique_name,)) rows = cursor.fetchall() if len(rows) == 1: # We're the locker, so go home. return # Maybe we should wait a bit longer. if timeout is not None and time.time() > end_time: if timeout > 0: # No more waiting. raise LockTimeout else: # Someone else has the lock and we are impatient.. raise AlreadyLocked # Well, okay. We'll give it a bit longer. time.sleep(wait) def release(self): if not self.is_locked(): raise NotLocked if not self.i_am_locking(): raise NotMyLock, ("locker:", self._who_is_locking(), "me:", self.unique_name) cursor = self.connection.cursor() cursor.execute("delete from locks" " where unique_name = ?", (self.unique_name,)) self.connection.commit() def _who_is_locking(self): cursor = self.connection.cursor() cursor.execute("select unique_name from locks" " where lock_file = ?", (self.lock_file,)) return cursor.fetchone()[0] def is_locked(self): cursor = self.connection.cursor() cursor.execute("select * from locks" " where lock_file = ?", (self.lock_file,)) rows = cursor.fetchall() return not not rows def i_am_locking(self): cursor = self.connection.cursor() cursor.execute("select * from locks" " where lock_file = ?" " and unique_name = ?", (self.lock_file, self.unique_name)) return not not cursor.fetchall() def break_lock(self): cursor = self.connection.cursor() cursor.execute("delete from locks" " where lock_file = ?", (self.lock_file,)) self.connection.commit() if hasattr(os, "link"): FileLock = LinkFileLock else: FileLock = MkdirFileLock def _in_thread(func, *args, **kwargs): """Execute func(*args, **kwargs) after dt seconds. Helper for docttests. """ def _f(): func(*args, **kwargs) t = threading.Thread(target=_f, name='/*/*') t.start() return t def _testfile(): """Return platform-appropriate lock file name. Helper for doctests. """ import tempfile return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid()) def _lock_wait_unlock(event1, event2): """Lock from another thread. Helper for doctests. 
""" lock = FileLock(_testfile()) with lock: event1.set() # we're in, event2.wait() # wait for boss's permission to leave def _test(): global FileLock import doctest import sys def test_object(c): nfailed = ntests = 0 for (obj, recurse) in ((c, True), (LockBase, True), (sys.modules["__main__"], False)): tests = doctest.DocTestFinder(recurse=recurse).find(obj) runner = doctest.DocTestRunner(verbose="-v" in sys.argv) tests.sort(key = lambda test: test.name) for test in tests: f, t = runner.run(test) nfailed += f ntests += t print FileLock.__name__, "tests:", ntests, "failed:", nfailed return nfailed, ntests nfailed = ntests = 0 if hasattr(os, "link"): FileLock = LinkFileLock f, t = test_object(FileLock) nfailed += f ntests += t if hasattr(os, "mkdir"): FileLock = MkdirFileLock f, t = test_object(FileLock) nfailed += f ntests += t try: import sqlite3 except ImportError: print "SQLite3 is unavailable - not testing SQLiteFileLock." else: print "Testing SQLiteFileLock with sqlite", sqlite3.sqlite_version, print "& pysqlite", sqlite3.version FileLock = SQLiteFileLock f, t = test_object(FileLock) nfailed += f ntests += t print "total tests:", ntests, "total failed:", nfailed if __name__ == "__main__": _test()
__init__.py
# -*- coding: utf-8 -*- """The initialization file for the Pywikibot framework.""" # # (C) Pywikibot team, 2008-2017 # # Distributed under the terms of the MIT license. # from __future__ import absolute_import, unicode_literals __release__ = '3.0-dev' __version__ = '$Id: e4000cc8b1ddcd00ae795cdeca1a4c903d8a321e $' __url__ = 'https://www.mediawiki.org/wiki/Special:MyLanguage/Manual:Pywikibot' import atexit import datetime import math import re import sys import threading from decimal import Decimal if sys.version_info[0] > 2: from queue import Queue long = int basestring = str else: from Queue import Queue from warnings import warn # logging must be imported first so that other modules can # use these logging methods during the initialisation sequence. from pywikibot.logging import ( critical, debug, error, exception, log, output, stdout, warning ) from pywikibot import config2 as config from pywikibot.bot import ( input, input_choice, input_yn, inputChoice, handle_args, showHelp, ui, calledModuleName, Bot, CurrentPageBot, WikidataBot, # the following are flagged as deprecated on usage handleArgs, ) from pywikibot.bot_choice import ( QuitKeyboardInterrupt as _QuitKeyboardInterrupt, ) from pywikibot.data.api import UploadWarning as _UploadWarning from pywikibot.diff import PatchManager from pywikibot.exceptions import ( Error, InvalidTitle, BadTitle, NoPage, NoMoveTarget, SectionError, SiteDefinitionError, NoSuchSite, UnknownSite, UnknownFamily, UnknownExtension, NoUsername, UserBlocked, PageRelatedError, IsRedirectPage, IsNotRedirectPage, PageSaveRelatedError, PageNotSaved, OtherPageSaveError, LockedPage, CascadeLockedPage, LockedNoPage, NoCreateError, EditConflict, PageDeletedConflict, PageCreatedConflict, ServerError, FatalServerError, Server504Error, CaptchaError, SpamfilterError, CircularRedirect, InterwikiRedirectPage, WikiBaseError, CoordinateGlobeUnknownException, DeprecatedPageNotFoundError as _DeprecatedPageNotFoundError, _EmailUserError, ) from pywikibot.family import Family from pywikibot.i18n import translate from pywikibot.site import BaseSite from pywikibot.tools import ( # __ to avoid conflict with ModuleDeprecationWrapper._deprecated deprecated as __deprecated, deprecate_arg as _deprecate_arg, normalize_username, MediaWikiVersion, redirect_func, ModuleDeprecationWrapper as _ModuleDeprecationWrapper, PY2, UnicodeMixin, ) from pywikibot.tools.formatter import color_format import pywikibot.textlib as textlib textlib_methods = ( 'unescape', 'replaceExcept', 'removeDisabledParts', 'removeHTMLParts', 'isDisabled', 'interwikiFormat', 'interwikiSort', 'getLanguageLinks', 'replaceLanguageLinks', 'removeLanguageLinks', 'removeLanguageLinksAndSeparator', 'getCategoryLinks', 'categoryFormat', 'replaceCategoryLinks', 'removeCategoryLinks', 'removeCategoryLinksAndSeparator', 'replaceCategoryInPlace', 'compileLinkR', 'extract_templates_and_params', 'TimeStripper', ) __all__ = ( 'config', 'ui', 'Site', 'UnicodeMixin', 'translate', 'Page', 'FilePage', 'Category', 'Link', 'User', 'ItemPage', 'PropertyPage', 'Claim', 'html2unicode', 'url2unicode', 'unicode2html', 'stdout', 'output', 'warning', 'error', 'critical', 'debug', 'exception', 'input_choice', 'input', 'input_yn', 'inputChoice', 'handle_args', 'handleArgs', 'showHelp', 'ui', 'log', 'calledModuleName', 'Bot', 'CurrentPageBot', 'WikidataBot', 'Error', 'InvalidTitle', 'BadTitle', 'NoPage', 'NoMoveTarget', 'SectionError', 'SiteDefinitionError', 'NoSuchSite', 'UnknownSite', 'UnknownFamily', 'UnknownExtension', 'NoUsername', 'UserBlocked', 
'UserActionRefuse', 'PageRelatedError', 'IsRedirectPage', 'IsNotRedirectPage', 'PageSaveRelatedError', 'PageNotSaved', 'OtherPageSaveError', 'LockedPage', 'CascadeLockedPage', 'LockedNoPage', 'NoCreateError', 'EditConflict', 'PageDeletedConflict', 'PageCreatedConflict', 'UploadWarning', 'ServerError', 'FatalServerError', 'Server504Error', 'CaptchaError', 'SpamfilterError', 'CircularRedirect', 'InterwikiRedirectPage', 'WikiBaseError', 'CoordinateGlobeUnknownException', 'QuitKeyboardInterrupt', ) __all__ += textlib_methods if PY2: # T111615: Python 2 requires __all__ is bytes globals()['__all__'] = tuple(bytes(item) for item in __all__) for _name in textlib_methods: target = getattr(textlib, _name) wrapped_func = redirect_func(target) globals()[_name] = wrapped_func deprecated = redirect_func(__deprecated) deprecate_arg = redirect_func(_deprecate_arg) class Timestamp(datetime.datetime): """Class for handling MediaWiki timestamps. This inherits from datetime.datetime, so it can use all of the methods and operations of a datetime object. To ensure that the results of any operation are also a Timestamp object, be sure to use only Timestamp objects (and datetime.timedeltas) in any operation. Use Timestamp.fromISOformat() and Timestamp.fromtimestampformat() to create Timestamp objects from MediaWiki string formats. As these constructors are typically used to create objects using data passed provided by site and page methods, some of which return a Timestamp when previously they returned a MediaWiki string representation, these methods also accept a Timestamp object, in which case they return a clone. Use Site.getcurrenttime() for the current time; this is more reliable than using Timestamp.utcnow(). """ mediawikiTSFormat = "%Y%m%d%H%M%S" ISO8601Format = "%Y-%m-%dT%H:%M:%SZ" _ISO8601Format_new = '{0:+05d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z' def clone(self): """Clone this instance.""" return self.replace(microsecond=self.microsecond) @classmethod def fromISOformat(cls, ts): """Convert an ISO 8601 timestamp to a Timestamp object.""" # If inadvertantly passed a Timestamp object, use replace() # to create a clone. if isinstance(ts, cls): return ts.clone() return cls.strptime(ts, cls.ISO8601Format) @classmethod def fromtimestampformat(cls, ts): """Convert a MediaWiki internal timestamp to a Timestamp object.""" # If inadvertantly passed a Timestamp object, use replace() # to create a clone. if isinstance(ts, cls): return ts.clone() return cls.strptime(ts, cls.mediawikiTSFormat) def isoformat(self): """ Convert object to an ISO 8601 timestamp accepted by MediaWiki. datetime.datetime.isoformat does not postfix the ISO formatted date with a 'Z' unless a timezone is included, which causes MediaWiki ~1.19 and earlier to fail. 
""" return self.strftime(self.ISO8601Format) toISOformat = redirect_func(isoformat, old_name='toISOformat', class_name='Timestamp') def totimestampformat(self): """Convert object to a MediaWiki internal timestamp.""" return self.strftime(self.mediawikiTSFormat) def __str__(self): """Return a string format recognized by the API.""" return self.isoformat() def __add__(self, other): """Perform addition, returning a Timestamp instead of datetime.""" newdt = super(Timestamp, self).__add__(other) if isinstance(newdt, datetime.datetime): return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour, newdt.minute, newdt.second, newdt.microsecond, newdt.tzinfo) else: return newdt def __sub__(self, other): """Perform substraction, returning a Timestamp instead of datetime.""" newdt = super(Timestamp, self).__sub__(other) if isinstance(newdt, datetime.datetime): return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour, newdt.minute, newdt.second, newdt.microsecond, newdt.tzinfo) else: return newdt from pywikibot._wbtypes import WbRepresentation as _WbRepresentation class Coordinate(_WbRepresentation): """ Class for handling and storing Coordinates. For now its just being used for DataSite, but in the future we can use it for the GeoData extension. """ _items = ('lat', 'lon', 'entity') @_deprecate_arg('entity', 'globe_item') def __init__(self, lat, lon, alt=None, precision=None, globe=None, typ='', name='', dim=None, site=None, globe_item=None): """ Represent a geo coordinate. @param lat: Latitude @type lat: float @param lon: Longitude @type lon: float @param alt: Altitude? TODO FIXME @param precision: precision @type precision: float @param globe: Which globe the point is on @type globe: str @param typ: The type of coordinate point @type typ: str @param name: The name @type name: str @param dim: Dimension (in meters) @type dim: int @param site: The Wikibase site @type site: pywikibot.site.DataSite @param globe_item: The Wikibase item for the globe, or the entity URI of this Wikibase item. Takes precedence over 'globe' if present. @type globe_item: pywikibot.ItemPage or str """ self.lat = lat self.lon = lon self.alt = alt self._precision = precision self._entity = globe_item self.type = typ self.name = name self._dim = dim self.site = site or Site().data_repository() if globe: globe = globe.lower() elif not globe_item: globe = site.default_globe() self.globe = globe @property def entity(self): """Return the entity uri of the globe.""" if not self._entity: if self.globe not in self.site.globes(): raise CoordinateGlobeUnknownException( u"%s is not supported in Wikibase yet." % self.globe) return self.site.globes()[self.globe] if isinstance(self._entity, ItemPage): return self._entity.concept_uri() return self._entity def toWikibase(self): """ Export the data to a JSON object for the Wikibase API. FIXME: Should this be in the DataSite object? @return: Wikibase JSON @rtype: dict """ return {'latitude': self.lat, 'longitude': self.lon, 'altitude': self.alt, 'globe': self.entity, 'precision': self.precision, } @classmethod def fromWikibase(cls, data, site): """ Constructor to create an object from Wikibase's JSON output. 
@param data: Wikibase JSON @type data: dict @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: pywikibot.Coordinate """ globe = None if data['globe']: globes = {} for name, entity in site.globes().items(): globes[entity] = name globe = globes.get(data['globe']) return cls(data['latitude'], data['longitude'], data['altitude'], data['precision'], globe, site=site, globe_item=data['globe']) @property def precision(self): u""" Return the precision of the geo coordinate. The precision is calculated if the Coordinate does not have a precision, and self._dim is set. When no precision and no self._dim exists, None is returned. The biggest error (in degrees) will be given by the longitudinal error; the same error in meters becomes larger (in degrees) further up north. We can thus ignore the latitudinal error. The longitudinal can be derived as follows: In small angle approximation (and thus in radians): M{Δλ ≈ Δpos / r_φ}, where r_φ is the radius of earth at the given latitude. Δλ is the error in longitude. M{r_φ = r cos φ}, where r is the radius of earth, φ the latitude Therefore:: precision = math.degrees(self._dim/(radius*math.cos(math.radians(self.lat)))) @rtype: float or None """ if self._dim is None and self._precision is None: return None if self._precision is None and self._dim is not None: radius = 6378137 # TODO: Support other globes self._precision = math.degrees( self._dim / (radius * math.cos(math.radians(self.lat)))) return self._precision @precision.setter def precision(self, value): self._precision = value def precisionToDim(self): """Convert precision from Wikibase to GeoData's dim and return the latter. dim is calculated if the Coordinate doesn't have a dimension, and precision is set. When neither dim nor precision are set, ValueError is thrown. Carrying on from the earlier derivation of precision, since precision = math.degrees(dim/(radius*math.cos(math.radians(self.lat)))), we get dim = math.radians(precision)*radius*math.cos(math.radians(self.lat)) But this is not valid, since it returns a float value for dim which is an integer. We must round it off to the nearest integer. Therefore:: dim = int(round(math.radians(precision)*radius*math.cos(math.radians(self.lat)))) @rtype: int or None """ if self._dim is None and self._precision is None: raise ValueError('No values set for dim or precision') if self._dim is None and self._precision is not None: radius = 6378137 self._dim = int( round( math.radians(self._precision) * radius * math.cos(math.radians(self.lat)) ) ) return self._dim def get_globe_item(self, repo=None, lazy_load=False): """ Return the ItemPage corresponding to the globe. Note that the globe need not be in the same data repository as the Coordinate itself. A successful lookup is stored as an internal value to avoid the need for repeated lookups. @param repo: the Wikibase site for the globe, if different from that provided with the Coordinate. @type repo: pywikibot.site.DataSite @param lazy_load: Do not raise NoPage if ItemPage does not exist. 
@type lazy_load: bool @return: pywikibot.ItemPage """ if isinstance(self._entity, ItemPage): return self._entity repo = repo or self.site return ItemPage.from_entity_uri(repo, self.entity, lazy_load) class WbTime(_WbRepresentation): """A Wikibase time representation.""" PRECISION = {'1000000000': 0, '100000000': 1, '10000000': 2, '1000000': 3, '100000': 4, '10000': 5, 'millenia': 6, 'century': 7, 'decade': 8, 'year': 9, 'month': 10, 'day': 11, 'hour': 12, 'minute': 13, 'second': 14 } FORMATSTR = '{0:+012d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z' _items = ('year', 'month', 'day', 'hour', 'minute', 'second', 'precision', 'before', 'after', 'timezone', 'calendarmodel') def __init__(self, year=None, month=None, day=None, hour=None, minute=None, second=None, precision=None, before=0, after=0, timezone=0, calendarmodel=None, site=None): """ Create a new WbTime object. The precision can be set by the Wikibase int value (0-14) or by a human readable string, e.g., 'hour'. If no precision is given, it is set according to the given time units. Timezone information is given in three different ways depending on the time: * Times after the implementation of UTC (1972): as an offset from UTC in minutes; * Times before the implementation of UTC: the offset of the time zone from universal time; * Before the implementation of time zones: The longitude of the place of the event, in the range −180° to 180°, multiplied by 4 to convert to minutes. @param year: The year as a signed integer of between 1 and 16 digits. @type year: long @param month: Month @type month: int @param day: Day @type day: int @param hour: Hour @type hour: int @param minute: Minute @type minute: int @param second: Second @type second: int @param precision: The unit of the precision of the time. @type precision: int or str @param before: Number of units after the given time it could be, if uncertain. The unit is given by the precision. @type before: int @param after: Number of units before the given time it could be, if uncertain. The unit is given by the precision. @type after: int @param timezone: Timezone information in minutes. 
@type timezone: int @param calendarmodel: URI identifying the calendar model @type calendarmodel: str @param site: The Wikibase site @type site: pywikibot.site.DataSite """ if year is None: raise ValueError('no year given') self.precision = self.PRECISION['second'] if second is None: self.precision = self.PRECISION['minute'] second = 0 if minute is None: self.precision = self.PRECISION['hour'] minute = 0 if hour is None: self.precision = self.PRECISION['day'] hour = 0 if day is None: self.precision = self.PRECISION['month'] day = 1 if month is None: self.precision = self.PRECISION['year'] month = 1 self.year = long(year) self.month = month self.day = day self.hour = hour self.minute = minute self.second = second self.after = after self.before = before self.timezone = timezone if calendarmodel is None: if site is None: site = Site().data_repository() if site is None: raise ValueError('Site %s has no data repository' % Site()) calendarmodel = site.calendarmodel() self.calendarmodel = calendarmodel # if precision is given it overwrites the autodetection above if precision is not None: if (isinstance(precision, int) and precision in self.PRECISION.values()): self.precision = precision elif precision in self.PRECISION: self.precision = self.PRECISION[precision] else: raise ValueError('Invalid precision: "%s"' % precision) @classmethod def fromTimestr(cls, datetimestr, precision=14, before=0, after=0, timezone=0, calendarmodel=None, site=None): """ Create a new WbTime object from a UTC date/time string. The timestamp differs from ISO 8601 in that: * The year is always signed and having between 1 and 16 digits; * The month, day and time are zero if they are unknown; * The Z is discarded since time zone is determined from the timezone param. @param datetimestr: Timestamp in a format resembling ISO 8601, e.g. +2013-01-01T00:00:00Z @type datetimestr: str @param precision: The unit of the precision of the time. @type precision: int or str @param before: Number of units after the given time it could be, if uncertain. The unit is given by the precision. @type before: int @param after: Number of units before the given time it could be, if uncertain. The unit is given by the precision. @type after: int @param timezone: Timezone information in minutes. @type timezone: int @param calendarmodel: URI identifying the calendar model @type calendarmodel: str @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: pywikibot.WbTime """ match = re.match(r'([-+]?\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z', datetimestr) if not match: raise ValueError(u"Invalid format: '%s'" % datetimestr) t = match.groups() return cls(long(t[0]), int(t[1]), int(t[2]), int(t[3]), int(t[4]), int(t[5]), precision, before, after, timezone, calendarmodel, site) @classmethod def fromTimestamp(cls, timestamp, precision=14, before=0, after=0, timezone=0, calendarmodel=None, site=None): """ Create a new WbTime object from a pywikibot.Timestamp. @param timestamp: Timestamp @type timestamp: pywikibot.Timestamp @param precision: The unit of the precision of the time. @type precision: int or str @param before: Number of units after the given time it could be, if uncertain. The unit is given by the precision. @type before: int @param after: Number of units before the given time it could be, if uncertain. The unit is given by the precision. @type after: int @param timezone: Timezone information in minutes. 
@type timezone: int @param calendarmodel: URI identifying the calendar model @type calendarmodel: str @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: pywikibot.WbTime """ return cls.fromTimestr(timestamp.isoformat(), precision=precision, before=before, after=after, timezone=timezone, calendarmodel=calendarmodel, site=site) def toTimestr(self, force_iso=False): """ Convert the data to a UTC date/time string. See fromTimestr() for differences between output with and without force_iso. @param force_iso: whether the output should be forced to ISO 8601 @type force_iso: bool @return: Timestamp in a format resembling ISO 8601 @rtype: str """ if force_iso: return Timestamp._ISO8601Format_new.format( self.year, max(1, self.month), max(1, self.day), self.hour, self.minute, self.second) return self.FORMATSTR.format(self.year, self.month, self.day, self.hour, self.minute, self.second) def toTimestamp(self): """ Convert the data to a pywikibot.Timestamp. @return: Timestamp @rtype: pywikibot.Timestamp @raises ValueError: instance value can not be represented using Timestamp """ if self.year <= 0: raise ValueError('You cannot turn BC dates into a Timestamp') return Timestamp.fromISOformat( self.toTimestr(force_iso=True).lstrip('+')) def toWikibase(self): """ Convert the data to a JSON object for the Wikibase API. @return: Wikibase JSON @rtype: dict """ json = {'time': self.toTimestr(), 'precision': self.precision, 'after': self.after, 'before': self.before, 'timezone': self.timezone, 'calendarmodel': self.calendarmodel } return json @classmethod def fromWikibase(cls, wb, site=None): """ Create a WbTime from the JSON data given by the Wikibase API. @param wb: Wikibase JSON @type wb: dict @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: pywikibot.WbTime """ return cls.fromTimestr(wb['time'], wb['precision'], wb['before'], wb['after'], wb['timezone'], wb['calendarmodel'], site) class WbQuantity(_WbRepresentation): """A Wikibase quantity representation.""" _items = ('amount', 'upperBound', 'lowerBound', 'unit') @staticmethod def _require_errors(site): """ Check if the Wikibase site is so old it requires error bounds to be given. If no site item is supplied it raises a warning and returns True. @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: bool """ if not site: warning( "WbQuantity now expects a 'site' parameter. This is needed to " "ensure correct handling of error bounds.") return False return MediaWikiVersion(site.version()) < MediaWikiVersion('1.29.0-wmf.2') @staticmethod def _todecimal(value): """ Convert a string to a Decimal for use in WbQuantity. None value is returned as is. @param value: decimal number to convert @type value: str @rtype: Decimal """ if isinstance(value, Decimal): return value elif value is None: return None return Decimal(str(value)) @staticmethod def _fromdecimal(value): """ Convert a Decimal to a string representation suitable for WikiBase. None value is returned as is. @param value: decimal number to convert @type value: Decimal @rtype: str """ if value is None: return None return format(value, "+g") def __init__(self, amount, unit=None, error=None, site=None): u""" Create a new WbQuantity object. @param amount: number representing this quantity @type amount: string or Decimal. Other types are accepted, and converted via str to Decimal. @param unit: the Wikibase item for the unit or the entity URI of this Wikibase item. 
@type unit: pywikibot.ItemPage, str or None @param error: the uncertainty of the amount (e.g. ±1) @type error: same as amount, or tuple of two values, where the first value is the upper error and the second is the lower error value. @param site: The Wikibase site @type site: pywikibot.site.DataSite """ if amount is None: raise ValueError('no amount given') self.amount = self._todecimal(amount) self._unit = unit self.site = site or Site().data_repository() # also allow entity URIs to be provided via unit parameter if isinstance(unit, basestring) and \ unit.partition('://')[0] not in ('http', 'https'): raise ValueError("'unit' must be an ItemPage or entity uri.") if error is None and not self._require_errors(site): self.upperBound = self.lowerBound = None else: if error is None: self.upperBound = self.lowerBound = Decimal(0) elif isinstance(error, tuple): upperError = self._todecimal(error[0]) lowerError = self._todecimal(error[1]) else: upperError = lowerError = self._todecimal(error) self.upperBound = self.amount + upperError self.lowerBound = self.amount - lowerError @property def unit(self): """Return _unit's entity uri or '1' if _unit is None.""" if isinstance(self._unit, ItemPage): return self._unit.concept_uri() return self._unit or '1' def get_unit_item(self, repo=None, lazy_load=False): """ Return the ItemPage corresponding to the unit. Note that the unit need not be in the same data repository as the WbQuantity itself. A successful lookup is stored as an internal value to avoid the need for repeated lookups. @param repo: the Wikibase site for the unit, if different from that provided with the WbQuantity. @type repo: pywikibot.site.DataSite @param lazy_load: Do not raise NoPage if ItemPage does not exist. @type lazy_load: bool @return: pywikibot.ItemPage """ if not isinstance(self._unit, basestring): return self._unit repo = repo or self.site self._unit = ItemPage.from_entity_uri(repo, self._unit, lazy_load) return self._unit def toWikibase(self): """ Convert the data to a JSON object for the Wikibase API. @return: Wikibase JSON @rtype: dict """ json = {'amount': self._fromdecimal(self.amount), 'upperBound': self._fromdecimal(self.upperBound), 'lowerBound': self._fromdecimal(self.lowerBound), 'unit': self.unit } return json @classmethod def fromWikibase(cls, wb, site=None): """ Create a WbQuantity from the JSON data given by the Wikibase API. @param wb: Wikibase JSON @type wb: dict @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: pywikibot.WbQuantity """ amount = cls._todecimal(wb['amount']) upperBound = cls._todecimal(wb.get('upperBound')) lowerBound = cls._todecimal(wb.get('lowerBound')) bounds_provided = (upperBound is not None and lowerBound is not None) error = None if bounds_provided or cls._require_errors(site): error = (upperBound - amount, amount - lowerBound) if wb['unit'] == '1': unit = None else: unit = wb['unit'] return cls(amount, unit, error, site) class WbMonolingualText(_WbRepresentation): """A Wikibase monolingual text representation.""" _items = ('text', 'language') def __init__(self, text, language): """ Create a new WbMonolingualText object. @param text: text string @type text: str @param language: language code of the string @type language: str """ if not text or not language: raise ValueError('text and language cannot be empty') self.text = text self.language = language def toWikibase(self): """ Convert the data to a JSON object for the Wikibase API. 
@return: Wikibase JSON @rtype: dict """ json = {'text': self.text, 'language': self.language } return json @classmethod def fromWikibase(cls, wb): """ Create a WbMonolingualText from the JSON data given by the Wikibase API. @param wb: Wikibase JSON @type wb: dict @rtype: pywikibot.WbMonolingualText """ return cls(wb['text'], wb['language']) class _WbDataPage(_WbRepresentation): """ A Wikibase representation for data pages. A temporary implementation until T162336 has been resolved. Note that this class cannot be used directly """ _items = ('page', ) @classmethod def _get_data_site(cls, repo_site): """ Return the site serving as a repository for a given data type. Must be implemented in the extended class. @param site: The Wikibase site @type site: pywikibot.site.APISite @rtype: pywikibot.site.APISite """ raise NotImplementedError @classmethod def _get_type_specifics(cls, site): """ Return the specifics for a given data type. Must be implemented in the extended class. The dict should have three keys: * ending: str, required filetype-like ending in page titles. * label: str, describing the data type for use in error messages. * data_site: pywikibot.site.APISite, site serving as a repository for the given data type. @param site: The Wikibase site @type site: pywikibot.site.APISite @rtype: dict """ raise NotImplementedError @staticmethod def _validate(page, data_site, ending, label): """ Validate the provided page against general and type specific rules. @param page: Page containing the data. @type text: pywikibot.Page @param data_site: The site serving as a repository for the given data type. @type data_site: pywikibot.site.APISite @param ending: Required filetype-like ending in page titles. E.g. '.map' @type ending: str @param label: Label describing the data type in error messages. @type site: str """ if not isinstance(page, Page): raise ValueError('Page must be a pywikibot.Page object.') # validate page exists if not page.exists(): raise ValueError('Page must exist.') # validate page is on the right site, and that site supports the type if not data_site: raise ValueError( 'The provided site does not support {0}.'.format(label)) if page.site != data_site: raise ValueError( 'Page must be on the {0} repository site.'.format(label)) # validate page title fulfills hard-coded Wikibase requirement # pcre regexp: '/^Data:[^\\[\\]#\\\:{|}]+\.map$/u' for geo-shape # pcre regexp: '/^Data:[^\\[\\]#\\\:{|}]+\.tab$/u' for tabular-data # As we have already checked for existence the following simplified # check should be enough. if not page.title().startswith('Data:') or \ not page.title().endswith(ending): raise ValueError( "Page must be in 'Data:' namespace and end in '{0}' " "for {1}.".format(ending, label)) def __init__(self, page, site=None): """ Create a new _WbDataPage object. @param page: page containing the data @type text: pywikibot.Page @param site: The Wikibase site @type site: pywikibot.site.DataSite """ site = site or Site().data_repository() specifics = type(self)._get_type_specifics(site) _WbDataPage._validate(page, specifics['data_site'], specifics['ending'], specifics['label']) self.page = page def __hash__(self): """Override super.hash() as toWikibase is a string for _WbDataPage.""" return hash(self.toWikibase()) def toWikibase(self): """ Convert the data to the value required by the Wikibase API. @return: title of the data page incl. 
namespace @rtype: str """ return self.page.title() @classmethod def fromWikibase(cls, page_name, site): """ Create a _WbDataPage from the JSON data given by the Wikibase API. @param page_name: page name from Wikibase value @type page_name: str @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: pywikibot._WbDataPage """ data_site = cls._get_data_site(site) page = Page(data_site, page_name) return cls(page, site) class WbGeoShape(_WbDataPage): """ A Wikibase geo-shape representation. """ @classmethod def _get_data_site(cls, site): """ Return the site serving as a geo-shape repository. @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: pywikibot.site.APISite """ return site.geo_shape_repository() @classmethod def _get_type_specifics(cls, site): """ Return the specifics for WbGeoShape. @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: dict """ specifics = { 'ending': '.map', 'label': 'geo-shape', 'data_site': cls._get_data_site(site) } return specifics class WbTabularData(_WbDataPage): """ A Wikibase tabular-data representation. """ @classmethod def _get_data_site(cls, site): """ Return the site serving as a tabular-data repository. @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: pywikibot.site.APISite """ return site.tabular_data_repository() @classmethod def _get_type_specifics(cls, site): """ Return the specifics for WbTabularData. @param site: The Wikibase site @type site: pywikibot.site.DataSite @rtype: dict """ specifics = { 'ending': '.tab', 'label': 'tabular-data', 'data_site': cls._get_data_site(site) } return specifics class WbUnknown(_WbRepresentation): """ A Wikibase representation for unknown data type. This will prevent the bot from breaking completely when a new type is introduced. This data type is just a json container """ _items = ('json',) def __init__(self, json): """ Create a new WbUnknown object. @param json: Wikibase JSON @type: dict """ self.json = json def toWikibase(self): """ Return the JSON object for the Wikibase API. @return: Wikibase JSON @rtype: dict """ return self.json @classmethod def fromWikibase(cls, json): """ Create a WbUnknown from the JSON data given by the Wikibase API. @param json: Wikibase JSON @type json: dict @rtype: pywikibot.WbUnknown """ return cls(json) _sites = {} _url_cache = {} # The code/fam pair for each URL def Site(code=None, fam=None, user=None, sysop=None, interface=None, url=None): """A factory method to obtain a Site object. Site objects are cached and reused by this method. By default rely on config settings. These defaults may all be overridden using the method parameters. @param code: language code (override config.mylang) @type code: string @param fam: family name or object (override config.family) @type fam: string or Family @param user: bot user name to use on this site (override config.usernames) @type user: unicode @param sysop: sysop user to use on this site (override config.sysopnames) @type sysop: unicode @param interface: site class or name of class in pywikibot.site (override config.site_interface) @type interface: subclass of L{pywikibot.site.BaseSite} or string @param url: Instead of code and fam, does try to get a Site based on the URL. Still requires that the family supporting that URL exists. 
@type url: string @rtype: pywikibot.site.APISite """ # Either code and fam or only url if url and (code or fam): raise ValueError('URL to the wiki OR a pair of code and family name ' 'should be provided') _logger = "wiki" if url: if url not in _url_cache: matched_sites = [] # Iterate through all families and look, which does apply to # the given URL for fam in config.family_files: family = Family.load(fam) code = family.from_url(url) if code is not None: matched_sites += [(code, family)] if matched_sites: if len(matched_sites) > 1: warning( 'Found multiple matches for URL "{0}": {1} (use first)' .format(url, ', '.join(str(s) for s in matched_sites))) _url_cache[url] = matched_sites[0] else: # TODO: As soon as AutoFamily is ready, try and use an # AutoFamily _url_cache[url] = None cached = _url_cache[url] if cached: code = cached[0] fam = cached[1] else: raise SiteDefinitionError("Unknown URL '{0}'.".format(url)) else: # Fallback to config defaults code = code or config.mylang fam = fam or config.family if not isinstance(fam, Family): fam = Family.load(fam) interface = interface or fam.interface(code) # config.usernames is initialised with a defaultdict for each family name family_name = str(fam) code_to_user = config.usernames['*'].copy() code_to_user.update(config.usernames[family_name]) user = user or code_to_user.get(code) or code_to_user.get('*') code_to_sysop = config.sysopnames['*'].copy() code_to_sysop.update(config.sysopnames[family_name]) sysop = sysop or code_to_sysop.get(code) or code_to_sysop.get('*') if not isinstance(interface, type): # If it isnt a class, assume it is a string try: tmp = __import__('pywikibot.site', fromlist=[interface]) interface = getattr(tmp, interface) except ImportError: raise ValueError('Invalid interface name: {0}'.format(interface)) if not issubclass(interface, BaseSite): warning('Site called with interface=%s' % interface.__name__) user = normalize_username(user) key = '%s:%s:%s:%s' % (interface.__name__, fam, code, user) if key not in _sites or not isinstance(_sites[key], interface): _sites[key] = interface(code=code, fam=fam, user=user, sysop=sysop) debug(u"Instantiated %s object '%s'" % (interface.__name__, _sites[key]), _logger) if _sites[key].code != code: warn('Site %s instantiated using different code "%s"' % (_sites[key], code), UserWarning, 2) return _sites[key] # alias for backwards-compability getSite = redirect_func(Site, old_name='getSite') # These imports depend on Wb* classes above. from pywikibot.page import ( Page, FilePage, Category, Link, User, ItemPage, PropertyPage, Claim, ) from pywikibot.page import html2unicode, url2unicode, unicode2html link_regex = re.compile(r'\[\[(?P<title>[^\]|[<>{}]*)(\|.*?)?\]\]') @__deprecated('comment parameter for page saving method') def setAction(s): """Set a summary to use for changed page submissions.""" config.default_edit_summary = s def showDiff(oldtext, newtext, context=0): """ Output a string showing the differences between oldtext and newtext. The differences are highlighted (only on compatible systems) to show which changes were made. """ PatchManager(oldtext, newtext, context=context).print_hunks() # Throttle and thread handling def stopme(): """ Drop this process from the throttle log, after pending threads finish. Can be called manually if desired. Does not clean async_manager. This should be run when a bot does not interact with the Wiki, or when it has stopped doing so. After a bot has run stopme() it will not slow down other bots any more. 
""" _flush(False) def _flush(stop=True): """ Drop this process from the throttle log, after pending threads finish. Wait for the page-putter to flush its queue. Also drop this process from the throttle log. Called automatically at Python exit. """ _logger = "wiki" debug('_flush() called', _logger) def remaining(): remainingPages = page_put_queue.qsize() if stop: # -1 because we added a None element to stop the queue remainingPages -= 1 remainingSeconds = datetime.timedelta( seconds=(remainingPages * config.put_throttle)) return (remainingPages, remainingSeconds) if stop: # None task element leaves async_manager page_put_queue.put((None, [], {})) num, sec = remaining() if num > 0 and sec.total_seconds() > config.noisysleep: output(color_format( '{lightblue}Waiting for {num} pages to be put. ' 'Estimated time remaining: {sec}{default}', num=num, sec=sec)) while _putthread.isAlive() and page_put_queue.qsize() > 0: try: _putthread.join(1) except KeyboardInterrupt: if input_yn('There are {0} pages remaining in the queue. ' 'Estimated time remaining: {1}\nReally exit?' ''.format(*remaining()), default=False, automatic_quit=False): return # only need one drop() call because all throttles use the same global pid try: list(_sites.values())[0].throttle.drop() log(u"Dropped throttle(s).") except IndexError: pass atexit.register(_flush) # Create a separate thread for asynchronous page saves (and other requests) def async_manager(): """Daemon; take requests from the queue and execute them in background.""" while True: (request, args, kwargs) = page_put_queue.get() if request is None: break request(*args, **kwargs) page_put_queue.task_done() def async_request(request, *args, **kwargs): """Put a request on the queue, and start the daemon if necessary.""" if not _putthread.isAlive(): try: page_put_queue.mutex.acquire() try: _putthread.start() except (AssertionError, RuntimeError): pass finally: page_put_queue.mutex.release() page_put_queue.put((request, args, kwargs)) # queue to hold pending requests page_put_queue = Queue(config.max_queue_size) # set up the background thread _putthread = threading.Thread(target=async_manager) # identification for debugging purposes _putthread.setName('Put-Thread') _putthread.setDaemon(True) wrapper = _ModuleDeprecationWrapper(__name__) wrapper._add_deprecated_attr('ImagePage', FilePage) wrapper._add_deprecated_attr( 'cookie_jar', replacement_name='pywikibot.comms.http.cookie_jar') wrapper._add_deprecated_attr( 'PageNotFound', _DeprecatedPageNotFoundError, warning_message=('{0}.{1} is deprecated, and no longer ' 'used by pywikibot; use http.fetch() instead.')) wrapper._add_deprecated_attr( 'UserActionRefuse', _EmailUserError, warning_message='UserActionRefuse is deprecated; ' 'use UserRightsError and/or NotEmailableError instead.') wrapper._add_deprecated_attr( 'QuitKeyboardInterrupt', _QuitKeyboardInterrupt, warning_message='pywikibot.QuitKeyboardInterrupt is deprecated; ' 'use pywikibot.bot.QuitKeyboardInterrupt instead.') wrapper._add_deprecated_attr( 'UploadWarning', _UploadWarning, warning_message='pywikibot.UploadWarning is deprecated; ' 'use APISite.upload with a warning handler instead.')
Hiwin_RT605_ArmCommand_Socket_20190628090913.py
#!/usr/bin/env python3 # license removed for brevity import rospy import os import socket ##多執行序 import threading import time import sys import matplotlib as plot import HiwinRA605_socket_TCPcmd_v2 as TCP import HiwinRA605_socket_Taskcmd_v2 as Taskcmd import numpy as np from std_msgs.msg import String from ROS_Socket.srv import * from ROS_Socket.msg import * from std_msgs.msg import Int32MultiArray import math import enum #Socket = 0 #data = '0' #設定傳輸資料初始值 Arm_feedback = 1 #假設手臂忙碌 NAME = 'socket_server' arm_mode_flag = False ##------------class pos------- class point(): def __init__(self, x, y, z, pitch, roll, yaw): self.x = x self.y = y self.z = z self.pitch = pitch self.roll = roll self.yaw = yaw pos = point(0.0,36.8,11.35,-90.0,0.0,0.0) ##------------class socket_cmd--------- class socket_data(): def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode): self.grip = grip self.setvel = setvel self.ra = ra self.delay = delay self.setboth = setboth self.action = action self.Speedmode = Speedmode socket_cmd = socket_data(0,0.0,0,0,0,0,0) ##-----------switch define------------## class switch(object): def __init__(self, value): self.value = value self.fall = False def __iter__(self): """Return the match method once, then stop""" yield self.match raise StopIteration def match(self, *args): """Indicate whether or not to enter a case suite""" if self.fall or not args: return True elif self.value in args: # changed for v1.5, see below self.fall = True return True else: return False ##-----------client feedback arm state---------- class StateFeedback(): def __init__(self,ArmState,SentFlag): self.ArmState = ArmState self.SentFlag = SentFlag state_feedback = StateFeedback(0,0) class client(): def __init__(self): #self.get_connect() pass def get_connect(self): self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.s.connect(('192.168.0.1', 8080)) def send(self, msg): self.s.send(msg.encode('utf-8')) #用utf-8來encode,還有其他encode的方法,str用utf-8就OK! 
def get_recieve(self): data = self.s.recv(1024) #1024指定buffer的大小,限制一次收多少 data.decode('utf-8') return data def close(self): self.s.close() Socket = client() def point_data(x,y,z,pitch,roll,yaw): ##接收策略端傳送位姿資料 pos.x = x pos.y = y pos.z = z pos.pitch = pitch pos.roll = roll pos.yaw = yaw ##----------Arm Mode-------------### def Arm_Mode(action,grip,ra,setvel,setboth): ##接收策略端傳送手臂模式資料 global arm_mode_flag socket_cmd.action = action socket_cmd.grip = grip socket_cmd.ra = ra socket_cmd.setvel = setvel socket_cmd.setboth = setboth arm_mode_flag = True Socket_command() ##-------Arm Speed Mode------------### def Speed_Mode(speedmode): ##接收策略端傳送手臂模式資料 socket_cmd.Speedmode = speedmode def socket_talker(): ##創建Server node pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10) rospy.init_node(NAME) rate = rospy.Rate(10) # 10hz print ("Ready to connect") while not rospy.is_shutdown(): # hello_str = "hello world %s" % rospy.get_time() state = Int32MultiArray() state.data = [state_feedback.ArmState,state_feedback.SentFlag] pub.publish(state) rate.sleep() ##----------socket 封包傳輸--------------## ##---------------socket 傳輸手臂命令----------------- def Socket_command(): global Socket for case in switch(socket_cmd.action): #-------PtP Mode-------- if case(Taskcmd.Action_Type.PtoP): for case in switch(socket_cmd.setboth): if case(Taskcmd.Ctrl_Mode.CTRL_POS): data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel) break if case(Taskcmd.Ctrl_Mode.CTRL_EULER): data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel) break if case(Taskcmd.Ctrl_Mode.CTRL_BOTH): data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel) break break #-------Line Mode-------- if case(Taskcmd.Action_Type.Line): for case in switch(socket_cmd.setboth): if case(Taskcmd.Ctrl_Mode.CTRL_POS): data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel) break if case(Taskcmd.Ctrl_Mode.CTRL_EULER): data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel ) break if case(Taskcmd.Ctrl_Mode.CTRL_BOTH): data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel ) break break #-------設定手臂速度-------- if case(Taskcmd.Action_Type.SetVel): data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel) break #-------設定手臂Delay時間-------- if case(Taskcmd.Action_Type.Delay): data = TCP.SetDelay(socket_cmd.grip,0) break #-------設定手臂急速&安全模式-------- if case(Taskcmd.Action_Type.Mode): data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode) break socket_cmd.action= 6 ##切換初始mode狀態 print(data) print("Socket:", Socket) #Socket.send(data.encode('utf-8'))#socket傳送for python to translate str Socket.send(data) ##-----------socket client-------- def socket_client(): global Socket try: #Socket = client() Socket.get_connect() #Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin #s.connect(('192.168.1.102', 8080))#iclab computerx print('Connection has been successful') except socket.error as msg: print(msg) sys.exit(1) #print('Connection has been successful') Socket_feedback(Socket) rospy.on_shutdown(myhook) 
Socket.close() def Socket_feedback(s): Socket = s while 1: feedback_str = Socket.get_recieve() #手臂端傳送手臂狀態 if str(feedback_str[2]) == '48':# F 手臂為Ready狀態準備接收下一個運動指令 state_feedback.ArmState = 0 if str(feedback_str[2]) == '49':# T 手臂為忙碌狀態無法執行下一個運動指令 state_feedback.ArmState = 1 if str(feedback_str[2]) == '54':# 6 策略完成 state_feedback.ArmState = 6 print("shutdown") #確認傳送旗標 if str(feedback_str[4]) == '48':#回傳0 false state_feedback.SentFlag = 0 if str(feedback_str[4]) == '49':#回傳1 true state_feedback.SentFlag = 1 ##---------------socket 傳輸手臂命令 end----------------- if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown: break ##-----------socket client end-------- ##-------------socket 封包傳輸 end--------------## def myhook(): print ("shutdown time!") if __name__ == '__main__': socket_cmd.action = 6##切換初始mode狀態 ## 多執行緒 t = threading.Thread(target=socket_client) t.start() # 開啟多執行緒 #time.sleep(1) try: socket_talker() except rospy.ROSInterruptException: pass t.join() ## 多執行序 end
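# Hedged aside on the switch helper used above: under PEP 479 (Python 3.7+) a bare
# `raise StopIteration` inside a generator is converted to RuntimeError, which would
# surface in Socket_command() whenever no case matches and the for-loop asks the
# generator for a second value. A minimal, self-contained sketch of the same
# fall-through pattern that simply lets the generator end instead:
class switch(object):
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        yield self.match
        return                        # end the generator; do not raise StopIteration

    def match(self, *args):
        if self.fall or not args:
            return True
        if self.value in args:
            self.fall = True          # remember the hit so later cases fall through
            return True
        return False


if __name__ == "__main__":
    for case in switch(2):
        if case(1):
            print("one")
            break
        if case(2):
            print("two")              # prints "two"
            break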
dicts.py
#!/usr/bin/env python2.5 """ ############################################################################# ## ## file : dicts.py ## ## description : see below ## ## project : Tango Control System ## ## $Author: Sergi Rubio Manrique, srubio@cells.es $ ## ## $Revision: 2008 $ ## ## copyleft : ALBA Synchrotron Controls Section, CELLS ## Bellaterra ## Spain ## ############################################################################# ## ## This file is part of Tango Control System ## ## Tango Control System is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as published ## by the Free Software Foundation; either version 3 of the License, or ## (at your option) any later version. ## ## Tango Control System is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, see <http://www.gnu.org/licenses/>. ########################################################################### @package dicts Some extensions to python dictionary ThreadDict: Thread safe dictionary with redefinable read/write methods and a backgroud thread for hardware update. defaultdict_fromkey: Creates a dictionary with a default_factory function that creates new elements using key as argument. CaselessDict: caseless dictionary CaselessDefaultDict: a join venture between caseless and default dict from key @deprecated @note see in tau.core.utils.containers by Sergi Rubio, srubio@cells.es, 2008 """ import time,traceback,os import threading # needed for ThreadDict import collections from collections import defaultdict, deque try: from collections import OrderedDict except: pass from .objects import self_locked from .functional import * ENC = 'latin-1' def dict2json(dct,filename=None,throw=False,recursive=True, encoding=ENC,as_dict=False): """ It will check that all objects in dict are serializable. If throw is False, a corrected dictionary will be returned. If filename is given, dict will be saved as a .json file. """ import json result = {} for k,v in dct.items(): try: json.dumps(v,encoding=encoding) result[k] = v except Exception,e: if throw: raise e if isString(v): result[k] = '' elif isSequence(v): try: result[k] = list(v) json.dumps(result[k]) except: result[k] = [] elif isMapping(v,strict=True) and recursive: result[k] = dict2json(v,None,False,True,encoding=encoding) if filename: json.dump(result,open(filename,'w'),encoding=encoding) elif not as_dict: result = json.dumps(result) return result if not filename else filename def dec(s,encoding=ENC): #dec = lambda s: str(s.decode(encoding) if isinstance(s,unicode) else s) try: if isinstance(s,unicode): s = s.encode(encoding) return str(s) else: return str(s) except Exception,e: print('dec(%s) failed!'%(s)) traceback.print_exc() raise e def json2dict(jstr,encoding=ENC): """ Converts unicode to str recursively. 
:param jstr: may be json string, filename or dictionary in the last case, this method is equivalent to fandango.unicode2str(obj) """ import json if not hasattr(jstr,'items'): if '{' not in jstr and os.path.exists(jstr): f = open(jstr) jstr = json.load(f,encoding=encoding) f.close() else: jstr = json.loads(jstr,encoding=encoding) d = {} for k,v in jstr.items(): k = dec(k) if isinstance(v,basestring): d[k] = dec(v) elif isinstance(v,(list,tuple)): d[k] = [(dec(i) if isinstance(i,basestring) else i) for i in v] elif hasattr(v,'items'): d[k] = json2dict(v,encoding=encoding) else: d[k] = v return d class ThreadDict(dict): ''' Thread safe dictionary with redefinable read/write methods and a background thread for hardware update. All methods are thread-safe using @self_lock decorator. NOTE: any method decorated in this way CANNOT call other decorated methods! All values of the dictionary will be automatically updated in a separate Thread using read_method provided. Any value overwritten in the dict should launch the write_method. delay argument will pause the thread for a time after start() is called Briefing: a[2] equals to a[2]=read_method(2) a[2]=1 equals to a[2]=write_method(2,1) threadkeys(): returns the list of keys that are being automatically updated in a background thread append(key,value=None,period=3000): adds a new key to the dictionary and to the list of keys to be updated __setitem__(key,value):tdict[key]=value will also add a new key to the dictionary, but this key value will not be automatically updated. timewait is a pause inserted between readings If read_method defined: If threaded: the Thread inserts data in the dictionary, __getitem__ retrieves this data else: __getitem__ directly executes read_method else: __getitem__ as a normal thread-safe dictionary If write_method defined: any __setitem__ call executes the write_method (dictionary is no affected) else: __setitem__ as a normal thread-safe dictionary @deprecated now in tau.core.utils.containers ''' def __init__(self,other=None,read_method=None,write_method=None, timewait=0.1,threaded=True,trace=False,delay=0.): self.read_method = read_method self.write_method = write_method self.timewait = timewait self.threaded = threaded self.delay = delay self._threadkeys = [] self._periods = {} self._updates = {} self.trace = trace self.last_update = 0 self._last_read = '' self.last_cycle_start = 0 self.cycle_count = 0 self.cycle_average = 0 self.event = threading.Event() self.parent = type(self).mro()[1] #equals to self.__class__.__base__ or type(self).__bases__[0] if other: dict.update(self,other) def tracer(self,text,level=0): #if isinstance(self.trace,int) and level<self.trace: print text #@self_locked def start(self): if not self.threaded: print 'ThreadDict.start(): This Dict has no Thread!' return if hasattr(self,'_Thread') and self._Thread and self._Thread.isAlive(): print 'ThreadDict.start(): ThreadDict.stop() must be executed first!' return if self.delay: self.event.wait(self.delay) print 'In ThreadDict.start(), keys are: %s' % self.threadkeys() self.event.clear() self._Thread = threading.Thread(target=self.run) self._Thread.setDaemon(True) self._Thread.start() self.tracer('ThreadDict started!') #@self_locked def stop(self): print 'Stopping ThreadDict ...' 
if self.threaded and hasattr(self,'event'): #print('event set') self.event.set() self.event.wait(5.e-3) if hasattr(self,'_Thread'): #print('thread join') self._Thread.join() print 'ThreadDict Stopped' def alive(self): if not hasattr(self,'_Thread') or not self._Thread: return None #Thread never started else: return self._Thread.isAlive() #True or False def __del__(self): self.stop() def run(self): self.tracer('In ThreadDict.run()') self.event.wait(self.get_timewait()) while not self.event.isSet(): self.set_last_cycle_start(time.time()) keys = sorted(self.threadkeys()) if self._last_read and self._last_read!=keys[-1]: #Thread stopped before finishing the cycle! keys = keys[keys.index(self._last_read)+1:] for k in keys: if self.trace: self.tracer('ThreadDict::run(): updating %s'%str(k)) try: if (time.time()-self._updates.get(k,0))>self._periods.get(k,0): #if period=0 or if not updated the condition applies self.__getitem__(k,hw=True) except Exception,e: if self.trace: print('!!! ThreadDict Exception !!!'+'\n'+'%s ...'%str(traceback.format_exc())[:1000]) self.__setitem__(k,e,hw=False) finally: self._last_read = k if self.event.isSet(): break else: self.event.wait(self.timewait) if self.event.isSet(): break try: self.cycle_average = (((self.cycle_average*self.cycle_count) + (time.time()-self.last_cycle_start)) / (self.cycle_count+1)) self.cycle_count += 1 except: traceback.print_exc() self.event.wait(self.get_timewait()) self.tracer('End of ThreadDict') @self_locked def get_last_update(self): return self.last_update @self_locked def set_last_update(self,value): self.last_update=value @self_locked def get_last_cycle_start(self): return self.last_cycle_start @self_locked def set_last_cycle_start(self,value): self.last_cycle_start=value @self_locked def get_timewait(self): return self.timewait @self_locked def set_timewait(self,value): self.timewait=value @self_locked def append(self,key,value=None,period=0): """ args: key, value=None, period=0 periods shall be specified in seconds period=0 means that attribute will be updated every timewait*length """ if not dict.has_key(self,key): self.parent.__setitem__(self,key,value) if key not in self._threadkeys: self._threadkeys.append(key) self._periods[key] = period if self.trace: self.tracer('ThreadDict.append(%s,%s,%s)'%(key,value,period)) @self_locked def threadkeys(self): return self._threadkeys[:] @self_locked def __locked_setitem__(self,key,value): return dict.__setitem__(self,key,value) @self_locked def __locked_getitem__(self,key): return dict.__getitem__(self,key) @self_locked def __locked_getitem_hw__(self,key): return self.__getitem__(key,hw=True) def __getitem__(self,key,hw=False): ''' This method launches a read_method execution if there's no thread on charge of doing that or if the hw flag is set to True. ''' if self.trace: print 'In ThreadDict.__getitem__(%s,%s)'%(key,hw) if (hw or not self.threaded): #HW ACCESS MUST NOT BE DONE WITHOUT ASKING EXPLICITLY! (Use __setitem__(k,None) instead) self._updates[key] = time.time() if self.read_method: value = self.read_method(key) self.__locked_setitem__(key,value) self.set_last_update(self._updates[key]) return self.__locked_getitem__(key) def __setitem__(self,key,value,hw=True): ''' This method launches a write_method execution if the hw flag is not explicitly set to False. ''' if self.trace: print 'In ThreadDict.__setitem__(%s,...,%s)'%(key,hw) if hw and self.write_method: #It implies that a key will not be added here to read thread! 
nvalue = self.write_method(*[key,value]) self.__locked_setitem__(key,nvalue) else: self.__locked_setitem__(key,value) self.set_last_update(time.time()) @self_locked def get(self,key,default=None,hw=False): if hw: self.__locked_getitem_hw__(key) elif not self.threaded and self.read_method: dict.__setitem__(self,key,self.read_method(key)) self.last_update = time.time() if default is False: return dict.get(self,key) else: return dict.get(self,key,default) @self_locked def __delitem__(self, key): return dict.__delitem__(self, key) @self_locked def __contains__(self, key): return dict.__contains__(self, key) @self_locked def has_key(self, key): return dict.has_key(self, key) @self_locked def __iter__(self): return dict.__iter__(self) @self_locked def pop(self, key): return dict.pop(self, key) @self_locked def __str__(self): return "{" +",".join(["'"+str(k)+"'"+":"+"'"+str(v)+"'" for k,v in zip(dict.keys(self),dict.values(self))])+ "}" @self_locked def __repr__(self): return "{\n" +"\n,".join(["'"+str(k)+"'"+":"+"'"+str(v)+"'" for k,v in zip(dict.keys(self),dict.values(self))])+ "\n}" class defaultdict_fromkey(defaultdict): """ Creates a dictionary with a default_factory function that creates new elements using key as argument. Usage : new_dict = defaultdict_fromkey(method); where method like (lambda key: return new_obj(key)) Each time that new_dict[key] is called with a key that doesn't exist, method(key) is used to create the value Copied from PyAlarm device server @deprecated now in tau.core.utils.containers """ def __missing__(self, key): if self.default_factory is None: raise KeyError(key) try: self[key] = value = self.default_factory(key) except Exception,e: try: self[key] = value = self.default_factory() except: raise e return value class CaselessList(list): """ Python list with caseless index,contains,remove methods """ def _lowstreq(self,a,b): return (a==b) or (hasattr(a,'lower') and hasattr(b,'lower') and a.lower()==b.lower()) def __contains__(self,item): for k in self: if self._lowstreq(k,item): return True return False def index(self,item): for i,k in enumerate(self): if self._lowstreq(k,item): return i return None def __contains__(self,item): return self.index(item) is not None def remove(self,item): list.pop(self,self.index(item)) class CaselessDict(dict): """ Dictionary with caseless key resolution Copied from tau.core.utils.CaselessDict @deprecated now in tau.core.utils.containers """ def __init__(self, other=None): if other: try: # Doesn't do keyword args if hasattr(other,'items'): for k,v in other.items(): dict.__setitem__(self, k.lower() if hasattr(k,'lower') else k, v) else: for k,v in other: dict.__setitem__(self, k.lower() if hasattr(k,'lower') else k, v) except Exception,e: print 'CaselessDict(%s): failed!\n%s'%(type(other),e) raise e def __getitem__(self, key): key = str(key) if dict.has_key(self,key): return dict.__getitem__(self,key) return dict.__getitem__(self, key.lower() if hasattr(key,'lower') else key) def __setitem__(self, key, value): key = str(key) dict.__setitem__(self, key.lower() if hasattr(key,'lower') else key, value) def __contains__(self, key): key = str(key) return dict.__contains__(self, key.lower() if hasattr(key,'lower') else key) def has_key(self, key): key = str(key) return dict.has_key(self, key.lower() if hasattr(key,'lower') else key) def get(self, key, def_val=None): key = str(key) return dict.get(self, key.lower() if hasattr(key,'lower') else key, def_val) def setdefault(self, key, def_val=None): key = str(key) return dict.setdefault(self, 
key.lower() if hasattr(key,'lower') else key, def_val) def update(self, other): for k,v in other.items(): k = str(k) dict.__setitem__(self, k.lower() if hasattr(k,'lower') else k, v) def fromkeys(self, iterable, value=None): d = CaselessDict() for k in iterable: k = str(k) dict.__setitem__(d, k.lower() if hasattr(k,'lower') else k, value) return d def pop(self, key, def_val=None): key = str(key) return dict.pop(self, key.lower() if hasattr(key,'lower') else key, def_val) def __delitem__(self, k): k = str(k) dict.__delitem__(self, k.lower() if hasattr(k,'lower') else k) class CaselessDefaultDict(defaultdict_fromkey,CaselessDict): """ a join venture between caseless and defaultdict_fromkey This class merges the two previous ones. This declaration equals to: CaselessDefaultDict = type('CaselessDefaultType',(CaselessDict,defaultdict_fromkey),{}) """ def __getitem__(self, key): key = str(key) return defaultdict_fromkey.__getitem__(self, key.lower()) pass class DefaultThreadDict(defaultdict_fromkey,ThreadDict): """ a join venture between thread and default dict This class merges the two previous ones. @deprecated now in tau.core.utils.containers @todo This two classes are not yet well integrated ... the way a new key is added to the dict must be rewritten explicitly. """ def __init__(self,other=None,default_factory=None,read_method=None,write_method=None,timewait=0.1,threaded=True): defaultdict_fromkey.__init__(self,default_factory) ThreadDict.__init__(self,other,read_method,write_method,timewait,threaded) pass ################################################################################################## class SortedDict(dict): """ This class implements a dictionary that returns keys in the same order they were inserted. """ def __init__(self,other=None): dict.__init__(self) self._keys = [] if other is not None: self.update(other) return def sort(self,key): """ This method modifies the sorting of the dictionary overriding the existing sort key. :param key: it can be a sequence containing all the keys already existing in the dictionary or a callable providing a sorting key algorithm. 
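        A short illustrative sketch (added; the names below are hypothetical,
        not from the original docs):

            sd = SortedDict()
            sd['b'] = 1; sd['a'] = 2     # keys kept in insertion order: ['b', 'a']
            sd.sort(['a', 'b'])          # reorder with an explicit sequence of existing keys
            sd.sort(str.lower)           # or pass a callable, used as the key for sorted()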
""" import operator if operator.isCallable(key): self._keys = sorted(self._keys,key=key) else: for k in self._keys: if k not in self._keys: raise KeyError(k) self._keys = list(key) return self._keys[:] def __setitem__(self,k,v): if k not in self._keys: self._keys.append(k) dict.__setitem__(self,k,v) def update(self,other): if hasattr(other,'items'): other = other.items() for k,v in other: self.__setitem__(k,v) @staticmethod def fromkeys(S,v=None): return SortedDict((s,v) for s in S) def insert(self,index,key,value): """Insert key,value at given position""" if key in self: self.pop(key) self._keys.insert(index,key) dict.__setitem__(self,key,value) def pop(self,k,d=None): """Removes key and returns its (self[key] or d or None)""" if k not in self: return d self._keys.remove(k) return dict.pop(self,k) def popitem(self): """Removes and returns last key,value pair""" k = self._keys[-1] v = self[k] self.pop(k) return k,v def clear(self): self._keys = [] return dict.clear(self) def keys(self): return self._keys[:] def values(self): return [self[k] for k in self._keys] def items(self): return [(k,self[k]) for k in self._keys] def __iter__(self): return (i for i in self._keys) def iteritems(self): return ((k,self[k]) for k in self._keys) def iterkeys(self): return (i for i in self._keys) def itervalues(self): return (self[k] for k in self._keys) class CaselessSortedDict(SortedDict,CaselessDict): """ This class implements a dictionary that returns keys in the same order they were inserted. """ def __init__(self,other=None): dict.__init__(self) self._keys = [] if other is not None: CaselessDict.__init__(self,other) return @staticmethod def caseless(key): return str(key).lower() def sort(self,key): """ This method modifies the sorting of the dictionary overriding the existing sort key. :param key: it can be a sequence containing all the keys already existing in the dictionary or a callable providing a sorting key algorithm. """ import operator if operator.isCallable(key): self._keys = sorted(self._keys,key=key) else: for k in self._keys: if k not in self._keys: raise KeyError(k) self._keys = list(key) return self._keys[:] def __setitem__(self,k,v): k = self.caseless(k) if k not in self._keys: self._keys.append(k) dict.__setitem__(self,k,v) def update(self,other): if hasattr(other,'items'): other = other.items() for k,v in other: self.__setitem__(k,v) @staticmethod def fromkeys(S,v=None): S = map(self.caseless,S) return SortedDict((s,v) for s in S) def pop(self,k,d=None): """Removes key and returns its (self[key] or d or None)""" k = self.caseless(k) if k not in self: return d self._keys.remove(k) return dict.pop(self,k) ################################################################################################## def reversedict(dct,key=None,default=None): #it just exchanges keys/values in a dictionary if key is None: return dict((v,k) for k,v in dct.items()) for k,v in dct.items(): if v == key: return k return default class ReversibleDict(object):#dict): """ Dictionary that searches in both directions within a list of tuples acting like a nested dictionary. * Negative indexes and levels are used to reverse the direction of key,values retrieval (i>=0: left-to-right; i<0: rigth-to-left) * Both directions are not exclusive!!! ... 
search direction is managed by the tuple index sign (+/-) * The _level is always positive, and could relate to both sides of the tuple [+level,-(level-1)][i<0] * When no sublevel is entered, keys() returns both sides of each tuple * Due to this behaviour may be: len(self) < len(self.items()) * It inherits from dictionary just to be recognized by isinstance! * keys()[x] != values()[x] ... use .items() or iteritems() when you need to match it * There's no check for duplicate keys, if there's a duplicate key then the first match is returned :TODO : A nice search algorithm in sorted tables would speed up retrieving a lot dict methods implemeted: __contains__,__delitem__,__getitem__,__iter__,__len__,__repr__,__setitem__,__str__ get,has_key,items,iteritems,iterkeys,itervalues,keys,pop,values NotImplemented: clear,copy,fromkeys,setdefault,update,popitem """ DEFAULT = None SORTKEY = lambda s: ord(s[0])%3 def __init__(self, table = None, subset = None, index = None, level = 0, sorted = False, trace = False): """ :param table: The table must be a list of tuples having all the same length and must be sorted! :param subset: it will be a list of lines to be searched, the rest will be ignored; used for nesting :param index: the index in tuples to be searched, if None both begin (0) and end (-1) are searched :param level: always a positive value """ table,subset,index = table or [], subset or (), index or [] #There are strange problems if persistent types are used as __init__ arguments!?! if isinstance(table,ReversibleDict): table = table.data() elif hasattr(table,'items'): table = [t for t in table.items()] #Updating from a dictionary #Attributes persistent self._depth = table and len(table[0]) or 0 self._table = table or [] self._sorted = index or sorted self.trace(trace) if sorted and not index: self._index = self.sort(True) else: self._index = index #Attributes that changed in sub-instances self._level = level #It's always positive! self._subset = subset def update(self, other): if not other: return if hasattr(other,'iteritems'): [self.set(*t) for t in other.iteritems()] else: [self.set(*t) for t in other] #def __new__(klass,*args): #return klass(*args) def __len__(self): """ It returns the number of raw lines, not keys or values """ return len(self._subset or self._table) def range(self,full=True): """ And appropiated iterator to check all lines in the table """ return self._subset or range(full and -len(self._table) or 0,len(self._table)) def __repr__(self): return '{'+',\n'.join('\t%s%s:%s'%(k,'',v) for k,v in self.iteritems())+'}' def __str__(self): return self.__repr__() def trace(self,val=None): if val in (True,False): self._trace = val return self._trace def sort(self,update=False): """ creates indexes of the keys at each level using the self.SORTKEY method for each level there's a dictionary {SORTKEY(s):[lines where SORTKEY(key)==SORTKEY(s)]} SORTKEY is used instead of key to improve retrieve times. 
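        Intended layout of the resulting index (added illustration; row indexes
        point into self._table):

            self._index = {
                0:  {SORTKEY(key_at_level_0): set_of_row_indexes, ...},
                -1: {...},   # same dict object, shared with level 0
                1:  {SORTKEY(key_at_level_1): set_of_row_indexes, ...},
                -2: {...},   # shared with level 1
                ...
            }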
""" if update: self._index = {} for l in range(self._depth): self._index[l] = self._index[-(l+1)] =defaultdict(set) for i in self.range(): self._index[l][self.SORTKEY(self._table[i][l])].add(i) self._sorted = True return self._index def prune(self,filters=[]): """ This method should do a cleanup of all repeated key-chains in both directions (or delete anything matched by filters) """ raise Exception,'NotImplemented' def depth(self): return self._depth def data(self): return self._table def size(self): return len(self._table) def sorted(self): return self._sorted def nextlevel(self,i=None): if i is not None and i<0: return self.level(i)-1 else: return self._level+1 #(1 if direction>=0 else -1) #Level always positive def prevlevel(self,i=None): if i is not None and i<0: return self.level(i)+1 else: return self._level-1 #(1 if directionl>=0 else -1) #Level always positive #def iterlevel(self): #""" Returns (level,) or if level is None then it will try left-to-right and right-to-left first levels """ #return ((0,-1) if self._level is None else (self._level,)) def level(self,i = None): """ The direction depends on the sign of the tuple index """ if i is not None and i<0: return -(self._level+1) else: return self._level def last(self): return self.nextlevel()==self.depth()-1 def __iter__(self): """ It returns distinct keys """ previous = set() for i in self.range(): k = self._table[i][self.level(i)] if k in previous: continue previous.add(k) yield k pass def iterkeys(self): return self.__iter__() def keys(self): return set([k for k in self.__iter__()]) def keysets(self,key=None): """ It returns a dictionary of {key:[index set]} at actual level. The sign +/- of the index refers to left-to-right/right-to-left order The level/direcition is not initialized until a key is found by this method. """ keys = defaultdict(set) if key is None: #Don't check it at every loop! 
for i in self.range(): [keys[self._table[i][self.level(i)]].add(i) for i in self.range()] else: #Searching for a given key for i in self.range(): j = self.level(i) if self._table[i][j]!=key: continue keys[self._table[i][j]].add(i) return keys def itervalues(self): """ It returns values for actual keys """ if self.level()==self.depth()-1: for i in self.range(): yield self._table[i][self.level(i)] else: for ks in self.keysets().values(): yield ReversibleDict(table=self._table,index=self._index,subset=ks,level=self.nextlevel(),trace=self._trace) pass def values(self): return [v for v in self.itervalues()] def iteritems(self): """ returns key,value pairs at self.level() """ if self.nextlevel()==self.depth()-1: for i in self.range(): yield self._table[i][self.level(i)],self._table[i][self.nextlevel(i)] #Last key,value pair else: for k,ks in self.keysets().items(): yield k,ReversibleDict(table=self._table,index=self._index,subset=ks,level=self.nextlevel(),trace=self._trace) pass def items(self): return [t for t in self.iteritems()] def line(self,i): """ It returns an arranged tuple slice of the selected index of the table :param i: it must be the RAW (positive or negative) index of the line """ if i>self.size(): i = i-2*self.size() #converting to a negative index rightleft = i<0 #left-to-right or right-to-left order t = self._table[i] if not self._level: return tuple(reversed(t)) if rightleft else t else: level = self.level(i) if rightleft: return (t[level],)+tuple(reversed(t[-self._depth:level])) else: return t[level:self._depth] def iterlines(self): """ Instead of returning key,value pairs it returns a tuple with self.depth() values """ for i in self.range(full=False): yield self.line(i) def lines(self): """ Instead of returning key,value pairs it returns a tuple with self.depth() values """ return [i for i in self.iterlines()] def has_key(self,key): """ Implemented separately of __getitem__ to be more efficient. """ for i in self.range(full=True): if self._table[i][self.level(i)]==key: return True return False def __contains__(self, key): return self.has_key(key) def get(self, *keys): """Arguments are keys separated by commas, it is a recursive call to __getitem__ """ if len(keys)>self._depth: return self.DEFAULT try: v = self[keys[0]] if isinstance(v,ReversibleDict): return v.get(*keys[1:]) else: return v except: return self.DEFAULT def __getitem__(self, key,raw=False): """ It scans the dict table in both directions, returning value or a ReversibleDict instance """ ks = self.keysets(key=key) if not ks.get(key,[]): raise Exception,'KeyNotFound(%s)'%str(key) if self.nextlevel() == self.depth()-1: i = ks[key].pop() return self._table[i][self.nextlevel(i)] #Returning a first/last element of tuple else: return ReversibleDict(table=self._table,subset=ks[key],index=self._index,level=self.nextlevel(),trace=self._trace) #Returning a ReversibleDict with the subset of tuples that matched previous searches. def set(self, *keys): """ Arguments are values separated by commas, it is a recursive call to __setitem__ """ if len(keys)==1 and any(isinstance(keys[0],t) for t in (list,tuple,set)): keys = tuple(keys[0]) self[keys[0]] = keys[1:] def __setitem__(self, key, value): """ It may accept two ways: * Entering a tuple of length = depth-level * Entering directly the last value (level = depth-1); it will override existing ones!!! 
* Order will depend of the previously inserted tuples for the same key * If key doesn't exist it will be added as a left-to-right tuple """ #print 'In ReversibleDict.__setitem__(%s,%s), level is %s'%(key,value,self.level()) #Checking all the conditions for the arguments if not hasattr(value,'__iter__') or isinstance(value,str): value = (value,) elif not isinstance(value,tuple): value = tuple(value) if not len(value): raise Exception,'EmptyTuple!' elif self._table and (len(value))!=self.depth()-self.level()-1: raise Exception,'WrongTupleSize(%s!=%s)' % (len(value),self.depth()-self.level()-1) if self._trace: print 'In ReversibleDict[%s] = %s' % (key,value) #Creating a new table if the dict was empty if not self._table: self._table.append((key,)+value) self._depth = 1+len(value) if self._trace: print 'Creating a table ...' #Check if the key already exist elif self.has_key(key): if self._trace: print 'Updating a key ...' i = iter(self.keysets(key)[key]).next() if self.last(): #If it's a final leaf the value is overriden self._table[i] = (self._table[i][:self.nextlevel(i)]+value) if i>=0 else (value+self._table[i][self.level(i):]) else: #If not the tuple is passed to the next dictionary return self[key].__setitem__(value[0],value[1:]) #if i>=0: #else: return self[key].__setitem__(value[-1],value[:-1]) #The key exists but in reversed order (only for root dictionary) elif self.level() in (0,-1) and self.has_key(value[-1]): if self._trace: print 'Inserting reversed key ...' self[value[-1]]=tuple(reversed(value[:-1]))+(key,) #Adding new keys elif self.level(): i = iter(self._subset).next() #print 'adding new key %s at level %s, i = %s' % (key,self.level(i),i) if i>=0: self._table.append(self._table[i][:self.level(i)]+(key,)+value) else: self._table.append(tuple(reversed(value))+(key,)+ self._table[i][self.level(i)+1:] ) #+1 because slices are not symmetric! if self._trace: print 'Adding a new key ...' else: if self._trace: print 'Adding a new line ...' self._table.append((key,)+value) def __del__(self): del self._table def __delitem__(self, k): raise Exception,'NotImplemented!' def setdefault(self, key, def_val=None): raise Exception,'NotImplemented!' def fromkeys(self, iterable, value=None): raise Exception,'NotImplemented!' def pop(self, key, def_val=None): raise Exception,'NotImplemented!' ################################################################################################## """ enumeration.py: borrowed from tcoutinho@cells.es tau.core.utils library Enumeration module. In C, enums allow you to declare a bunch of constants with unique values, without necessarily specifying the actual values (except in cases where you need to). Python has an accepted idiom that's fine for very small numbers of constants (A, B, C, D = range(4)) but it doesn't scale well to large numbers, and it doesn't allow you to specify values for some constants while leaving others unspecified. This approach does those things, while verifying that all values (specified and unspecified) are unique. Enum values then are attributes of an Enumeration class (Insect.BEETLE, Car.PASSAT, etc.). """ import types class EnumException(Exception): pass class Enumeration: """ @DEPRECATED: Use python Enum type instead! Enumeration class intended to provide the 'enum' feature present in many programming languages. 
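    A minimal construction sketch (added for illustration; the 'Insect' names
    below are hypothetical, and the original Usage notes follow):

        Insect = Enumeration("Insect", [("ANT", 1), "BEETLE", ("SPIDER", 4)])
        Insect.BEETLE        # unspecified names get an unused value (here 0)
        Insect["ANT"]        # -> 1
        Insect.whatis(4)     # -> 'SPIDER'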
    Usage:
        car = ThingWithType(fruit.Lemon)
        print whatkind(fruit.type, Lemon)
        bug = ThingWithType(Insect.BEETLE)
        print whatkind(bug.type, Insect)

    Notice that car's and bug's attributes don't include any of the enum
    machinery, because that machinery is all CLASS attributes and not INSTANCE
    attributes. So you can generate thousands of cars and bugs with reckless
    abandon, never worrying that time or memory will be wasted on redundant
    copies of the enum stuff.

        print car.__dict__
        print bug.__dict__
        pprint.pprint(Cars.__dict__)
        pprint.pprint(Insect.__dict__)
    """
    def __init__(self, name, enumList):
        self.__doc__ = name
        lookup = { }
        reverseLookup = { }
        uniqueNames = [ ]
        self._uniqueValues = uniqueValues = [ ]
        self._uniqueId = 0
        # First pass: entries given as explicit (name, value) tuples
        for x in enumList:
            if type(x) == types.TupleType:
                x, i = x
                if type(x) != types.StringType:
                    raise EnumException("enum name is not a string: %s" % str(x))
                if type(i) != types.IntType:
                    raise EnumException("enum value is not an integer: %s" % str(i))
                if x in uniqueNames:
                    raise EnumException("enum name is not unique: %s" % str(x))
                if i in uniqueValues:
                    raise EnumException("enum value is not unique for %s" % str(x))
                uniqueNames.append(x)
                uniqueValues.append(i)
                lookup[x] = i
                reverseLookup[i] = x
        # Second pass: bare names get auto-generated unique values
        for x in enumList:
            if type(x) != types.TupleType:
                if type(x) != types.StringType:
                    raise EnumException("enum name is not a string: %s" % str(x))
                if x in uniqueNames:
                    raise EnumException("enum name is not unique: %s" % str(x))
                uniqueNames.append(x)
                i = self.generateUniqueId()
                uniqueValues.append(i)
                lookup[x] = i
                reverseLookup[i] = x
        self.lookup = lookup
        self.reverseLookup = reverseLookup

    def generateUniqueId(self):
        while self._uniqueId in self._uniqueValues:
            self._uniqueId += 1
        n = self._uniqueId
        self._uniqueId += 1
        return n

    def __getitem__(self, i):
        if type(i) == types.IntType:
            return self.whatis(i)
        elif type(i) == types.StringType:
            return self.lookup[i]

    def __getattr__(self, attr):
        if not self.lookup.has_key(attr):
            raise AttributeError
        return self.lookup[attr]

    def whatis(self, value):
        return self.reverseLookup[value]

from . import doc
__doc__ = doc.get_fn_autodoc(__name__,vars())
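

if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not part of the original
    # module). ThreadDict polls read_method in a background thread and calls
    # write_method on assignment; read_hw/write_hw below are hypothetical
    # stand-ins for real hardware access.
    import time

    def read_hw(key):
        return '%s read at %s' % (key, time.time())

    def write_hw(key, value):
        print 'writing %s = %s' % (key, value)
        return value

    td = ThreadDict(read_method=read_hw, write_method=write_hw, timewait=0.1)
    td.append('temperature', period=3)   # refreshed at most every 3 seconds
    td.start()
    time.sleep(1.)
    print td['temperature']              # value cached by the update thread
    td['setpoint'] = 25                  # triggers write_hw('setpoint', 25)
    td.stop()

    sd = SortedDict([('b', 1), ('a', 2)])
    print sd.keys()                      # -> ['b', 'a'], insertion order kept

    cd = CaselessDict({'Host': 'lab01'})
    print cd['HOST']                     # -> 'lab01'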
Experiment.py
# -*- coding: utf-8 -*- # !/usr/bin/env python from tkinter import * import random import time from time import gmtime, strftime import zmq import json import queue import threading import pandas as pd import numpy as np import math import matlab.engine from calibrate import Calibrate class Ball: def __init__(self, canvas, color, eng): self.eng = eng self.color = color self.canvas = canvas self.id = canvas.create_oval(10, 10, 25, 25, fill=color) self.canvas.move(self.id, 245, 100) # 来回反弹 # --self.x = 0 # --self.y = -1 starts = [-5, -2, -1, 1, 2, 3] random.shuffle(starts) self.x = starts[0] self.y = -3 self.posBall_x = 0 self.posBall_y = 0 # winfo_height()函数来获取画布当前的高度,赋值给对象变量 self.canvas_height = self.canvas.winfo_height() # 获取X轴坐标 self.canvas_width = self.canvas.winfo_width() print('canvas: ' + str(self.canvas_width) + ' ' + str(self.canvas_height)) self.queue = queue.Queue() self.gazeSubscriber = GazeSubscriber(self.queue, self.eng) self.count_calibrate = 1 def draw_experiment(self, index): self.canvas.delete(self.id) self.id = self.canvas.create_oval(10, 10, 25, 25, fill=self.color) x_ball = [200, 1336, 768, 1336, 200] y_ball = [200, 200, 432, 664, 664] self.canvas.move(self.id, x_ball[index], y_ball[index]) print('Experiment: ' + str(x_ball[index]) + ' ' + str(y_ball[index])) self.posBall = [x_ball[index], y_ball[index]] msg = self.posBall self.queue.put(msg) def draw_class(self): self.canvas.delete(self.id) self.id = self.canvas.create_oval(10, 10, 25, 25, fill=self.color) x_ball = random.randint(1, 4) * (self.canvas_width / 8) + self.canvas_width / 16 y_ball = random.randint(0, 4) * (self.canvas_height / 5) + self.canvas_height / 10 self.canvas.move(self.id, x_ball, y_ball) print('Classification: ' + str(x_ball) + ' ' + str(y_ball)) self.posBall = [x_ball, y_ball] msg = self.posBall self.queue.put(msg) def draw(self): self.canvas.move(self.id, self.x, self.y) # 获取某个对象在画布的坐标,返回一个数组(两个坐标,左上角的坐标和右下角的两个坐标) pos = self.canvas.coords(self.id) posBall_x = 0.5 * (pos[0] + pos[2]) posBall_y = 0.5 * (pos[1] + pos[3]) self.posBall = [posBall_x, posBall_y] msg = self.posBall self.queue.put(msg) print("Queue size: " + str(self.queue.qsize())) # 打印获取的坐标 print("Pos: " + str(pos)) # 如果最上面的纵轴坐标在顶上,则往下移动一个像素 if pos[1] <= 0: self.y = 2 # 如果最下面的纵轴坐标在底上,则向上移动 if pos[3] > self.canvas_height: self.y = -2 # 宽度控制# # 如果在左边框了,那么向右边移动3像素 if pos[0] <= 0: self.x = 5 # 如果到右边框了,左移动3像素 if pos[2] > self.canvas_width: self.x = -5 class GazeSubscriber: def __init__(self, queue, eng): self.eng = eng port = "5570" self.context = zmq.Context() self.socket = self.context.socket(zmq.SUB) print("Collecting head pose updates...") self.socket.connect("tcp://127.0.0.1:%s" % port) self.socket.setsockopt_string(zmq.SUBSCRIBE, u'') self.lastScrollTime = 0 self.myColumns = ['TimeStamp', 'BallPosX', 'BallPosY', 'GazePointX', 'GazePointY'] self.df = pd.DataFrame(columns=self.myColumns) self.entry = dict(zip(self.myColumns, [np.nan] * 4)) self.dataPath = '.\\experimentData\\' self.dataName = 'Experiment' + strftime("%Y-%m-%d-%H-%M", gmtime()) + '.csv' self.running = True self.started = False self.thread1 = threading.Thread(target=self.threadSubscribe) self.queue = queue self.thread1.start() self.thread2 = threading.Thread(target=self.threadPos) self.thread2.start() def stop(self): self.running = False print(self.dataName) print('Rows: ' + str(self.df.shape[0])) if self.df.shape[0] > 1: self.df.to_csv(self.dataPath + self.dataName) def threadSubscribe(self): while self.running: if self.started: msg = self.socket.recv_multipart() data = 
json.loads(msg[0]) timestamp = data['timestamp'] headPose = data['pose'] gaze = data['gaze'] gaze_angle_x = gaze['gaze_angle_x'] * 180 / 3.1415926 gaze_angle_y = gaze['gaze_angle_y'] * 180 / 3.1415926 gaze_point_x = gaze['gaze_screen_x'] gaze_point_y = gaze['gaze_screen_y'] # gazeCoordinates = self.eng.ProcessTForm(gaze_point_x, gaze_point_y, nargout=2) # print(gazeCoordinates) print("BallPos: " + str(self.ballPos)) self.entry['TimeStamp'] = time.time() # (datetime.now(timezone.utc) + timedelta(days=3)).timestamp() self.entry['BallPosX'] = self.ballPos[0] self.entry['BallPosY'] = self.ballPos[1] self.entry['GazePointX'] = gaze_point_x self.entry['GazePointY'] = gaze_point_y # print(self.entry) self.df = self.df.append(self.entry, ignore_index=True) # print(self.df) def threadPos(self): while self.running: if self.queue.qsize() > 0: try: self.ballPos = self.queue.get(0) print(self.started) if self.started == False: self.started = True except queue.Empty: pass time.sleep(0.001) class Experiment: def experiment(eng): # Create Canvas tk = Tk() tk.title("Calibrate") tk.resizable(0, 0) tk.wm_attributes("-topmost", 1, "-fullscreen", 1) # bd=0,highlightthickness=0 No border around canvas canvas = Canvas(tk, width=1920, height=1080, bd=0, highlightthickness=0) canvas.pack() tk.update() # Create ball ball = Ball(canvas, 'red', eng) i = 0 count = 1000 while i < count: ball.draw_experiment(math.floor(i / 200)) tk.update_idletasks() tk.update() time.sleep(0.01) i = i + 1 ball.gazeSubscriber.stop() tk.destroy() mean_error = eng.ExperimentData() print('Mean Error: ' + str(mean_error) + 'px') if __name__ == '__main__': print("Start MatLab Engine:\n ") eng = matlab.engine.start_matlab() print("Start Calibrate:\n ") Calibrate.calibrate(eng) print("Start experiment:\n") Experiment.experiment(eng)
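

# Hedged helper (added for illustration only; nothing above calls it). It
# sketches the message shape threadSubscribe() expects on tcp://127.0.0.1:5570:
# a single JSON frame with 'timestamp', 'pose' and a 'gaze' dict carrying
# gaze_angle_x/y (radians) and gaze_screen_x/y (pixels). All values below are
# made up; in the real setup the gaze tracker is the publisher.
def _publish_fake_gaze_sample():
    ctx = zmq.Context.instance()
    pub = ctx.socket(zmq.PUB)
    pub.bind("tcp://127.0.0.1:5570")   # GazeSubscriber connects to this port
    time.sleep(0.5)                    # give the SUB socket time to connect
    sample = {
        'timestamp': time.time(),
        'pose': {},                    # head pose is received above but not used
        'gaze': {
            'gaze_angle_x': 0.05,
            'gaze_angle_y': -0.02,
            'gaze_screen_x': 960,
            'gaze_screen_y': 540,
        },
    }
    pub.send_multipart([json.dumps(sample).encode('utf-8')])
    pub.close()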
Blinker.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import json import time as _time import socket import threading from Blinker.BlinkerConfig import * from Blinker.BlinkerDebug import * from BlinkerUtility.BlinkerUtility import * # from BlinkerAdapters.BlinkerBLE import * # from BlinkerAdapters.BlinkerLinuxWS import * # from BlinkerAdapters.BlinkerMQTT import * # from threading import Thread # from zeroconf import ServiceInfo, Zeroconf # from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket class Protocol(): def __init__(self): self.conType = "BLINKER_WIFI" self.proto1 = None self.proto2 = None self.conn1 = None self.conn2 = None # self.debug = BLINKER_DEBUG self.msgFrom = None self.msgBuf = None self.sendBuf = '' self.isFormat = False self.autoFormatFreshTime = millis() self.state = CONNECTING self.isAvail = False self.isRead = False self.isThreadStart = False self.thread = None self.Buttons = {} self.Sliders = {} self.Toggles = {} self.Numbers = {} self.Texts = {} self.Datas = {} self.dataTime = BLINKER_DATA_FREQ_TIME self.dataCount = 0 self.dataTimes = 0 self.dataTimesLimit = 0 self.dataStorageFunc = None # self.Joystick = [BLINKER_JOYSTICK_VALUE_DEFAULT, BLINKER_JOYSTICK_VALUE_DEFAULT] self.Joystick = {} self.Ahrs = [0, 0, 0, False] self.GPS = ["0.000000", "0.000000"] self.RGB = {} self.dataFunc = None self.heartbeatFunc = None self.summaryFunc = None self.aliType = None self.duerType = None self.miType = None self.aliPowerSrareFunc = None self.aliSetColorFunc = None self.aliSetModeFunc = None self.aliSetcModeFunc = None self.aliSetBrightFunc = None self.aliRelateBrightFunc = None self.aliSetColorTempFunc = None self.aliRelateColorTempFunc = None self.aliQueryFunc = None self.duerPowerSrareFunc = None self.duerSetColorFunc = None self.duerSetModeFunc = None self.duerSetcModeFunc = None self.duerSetBrightFunc = None self.duerRelateBrightFunc = None # self.aliSetColorTempFunc = None # self.aliRelateColorTempFunc = None self.duerQueryFunc = None self.miPowerSrareFunc = None self.miSetColorFunc = None self.miSetModeFunc = None self.miSetcModeFunc = None self.miSetBrightFunc = None #self.mtRelateBrightFunc = None self.miSetColorTempFunc = None #self.mtRelateColorTempFunc = None self.miQueryFunc = None bProto = Protocol() class BlinkerPY: def mode(self, setType = "BLINKER_WIFI"): bProto.conType = setType if bProto.conType == "BLINKER_BLE": import BlinkerAdapters.BlinkerBLE as bBLE bProto.proto1 = bBLE bProto.conn1 = bProto.proto1.BlinkerBLEService() # elif bProto.conType == BLINKER_WIFI: # import BlinkerAdapters.BlinkerLinuxWS as bWS # bProto.proto1 = bWS # bProto.conn1 = bProto.proto1.WebSocketServer() elif bProto.conType == "BLINKER_MQTT" or bProto.conType == "BLINKER_WIFI": bProto.conType = "BLINKER_MQTT" import BlinkerAdapters.BlinkerLinuxWS as bWS import BlinkerAdapters as bMQTT bProto.proto1 = bMQTT bProto.proto2 = bWS bProto.conn1 = bProto.proto1.MQTTClient() bProto.conn2 = bProto.proto2.WebSocketServer(BLINKER_DIY_MQTT) def aliType(self, _type): if _type == 'BLINKER_ALIGENIE_LIGHT': bProto.aliType = '&aliType=light' elif _type == 'BLINKER_ALIGENIE_OUTLET': bProto.aliType = '&aliType=outlet' elif _type == 'BLINKER_ALIGENIE_MULTI_OUTLET': bProto.aliType = '&aliType=multi_outlet' elif _type == 'BLINKER_ALIGENIE_SENSOR': bProto.aliType = '&aliType=sensor' def duerType(self, _type): if _type == 'BLINKER_DUEROS_LIGHT': bProto.duerType = '&duerType=LIGHT' elif _type == 'BLINKER_DUEROS_OUTLET': bProto.duerType = '&duerType=SOCKET' elif _type == 
'BLINKER_DUEROS_MULTI_OUTLET': bProto.duerType = '&duerType=MULTI_SOCKET' elif _type == 'BLINKER_DUEROS_SENSOR': bProto.duerType = '&duerType=AIR_MONITOR' def miType(self, _type): if _type == 'BLINKER_MIOT_LIGHT': bProto.miType = '&miType=light' elif _type == 'BLINKER_MIOT_OUTLET': bProto.miType = '&miType=outlet' elif _type == 'BLINKER_MIOT_MULTI_OUTLET': bProto.miType = '&miType=multi_outlet' elif _type == 'BLINKER_MIOT_SENSOR': bProto.miType = '&miType=sensor' # def debugLevel(level = BLINKER_DEBUG): # bProto.debug = level def begin(self, auth = None): if bProto.conType == "BLINKER_BLE": # return # bProto.proto1.bleProto.debug = bProto.debug # bProto.conn1.run() bProto.conn1.start() elif bProto.conType == "BLINKER_WIFI": # bProto.proto1.wsProto.debug = bProto.debug bProto.conn1.start() elif bProto.conType == "BLINKER_MQTT": # bProto.proto1.mProto.debug = bProto.debug # bProto.proto2.wsProto.debug = bProto.debug if auth : bProto.msgFrom = "BLINKER_MQTT" bProto.conn1.start(auth, bProto.aliType, bProto.duerType, bProto.miType) bProto.conn2.start(bProto.conn1.bmqtt.deviceName) bProto.conn1.run() else : BLINKER_ERR_LOG('Please input your device secret key!') # def thread_run(self): # if bProto.conType == "BLINKER_BLE": # bProto.conn1.run() # while True: # BlinkerPY.checkData(self) def checkData(self): if bProto.conType == "BLINKER_BLE": # return bProto.state = bProto.proto1.bleProto.state if bProto.proto1.bleProto.isRead is True: bProto.msgBuf = bProto.proto1.bleProto.msgBuf bProto.isRead = True bProto.proto1.bleProto.isRead = False BlinkerPY.parse(self) elif bProto.conType == "BLINKER_WIFI": bProto.state = bProto.proto1.wsProto.state if bProto.proto1.wsProto.isRead is True: bProto.msgBuf = str(bProto.proto1.wsProto.msgBuf) bProto.isRead = True bProto.proto1.wsProto.isRead = False BlinkerPY.parse(self) if bProto.conn1.bmqtt.isAliRead is True: bProto.msgBuf = bProto.conn1.bmqtt.msgBuf bProto.conn1.bmqtt.isAliRead = False BlinkerPY.aliParse(self) if bProto.conn1.bmqtt.isDuerRead is True: bProto.msgBuf = bProto.conn1.bmqtt.msgBuf bProto.conn1.bmqtt.isDuerRead = False BlinkerPY.duerParse(self) if bProto.conn1.bmqtt.isMiRead is True: bProto.msgBuf = bProto.conn1.bmqtt.msgBuf bProto.conn1.bmqtt.isMiRead = False BlinkerPY.miParse(self) elif bProto.conType == "BLINKER_MQTT": bProto.state = bProto.conn1.bmqtt.state if bProto.proto2.wsProto.state is CONNECTED: bProto.state = bProto.proto2.wsProto.state if bProto.conn1.bmqtt.isRead is True: bProto.msgBuf = bProto.conn1.bmqtt.msgBuf bProto.msgFrom = "BLINKER_MQTT" bProto.isRead = True bProto.conn1.bmqtt.isRead = False BlinkerPY.parse(self) if bProto.proto2.wsProto.isRead is True: bProto.msgBuf = str(bProto.proto2.wsProto.msgBuf) bProto.msgFrom = "BLINKER_WIFI" bProto.isRead = True bProto.proto2.wsProto.isRead = False BlinkerPY.parse(self) if bProto.conn1.bmqtt.isAliRead is True: bProto.msgBuf = bProto.conn1.bmqtt.msgBuf bProto.conn1.bmqtt.isAliRead = False BlinkerPY.aliParse(self) if bProto.conn1.bmqtt.isDuerRead is True: bProto.msgBuf = bProto.conn1.bmqtt.msgBuf bProto.conn1.bmqtt.isDuerRead = False BlinkerPY.duerParse(self) if bProto.conn1.bmqtt.isMiRead is True: bProto.msgBuf = bProto.conn1.bmqtt.msgBuf bProto.conn1.bmqtt.isMiRead = False BlinkerPY.miParse(self) def run(self): if bProto.isThreadStart is False: bProto.thread = threading.Thread(target=thread_run) bProto.thread.daemon = True bProto.thread.start() bProto.isThreadStart = True BlinkerPY.checkData(self) BlinkerPY.checkAutoFormat(self) # def wInit(name, wType): # if wType == W_BUTTON: # if 
name in bProto.Buttons: # return # else: # bProto.Buttons[name] = BLINKER_CMD_BUTTON_RELEASED # # BLINKER_LOG(bProto.Buttons) # elif wType == W_SLIDER: # if name in bProto.Sliders: # return # else: # bProto.Sliders[name] = 0 # # BLINKER_LOG(bProto.Sliders) # elif wType == W_TOGGLE: # if name in bProto.Toggles: # return # else: # bProto.Toggles[name] = False # elif wType == W_RGB: # if name in bProto.RGB: # return # else: # rgb = [0, 0, 0] # bProto.RGB[name] = rgb # BLINKER_LOG(bProto.Toggles) # def beginFormat(): # bProto.isFormat = True # bProto.sendBuf.clear() # def endFormat(): # bProto.isFormat = False # _print(bProto.sendBuf) # return checkLength(bProto.sendBuf) def checkLength(self, data): if len(data) > BLINKER_MAX_SEND_SIZE: BLINKER_ERR_LOG('SEND DATA BYTES MAX THAN LIMIT!') return False else: return True def _print(self, data): if BlinkerPY.checkLength(self, data) is False: return if bProto.conType == "BLINKER_BLE": bProto.conn1.response(data) elif bProto.conType == "BLINKER_WIFI": bProto.conn1.broadcast(data) elif bProto.conType == "BLINKER_MQTT" and bProto.msgFrom == "BLINKER_MQTT": if BLINKER_CMD_NOTICE in data: _state = True elif BLINKER_CMD_STATE in data: _state = True else: _state = False bProto.conn1.pub(data, _state) elif bProto.conType == "BLINKER_MQTT" and bProto.msgFrom == "BLINKER_WIFI": bProto.conn2.broadcast(data) BlinkerPY._parse(self, data) def print(self, key, value = None, uint = None): if value is None: if bProto.isFormat: return data = str(key) BlinkerPY._print(self, data) else: key = str(key) # if not uint is None: # value = str(value) + str(uint) # data = json_encode(key, value) # data = {} if bProto.isFormat == False: bProto.isFormat = True bProto.autoFormatFreshTime = millis() if (millis() - bProto.autoFormatFreshTime) < 100 : bProto.autoFormatFreshTime = millis() buffer = {} if bProto.sendBuf is not '' : buffer = json.loads(bProto.sendBuf) buffer[key] = value bProto.sendBuf = json.dumps(buffer) # # bProto.sendBuf[key] = value # # BLINKER_LOG_ALL("key: ", key, ", value: ", bProto.sendBuf[key]) BLINKER_LOG_ALL("sendBuf: ", bProto.sendBuf) # if bProto.isFormat is False: # _print(data) def checkAutoFormat(self): if bProto.isFormat : if (millis() - bProto.autoFormatFreshTime) >= 100 : # payload = {} # for key in bProto.sendBuf : # BLINKER_LOG_ALL(key, ", ", bProto.sendBuf[key]) BlinkerPY._print(self, json.loads(bProto.sendBuf)) BLINKER_LOG_ALL("auto format: ", json.loads(bProto.sendBuf)) bProto.sendBuf = '' bProto.isFormat = False def notify(self, msg): BlinkerPY.print(self, BLINKER_CMD_NOTICE, msg) def connected(self): if bProto.state is CONNECTED: return True else: return False def connect(self, timeout = BLINKER_STREAM_TIMEOUT): bProto.state = CONNECTING start_time = millis() while (millis() - start_time) < timeout: BlinkerPY.run(self) if bProto.state is CONNECTED: return True return False def disconnect(self): bProto.state = DISCONNECTED def delay(self, ms): start = millis() time_run = 0 while time_run < ms: BlinkerPY.run(self) time_run = millis() - start def available(self): return bProto.isAvail def attachData(self, func): bProto.dataFunc = func def attachHeartbeat(self, func): bProto.heartbeatFunc = func def attachSummary(self, func): bProto.summaryFunc = func def readString(self): bProto.isRead = False bProto.isAvail = False return bProto.msgBuf def times(self): return now() def parse(self): data = bProto.msgBuf if not data: return try: data = json.loads(data) BLINKER_LOG(data) if not isinstance(data, dict): raise TypeError() for key, value in 
data.items(): if key in bProto.Buttons: bProto.isRead = False bProto.Buttons[key].func(data[key]) elif key in bProto.Sliders: bProto.isRead = False bProto.Sliders[key].func(data[key]) # elif key in bProto.Toggles: # bProto.isRead = False # bProto.Toggles[key].func(data[key]) elif key in bProto.RGB: bProto.isRead = False BLINKER_LOG(bProto.RGB[key]) bProto.RGB[key].func(data[key][R], data[key][G], data[key][B], data[key][BR]) elif key in bProto.Joystick: bProto.isRead = False bProto.Joystick[key].func(data[key][J_Xaxis], data[key][J_Yaxis]) elif key == BLINKER_CMD_AHRS: # bProto.isAvail = False bProto.isRead = False bProto.Ahrs[Yaw] = data[key][Yaw] bProto.Ahrs[Pitch] = data[key][Pitch] bProto.Ahrs[Roll] = data[key][Roll] bProto.Ahrs[AHRS_state] = True # BLINKER_LOG(bProto.Ahrs) elif key == BLINKER_CMD_GPS: bProto.isRead = False bProto.GPS[LONG] = str(data[key][LONG]) bProto.GPS[LAT] = str(data[key][LAT]) elif key == BLINKER_CMD_GET and data[key] == BLINKER_CMD_VERSION: bProto.isRead = False BlinkerPY.print(self, BLINKER_CMD_VERSION, BLINKER_VERSION) elif key == BLINKER_CMD_GET and data[key] == BLINKER_CMD_STATE: bProto.isRead = False BlinkerPY.heartbeat(self) except ValueError: pass except TypeError: pass finally: if bProto.isRead: # bProto.isAvail = if bProto.dataFunc : bProto.dataFunc(data) # bProto.isAvail = False def aliParse(self): data = bProto.msgBuf if not data: return try: data = json.loads(data) BLINKER_LOG(data) # if data.has_key('set'): if 'get' in data.keys(): _num = 0 if 'num' in data.keys(): _num = int(data['num']) data = data['get'] if data == 'state': if bProto.aliType == '&aliType=multi_outlet': bProto.aliQueryFunc(BLINKER_CMD_QUERY_ALL_NUMBER, _num) else : bProto.aliQueryFunc(BLINKER_CMD_QUERY_ALL_NUMBER) elif data == 'pState': if bProto.aliType == '&aliType=multi_outlet': bProto.aliQueryFunc(BLINKER_CMD_QUERY_POWERSTATE_NUMBER, _num) else : bProto.aliQueryFunc(BLINKER_CMD_QUERY_POWERSTATE_NUMBER) elif data == 'col': if bProto.aliQueryFunc: bProto.aliQueryFunc(BLINKER_CMD_QUERY_COLOR_NUMBER) elif data == 'clr': if bProto.aliQueryFunc: bProto.aliQueryFunc(BLINKER_CMD_QUERY_COLOR_NUMBER) elif data == 'colTemp': if bProto.aliQueryFunc: bProto.aliQueryFunc(BLINKER_CMD_QUERY_COLORTEMP_NUMBER) elif data == 'bright': if bProto.aliQueryFunc: bProto.aliQueryFunc(BLINKER_CMD_QUERY_BRIGHTNESS_NUMBER) elif data == 'temp': if bProto.aliQueryFunc: bProto.aliQueryFunc(BLINKER_CMD_QUERY_TEMP_NUMBER) elif data == 'humi': if bProto.aliQueryFunc: bProto.aliQueryFunc(BLINKER_CMD_QUERY_HUMI_NUMBER) elif data == 'pm25': if bProto.aliQueryFunc: bProto.aliQueryFunc(BLINKER_CMD_QUERY_PM25_NUMBER) elif data == 'mode': if bProto.aliQueryFunc: bProto.aliQueryFunc(BLINKER_CMD_QUERY_MODE_NUMBER) # elif data.has_key('get'): elif 'set' in data.keys(): data = data['set'] _num = 0 if 'num' in data.keys(): _num = int(data['num']) for key, value in data.items(): if key == 'pState': if bProto.aliPowerSrareFunc: # if data.has_key('num'): if bProto.aliType == '&aliType=multi_outlet': bProto.aliPowerSrareFunc(value, _num) else : bProto.aliPowerSrareFunc(value) elif key == 'col': if bProto.aliSetColorFunc: bProto.aliSetColorFunc(value) elif key == 'clr': if bProto.aliSetColorFunc: bProto.aliSetColorFunc(value) elif key == 'bright': if bProto.aliSetBrightFunc: bProto.aliSetBrightFunc(value) elif key == 'upBright': if bProto.aliRelateBrightFunc: bProto.aliRelateBrightFunc(value) elif key == 'downBright': if bProto.aliRelateBrightFunc: bProto.aliRelateBrightFunc(value) elif key == 'colTemp': if 
bProto.aliSetColorTempFunc: bProto.aliSetColorTempFunc(value) elif key == 'upColTemp': if bProto.aliRelateColorTempFunc: bProto.aliRelateColorTempFunc(value) elif key == 'downColTemp': if bProto.aliRelateColorTempFunc: bProto.aliRelateColorTempFunc(value) elif key == 'mode': if bProto.aliSetModeFunc: bProto.aliSetModeFunc(value) elif key == 'cMode': if bProto.aliSetcModeFunc: bProto.aliSetcModeFunc(value) except ValueError: pass except TypeError: pass finally: pass def duerParse(self): data = bProto.msgBuf if not data: return try: data = json.loads(data) BLINKER_LOG(data) # if data.has_key('set'): if 'get' in data.keys(): _num = 0 if 'num' in data.keys(): _num = int(data['num']) data = data['get'] if data == 'time': if bProto.duerType == '&duerType=MULTI_SOCKET': bProto.duerQueryFunc(BLINKER_CMD_QUERY_TIME_NUMBER, _num) else : bProto.duerQueryFunc(BLINKER_CMD_QUERY_TIME_NUMBER) elif data == 'aqi': if bProto.duerQueryFunc: bProto.duerQueryFunc(BLINKER_CMD_QUERY_AQI_NUMBER) elif data == 'pm25': if bProto.duerQueryFunc: bProto.duerQueryFunc(BLINKER_CMD_QUERY_PM25_NUMBER) elif data == 'pm10': if bProto.duerQueryFunc: bProto.duerQueryFunc(BLINKER_CMD_QUERY_PM10_NUMBER) elif data == 'co2': if bProto.duerQueryFunc: bProto.duerQueryFunc(BLINKER_CMD_QUERY_CO2_NUMBER) elif data == 'temp': if bProto.duerQueryFunc: bProto.duerQueryFunc(BLINKER_CMD_QUERY_TEMP_NUMBER) elif data == 'humi': if bProto.duerQueryFunc: bProto.duerQueryFunc(BLINKER_CMD_QUERY_HUMI_NUMBER) elif data == 'pm25': if bProto.duerQueryFunc: bProto.duerQueryFunc(BLINKER_CMD_QUERY_PM25_NUMBER) elif data == 'mode': if bProto.duerQueryFunc: bProto.duerQueryFunc(BLINKER_CMD_QUERY_TIME_NUMBER) # elif data.has_key('get'): elif 'set' in data.keys(): data = data['set'] _num = 0 if 'num' in data.keys(): _num = int(data['num']) for key, value in data.items(): if key == 'pState': if bProto.duerPowerSrareFunc: # if data.has_key('num'): if bProto.duerType == '&duerType=MULTI_SOCKET': bProto.duerPowerSrareFunc(value, _num) else : bProto.duerPowerSrareFunc(value) elif key == 'col': if bProto.duerSetColorFunc: bProto.duerSetColorFunc(value) elif key == 'clr': if bProto.duerSetColorFunc: bProto.duerSetColorFunc(value) elif key == 'bright': if bProto.duerSetBrightFunc: bProto.duerSetBrightFunc(value) elif key == 'upBright': if bProto.duerRelateBrightFunc: bProto.duerRelateBrightFunc(value) elif key == 'downBright': if bProto.duerRelateBrightFunc: bProto.duerRelateBrightFunc(value) # elif key == 'colTemp': # if bProto.duerSetColorTempFunc: # bProto.duerSetColorTempFunc(value) # elif key == 'upColTemp': # if bProto.aliRelateColorTempFunc: # bProto.aliRelateColorTempFunc(value) # elif key == 'downColTemp': # if bProto.duerRelateColorTempFunc: # bProto.duerRelateColorTempFunc(value) elif key == 'mode': if bProto.duerSetModeFunc: bProto.duerSetModeFunc(value) elif key == 'cMode': if bProto.duerSetcModeFunc: bProto.duerSetcModeFunc(value) except ValueError: pass except TypeError: pass finally: pass def miParse(self): data = bProto.msgBuf if not data: return try: data = json.loads(data) BLINKER_LOG(data) # if data.has_key('set'): if 'get' in data.keys(): _num = 0 if 'num' in data.keys(): _num = int(data['num']) data = data['get'] if data == 'state': if bProto.miType == '&miType=multi_outlet': bProto.miQueryFunc(BLINKER_CMD_QUERY_ALL_NUMBER, _num) else : bProto.miQueryFunc(BLINKER_CMD_QUERY_ALL_NUMBER) elif data == 'pState': if bProto.aliType == '&miType=multi_outlet': bProto.miQueryFunc(BLINKER_CMD_QUERY_POWERSTATE_NUMBER, _num) else : 
bProto.miQueryFunc(BLINKER_CMD_QUERY_POWERSTATE_NUMBER) elif data == 'col': if bProto.miQueryFunc: bProto.miQueryFunc(BLINKER_CMD_QUERY_COLOR_NUMBER) elif data == 'clr': if bProto.miQueryFunc: bProto.miQueryFunc(BLINKER_CMD_QUERY_COLOR_NUMBER) elif data == 'colTemp': if bProto.miQueryFunc: bProto.miQueryFunc(BLINKER_CMD_QUERY_COLORTEMP_NUMBER) elif data == 'aqi': if bProto.miQueryFunc: bProto.miQueryFunc(BLINKER_CMD_QUERY_AQI_NUMBER) elif data == 'co2': if bProto.miQueryFunc: bProto.miQueryFunc(BLINKER_CMD_QUERY_CO2_NUMBER) elif data == 'bright': if bProto.miQueryFunc: bProto.miQueryFunc(BLINKER_CMD_QUERY_BRIGHTNESS_NUMBER) elif data == 'temp': if bProto.miQueryFunc: bProto.miQueryFunc(BLINKER_CMD_QUERY_TEMP_NUMBER) elif data == 'humi': if bProto.miQueryFunc: bProto.miQueryFunc(BLINKER_CMD_QUERY_HUMI_NUMBER) elif data == 'pm25': if bProto.miQueryFunc: bProto.miQueryFunc(BLINKER_CMD_QUERY_PM25_NUMBER) elif data == 'mode': if bProto.miQueryFunc: bProto.miQueryFunc(BLINKER_CMD_QUERY_MODE_NUMBER) # elif data.has_key('get'): elif 'set' in data.keys(): data = data['set'] _num = 0 if 'num' in data.keys(): _num = int(data['num']) for key, value in data.items(): if key == 'pState': if bProto.miPowerSrareFunc: # if data.has_key('num'): if bProto.miType == '&miType=multi_outlet': bProto.miPowerSrareFunc(value, _num) else : bProto.miPowerSrareFunc(value) elif key == 'col': if bProto.miSetColorFunc: bProto.miSetColorFunc(value) elif key == 'clr': if bProto.miSetColorFunc: bProto.miSetColorFunc(value) elif key == 'bright': if bProto.miSetBrightFunc: bProto.miSetBrightFunc(value) elif key == 'upBright': if bProto.miRelateBrightFunc: bProto.miRelateBrightFunc(value) elif key == 'downBright': if bProto.miRelateBrightFunc: bProto.miRelateBrightFunc(value) elif key == 'colTemp': if bProto.miSetColorTempFunc: bProto.miSetColorTempFunc(value) elif key == 'upColTemp': if bProto.miRelateColorTempFunc: bProto.miRelateColorTempFunc(value) elif key == 'downColTemp': if bProto.miRelateColorTempFunc: bProto.miRelateColorTempFunc(value) elif key == 'mode': if bProto.miSetModeFunc: bProto.miSetModeFunc(value) elif key == 'cMode': if bProto.miSetcModeFunc: bProto.miSetcModeFunc(value) except ValueError: pass except TypeError: pass finally: pass def _parse(self, data): if data is '': return if check_json_format(data): data = json.loads(data) for key in data.keys(): # BLINKER_LOG(key) if key in bProto.Buttons: # bProto.isAvail = False bProto.isRead = False if data[key] == BLINKER_CMD_BUTTON_TAP: bProto.Buttons[key] = BLINKER_CMD_BUTTON_TAP elif data[key] == BLINKER_CMD_BUTTON_PRESSED: bProto.Buttons[key] = BLINKER_CMD_BUTTON_PRESSED else: bProto.Buttons[key] = BLINKER_CMD_BUTTON_RELEASED # if data[key] # BLINKER_LOG(bProto.Buttons) elif key in bProto.Sliders: # bProto.isAvail = False bProto.isRead = False bProto.Sliders[key] = data[key] # BLINKER_LOG(bProto.Buttons) elif key in bProto.Toggles: # bProto.isAvail = False bProto.isRead = False if data[key] == BLINKER_CMD_ON: bProto.Toggles[key] = True else: bProto.Toggles[key] = False # BLINKER_LOG(bProto.Toggles) elif key in bProto.RGB: bProto.isRead = False rgb = [0, 0, 0] rgb[R] = data[key][R] rgb[G] = data[key][G] rgb[B] = data[key][B] bProto.RGB[key] = rgb def heartbeat(self): if bProto.conType is "BLINKER_MQTT": # beginFormat() BlinkerPY.print(self, BLINKER_CMD_STATE, BLINKER_CMD_ONLINE) if bProto.heartbeatFunc : bProto.heartbeatFunc() if bProto.summaryFunc : bProto.summaryFunc() # stateData() # if endFormat() is False: # print(BLINKER_CMD_STATE, BLINKER_CMD_ONLINE) 
else: # beginFormat() BlinkerPY.print(self, BLINKER_CMD_STATE, BLINKER_CMD_CONNECTED) if bProto.heartbeatFunc : bProto.heartbeatFunc() if bProto.summaryFunc : bProto.summaryFunc() # stateData() # if endFormat() is False: # print(BLINKER_CMD_STATE, BLINKER_CMD_CONNECTED) def stateData(self): for tKey in bProto.Toggles: tValue = '' if bProto.Toggles[tKey]: tValue = 'on' else: tValue = 'off' BlinkerPY.print(self, tKey, tValue) for sKey in bProto.Sliders: BlinkerPY.print(self, sKey, bProto.Sliders[sKey]) for rgbKey in bProto.RGB: BlinkerPY.print(self, rgbKey, bProto.RGB[rgbKey]) # def button(self, name): # if not name in bProto.Buttons: # wInit(name, W_BUTTON) # run() # if bProto.Buttons[name] is BLINKER_CMD_BUTTON_RELEASED: # return False # if bProto.Buttons[name] is BLINKER_CMD_BUTTON_TAP: # bProto.Buttons[name] = BLINKER_CMD_BUTTON_RELEASED # return True # def slider(self, name): # if name in bProto.Sliders: # return bProto.Sliders[name] # else: # wInit(name, W_SLIDER) # run() # return bProto.Sliders[name] # def toggle(self, name): # if name in bProto.Toggles: # return bProto.Toggles[name] # else: # wInit(name, W_TOGGLE) # run() # return bProto.Toggles[name] # def rgb(name, color): # if name in bProto.RGB: # return bProto.RGB[name][color] # else: # wInit(name, W_RGB) # run() # return bProto.RGB[name][color] def joystick(self, axis): if axis >= J_Xaxis and axis <= J_Yaxis: return bProto.Joystick[axis] else: return BLINKER_JOYSTICK_VALUE_DEFAULT def ahrs(self, axis): if axis >= Yaw and axis <= Roll: return bProto.Ahrs[axis] else: return 0 def attachAhrs(self): state = False while BlinkerPY.connected(self) is False: BlinkerPY.connect(self) BlinkerPY.print(self, BLINKER_CMD_AHRS, BLINKER_CMD_ON) BlinkerPY.delay(self, 100) run() start_time = millis() state = bProto.Ahrs[AHRS_state] while state is False: if (millis() - start_time) > BLINKER_CONNECT_TIMEOUT_MS: BLINKER_LOG("AHRS attach failed...Try again") start_time = millis() BlinkerPY.print(self, BLINKER_CMD_AHRS, BLINKER_CMD_ON) BlinkerPY.delay(self, 100) BlinkerPY.run(self) state = bProto.Ahrs[AHRS_state] BLINKER_LOG("AHRS attach sucessed...") def detachAhrs(self): BlinkerPY.print(self, BLINKER_CMD_AHRS, BLINKER_CMD_OFF) bProto.Ahrs[Yaw] = 0 bProto.Ahrs[Roll] = 0 bProto.Ahrs[Pitch] = 0 bProto.Ahrs[AHRS_state] = False def gps(self, axis): BlinkerPY.print(self, BLINKER_CMD_GET, BLINKER_CMD_GPS) BlinkerPY.delay(self, 100) BlinkerPY.run(self) if axis >= LONG and axis <= LAT: return bProto.GPS[axis] else: return "0.000000" def vibrate(self, time = 200): if time > 1000: time = 1000 BlinkerPY.print(self, BLINKER_CMD_VIBRATE, time) def time(self): return int(_time.time()) def second(self): localtime = _time.localtime(int(_time.time())) return localtime.tm_sec def minute(self): localtime = _time.localtime(int(_time.time())) return localtime.tm_min def hour(self): localtime = _time.localtime(int(_time.time())) return localtime.tm_hour def mday(self): localtime = _time.localtime(int(_time.time())) return localtime.tm_mday def wday(self): localtime = _time.localtime(int(_time.time())) return (localtime.tm_wday + 1) % 7 def month(self): localtime = _time.localtime(int(_time.time())) return localtime.tm_mon def year(self): localtime = _time.localtime(int(_time.time())) return localtime.tm_year def yday(self): localtime = _time.localtime(int(_time.time())) return localtime.tm_yday def dtime(self): localtime = _time.localtime(int(_time.time())) return localtime.tm_hour * 60 * 60 + localtime.tm_min * 60 + localtime.tm_sec def sms(self, msg): if bProto.conType 
== "BLINKER_MQTT": bProto.conn1.sms(msg) else: BLINKER_ERR_LOG('This code is intended to run on the MQTT!') def push(self, msg): if bProto.conType == "BLINKER_MQTT": bProto.conn1.push(msg) else: BLINKER_ERR_LOG('This code is intended to run on the MQTT!') def wechat(self, title, state, msg): if bProto.conType == "BLINKER_MQTT": bProto.conn1.wechat(title, state, msg) else: BLINKER_ERR_LOG('This code is intended to run on the MQTT!') def weather(self, city = 'default'): if bProto.conType == "BLINKER_MQTT": return bProto.conn1.weather(city) else: BLINKER_ERR_LOG('This code is intended to run on the MQTT!') def aqi(self, city = 'default'): if bProto.conType == "BLINKER_MQTT": return bProto.conn1.aqi(city) else: BLINKER_ERR_LOG('This code is intended to run on the MQTT!') def dataStorageCallback(self): timer = threading.Timer(bProto.dataTime, dataStorage) timer.start() bProto.dataStorageFunc() bProto.dataTimesLimit = bProto.dataTimesLimit + 1 if bProto.dataTimesLimit >= bProto.dataTimes: if BlinkerPY.dataUpdate(self): bProto.dataTimesLimit = 0 def attachDataStorage(self, func, limit = BLINKER_DATA_FREQ_TIME, times = 4): bProto.dataStorageFunc = func if limit >= 60.0: bProto.dataTime = limit else: bProto.dataTime = 60.0 bProto.dataTimes = times timer = threading.Timer(limit, dataStorage) timer.start() def dataStorage(self, name, data): now_time = BlinkerPY.time(self) - BlinkerPY.second(self) now_time = now_time - now_time % 10 BLINKER_LOG_ALL('now_time: ', now_time) if name in bProto.Datas: bProto.Datas[name].saveData(data, now_time, bProto.dataTime) else: _Data = BlinkerData(name) _Data.saveData(data, now_time, bProto.dataTime) bProto.dataCount = bProto.dataCount + 1 def dataUpdate(self): if bProto.dataCount == 0: return if bProto.conType == "BLINKER_MQTT": datas = {} for name in bProto.Datas: # datas.append(bProto.Datas[name].getData()) datas[name] = bProto.Datas[name].getData() if bProto.conn1.dataUpdate(datas): for name in bProto.Datas: bProto.Datas[name].flush() return True return False else: BLINKER_ERR_LOG('This code is intended to run on the MQTT!') ## ali def attachAliGenieSetPowerState(self, _func): bProto.aliPowerSrareFunc = _func def attachAliGenieSetColor(self, _func): bProto.aliSetColorFunc = _func def attachAliGenieSetMode(self, _func): bProto.aliSetModeFunc = _func def attachAliGenieSetcMode(self, _func): bProto.aliSetcModeFunc = _func def attachAliGenieSetBrightness(self, _func): bProto.aliSetBrightFunc = _func def attachAliGenieRelativeBrightness(self, _func): bProto.aliRelateBrightFunc = _func def attachAliGenieSetColorTemperature(self, _func): bProto.aliSetColorTempFunc = _func def attachAliGenieRelativeColorTemperature(self, _func): bProto.aliRelateColorTempFunc = _func def attachAliGenieQuery(self, _func): bProto.aliQueryFunc = _func def aliPrint(self, data): bProto.conn1.aliPrint(data) ## duer def attachDuerOSSetPowerState(self, _func): bProto.duerPowerSrareFunc = _func def attachDuerOSSetColor(self, _func): bProto.duerSetColorFunc = _func def attachDuerOSSetMode(self, _func): bProto.duerSetModeFunc = _func def attachDuerOSSetcMode(self, _func): bProto.duerSetcModeFunc = _func def attachDuerOSSetBrightness(self, _func): bProto.duerSetBrightFunc = _func def attachDuerOSRelativeBrightness(self, _func): bProto.duerRelateBrightFunc = _func # def attachAliGenieSetColorTemperature(self, _func): # bProto.aliSetColorTempFunc = _func # def attachAliGenieRelativeColorTemperature(self, _func): # bProto.aliRelateColorTempFunc = _func def attachDuerOSQuery(self, _func): 
bProto.duerQueryFunc = _func def duerPrint(self, data): bProto.conn1.duerPrint(data) ## mi def attachMiSetPowerState(self, _func): bProto.miPowerSrareFunc = _func def attachMiSetColor(self, _func): bProto.miSetColorFunc = _func def attachMiSetMode(self, _func): bProto.miSetModeFunc = _func def attachMiSetcMode(self, _func): bProto.miSetcModeFunc = _func def attachMiSetBrightness(self, _func): bProto.miSetBrightFunc = _func def attachMiRelativeBrightness(self, _func): bProto.miRelateBrightFunc = _func def attachMiSetColorTemperature(self, _func): bProto.miSetColorTempFunc = _func def attachMiRelativeColorTemperature(self, _func): bProto.miRelateColorTempFunc = _func def attachMiQuery(self, _func): bProto.miQueryFunc = _func def miPrint(self, data): bProto.conn1.miPrint(data) Blinker = BlinkerPY() def thread_run(): if bProto.conType == "BLINKER_BLE": bProto.conn1.run() while True: Blinker.checkData() def dataStorage(): Blinker.dataStorageCallback() class BlinkerData(object): """ """ def __init__(self, name): self.name = name self.lastTime = 0 self.dataCount = 0 self.data = [] bProto.Datas[name] = self # def name(self, name) # self.name = name # bProto.Datas[name] = self def saveData(self, _data, _now, _limit): if self.dataCount : if (_now - self.lastTime) < _limit : return self.lastTime = _now dataList = [_now, _data] self.data.append(dataList) self.dataCount = self.dataCount + 1 BLINKER_LOG_ALL(self.data) def getData(self): return self.data def flush(self): self.data.clear() class BlinkerButton(object): """ """ def __init__(self, name, func=None): self.name = name self.func = func self._icon = "" self.iconClr = "" self._content = "" self._text = "" self._text1 = "" self.textClr = "" self.buttonData = {} bProto.Buttons[name] = self def icon(self, _icon): self._icon = _icon def color(self, _clr): self.iconClr = _clr def content(self, _con): self._content = str(_con) def text(self, _text1, _text2=None): self._text = str(_text1) if _text2: self._text1 = str(_text2) def textColor(self, _clr): self.textClr = _clr def attach(self, func): self.func = func def print(self, state=None): if state : self.buttonData[BLINKER_CMD_SWITCH] = state if self._icon: self.buttonData[BLINKER_CMD_ICON] = self._icon if self.iconClr: self.buttonData[BLINKER_CMD_COLOR] = self.iconClr if self._content: self.buttonData[BLINKER_CMD_CONNECTED] = self._content if self._text: self.buttonData[BLINKER_CMD_TEXT] = self._text if self._text1: self.buttonData[BLINKER_CMD_TEXT1] = self._text1 if self.textClr: self.buttonData[BLINKER_CMD_TEXTCOLOR] = self.textClr if len(self.buttonData) : # data = json.dumps(self.buttonData) # data = {self.name: self.buttonData} # Blinker._print(data) Blinker.print(self.name, self.buttonData) self.buttonData.clear() self._icon = "" self.iconClr = "" self._content = "" self._text = "" self._text1 = "" self.textClr = "" class BlinkerNumber(object): """ """ def __init__(self, name): self.name = name self._icon = "" self._color = "" self._unit = "" self._text = "" self.numberData = {} bProto.Numbers[name] = self def icon(self, _icon): self._icon = _icon def color(self, _clr): self._color = _clr def unit(self, _unit): self._unit = _unit def text(self, _text): self._text = _text def print(self, value = None): if value: self.numberData[BLINKER_CMD_VALUE] = value if self._icon: self.numberData[BLINKER_CMD_ICON] = self._icon if self._color: self.numberData[BLINKER_CMD_COLOR] = self._color if self._unit: self.numberData[BLINKER_CMD_UNIT] = self._unit if self._text: self.numberData[BLINKER_CMD_TEXT] = 
self._text if len(self.numberData) : # data = json.dumps(self.numberData) # data = {self.name: self.numberData} # Blinker._print(data) Blinker.print(self.name, self.numberData) self.numberData.clear() self._icon = "" self._color = "" self._unit = "" self._text = "" class BlinkerRGB(object): """ """ def __init__(self, name, func=None): self.name = name self.func = func self.rgbbrightness = 0 self.rgbData = [] self.registered = False bProto.RGB[name] = self def attach(self, func): self.func = func def brightness(self, _bright): self.rgbbrightness = _bright def print(self, r, g, b, _bright=None): self.rgbData.append(r) self.rgbData.append(g) self.rgbData.append(b) if _bright is None: self.rgbData.append(self.rgbbrightness) else: self.rgbData.append(_bright) # _print(self.rgbData) # data = {self.name: self.rgbData} # Blinker._print(data) Blinker.print(self.name, self.rgbData) class BlinkerSlider(object): """ """ def __init__(self, name, func=None): self.name = name self.func = func self.textClr = "" self.sliderData = {} bProto.Sliders[name] = self def attach(self, func): self.func = func def color(self, _clr): self.textClr = _clr def print(self, value): self.sliderData[BLINKER_CMD_VALUE] = value if self.textClr: self.sliderData[BLINKER_CMD_COLOR] = self.textClr # data = json.dumps(self.sliderData) # data = {self.name: self.sliderData} # Blinker._print(data) Blinker.print(self.name, self.sliderData) class BlinkerText(object): """ """ def __init__(self, name): self.name = name self.textData = {} bProto.Texts[name] = self def print(self, text1, text2=None): self.textData[BLINKER_CMD_TEXT] = text1 if text2: self.textData[BLINKER_CMD_TEXT1] = text2 # data = json.dumps(self.textData) # data = {self.name: self.textData} # Blinker._print(data) Blinker.print(self.name, self.textData) class BlinkerJoystick(object): """ """ def __init__(self, name, func=None): self.name = name self.func = func bProto.Joystick[name] = self def attach(self, _func): self.func = _func class BlinkerSwitch(object): """ """ def __init__(self, name=BLINKER_CMD_BUILTIN_SWITCH, func=None): self.name = name self.func = func bProto.Toggles[name] = self def attach(self, _func): self.func = _func def print(self, _state): Blinker.print(self.name, _state) BUILTIN_SWITCH = BlinkerSwitch() class BLINKERA_LIGENIE(): def __init__(self): self.payload = {} def attachPowerState(self, _func): Blinker.attachAliGenieSetPowerState(_func) def attachColor(self, _func): Blinker.attachAliGenieSetColor(_func) def attachMode(self, _func): Blinker.attachAliGenieSetMode(_func) def attachCancelMode(self, _func): Blinker.attachAliGenieSetcMode(_func) def attachBrightness(self, _func): Blinker.attachAliGenieSetBrightness(_func) def attachRelativeBrightness(self, _func): Blinker.attachAliGenieRelativeBrightness(_func) def attachColorTemperature(self, _func): Blinker.attachAliGenieSetColorTemperature(_func) def attachRelativeColorTemperature(self, _func): Blinker.attachAliGenieRelativeColorTemperature(_func) def attachQuery(self, _func): Blinker.attachAliGenieQuery(_func) def powerState(self, state, num = None): self.payload['pState'] = state if num : self.payload['num'] = num def color(self, clr): self.payload['clr'] = clr def mode(self, md): self.payload['mode'] = md def colorTemp(self, clrTemp): self.payload['colTemp'] = clrTemp def brightness(self, bright): self.payload['bright'] = bright def temp(self, tem): self.payload['temp'] = tem def humi(self, hum): self.payload['humi'] = hum def pm25(self, pm): self.payload['pm25'] = pm def print(self): 
BLINKER_LOG_ALL(self.payload) Blinker.aliPrint(self.payload) self.payload.clear() BlinkerAliGenie = BLINKERA_LIGENIE() class BLINKERA_DUEROS(): def __init__(self): self.payload = {} def attachPowerState(self, _func): Blinker.attachDuerOSSetPowerState(_func) def attachColor(self, _func): Blinker.attachDuerOSSetColor(_func) def attachMode(self, _func): Blinker.attachDuerOSSetMode(_func) def attachCancelMode(self, _func): Blinker.attachDuerOSSetcMode(_func) def attachBrightness(self, _func): Blinker.attachDuerOSSetBrightness(_func) def attachRelativeBrightness(self, _func): Blinker.attachDuerOSRelativeBrightness(_func) # def attachColorTemperature(self, _func): # Blinker.attachAliGenieSetColorTemperature(_func) # def attachRelativeColorTemperature(self, _func): # Blinker.attachAliGenieRelativeColorTemperature(_func) def attachQuery(self, _func): Blinker.attachDuerOSQuery(_func) def powerState(self, state, num = None): self.payload['pState'] = state if num : self.payload['num'] = num def color(self, clr): self.payload['clr'] = clr def mode(self, md): self.payload['mode'] = ['', md] # def colorTemp(self, clrTemp): # self.payload['colTemp'] = clrTemp def brightness(self, bright): self.payload['bright'] = ['', bright] def temp(self, tem): self.payload['temp'] = tem def humi(self, hum): self.payload['humi'] = hum def pm25(self, pm): self.payload['pm25'] = pm def pm10(self, pm): self.payload['pm10'] = pm def co2(self, pm): self.payload['co2'] = pm def aqi(self, pm): self.payload['aqi'] = pm def time(self, pm): self.payload['time'] = pm def print(self): BLINKER_LOG_ALL(self.payload) Blinker.duerPrint(self.payload) self.payload.clear() BlinkerDuerOS = BLINKERA_DUEROS() class BLINKERA_MIOT(): def __init__(self): self.payload = {} def attachPowerState(self, _func): Blinker.attachMiSetPowerState(_func) def attachColor(self, _func): Blinker.attachMiSetColor(_func) def attachMode(self, _func): Blinker.attachMiSetMode(_func) def attachCancelMode(self, _func): Blinker.attachMiSetcMode(_func) def attachBrightness(self, _func): Blinker.attachMiSetBrightness(_func) def attachRelativeBrightness(self, _func): Blinker.attachMiRelativeBrightness(_func) def attachColorTemperature(self, _func): Blinker.attachMiSetColorTemperature(_func) def attachRelativeColorTemperature(self, _func): Blinker.attachMiRelativeColorTemperature(_func) def attachQuery(self, _func): Blinker.attachMiQuery(_func) def powerState(self, state, num = None): self.payload['pState'] = state if num : self.payload['num'] = num def color(self, clr): self.payload['clr'] = clr def mode(self, md): self.payload['mode'] = md def colorTemp(self, clrTemp): self.payload['colTemp'] = clrTemp def brightness(self, bright): self.payload['bright'] = bright def co2(self, pm): self.payload['co2'] = pm def aqi(self, pm): self.payload['aqi'] = pm def temp(self, tem): self.payload['temp'] = tem def humi(self, hum): self.payload['humi'] = hum def pm25(self, pm): self.payload['pm25'] = pm def print(self): BLINKER_LOG_ALL(self.payload) Blinker.miPrint(self.payload) self.payload.clear() BlinkerMiot = BLINKERA_MIOT()
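A minimal usage sketch for the widget classes above. It assumes the Blinker connection (MQTT or BLE) has already been initialised earlier in this module; the widget names, the icon string, and the single-argument callback signature are illustrative assumptions, not part of the code above.

# Widget sketch (assumes Blinker is already initialised/connected earlier in
# this module; the names and icon below are placeholders for illustration).
button = BlinkerButton('btn-demo')
number = BlinkerNumber('num-temp')


def on_button(state):
    # Echo the received state back to the app and publish a reading.
    button.icon('fas fa-lightbulb')
    button.print(state)
    number.unit('C')
    number.print(23.5)


button.attach(on_button)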
ch08_listing_source.py
# coding: utf-8 import BaseHTTPServer import cgi import functools import json import math import random import socket import SocketServer import time import threading import unittest import uuid import urlparse import redis def acquire_lock_with_timeout( conn, lockname, acquire_timeout=10, lock_timeout=10): identifier = str(uuid.uuid4()) #A lockname = 'lock:' + lockname lock_timeout = int(math.ceil(lock_timeout)) #D end = time.time() + acquire_timeout while time.time() < end: if conn.setnx(lockname, identifier): #B conn.expire(lockname, lock_timeout) #B return identifier elif not conn.ttl(lockname): #C conn.expire(lockname, lock_timeout) #C time.sleep(.001) return False def release_lock(conn, lockname, identifier): pipe = conn.pipeline(True) lockname = 'lock:' + lockname while True: try: pipe.watch(lockname) #A if pipe.get(lockname) == identifier: #A pipe.multi() #B pipe.delete(lockname) #B pipe.execute() #B return True #B pipe.unwatch() break except redis.exceptions.WatchError: #C pass #C return False #D CONFIGS = {} CHECKED = {} def get_config(conn, type, component, wait=1): key = 'config:%s:%s'%(type, component) if CHECKED.get(key) < time.time() - wait: #A CHECKED[key] = time.time() #B config = json.loads(conn.get(key) or '{}') #C old_config = CONFIGS.get(key) #D if config != old_config: #E CONFIGS[key] = config #F return CONFIGS.get(key) REDIS_CONNECTIONS = {} def redis_connection(component, wait=1): #A key = 'config:redis:' + component #B def wrapper(function): #C @functools.wraps(function) #D def call(*args, **kwargs): #E old_config = CONFIGS.get(key, object()) #F _config = get_config( #G config_connection, 'redis', component, wait) #G config = {} for k, v in _config.iteritems(): #L config[k.encode('utf-8')] = v #L if config != old_config: #H REDIS_CONNECTIONS[key] = redis.Redis(**config) #H return function( #I REDIS_CONNECTIONS.get(key), *args, **kwargs) #I return call #J return wrapper #K def execute_later(conn, queue, name, args): # this is just for testing purposes assert conn is args[0] t = threading.Thread(target=globals()[name], args=tuple(args)) t.setDaemon(1) t.start() # 代码清单 8-1 # <start id="create-twitter-user"/> def create_user(conn, login, name): llogin = login.lower() # 使用第 6 章定义的加锁函数尝试对小写的用户名进行加锁。 lock = acquire_lock_with_timeout(conn, 'user:' + llogin, 1) # 如果加锁不成功,那么说明给定的用户名已经被其他用户占用了。 if not lock: return None # 程序使用了一个散列来储存小写的用户名以及用户 ID 之间的映射, # 如果给定的用户名已经被映射到了某个用户 ID , # 那么程序就不会再将这个用户名分配给其他人。 if conn.hget('users:', llogin): release_lock(conn, 'user:' + llogin, lock) return None # 每个用户都有一个独一无二的 ID , # 这个 ID 是通过对计数器执行自增操作产生的。 id = conn.incr('user:id:') pipeline = conn.pipeline(True) # 在散列里面将小写的用户名映射至用户 ID 。 pipeline.hset('users:', llogin, id) # 将用户信息添加到用户对应的散列里面。 pipeline.hmset('user:%s'%id, { 'login': login, 'id': id, 'name': name, 'followers': 0, 'following': 0, 'posts': 0, 'signup': time.time(), }) pipeline.execute() # 释放之前对用户名加的锁。 release_lock(conn, 'user:' + llogin, lock) # 返回用户 ID 。 return id # <end id="create-twitter-user"/> # 代码清单 8-2 # <start id="create-twitter-status"/> def create_status(conn, uid, message, **data): pipeline = conn.pipeline(True) # 根据用户 ID 获取用户的用户名。 pipeline.hget('user:%s'%uid, 'login') # 为这条状态消息创建一个新的 ID 。 pipeline.incr('status:id:') login, id = pipeline.execute() # 在发布状态消息之前,先检查用户的账号是否存在。 if not login: return None # 准备并设置状态消息的各项信息。 data.update({ 'message': message, 'posted': time.time(), 'id': id, 'uid': uid, 'login': login, }) pipeline.hmset('status:%s'%id, data) # 更新用户的已发送状态消息数量。 pipeline.hincrby('user:%s'%uid, 'posts') 
pipeline.execute() # 返回新创建的状态消息的 ID 。 return id # <end id="create-twitter-status"/> # 代码清单 8-3 # <start id="fetch-page"/> # 函数接受三个可选参数, # 它们分别用于指定函数要获取哪条时间线、要获取多少页时间线、以及每页要有多少条状态消息。 def get_status_messages(conn, uid, timeline='home:', page=1, count=30): # 获取时间线上面最新的状态消息的 ID 。 statuses = conn.zrevrange( '%s%s'%(timeline, uid), (page-1)*count, page*count-1) pipeline = conn.pipeline(True) # 获取状态消息本身。 for id in statuses: pipeline.hgetall('status:%s'%id) # 使用过滤器移除那些已经被删除了的状态消息。 return filter(None, pipeline.execute()) # <end id="fetch-page"/> # 代码清单 8-4 # <start id="follow-user"/> HOME_TIMELINE_SIZE = 1000 def follow_user(conn, uid, other_uid): # 把正在关注有序集合以及关注者有序集合的键名缓存起来。 fkey1 = 'following:%s'%uid fkey2 = 'followers:%s'%other_uid # 如果 uid 指定的用户已经关注了 other_uid 指定的用户,那么函数直接返回。 if conn.zscore(fkey1, other_uid): return None now = time.time() pipeline = conn.pipeline(True) # 将两个用户的 ID 分别添加到相应的正在关注有序集合以及关注者有序集合里面。 pipeline.zadd(fkey1, other_uid, now) pipeline.zadd(fkey2, uid, now) # 从被关注用户的个人时间线里面获取 HOME_TIMELINE_SIZE 条最新的状态消息。 pipeline.zrevrange('profile:%s'%other_uid, 0, HOME_TIMELINE_SIZE-1, withscores=True) following, followers, status_and_score = pipeline.execute()[-3:] # 修改两个用户的散列,更新他们各自的正在关注数量以及关注者数量。 pipeline.hincrby('user:%s'%uid, 'following', int(following)) pipeline.hincrby('user:%s'%other_uid, 'followers', int(followers)) if status_and_score: # 对执行关注操作的用户的定制时间线进行更新,并保留时间线上面的最新 1000 条状态消息。 pipeline.zadd('home:%s'%uid, **dict(status_and_score)) pipeline.zremrangebyrank('home:%s'%uid, 0, -HOME_TIMELINE_SIZE-1) pipeline.execute() # 返回 True 表示关注操作已经成功执行。 return True # <end id="follow-user"/> # 代码清单 8-5 # <start id="unfollow-user"/> def unfollow_user(conn, uid, other_uid): # 把正在关注有序集合以及关注者有序集合的键名缓存起来。 fkey1 = 'following:%s'%uid fkey2 = 'followers:%s'%other_uid # 如果 uid 指定的用户并未关注 other_uid 指定的用户,那么函数直接返回。 if not conn.zscore(fkey1, other_uid): return None pipeline = conn.pipeline(True) # 从正在关注有序集合以及关注者有序集合里面移除双方的用户 ID 。 pipeline.zrem(fkey1, other_uid) pipeline.zrem(fkey2, uid) # 获取被取消关注的用户最近发布的 HOME_TIMELINE_SIZE 条状态消息。 pipeline.zrevrange('profile:%s'%other_uid, 0, HOME_TIMELINE_SIZE-1) following, followers, statuses = pipeline.execute()[-3:] # 对用户信息散列里面的正在关注数量以及关注者数量进行更新。 pipeline.hincrby('user:%s'%uid, 'following', int(following)) pipeline.hincrby('user:%s'%other_uid, 'followers', int(followers)) if statuses: # 对执行取消关注操作的用户的定制时间线进行更新, # 移除被取消关注的用户发布的所有状态消息。 pipeline.zrem('home:%s'%uid, *statuses) pipeline.execute() # 返回 True 表示取消关注操作执行成功。 return True # <end id="unfollow-user"/> # <start id="exercise-refilling-timelines"/> REFILL_USERS_STEP = 50 def refill_timeline(conn, incoming, timeline, start=0): if not start and conn.zcard(timeline) >= 750: # 如果时间线已经被填满了 3/4 或以上 return # 那么不对它进行重新填充 users = conn.zrangebyscore(incoming, start, 'inf', # 获取一组用户,这些用户发布的消息将被用于填充时间线 start=0, num=REFILL_USERS_STEP, withscores=True) # pipeline = conn.pipeline(False) for uid, start in users: pipeline.zrevrange('profile:%s'%uid, # 从正在关注的人哪里获取最新的状态消息 0, HOME_TIMELINE_SIZE-1, withscores=True) # messages = [] for results in pipeline.execute(): messages.extend(results) # 将取得的所有状态消息放到一起 messages.sort(key=lambda x:-x[1]) # 根据发布时间对取得的所有状态消息进行排序, del messages[HOME_TIMELINE_SIZE:] # 并保留其中最新的 100 条状态消息 pipeline = conn.pipeline(True) if messages: pipeline.zadd(timeline, **dict(messages)) # 将挑选出的状态消息添加到用户的主页时间线上面 pipeline.zremrangebyrank( # 对时间线进行修剪,只保留最新的 100 条状态消息 timeline, 0, -HOME_TIMELINE_SIZE-1) # pipeline.execute() if len(users) >= REFILL_USERS_STEP: execute_later(conn, 'default', 'refill_timeline', # 
如果还要其他用户的时间线需要进行重新填充, [conn, incoming, timeline, start]) # 那么继续执行这个动作 # <end id="exercise-refilling-timelines"/> # <start id="exercise-follow-user-list"/> def follow_user_list(conn, uid, other_uid, list_id): fkey1 = 'list:in:%s'%list_id # 把相关的键名缓存起来 fkey2 = 'list:out:%s'%other_uid # timeline = 'list:statuses:%s'%list_id # if conn.zscore(fkey1, other_uid): # 如果 other_uid 已经包含在列表里面, return None # 那么直接返回 now = time.time() pipeline = conn.pipeline(True) pipeline.zadd(fkey1, other_uid, now) # 将各个用户ID添加到相应的有序集合里面 pipeline.zadd(fkey2, list_id, now) # pipeline.zcard(fkey1) # 获取有序集合的大小 pipeline.zrevrange('profile:%s'%other_uid, # 从用户的个人时间线里面获取最新的状态消息 0, HOME_TIMELINE_SIZE-1, withscores=True) # following, status_and_score = pipeline.execute()[-2:] pipeline.hset('list:%s'%list_id, 'following', following) # 对存储列表信息的散列进行更新,将列表的新大小记录到散列里面 pipeline.zadd(timeline, **dict(status_and_score)) # 对列表的状态消息进行更新 pipeline.zremrangebyrank(timeline, 0, -HOME_TIMELINE_SIZE-1)# pipeline.execute() return True # 返回 True 值,表示用户已经被添加到列表里面 # <end id="exercise-follow-user"/> # <start id="exercise-unfollow-user-list"/> def unfollow_user_list(conn, uid, other_uid, list_id): fkey1 = 'list:in:%s'%list_id # 把相关的键名缓存起来 fkey2 = 'list:out:%s'%other_uid # timeline = 'list:statuses:%s'%list_id # if not conn.zscore(fkey1, other_uid): # 如果用户并未关注 other_uid , return None # 那么直接返回 pipeline = conn.pipeline(True) pipeline.zrem(fkey1, other_uid) # 从相应的有序集合里面移除各个用户ID pipeline.zrem(fkey2, list_id) # pipeline.zcard(fkey1) # 获取有序集合的大小 pipeline.zrevrange('profile:%s'%other_uid, # 从被取消关注的用户那里获取他最新发布的状态消息 0, HOME_TIMELINE_SIZE-1) # following, statuses = pipeline.execute()[-2:] pipeline.hset('list:%s'%list_id, 'following', following) # 对存储列表信息的散列进行更新,将列表的新大小记录到散列里面 if statuses: pipeline.zrem(timeline, *statuses) # 从时间线里面移除被取消关注的用户所发布的状态消息 refill_timeline(fkey1, timeline) # 重新填充时间线 pipeline.execute() return True # 返回 True 值,表示用户已经被取消关注 # <end id="exercise-unfollow-user-list"/> # <start id="exercise-create-user-list"/> def create_user_list(conn, uid, name): pipeline = conn.pipeline(True) pipeline.hget('user:%s'%uid, 'login') # 获取创建列表的用户的用户名 pipeline.incr('list:id:') # 生成一个新的列表ID login, id = pipeline.execute() if not login: # 如果用户不存在,那么直接返回 return None # now = time.time() pipeline = conn.pipeline(True) pipeline.zadd('lists:%s'%uid, **{id: now}) # 将新创建的列表添加到用户已经创建了的有序集合里面 pipeline.hmset('list:%s'%id, { # 创建记录列表信息的散列 'name': name, # 'id': id, # 'uid': uid, # 'login': login, # 'following': 0, # 'created': now, # }) pipeline.execute() return id # 返回新列表的ID # <end id="exercise-create-user-list"/> # 代码清单 8-6 # <start id="post-message"/> def post_status(conn, uid, message, **data): # 使用之前介绍过的函数来创建一条新的状态消息。 id = create_status(conn, uid, message, **data) # 如果创建状态消息失败,那么直接返回。 if not id: return None # 获取消息的发布时间。 posted = conn.hget('status:%s'%id, 'posted') # 如果程序未能顺利地获取消息的发布时间,那么直接返回。 if not posted: return None post = {str(id): float(posted)} # 将状态消息添加到用户的个人时间线里面。 conn.zadd('profile:%s'%uid, **post) # 将状态消息推送给用户的关注者。 syndicate_status(conn, uid, post) return id # <end id="post-message"/> # 代码清单 8-7 # <start id="syndicate-message"/> # 函数每次被调用时,最多只会将状态消息发送给一千个关注者。 POSTS_PER_PASS = 1000 def syndicate_status(conn, uid, post, start=0): # 以上次被更新的最后一个关注者为起点,获取接下来的一千个关注者。 followers = conn.zrangebyscore('followers:%s'%uid, start, 'inf', start=0, num=POSTS_PER_PASS, withscores=True) pipeline = conn.pipeline(False) # 在遍历关注者的同时, # 对 start 变量的值进行更新, # 这个变量可以在有需要的时候传递给下一个 syndicate_status() 调用。 for follower, start in followers: # 将状态消息添加到所有被获取的关注者的定制时间线里面, # 
并在有需要的时候对关注者的定制时间线进行修剪, # 防止它超过限定的最大长度。 pipeline.zadd('home:%s'%follower, **post) pipeline.zremrangebyrank( 'home:%s'%follower, 0, -HOME_TIMELINE_SIZE-1) pipeline.execute() # 如果需要更新的关注者数量超过一千人, # 那么在延迟任务里面继续执行剩余的更新操作。 if len(followers) >= POSTS_PER_PASS: execute_later(conn, 'default', 'syndicate_status', [conn, uid, post, start]) # <end id="syndicate-message"/> # <start id="syndicate-message-list"/> def syndicate_status_list(conn, uid, post, start=0, on_lists=False): key = 'followers:%s'%uid # 根据操作的处理进度(depending on how far along we are), base = 'home:%s' # 选择对主页时间线还是对用户时间线进行操作 if on_lists: # key = 'list:out:%s'%uid # base = 'list:statuses:%s' # followers = conn.zrangebyscore(key, start, 'inf', # 从上次更新时的最后一个用户或者列表作为起点, start=0, num=POSTS_PER_PASS, withscores=True) # 获取下一组用户或者列表(数量为 1000 个) pipeline = conn.pipeline(False) for follower, start in followers: # 将状态消息添加到所有已获取关注者的主页时间线里面 pipeline.zadd(base%follower, **post) # pipeline.zremrangebyrank( # base%follower, 0, -HOME_TIMELINE_SIZE-1) # pipeline.execute() if len(followers) >= POSTS_PER_PASS: # 如果已经对至少 1000 个用户进行了更新, execute_later(conn, 'default', 'syndicate_status', # 那么将后续的更新操作留到下次再进行 [conn, uid, post, start, on_lists]) # elif not on_lists: execute_later(conn, 'default', 'syndicate_status', # 如果针对列表的操作并未完成,那么对列表进行操作 [conn, uid, post, 0, True]) # 如果操作只是对主页时间线执行的话,那么程序无需执行这一步 # <end id="syndicate-message-list"/> # 代码清单 8-8 # <start id="delete-message"/> def delete_status(conn, uid, status_id): key = 'status:%s'%status_id # 对指定的状态消息进行加锁,防止两个程序同时删除同一条状态消息的情况出现。 lock = acquire_lock_with_timeout(conn, key, 1) # 如果加锁失败,那么直接返回。 if not lock: return None # 如果 uid 指定的用户并非状态消息的发布人,那么函数直接返回。 if conn.hget(key, 'uid') != str(uid): release_lock(conn, key, lock) return None pipeline = conn.pipeline(True) # 删除指定的状态消息。 pipeline.delete(key) # 从用户的个人时间线里面移除指定的状态消息 ID 。 pipeline.zrem('profile:%s'%uid, status_id) # 从用户的定制时间线里面移除指定的状态消息 ID 。 pipeline.zrem('home:%s'%uid, status_id) # 对储存着用户信息的散列进行更新,减少已发布状态消息的数量。 pipeline.hincrby('user:%s'%uid, 'posts', -1) pipeline.execute() release_lock(conn, key, lock) return True # <end id="delete-message"/> # <start id="exercise-clean-out-timelines"/> def clean_timelines(conn, uid, status_id, start=0, on_lists=False): key = 'followers:%s'%uid # 根据操作的处理进度, base = 'home:%s' # 选择对主页时间线还是对用户时间线进行操作 if on_lists: # key = 'list:out:%s'%uid # base = 'list:statuses:%s' # followers = conn.zrangebyscore(key, start, 'inf', # 从上次更新时的最后一个用户或者列表作为起点, start=0, num=POSTS_PER_PASS, withscores=True) # 获取下一组用户或者列表(数量为 1000 个) pipeline = conn.pipeline(False) for follower, start in followers: # 从所有已获取的关注者的主页时间线上面, pipeline.zrem(base%follower, status_id) # 移除指定的状态消息 pipeline.execute() if len(followers) >= POSTS_PER_PASS: # 如果本次更新已经处理了至少 1000 个关注者, execute_later(conn, 'default', 'clean_timelines' , # 那么将后续的工作留到下次再执行 [conn, uid, status_id, start, on_lists]) # elif not on_lists: execute_later(conn, 'default', 'clean_timelines', # 如果针对列表的操作并未完成,那么对列表进行操作 [conn, uid, status_id, 0, True]) # 如果操作只是对主页时间线执行的话,那么程序无需执行这一步 # <end id="exercise-clean-out-timelines"/> # 代码清单 8-9 # <start id="streaming-http-server"/> # 创建一个名为 StreamingAPIServer 的类。 class StreamingAPIServer( # 这个类是一个 HTTP 服务器, # 并且它具有为每个请求创建一个新线程的能力。 SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): # 让线程服务器内部组件在主服务器线程死亡(die)之后, # 关闭所有客户端请求线程。 daemon_threads = True # 创建一个名为 StreamingAPIRequestHandler 的类。 class StreamingAPIRequestHandler( # 这个新创建的类可以用于处理 HTTP 请求。 BaseHTTPServer.BaseHTTPRequestHandler): # 创建一个名为 do_GET() 的方法,用于处理服务器接收到的 GET 请求。 def do_GET(self): # 调用辅助函数,获取客户端标识符。 
parse_identifier(self) # 如果这个 GET 请求访问的不是 sample 流或者 firehose 流, # 那么返回“404 页面未找到”错误。 if self.path != '/statuses/sample.json': return self.send_error(404) # 如果一切顺利,那么调用辅助函数,执行实际的过滤工作。 process_filters(self) # 创建一个名为 do_POST() 的方法,用于处理服务器接收到的 POST 请求。 def do_POST(self): # 调用辅助函数,获取客户端标识符。 parse_identifier(self) # 如果这个 POST 请求访问的不是用户过滤器、关键字过滤器或者位置过滤器, # 那么返回“404 页面未找到”错误。 if self.path != '/statuses/filter.json': return self.send_error(404) # 如果一切顺利,那么调用辅助函数,执行实际的过滤工作。 process_filters(self) # <end id="streaming-http-server"/> # 代码清单 8-11 # <start id="get-identifier"/> def parse_identifier(handler): # 将标识符和查询参数设置为预留值。 handler.identifier = None handler.query = {} # 如果请求里面包含了查询参数,那么处理这些参数。 if '?' in handler.path: # 取出路径里面包含查询参数的部分,并对路径进行更新。 handler.path, _, query = handler.path.partition('?') # 通过语法分析得出查询参数。 handler.query = urlparse.parse_qs(query) # 获取名为 identifier 的查询参数列表。 identifier = handler.query.get('identifier') or [None] # 使用第一个传入的标识符。 handler.identifier = identifier[0] # <end id="get-identifier"/> # 代码清单 8-12 # <start id="stream-to-client"/> # 把需要传入参数的过滤器都放到一个列表里面。 FILTERS = ('track', 'filter', 'location') def process_filters(handler): id = handler.identifier # 如果客户端没有提供标识符,那么返回一个错误。 if not id: return handler.send_error(401, "identifier missing") # 获取客户端指定的方法, # 结果应该是 sample (随机消息)或者 filter (过滤器)这两种的其中一种。 method = handler.path.rsplit('/')[-1].split('.')[0] name = None args = None # 如果客户端指定的是过滤器方法,那么程序需要获取相应的过滤参数。 if method == 'filter': # 对 POST 请求进行语法分析,从而获知过滤器的类型以及参数。 data = cgi.FieldStorage( fp=handler.rfile, headers=handler.headers, environ={'REQUEST_METHOD':'POST', 'CONTENT_TYPE':handler.headers['Content-Type'], }) # 找到客户端在请求中指定的过滤器。 for name in data: if name in FILTERS: args = data.getfirst(name).lower().split(',') break # 如果客户端没有指定任何过滤器,那么返回一个错误。 if not args: return handler.send_error(401, "no filter provided") else: # 如果客户端指定的是随机消息请求,那么将查询参数用作 args 变量的值。 args = handler.query # 最后,向客户端返回一个回复, # 告知客户端,服务器接下来将向它发送流回复。 handler.send_response(200) handler.send_header('Transfer-Encoding', 'chunked') handler.end_headers() # 使用 Python 列表来做引用传递(pass-by-reference)变量的占位符, # 用户可以通过这个变量来让内容过滤器停止接收消息。 quit = [False] # 对过滤结果进行迭代。 for item in filter_content(id, method, name, args, quit): try: # 使用分块传输编码向客户端发送经过预编码后(pre-encoded)的回复。 handler.wfile.write('%X\r\n%s\r\n'%(len(item), item)) # 如果发送操作引发了错误,那么让订阅者停止订阅并关闭自身。 except socket.error: quit[0] = True if not quit[0]: # 如果服务器与客户端的连接并未断开, # 那么向客户端发送表示“分块到此结束”的消息。 handler.wfile.write('0\r\n\r\n') # <end id="stream-to-client"/> _create_status = create_status # 代码清单 8-13 # <start id="create-message-streaming"/> def create_status(conn, uid, message, **data): pipeline = conn.pipeline(True) pipeline.hget('user:%s'%uid, 'login') pipeline.incr('status:id:') login, id = pipeline.execute() if not login: return None data.update({ 'message': message, 'posted': time.time(), 'id': id, 'uid': uid, 'login': login, }) pipeline.hmset('status:%s'%id, data) pipeline.hincrby('user:%s'%uid, 'posts') # 新添加的这一行代码用于向流过滤器发送消息。 pipeline.publish('streaming:status:', json.dumps(data)) pipeline.execute() return id # <end id="create-message-streaming"/> _delete_status = delete_status # 代码清单 8-14 # <start id="delete-message-streaming"/> def delete_status(conn, uid, status_id): key = 'status:%s'%status_id lock = acquire_lock_with_timeout(conn, key, 1) if not lock: return None if conn.hget(key, 'uid') != str(uid): release_lock(conn, key, lock) return None pipeline = conn.pipeline(True) # 获取状态消息, # 以便流过滤器可以通过执行相同的过滤器来判断是否需要将被删除的消息传递给客户端。 status = conn.hgetall(key) # 将状态消息标记为“已被删除”。 
status['deleted'] = True # 将已被删除的状态消息发送到流里面。 pipeline.publish('streaming:status:', json.dumps(status)) pipeline.delete(key) pipeline.zrem('profile:%s'%uid, status_id) pipeline.zrem('home:%s'%uid, status_id) pipeline.hincrby('user:%s'%uid, 'posts', -1) pipeline.execute() release_lock(conn, key, lock) return True # <end id="delete-message-streaming"/> # 代码清单 8-15 # <start id="message-subscription"/> # 使用第 5 章介绍的自动连接装饰器。 @redis_connection('social-network') def filter_content(conn, id, method, name, args, quit): # 创建一个过滤器,让它来判断是否应该将消息发送给客户端。 match = create_filters(id, method, name, args) # 执行订阅前的准备工作。 pubsub = conn.pubsub() pubsub.subscribe(['streaming:status:']) # 通过订阅来获取消息。 for item in pubsub.listen(): # 从订阅结构中取出状态消息。 message = item['data'] decoded = json.loads(message) # 检查状态消息是否与过滤器相匹配。 if match(decoded): # 在发送被删除的消息之前, # 先给消息添加一个特殊的“已被删除”占位符。 if decoded.get('deleted'): yield json.dumps({ 'id': decoded['id'], 'deleted': True}) else: # 对于未被删除的消息,程序直接发送消息本身。 yield message # 如果服务器与客户端之间的连接已经断开,那么停止过滤消息。 if quit[0]: break # 重置 Redis 连接, # 清空因为连接速度不够快而滞留在 Redis 服务器输出缓冲区里面的数据。 pubsub.reset() # <end id="message-subscription"/> # 代码清单 8-16 # <start id="create-filters"/> def create_filters(id, method, name, args): # sample 方法不需要用到 name 参数, # 只需要给定 id 参数和 args 参数即可。 if method == 'sample': return SampleFilter(id, args) elif name == 'track': # filter 方法需要创建并返回用户指定的过滤器。 return TrackFilter(args) # elif name == 'follow': # return FollowFilter(args) # elif name == 'location': # return LocationFilter(args) # # 如果没有任何过滤器被选中,那么引发一个异常。 raise Exception("Unknown filter") # <end id="create-filters"/> # 代码清单 8-17 # <start id="sample-filter"/> # 定义一个 SampleFilter 函数,它接受 id 和 args 两个参数。 def SampleFilter(id, args): # args 参数是一个字典,它来源于 GET 请求传递的参数。 percent = int(args.get('percent', ['10'])[0], 10) # 使用 id 参数来随机地选择其中一部分消息 ID , # 被选中 ID 的数量由传入的 percent 参数决定。 ids = range(100) shuffler = random.Random(id) shuffler.shuffle(ids) # 使用 Python 集合来快速地判断给定的状态消息是否符合过滤器的标准。 keep = set(ids[:max(percent, 1)]) # 创建并返回一个闭包函数, # 这个函数就是被创建出来的随机取样消息过滤器。 def check(status): # 为了对状态消息进行过滤, # 程序会获取给定状态消息的 ID , # 并将 ID 的值取模 100 , # 然后通过检查取模结果是否存在于 keep 集合来判断给定的状态消息是否符合过滤器的标准。 return (status['id'] % 100) in keep return check # <end id="sample-filter"/> # 代码清单 8-18 # <start id="track-filter"/> def TrackFilter(list_of_strings): # 函数接受一个由词组构成的列表为参数, # 如果一条状态消息包含某个词组里面的所有单词, # 那么这条消息就与过滤器相匹配。 groups = [] for group in list_of_strings: group = set(group.lower().split()) if group: # 每个词组至少需要包含一个单词。 groups.append(group) def check(status): # 以空格为分隔符,从消息里面分割出多个单词。 message_words = set(status['message'].lower().split()) # 遍历所有词组。 for group in groups: # 如果某个词组的所有单词都在消息里面出现了, # 那么过滤器将接受(accept)这条消息。 if len(group & message_words) == len(group): return True return False return check # <end id="track-filter"/> # 代码清单 8-19 # <start id="follow-filter"/> def FollowFilter(names): # 过滤器会根据给定的用户名,对消息内容以及消息的发送者进行匹配。 nset = set() # 以“@用户名”的形式储存所有给定用户的名字。 for name in names: nset.add('@' + name.lower().lstrip('@')) def check(status): # 根据消息内容以及消息发布者的名字,构建一个由空格分割的词组。 message_words = set(status['message'].lower().split()) message_words.add('@' + status['login'].lower()) # 如果给定的用户名与词组中的某个词语相同, # 那么这条消息与过滤器相匹配。 return message_words & nset return check # <end id="follow-filter"/> # 代码清单 8-20 # <start id="location-filter"/> def LocationFilter(list_of_boxes): # 创建一个区域集合,这个集合定义了过滤器接受的消息来自于哪些区域。 boxes = [] for start in xrange(0, len(list_of_boxes)-3, 4): boxes.append(map(float, list_of_boxes[start:start+4])) def check(self, status): # 尝试从状态消息里面取出位置数据。 location = 
status.get('location') # 如果消息未包含任何位置数据, # 那么这条消息不在任何区域的范围之内。 if not location: return False # 如果消息包含位置数据,那么取出纬度和经度。 lat, lon = map(float, location.split(',')) # 遍历所有区域,尝试进行匹配。 for box in self.boxes: # 如果状态消息的位置在给定区域的经纬度范围之内, # 那么这条状态消息与过滤器相匹配。 if (box[1] <= lat <= box[3] and box[0] <= lon <= box[2]): return True return False return check # <end id="location-filter"/> _filter_content = filter_content def filter_content(identifier, method, name, args, quit): print "got:", identifier, method, name, args for i in xrange(10): yield json.dumps({'id':i}) if quit[0]: break time.sleep(.1) ''' # <start id="start-http-server"/> if __name__ == '__main__': # 如果这个模块是以命令行方式运行的,那么执行下方的代码块 server = StreamingAPIServer( # 创建一个流API服务器实例,并让它监视本地主机的 8080 端口, ('localhost', 8080), StreamingAPIRequestHandler)# 然后使用 StreamingAPIRequestHandler 去处理请求 print 'Starting server, use <Ctrl-C> to stop' # 打印信息行 server.serve_forever() # 一直运行,直到这个进程被杀死为止 # <end id="start-http-server"/> ''' class TestCh08(unittest.TestCase): def setUp(self): self.conn = redis.Redis(db=15) self.conn.flushdb() def tearDown(self): self.conn.flushdb() def test_create_user_and_status(self): self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1) self.assertEquals(create_user(self.conn, 'TestUser', 'Test User2'), None) self.assertEquals(create_status(self.conn, 1, "This is a new status message"), 1) self.assertEquals(self.conn.hget('user:1', 'posts'), '1') def test_follow_unfollow_user(self): self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1) self.assertEquals(create_user(self.conn, 'TestUser2', 'Test User2'), 2) self.assertTrue(follow_user(self.conn, 1, 2)) self.assertEquals(self.conn.zcard('followers:2'), 1) self.assertEquals(self.conn.zcard('followers:1'), 0) self.assertEquals(self.conn.zcard('following:1'), 1) self.assertEquals(self.conn.zcard('following:2'), 0) self.assertEquals(self.conn.hget('user:1', 'following'), '1') self.assertEquals(self.conn.hget('user:2', 'following'), '0') self.assertEquals(self.conn.hget('user:1', 'followers'), '0') self.assertEquals(self.conn.hget('user:2', 'followers'), '1') self.assertEquals(unfollow_user(self.conn, 2, 1), None) self.assertEquals(unfollow_user(self.conn, 1, 2), True) self.assertEquals(self.conn.zcard('followers:2'), 0) self.assertEquals(self.conn.zcard('followers:1'), 0) self.assertEquals(self.conn.zcard('following:1'), 0) self.assertEquals(self.conn.zcard('following:2'), 0) self.assertEquals(self.conn.hget('user:1', 'following'), '0') self.assertEquals(self.conn.hget('user:2', 'following'), '0') self.assertEquals(self.conn.hget('user:1', 'followers'), '0') self.assertEquals(self.conn.hget('user:2', 'followers'), '0') def test_syndicate_status(self): self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1) self.assertEquals(create_user(self.conn, 'TestUser2', 'Test User2'), 2) self.assertTrue(follow_user(self.conn, 1, 2)) self.assertEquals(self.conn.zcard('followers:2'), 1) self.assertEquals(self.conn.hget('user:1', 'following'), '1') self.assertEquals(post_status(self.conn, 2, 'this is some message content'), 1) self.assertEquals(len(get_status_messages(self.conn, 1)), 1) for i in xrange(3, 11): self.assertEquals(create_user(self.conn, 'TestUser%s'%i, 'Test User%s'%i), i) follow_user(self.conn, i, 2) global POSTS_PER_PASS POSTS_PER_PASS = 5 self.assertEquals(post_status(self.conn, 2, 'this is some other message content'), 2) time.sleep(.1) self.assertEquals(len(get_status_messages(self.conn, 9)), 2) self.assertTrue(unfollow_user(self.conn, 1, 2)) 
self.assertEquals(len(get_status_messages(self.conn, 1)), 0) def test_refill_timeline(self): self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1) self.assertEquals(create_user(self.conn, 'TestUser2', 'Test User2'), 2) self.assertEquals(create_user(self.conn, 'TestUser3', 'Test User3'), 3) self.assertTrue(follow_user(self.conn, 1, 2)) self.assertTrue(follow_user(self.conn, 1, 3)) global HOME_TIMELINE_SIZE HOME_TIMELINE_SIZE = 5 for i in xrange(10): self.assertTrue(post_status(self.conn, 2, 'message')) self.assertTrue(post_status(self.conn, 3, 'message')) time.sleep(.05) self.assertEquals(len(get_status_messages(self.conn, 1)), 5) self.assertTrue(unfollow_user(self.conn, 1, 2)) self.assertTrue(len(get_status_messages(self.conn, 1)) < 5) refill_timeline(self.conn, 'following:1', 'home:1') messages = get_status_messages(self.conn, 1) self.assertEquals(len(messages), 5) for msg in messages: self.assertEquals(msg['uid'], '3') delete_status(self.conn, '3', messages[-1]['id']) self.assertEquals(len(get_status_messages(self.conn, 1)), 4) self.assertEquals(self.conn.zcard('home:1'), 5) clean_timelines(self.conn, '3', messages[-1]['id']) self.assertEquals(self.conn.zcard('home:1'), 4) if __name__ == '__main__': unittest.main()
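As a quick orientation, the sketch below strings the main functions together the same way TestCh08 does; it assumes a local Redis server and uses database 15 as scratch space, exactly like the tests.

# End-to-end sketch mirroring TestCh08; requires a local Redis server.
conn = redis.Redis(db=15)
conn.flushdb()

poster = create_user(conn, 'Poster', 'Posting User')   # -> 1
reader = create_user(conn, 'Reader', 'Reading User')   # -> 2

follow_user(conn, reader, poster)                      # reader now follows poster
post_status(conn, poster, 'hello timeline')            # syndicated to reader's home timeline

print(get_status_messages(conn, reader))               # the newly posted status hash

conn.flushdb()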
train.py
""" Learning Deep Generative Models of Graphs Paper: https://arxiv.org/pdf/1803.03324.pdf """ import datetime import time import torch import torch.distributed as dist from dgl import model_zoo from torch.optim import Adam from torch.utils.data import DataLoader from utils import MoleculeDataset, Printer, set_random_seed, synchronize, launch_a_process def evaluate(epoch, model, data_loader, printer): model.eval() batch_size = data_loader.batch_size total_log_prob = 0 with torch.no_grad(): for i, data in enumerate(data_loader): log_prob = model(actions=data, compute_log_prob=True).detach() total_log_prob -= log_prob if printer is not None: prob = log_prob.detach().exp() printer.update(epoch + 1, - log_prob / batch_size, prob / batch_size) return total_log_prob / len(data_loader) def main(rank, args): """ Parameters ---------- rank : int Subprocess id args : dict Configuration """ if rank == 0: t1 = time.time() set_random_seed(args['seed']) # Remove the line below will result in problems for multiprocess torch.set_num_threads(1) # Setup dataset and data loader dataset = MoleculeDataset(args['dataset'], args['order'], ['train', 'val'], subset_id=rank, n_subsets=args['num_processes']) # Note that currently the batch size for the loaders should only be 1. train_loader = DataLoader(dataset.train_set, batch_size=args['batch_size'], shuffle=True, collate_fn=dataset.collate) val_loader = DataLoader(dataset.val_set, batch_size=args['batch_size'], shuffle=True, collate_fn=dataset.collate) if rank == 0: try: from tensorboardX import SummaryWriter writer = SummaryWriter(args['log_dir']) except ImportError: print('If you want to use tensorboard, install tensorboardX with pip.') writer = None train_printer = Printer(args['nepochs'], len(dataset.train_set), args['batch_size'], writer) val_printer = Printer(args['nepochs'], len(dataset.val_set), args['batch_size']) else: val_printer = None # Initialize model model = model_zoo.chem.DGMG(atom_types=dataset.atom_types, bond_types=dataset.bond_types, node_hidden_size=args['node_hidden_size'], num_prop_rounds=args['num_propagation_rounds'], dropout=args['dropout']) if args['num_processes'] == 1: from utils import Optimizer optimizer = Optimizer(args['lr'], Adam(model.parameters(), lr=args['lr'])) else: from utils import MultiProcessOptimizer optimizer = MultiProcessOptimizer(args['num_processes'], args['lr'], Adam(model.parameters(), lr=args['lr'])) if rank == 0: t2 = time.time() best_val_prob = 0 # Training for epoch in range(args['nepochs']): model.train() if rank == 0: print('Training') for i, data in enumerate(train_loader): log_prob = model(actions=data, compute_log_prob=True) prob = log_prob.detach().exp() loss_averaged = - log_prob prob_averaged = prob optimizer.backward_and_step(loss_averaged) if rank == 0: train_printer.update(epoch + 1, loss_averaged.item(), prob_averaged.item()) synchronize(args['num_processes']) # Validation val_log_prob = evaluate(epoch, model, val_loader, val_printer) if args['num_processes'] > 1: dist.all_reduce(val_log_prob, op=dist.ReduceOp.SUM) val_log_prob /= args['num_processes'] # Strictly speaking, the computation of probability here is different from what is # performed on the training set as we first take an average of log likelihood and then # take the exponentiation. By Jensen's inequality, the resulting value is then a # lower bound of the real probabilities. 
val_prob = (- val_log_prob).exp().item() val_log_prob = val_log_prob.item() if val_prob >= best_val_prob: if rank == 0: torch.save({'model_state_dict': model.state_dict()}, args['checkpoint_dir']) print('Old val prob {:.10f} | new val prob {:.10f} | model saved'.format(best_val_prob, val_prob)) best_val_prob = val_prob elif epoch >= args['warmup_epochs']: optimizer.decay_lr() if rank == 0: print('Validation') if writer is not None: writer.add_scalar('validation_log_prob', val_log_prob, epoch) writer.add_scalar('validation_prob', val_prob, epoch) writer.add_scalar('lr', optimizer.lr, epoch) print('Validation log prob {:.4f} | prob {:.10f}'.format(val_log_prob, val_prob)) synchronize(args['num_processes']) if rank == 0: t3 = time.time() print('It took {} to setup.'.format(datetime.timedelta(seconds=t2 - t1))) print('It took {} to finish training.'.format(datetime.timedelta(seconds=t3 - t2))) print('--------------------------------------------------------------------------') print('On average, an epoch takes {}.'.format(datetime.timedelta( seconds=(t3 - t2) / args['nepochs']))) if __name__ == '__main__': import argparse from utils import setup parser = argparse.ArgumentParser(description='Training DGMG for molecule generation', formatter_class=argparse.ArgumentDefaultsHelpFormatter) # configure parser.add_argument('-s', '--seed', type=int, default=0, help='random seed') parser.add_argument('-w', '--warmup-epochs', type=int, default=10, help='Number of epochs where no lr decay is performed.') # dataset and setting parser.add_argument('-d', '--dataset', help='dataset to use') parser.add_argument('-o', '--order', choices=['random', 'canonical'], help='order to generate graphs') parser.add_argument('-tf', '--train-file', type=str, default=None, help='Path to a file with one SMILES a line for training data. ' 'This is only necessary if you want to use a new dataset.') parser.add_argument('-vf', '--val-file', type=str, default=None, help='Path to a file with one SMILES a line for validation data. ' 'This is only necessary if you want to use a new dataset.') # log parser.add_argument('-l', '--log-dir', default='./training_results', help='folder to save info like experiment configuration') # multi-process parser.add_argument('-np', '--num-processes', type=int, default=32, help='number of processes to use') parser.add_argument('-mi', '--master-ip', type=str, default='127.0.0.1') parser.add_argument('-mp', '--master-port', type=str, default='12345') args = parser.parse_args() args = setup(args, train=True) if args['num_processes'] == 1: main(0, args) else: mp = torch.multiprocessing.get_context('spawn') procs = [] for rank in range(args['num_processes']): procs.append(mp.Process(target=launch_a_process, args=(rank, args, main), daemon=True)) procs[-1].start() for p in procs: p.join()
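The training loop above depends on Optimizer and MultiProcessOptimizer from utils.py, which is not part of this listing. The sketch below is an assumption about what the multi-process variant plausibly does, inferred only from how it is called here (backward_and_step(), decay_lr(), .lr): gradients are summed across workers with all_reduce and averaged before stepping. The real utils.py may differ, for example in the decay factor.

# Hypothetical sketch of a multi-process optimizer wrapper; the structure and
# decay factor are assumptions, not the actual utils.MultiProcessOptimizer.
import torch.distributed as dist


class MultiProcessOptimizerSketch(object):
    def __init__(self, n_processes, lr, optimizer, decay_factor=0.99):
        self.n_processes = n_processes
        self.lr = lr
        self.optimizer = optimizer
        self.decay_factor = decay_factor

    def backward_and_step(self, loss):
        self.optimizer.zero_grad()
        loss.backward()
        # Average gradients over all workers so every process applies the same update.
        for group in self.optimizer.param_groups:
            for param in group['params']:
                if param.grad is not None:
                    dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
                    param.grad.data /= self.n_processes
        self.optimizer.step()

    def decay_lr(self):
        self.lr *= self.decay_factor
        for group in self.optimizer.param_groups:
            group['lr'] = self.lr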
measurementcontroller.py
import datetime import glob import logging import shutil import threading import time from enum import Enum import os from flask import json from flask_restful import fields from flask_restful import marshal from core.interface import EnumField, DATETIME_FORMAT MEASUREMENT_TIMES_CLASH = "Measurement times clash" targetStateFields = { 'fs': fields.Integer, 'accelerometerSens': fields.Integer, 'gyroSens': fields.Integer, 'accelerometerEnabled': fields.Boolean, 'gyroEnabled': fields.Boolean } measurementFields = { 'id': fields.String, 'name': fields.String, 'startTime': fields.String(attribute=lambda x: x.startTime.strftime(DATETIME_FORMAT)), 'duration': fields.Float, 'description': fields.String, 'measurementParameters': fields.Raw, 'status': EnumField, 'recordingDevices': fields.Raw, 'analysis': fields.Raw } logger = logging.getLogger('analyser.measurementcontroller') DEFAULT_ANALYSIS_SERIES = { 'spectrum': ['x', 'y', 'z', 'sum'], 'peakSpectrum': ['x', 'y', 'z', 'sum'], 'psd': ['x', 'y', 'z'] } class MeasurementStatus(Enum): """ Models the states an ActiveMeasurement can be in. """ NEW = 1, SCHEDULED = 2, RECORDING = 3, DYING = 4, FAILED = 5, COMPLETE = 6, class RecordStatus(Enum): """ The state of a device's measurement from the perspective of the measurement. """ SCHEDULED = 1 RECORDING = 2, COMPLETE = 3, FAILED = 4 def getMeasurementId(measurementStartTime, measurementName): """ the unique id for this measurement. :param measurementStartTime: the measurement startTime :param measurementName: the measurement name :return: the id. """ return measurementStartTime.strftime('%Y%m%d_%H%M%S') + '_' + measurementName class ActiveMeasurement(object): """ Models a measurement that is scheduled or is currently in progress. """ def __init__(self, name, startTime, duration, deviceState, description=None): self.name = name self.startTime = startTime self.duration = duration self.endTime = startTime + datetime.timedelta(days=0, seconds=duration) self.measurementParameters = marshal(deviceState, targetStateFields) self.description = description self.recordingDevices = {} self.status = MeasurementStatus.NEW self.id = getMeasurementId(self.startTime, self.name) self.idAsPath = self.id.replace('_', '/') # hardcoded here rather than in the UI self.analysis = DEFAULT_ANALYSIS_SERIES def overlapsWith(self, targetStartTime, duration): """ Tests if the given times overlap with this measurement. :param targetStartTime: the target start time. :param duration: the duration :return: true if the given times overlap with this measurement. """ targetEndTime = targetStartTime + datetime.timedelta(days=0, seconds=duration) return (self.startTime <= targetStartTime <= self.endTime) \ or (targetStartTime <= self.startTime <= targetEndTime) def updateDeviceStatus(self, deviceName, state, reason=None): """ Updates the current device status. :param deviceName: the device name. :param state: the state. :param reason: the reason for the change. 
:return: """ logger.info('Updating recording device state for ' + deviceName + ' to ' + state.name + ('' if reason is None else '[reason: ' + reason + ']')) currentState = self.recordingDevices.get(deviceName) count = 0 if currentState is not None: if currentState['state'] == MeasurementStatus.RECORDING.name: count = currentState['count'] self.recordingDevices[deviceName] = { 'state': state.name, 'reason': reason, 'time': datetime.datetime.utcnow().strftime(DATETIME_FORMAT), 'count': count } def stillRecording(self, deviceId, dataCount): """ For a device that is recording, updates the last timestamp so we now when we last received data. :param deviceId: the device id. :param dataCount: the no of items of data recorded in this batch. :return: """ status = self.recordingDevices[deviceId] if status is not None: if status['state'] == MeasurementStatus.RECORDING.name: status['last'] = datetime.datetime.utcnow().strftime(DATETIME_FORMAT) status['count'] = status['count'] + dataCount def __str__(self): """ :return: a human readable format """ return "ActiveMeasurement[" + self.id + "-" + self.status.name + " for " + self.duration + "s]" class CompleteMeasurement(object): """ A complete measurement is one which has successfully completed on the devices and for which we have a full dataset. The system only keeps, and can analyse, complete measurements. """ def __init__(self, meta, dataDir): self.name = meta['name'] self.startTime = datetime.datetime.strptime(meta['startTime'], DATETIME_FORMAT) self.duration = meta['duration'] self.endTime = self.startTime + datetime.timedelta(days=0, seconds=self.duration) self.measurementParameters = meta['measurementParameters'] self.description = meta['description'] self.recordingDevices = meta['recordingDevices'] self.status = MeasurementStatus[meta['status']] self.id = getMeasurementId(self.startTime, self.name) # self.analysis = meta.get('analysis', DEFAULT_ANALYSIS_SERIES) self.analysis = DEFAULT_ANALYSIS_SERIES self.idAsPath = self.id.replace('_', '/') self.dataDir = dataDir self.data = {} def updateName(self, newName): self.name = newName self.id = getMeasurementId(self.startTime, self.name) self.idAsPath = self.id.replace('_', '/') def inflate(self): """ loads the recording into memory and returns it as a Signal :return: """ if self.measurementParameters['accelerometerEnabled']: if len(self.data) == 0: logger.info('Loading measurement data for ' + self.name) self.data = {name: self._loadXYZ(name) for name, value in self.recordingDevices.items()} return True else: # TODO error handling return False def _loadXYZ(self, name): dataPath = os.path.join(self.dataDir, self.idAsPath, name, 'data.out') if os.path.exists(dataPath): from analyser.common.signal import loadTriAxisSignalFromFile return loadTriAxisSignalFromFile(dataPath) else: raise ValueError("Data does not exist") def __str__(self): """ :return: a human readable format """ return "CompleteMeasurement[" + self.id + " for " + self.duration + "s]" class MeasurementController(object): """ Contains all the logic around measurement scheduling and is responsible for ensuring we have valid measurements only. 
""" def __init__(self, targetStateProvider, dataDir, deviceController, maxTimeTilDeathbedSeconds=30, maxTimeOnDeathbedSeconds=120): self.targetStateProvider = targetStateProvider self.deviceController = deviceController self.dataDir = dataDir self.activeMeasurements = [] self.completeMeasurements = [] self.failedMeasurements = [] self.deathBed = {} self.reloadCompletedMeasurements() self.maxTimeTilDeathbedSeconds = maxTimeTilDeathbedSeconds self.maxTimeOnDeathbedSeconds = maxTimeOnDeathbedSeconds self.running = True self.worker = threading.Thread(name='MeasurementCaretaker', target=self._sweep, daemon=True) self.worker.start() def shutdown(self): logger.warning("Shutting down the MeasurementCaretaker") self.running = False def _sweep(self): """ Checks the state of each measurement and verifies their state, if an active measurement is now complete then passes them to the completed measurement set, if failed then to the failed set, if failed and old then evicts. :return: """ while self.running: for am in list(self.activeMeasurements): now = datetime.datetime.utcnow() # devices were allocated and have completed == complete recordingDeviceCount = len(am.recordingDevices) if recordingDeviceCount > 0: if all(entry['state'] == RecordStatus.COMPLETE.name for entry in am.recordingDevices.values()): logger.info("Detected completedmeasurement " + am.id) self._moveToComplete(am) # we have reached the end time and we have either all failed devices or no devices == kill if now > (am.endTime + datetime.timedelta(days=0, seconds=1)): allFailed = all(entry['state'] == RecordStatus.FAILED.name for entry in am.recordingDevices.values()) if (recordingDeviceCount > 0 and allFailed) or recordingDeviceCount == 0: logger.warning("Detected failed measurement " + am.id + " with " + str(recordingDeviceCount) + " devices, allFailed: " + str(allFailed)) self._moveToFailed(am) # we are well past the end time and we have failed devices or an ongoing recording == kill or deathbed if now > (am.endTime + datetime.timedelta(days=0, seconds=self.maxTimeTilDeathbedSeconds)): if any(entry['state'] == RecordStatus.FAILED.name for entry in am.recordingDevices.values()): logger.warning("Detected failed and incomplete measurement " + am.id + ", assumed dead") self._moveToFailed(am) elif all(entry['state'] == RecordStatus.RECORDING.name for entry in am.recordingDevices.values()): self._handleDeathbed(am) time.sleep(0.1) logger.warning("MeasurementCaretaker is now shutdown") def _handleDeathbed(self, am): # check if in the deathbed, if not add it now = datetime.datetime.utcnow() if am in self.deathBed.keys(): # if it is, check if it's been there for too long if now > (self.deathBed[am] + datetime.timedelta(days=0, seconds=self.maxTimeOnDeathbedSeconds)): logger.warning(am.id + " has been on the deathbed since " + self.deathBed[am].strftime(DATETIME_FORMAT) + ", max time allowed is " + str(self.maxTimeOnDeathbedSeconds) + ", evicting") # ensure all recording devices that have not completed are marked as failed for deviceName, status in am.recordingDevices.items(): if status['state'] == RecordStatus.RECORDING.name or status['state'] == RecordStatus.SCHEDULED.name: logger.warning("Marking " + deviceName + " as failed due to deathbed eviction") if not self.failMeasurement(am.id, deviceName, failureReason='Evicting from deathbed'): logger.warning("Failed to mark " + deviceName + " as failed") self._moveToFailed(am) del self.deathBed[am] else: logger.warning(am.id + " was expected to finish at " + am.endTime.strftime(DATETIME_FORMAT) + ", 
adding to deathbed") am.status = MeasurementStatus.DYING self.deathBed.update({am: now}) def _moveToComplete(self, am): am.status = MeasurementStatus.COMPLETE self.activeMeasurements.remove(am) self.completeMeasurements.append(CompleteMeasurement(self.store(am), self.dataDir)) def _moveToFailed(self, am): am.status = MeasurementStatus.FAILED self.activeMeasurements.remove(am) self.failedMeasurements.append(am) self.store(am) def schedule(self, name, duration, startTime, description=None): """ Schedules a new measurement with the given name. :param name: :param duration: :param startTime: :param description: :return: a tuple boolean: measurement was scheduled if true message: description, generally only used as an error code """ if self._clashes(startTime, duration): return False, MEASUREMENT_TIMES_CLASH else: am = ActiveMeasurement(name, startTime, duration, self.targetStateProvider.state, description=description) logger.info("Scheduling measurement " + am.id + " for " + str(duration) + "s") self.activeMeasurements.append(am) devices = self.deviceController.scheduleMeasurement(am.id, am.duration, am.startTime) anyFail = False for device, status in devices.items(): if status == 200: deviceStatus = RecordStatus.SCHEDULED else: deviceStatus = RecordStatus.FAILED anyFail = True am.updateDeviceStatus(device.deviceId, deviceStatus) if anyFail: am.status = MeasurementStatus.FAILED else: if am.status is MeasurementStatus.NEW: am.status = MeasurementStatus.SCHEDULED return True, None def _clashes(self, startTime, duration): """ verifies that this measurement does not clash with an already scheduled measurement. :param startTime: the start time. :param duration: the duration. :return: true if the measurement is allowed. """ return [m for m in self.activeMeasurements if m.overlapsWith(startTime, duration)] def startMeasurement(self, measurementId, deviceId): """ Starts the measurement for the device. :param deviceId: the device that is starting. :param measurementId: the measurement that is started. :return: true if it started (i.e. device and measurement exists). """ am, handler = self.getDataHandler(measurementId, deviceId) if am is not None: am.status = MeasurementStatus.RECORDING am.updateDeviceStatus(deviceId, RecordStatus.RECORDING) handler.start(am.idAsPath) return True else: return False def getDataHandler(self, measurementId, deviceId): """ finds the handler. :param measurementId: the measurement :param deviceId: the device. :return: active measurement and handler """ am = next((m for m in self.activeMeasurements if m.id == measurementId), None) if am is None: return None, None else: device = self.deviceController.getDevice(deviceId) if device is None: return None, None else: return am, device.dataHandler def recordData(self, measurementId, deviceId, data): """ Passes the data to the handler. :param deviceId: the device the data comes from. :param measurementId: the measurement id. :param data: the data. :return: true if the data was handled. """ am, handler = self.getDataHandler(measurementId, deviceId) if handler is not None: am.stillRecording(deviceId, len(data)) handler.handle(data) return True else: logger.error('Received data for unknown handler ' + deviceId + '/' + measurementId) return False def completeMeasurement(self, measurementId, deviceId): """ Completes the measurement session. :param deviceId: the device id. :param measurementId: the measurement id. :return: true if it was completed. 
""" am, handler = self.getDataHandler(measurementId, deviceId) if handler is not None: handler.stop(measurementId) am.updateDeviceStatus(deviceId, RecordStatus.COMPLETE) return True else: return False def failMeasurement(self, measurementId, deviceName, failureReason=None): """ Fails the measurement session. :param deviceName: the device name. :param measurementId: the measurement name. :param failureReason: why it failed. :return: true if it was completed. """ am, handler = self.getDataHandler(measurementId, deviceName) if handler is not None: am.updateDeviceStatus(deviceName, RecordStatus.FAILED, reason=failureReason) handler.stop(measurementId) return True else: return False def delete(self, measurementId): """ Deletes the named measurement if it exists. If this is an active measurement then the measurement is cancelled. :param measurementId: the measurement name. :return: """ # TODO cancel active measurement return self._deleteCompletedMeasurement(measurementId) def _deleteCompletedMeasurement(self, measurementId): """ Deletes the named measurement from the completed measurement store if it exists. :param measurementId: :return: String: error messages Integer: count of measurements deleted """ message, count, deleted = self.deleteFrom(measurementId, self.completeMeasurements) if count is 0: message, count, deleted = self.deleteFrom(measurementId, self.failedMeasurements) return message, count, deleted def deleteFrom(self, measurementId, data): toDeleteIdx = [(ind, x) for ind, x in enumerate(data) if x.id == measurementId] if toDeleteIdx: errors = [] def logError(func, path, exc_info): logger.exception( "Error detected during deletion of measurement " + measurementId + " by " + str(func), exc_info=exc_info) errors.append(path) logger.info("Deleting measurement: " + measurementId) shutil.rmtree(self._getPathToMeasurementMetaDir(toDeleteIdx[0][1].idAsPath), ignore_errors=False, onerror=logError) if len(errors) is 0: popped = data.pop(toDeleteIdx[0][0]) return None, 1 if popped else 0, popped else: return errors, 0, None else: return measurementId + " does not exist", 0, None def reloadCompletedMeasurements(self): """ Reloads the completed measurements from the backing store. """ from pathlib import Path reloaded = [self.load(x.resolve()) for x in Path(self.dataDir).glob('*/*/*') if x.is_dir()] logger.info('Reloaded ' + str(len(reloaded)) + ' completed measurements') self.completeMeasurements = [x for x in reloaded if x is not None and x.status == MeasurementStatus.COMPLETE] self.failedMeasurements = [x for x in reloaded if x is not None and x.status == MeasurementStatus.FAILED] def getMeasurements(self, measurementStatus=None): """ Gets all available measurements. :param measurementStatus return only the measurements in the given state. :return: """ if measurementStatus is None: return self.activeMeasurements + self.completeMeasurements + self.failedMeasurements elif measurementStatus == MeasurementStatus.COMPLETE: return self.completeMeasurements elif measurementStatus == MeasurementStatus.FAILED: return self.failedMeasurements elif measurementStatus == MeasurementStatus.DYING: return list(self.deathBed.keys()) else: return [x for x in self.activeMeasurements if x.status == measurementStatus] def getMeasurement(self, measurementId, measurementStatus=None): """ Gets the measurement with the given id. :param measurementId: the id. :param measurementStatus: the status of the requested measurement. :return: the matching measurement or none if it doesn't exist. 
""" return next((x for x in self.getMeasurements(measurementStatus) if x.id == measurementId), None) def store(self, measurement): """ Writes the measurement metadata to disk on completion. :param activeMeasurement: the measurement that has completed. :returns the persisted metadata. """ os.makedirs(self._getPathToMeasurementMetaDir(measurement.idAsPath), exist_ok=True) output = marshal(measurement, measurementFields) with open(self._getPathToMeasurementMetaFile(measurement.idAsPath), 'w') as outfile: json.dump(output, outfile) return output def load(self, path): """ Loads a CompletedMeasurement from the path.á :param path: the path at which the data is found. :return: the measurement """ meta = self._loadMetaFromJson(path) return CompleteMeasurement(meta, self.dataDir) if meta is not None else None def _loadMetaFromJson(self, path): """ Reads the json meta into memory. :return: the meta. """ try: with (path / 'metadata.json').open() as infile: return json.load(infile) except FileNotFoundError: logger.error('Metadata does not exist at ' + str(path)) return None def _getPathToMeasurementMetaDir(self, measurementId): return os.path.join(self.dataDir, measurementId) def _getPathToMeasurementMetaFile(self, measurementId): return os.path.join(self.dataDir, measurementId, 'metadata.json') def editMeasurement(self, measurementId, data): """ Edits the specified measurement with the provided data. :param measurementId: the measurement id. :param data: the data to update. :return: true if the measurement was edited """ oldMeasurement = self.getMeasurement(measurementId, measurementStatus=MeasurementStatus.COMPLETE) if oldMeasurement: import copy newMeasurement = copy.deepcopy(oldMeasurement) deleteOld = False createdFilteredCopy = False newName = data.get('name', None) newDesc = data.get('description', None) newStart = float(data.get('start', 0)) newEnd = float(data.get('end', oldMeasurement.duration)) newDuration = newEnd - newStart newDevices = data.get('devices', None) if newName: logger.info('Updating name from ' + oldMeasurement.name + ' to ' + newName) newMeasurement.updateName(newName) createdFilteredCopy = True deleteOld = True if newDesc: logger.info('Updating description from ' + str(oldMeasurement.description) + ' to ' + str(newDesc)) newMeasurement.description = newDesc if newDuration != oldMeasurement.duration: logger.info('Copying measurement to allow support new duration ' + str(newDuration)) if oldMeasurement.name == newMeasurement.name: newMeasurement.updateName(newMeasurement.name + '-' + str(int(time.time()))) newMeasurement.duration = newDuration createdFilteredCopy = True if createdFilteredCopy: logger.info('Copying measurement data from ' + oldMeasurement.idAsPath + ' to ' + newMeasurement.idAsPath) newMeasurementPath = self._getPathToMeasurementMetaDir(newMeasurement.idAsPath) dataSearchPattern = self._getPathToMeasurementMetaDir(oldMeasurement.idAsPath) + '/**/data.out' newDataCountsByDevice = [self._filterCopy(dataFile, newStart, newEnd, newMeasurementPath) for dataFile in glob.glob(dataSearchPattern)] for device, count in newDataCountsByDevice: newMeasurement.recordingDevices.get(device)['count'] = count self.store(newMeasurement) if newDevices: for renames in newDevices: logger.info('Updating device name from ' + str(renames[0]) + ' to ' + str(renames[1])) deviceState = newMeasurement.recordingDevices.get(renames[0]) newMeasurement.recordingDevices[renames[1]] = deviceState del newMeasurement.recordingDevices[renames[0]] 
os.rename(os.path.join(self._getPathToMeasurementMetaDir(newMeasurement.idAsPath), renames[0]), os.path.join(self._getPathToMeasurementMetaDir(newMeasurement.idAsPath), renames[1])) self.store(newMeasurement) if deleteOld or createdFilteredCopy or newDevices: self.completeMeasurements.append(newMeasurement) if deleteOld: self.delete(oldMeasurement.id) return True else: return False def _filterCopy(self, dataFile, newStart, newEnd, newDataDir): """ Copies the data file to a new file in the tmp dir, filtering it according to newStart and newEnd and adjusting the times as appropriate so it starts from 0. :param dataFile: the input file. :param newStart: the new start time. :param newEnd: the new end time. :param newDataDir: the tmp dir to write to. :return: the device name & no of rows in the data. """ import csv pathToData = os.path.split(dataFile) dataFileName = pathToData[1] dataDeviceName = os.path.split(pathToData[0])[1] os.makedirs(os.path.join(newDataDir, dataDeviceName), exist_ok=True) outputFile = os.path.join(newDataDir, dataDeviceName, dataFileName) dataCount = 0 rowNum = 0 with open(dataFile, mode='rt', newline='') as dataIn, open(outputFile, mode='wt', newline='') as dataOut: writer = csv.writer(dataOut, delimiter=',') for row in csv.reader(dataIn, delimiter=','): if len(row) > 0: time = float(row[0]) if newStart <= time <= newEnd: newRow = row[:] if newStart > 0: newRow[0] = "{0:.3f}".format(time - newStart) writer.writerow(newRow) dataCount += 1 else: logger.warning('Ignoring empty row ' + str(rowNum) + ' in ' + str(dataFile)) rowNum += 1 return dataDeviceName, dataCount # TODO allow remote reset of the recorder
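
# --- Illustrative sketch (not part of the class above): the time-window
# filtering that _filterCopy applies to each data.out file, shown on in-memory
# rows so it can be followed without a measurement on disk. The sample rows,
# and the assumption that column 0 holds the sample time in seconds, are made
# up for the example.
def _demo_filter_rows(rows, new_start, new_end):
    """Keep rows whose time lies in [new_start, new_end], rebased to start at 0."""
    kept = []
    for row in rows:
        t = float(row[0])
        if new_start <= t <= new_end:
            rebased = row[:]
            if new_start > 0:
                rebased[0] = "{0:.3f}".format(t - new_start)
            kept.append(rebased)
    return kept

# _demo_filter_rows([["0.000", "1"], ["0.500", "2"], ["1.000", "3"], ["1.500", "4"]], 0.5, 1.5)
# -> [['0.000', '2'], ['0.500', '3'], ['1.000', '4']]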
multithreading.py
import threading


# since multiple arguments may be passed, every item in `arguments` is a list
def multi_threading(func, arguments, n=10):
    for i in range(0, len(arguments), n):
        thread_objects = []
        for j in range(i, min(len(arguments), n + i)):
            thread = threading.Thread(target=func, args=arguments[j])
            thread_objects.append(thread)
            thread.start()
        for thread in thread_objects:
            thread.join()
        # cap the counter at len(arguments) so the last batch reports 100.0, not more
        done = min(n + i, len(arguments))
        print(int(1000 * done / len(arguments)) / 10, "% Completed")


def test_function(a):
    print(a)


if __name__ == '__main__':
    multi_threading(test_function, [[x] for x in range(100)])
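
# Illustrative usage note: because every item in `arguments` is itself an
# argument list, functions that take several parameters work too, e.g.
#
#     def add(a, b):
#         print(a + b)
#
#     multi_threading(add, [[x, x * 2] for x in range(20)], n=5)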
cleanup.py
import tempfile import argparse import logging import datetime import threading import os import re from botocore.exceptions import ClientError from ocs_ci.framework import config from ocs_ci.ocs.constants import ( CLEANUP_YAML, TEMPLATE_CLEANUP_DIR, AWS_CLOUDFORMATION_TAG, ) from ocs_ci.ocs.exceptions import CommandFailed from ocs_ci.utility.utils import get_openshift_installer, destroy_cluster from ocs_ci.utility import templating from ocs_ci.utility.aws import ( AWS, delete_cluster_buckets, destroy_volumes, get_rhel_worker_instances, StackStatusError, terminate_rhel_workers, ) from ocs_ci.cleanup.aws import defaults FORMAT = "%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s" logging.basicConfig(format=FORMAT, level=logging.DEBUG) logger = logging.getLogger(__name__) def cleanup(cluster_name, cluster_id, upi=False, failed_deletions=None): """ Cleanup existing cluster in AWS Args: cluster_name (str): Name of the cluster cluster_id (str): Cluster id to cleanup upi (bool): True for UPI cluster, False otherwise failed_deletions (list): list of clusters we failed to delete, used for reporting purposes """ data = {"cluster_name": cluster_name, "cluster_id": cluster_id} template = templating.Templating(base_path=TEMPLATE_CLEANUP_DIR) cleanup_template = template.render_template(CLEANUP_YAML, data) cleanup_path = tempfile.mkdtemp(prefix="cleanup_") cleanup_file = os.path.join(cleanup_path, "metadata.json") with open(cleanup_file, "w") as temp: temp.write(cleanup_template) bin_dir = os.path.expanduser(config.RUN["bin_dir"]) oc_bin = os.path.join(bin_dir, "openshift-install") if upi: aws = AWS() rhel_workers = get_rhel_worker_instances(cleanup_path) logger.info(f"{cluster_name}'s RHEL workers: {rhel_workers}") if rhel_workers: terminate_rhel_workers(rhel_workers) # Destroy extra volumes destroy_volumes(cluster_name) aws.delete_apps_record_set(cluster_name) stack_names = list() # Get master, bootstrap and security group stacks for stack_type in ["ma", "bs", "sg"]: try: stack_names.append( aws.get_cloudformation_stacks( pattern=f"{cluster_name}-{stack_type}" )[0]["StackName"] ) except ClientError: continue # Get the worker stacks worker_index = 0 worker_stack_exists = True while worker_stack_exists: try: stack_names.append( aws.get_cloudformation_stacks( pattern=f"{cluster_name}-no{worker_index}" )[0]["StackName"] ) worker_index += 1 except ClientError: worker_stack_exists = False logger.info(f"Deleting stacks: {stack_names}") aws.delete_cloudformation_stacks(stack_names) # Destroy the cluster logger.info(f"cleaning up {cluster_id}") destroy_cluster(installer=oc_bin, cluster_path=cleanup_path) for stack_type in ["inf", "vpc"]: try: stack_names.append( aws.get_cloudformation_stacks( pattern=f"{cluster_name}-{stack_type}" )[0]["StackName"] ) except ClientError: continue try: aws.delete_cloudformation_stacks(stack_names) except StackStatusError: logger.error("Failed to fully destroy cluster %s", cluster_name) if failed_deletions: failed_deletions.append(cluster_name) raise else: logger.info(f"cleaning up {cluster_id}") try: destroy_cluster(installer=oc_bin, cluster_path=cleanup_path) except CommandFailed: logger.error("Failed to fully destroy cluster %s", cluster_name) if failed_deletions: failed_deletions.append(cluster_name) raise delete_cluster_buckets(cluster_name) def get_clusters(time_to_delete, region_name, prefixes_hours_to_spare): """ Get all cluster names that their EC2 instances running time is greater than the specified time to delete Args: time_to_delete (int): The 
maximum time in seconds that is allowed for clusters to continue running region_name (str): The name of the AWS region to delete the resources from prefixes_hours_to_spare (dict): Dictionaries of the cluster prefixes to spare along with the maximum time in hours that is allowed for spared clusters to continue running Returns: tuple: List of the cluster names (e.g ebenahar-cluster-gqtd4) to be provided to the ci-cleanup script, a list of VPCs that are part of cloudformation, and a list of remaining clusters """ def determine_cluster_deletion(ec2_instances, cluster_name): for instance in ec2_instances: allowed_running_time = time_to_delete do_not_delete = False if instance.state["Name"] == "running": for prefix, hours in prefixes_hours_to_spare.items(): # case insensitive 'startswith' if bool(re.match(prefix, cluster_name, re.I)): if hours == "never": do_not_delete = True else: allowed_running_time = int(hours) * 60 * 60 break if do_not_delete: logger.info( "%s marked as 'do not delete' and will not be " "destroyed", cluster_name, ) return False else: launch_time = instance.launch_time current_time = datetime.datetime.now(launch_time.tzinfo) running_time = current_time - launch_time logger.info( f"Instance {[tag['Value'] for tag in instance.tags if tag['Key'] == 'Name'][0]} " f"(id: {instance.id}) running time is {running_time} hours while the allowed" f" running time for it is {allowed_running_time/3600} hours" ) if running_time.total_seconds() > allowed_running_time: return True return False aws = AWS(region_name=region_name) clusters_to_delete = list() remaining_clusters = list() cloudformation_vpc_names = list() vpcs = aws.ec2_client.describe_vpcs()["Vpcs"] vpc_ids = [vpc["VpcId"] for vpc in vpcs] vpc_objs = [aws.ec2_resource.Vpc(vpc_id) for vpc_id in vpc_ids] for vpc_obj in vpc_objs: vpc_tags = vpc_obj.tags if vpc_tags: cloudformation_vpc_name = [ tag["Value"] for tag in vpc_tags if tag["Key"] == AWS_CLOUDFORMATION_TAG ] if cloudformation_vpc_name: cloudformation_vpc_names.append(cloudformation_vpc_name[0]) continue vpc_name = [tag["Value"] for tag in vpc_tags if tag["Key"] == "Name"][0] cluster_name = vpc_name.replace("-vpc", "") vpc_instances = vpc_obj.instances.all() if not vpc_instances: clusters_to_delete.append(cluster_name) continue # Append to clusters_to_delete if cluster should be deleted if determine_cluster_deletion(vpc_instances, cluster_name): clusters_to_delete.append(cluster_name) else: remaining_clusters.append(cluster_name) else: logger.info("No tags found for VPC") # Get all cloudformation based clusters to delete cf_clusters_to_delete = list() for vpc_name in cloudformation_vpc_names: instance_dicts = aws.get_instances_by_name_pattern( f"{vpc_name.replace('-vpc', '')}*" ) ec2_instances = [ aws.get_ec2_instance(instance_dict["id"]) for instance_dict in instance_dicts ] if not ec2_instances: continue cluster_io_tag = None for instance in ec2_instances: cluster_io_tag = [ tag["Key"] for tag in instance.tags if "kubernetes.io/cluster" in tag["Key"] ] if cluster_io_tag: break if not cluster_io_tag: logger.warning( "Unable to find valid cluster IO tag from ec2 instance tags " "for VPC %s. 
This is probably not an OCS cluster VPC!", vpc_name, ) continue cluster_name = cluster_io_tag[0].replace("kubernetes.io/cluster/", "") if determine_cluster_deletion(ec2_instances, cluster_name): cf_clusters_to_delete.append(cluster_name) else: remaining_clusters.append(cluster_name) return clusters_to_delete, cf_clusters_to_delete, remaining_clusters def cluster_cleanup(): parser = argparse.ArgumentParser(description="Cleanup AWS Resource") parser.add_argument( "--cluster", nargs=1, action="append", required=True, help="Cluster name tag" ) parser.add_argument( "--upi", action="store_true", required=False, help="For UPI cluster deletion" ) logging.basicConfig(level=logging.DEBUG) args = parser.parse_args() procs = [] for id in args.cluster: cluster_name = id[0].rsplit("-", 1)[0] logger.info(f"cleaning up {id[0]}") proc = threading.Thread(target=cleanup, args=(cluster_name, id[0], args.upi)) proc.start() procs.append(proc) for p in procs: p.join() def aws_cleanup(): parser = argparse.ArgumentParser( description="AWS overall resources cleanup according to running time" ) parser.add_argument( "--hours", type=hour_valid, action="store", required=True, help=""" Maximum running time of the cluster (in hours). Clusters older than this will be deleted. The minimum is 10 hours """, ) parser.add_argument( "--region", action="store", required=False, help="The name of the AWS region to delete the resources from", ) parser.add_argument( "--prefix", action="append", required=False, type=prefix_hour_mapping, help=""" Additional prefix:hour combo to treat as a special rule. Clusters starting with this prefix will only be cleaned up if their runtime exceeds the provided hour(this takes precedence over the value provided to --hours). Note: if you want to skip cleanup of a cluster entirely you can use 'never' for the hour. Example: --prefix foo:24 --prefix bar:48 --prefix foobar:never """, ) parser.add_argument( "--force", action="store_true", required=False, help=""" Force cluster cleanup. User will not be prompted for confirmation. WARNING: this utility is destructive, only use this option if you know what you are doing. """, ) args = parser.parse_args() if not args.force: confirmation = input( "Careful! This action could be highly destructive. " "Are you sure you want to proceed? " ) assert ( confirmation == defaults.CONFIRMATION_ANSWER ), "Wrong confirmation answer. 
Exiting" prefixes_hours_to_spare = defaults.CLUSTER_PREFIXES_SPECIAL_RULES if args.prefix: for prefix, hours in args.prefix: logger.info( "Adding special rule for prefix '%s' with hours %s", prefix, hours ) prefixes_hours_to_spare.update({prefix: hours}) time_to_delete = args.hours * 60 * 60 region = defaults.AWS_REGION if not args.region else args.region clusters_to_delete, cf_clusters_to_delete, remaining_clusters = get_clusters( time_to_delete=time_to_delete, region_name=region, prefixes_hours_to_spare=prefixes_hours_to_spare, ) if not clusters_to_delete: logger.info("No clusters to delete") else: logger.info("Deleting clusters: %s", clusters_to_delete) get_openshift_installer() procs = [] failed_deletions = [] for cluster in clusters_to_delete: cluster_name = cluster.rsplit("-", 1)[0] logger.info(f"Deleting cluster {cluster_name}") proc = threading.Thread( target=cleanup, args=(cluster_name, cluster, False, failed_deletions) ) proc.start() procs.append(proc) for p in procs: p.join() for cluster in cf_clusters_to_delete: cluster_name = cluster.rsplit("-", 1)[0] logger.info(f"Deleting UPI cluster {cluster_name}") proc = threading.Thread( target=cleanup, args=(cluster_name, cluster, True, failed_deletions) ) proc.start() procs.append(proc) for p in procs: p.join() logger.info("Remaining clusters: %s", remaining_clusters) filename = "failed_cluster_deletions.txt" content = "None\n" if failed_deletions: logger.error("Failed cluster deletions: %s", failed_deletions) content = "" for cluster in failed_deletions: content += f"{cluster}\n" with open(filename, "w") as f: f.write(content) def prefix_hour_mapping(string): """ Validate that the string provided to --prefix is properly formatted Args: string (str): input provided to --prefix Raises: argparse.ArgumentTypeError: if the provided string is not correctly formatted Returns: str, str: prefix, hours """ msg = ( f"{string} is not a properly formatted prefix:hour combination. " f"See the --help for more information." ) try: prefix, hours = string.split(":") if not prefix or not hours: raise argparse.ArgumentTypeError(msg) # 'never' should be the only non-int value for hours if hours != "never": int(hours) except ValueError: raise argparse.ArgumentTypeError(msg) return prefix, hours def hour_valid(string): """ Validate that the hour value provided is an int and not lower than the minimum allowed running time Args: string: input provided to --hours Raises: argparse.ArgumentTypeError: if the provided hours value is not an int or lower than the minimum allowed running time Returns: int: valid hour value """ try: hours = int(string) assert hours >= defaults.MINIMUM_CLUSTER_RUNNING_TIME except ValueError: msg = f"{string} is not an int, please provide an int value" raise argparse.ArgumentTypeError(msg) except AssertionError: msg = ( f"Number of hours ({hours}) is lower than the required minimum " f"({defaults.MINIMUM_CLUSTER_RUNNING_TIME})." ) raise argparse.ArgumentTypeError(msg) return hours
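
# --- Illustrative only (not part of the module above): how --prefix strings
# are parsed into the special-rule mapping used by get_clusters(). The prefixes
# below are made-up examples; 'never' marks clusters that are skipped entirely.
def _demo_prefix_rules():
    rules = dict(prefix_hour_mapping(arg) for arg in ("jnk-:24", "dnd-:never"))
    # -> {'jnk-': '24', 'dnd-': 'never'}: jnk-* clusters may run for 24 hours,
    #    dnd-* clusters are never destroyed by aws_cleanup()
    return rules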
userInterface.py
from Tkinter import * import ttk import tkMessageBox import pcapReader import plotLanNetwork import communicationDetailsFetch import reportGen import time import threading import Queue from PIL import Image,ImageTk import os class pcapXrayGui: def __init__(self, base): # Base Frame Configuration self.base = base base.title("PcapXray") Label(base, text="PcapXray Tool - A LAN Network Analyzer") # Style Configuration style = ttk.Style() style.configure("BW.TLabel", foreground="black") style.configure("BW.TEntry", foreground="black") # 1st Frame - Initial Frame InitFrame = ttk.Frame(base, width=50, padding="10 10 10 10",relief= GROOVE) InitFrame.grid(column=10, row=10, sticky=(N, W, E, S)) InitFrame.columnconfigure(10, weight=1) InitFrame.rowconfigure(10, weight=1) # Pcap File Entry self.pcap_file = StringVar() ttk.Label(InitFrame, text="Enter pcap file path: ",style="BW.TLabel").grid(column=0, row=0, sticky="W") ttk.Entry(InitFrame, width=30, textvariable=self.pcap_file, style="BW.TEntry").grid(column=1, row=0, sticky="W, E") self.progressbar = ttk.Progressbar(InitFrame, orient="horizontal", length=200,value=0, maximum=200, mode="indeterminate") ttk.Button(InitFrame, text="Analyze!", command=self.pcap_analyse).grid(column=2, row=0, padx=10, pady=10,sticky="E") self.progressbar.grid(column=3, row=0, padx=10, pady=10, sticky="E") # Second Frame with Options SecondFrame = ttk.Frame(base, width=50, padding="10 10 10 10",relief= GROOVE) SecondFrame.grid(column=10, row=20, sticky=(N, W, E, S)) SecondFrame.columnconfigure(10, weight=1) SecondFrame.rowconfigure(10, weight=1) ttk.Label(SecondFrame, text="Options: ", style="BW.TLabel").grid(column=0, row=10, sticky="W") self.option = StringVar() self.options = {'All','HTTP','HTTPS','Tor','Malicious'} #self.option.set('Tor') ttk.OptionMenu(SecondFrame,self.option,"Select",*self.options).grid(column=1, row=10,sticky="W, E") # Third Frame with Results and Descriptioms self.ThirdFrame = ttk.Frame(base, width=100, height=100, padding="10 10 10 10",relief= GROOVE) description = """It is a tool aimed to simplyfy the network analysis and speed the process of analysing the network traffic.\nThis prototype aims to accomplish 4 important modules, \n 1. Web Traffic\n 2. Tor Traffic \n 3. Malicious Traffic \n 4. Device/Traffic Details\n\nPlease contact me @ spg349@nyu.edu for any bugs or problems ! 
""" self.label = ttk.Label(self.ThirdFrame, text="Description: \nPcapXray tools is an aid for Network Forensics or Any Network Analysis!\n"+description, style="BW.TLabel") self.label.grid(column=10, row=10,sticky="W") self.xscrollbar = Scrollbar(self.ThirdFrame, orient=HORIZONTAL) self.xscrollbar.grid(row=100, column=0, sticky=E + W) self.yscrollbar = Scrollbar(self.ThirdFrame, orient=VERTICAL) self.yscrollbar.grid(row=0, column=100, sticky=N + S) self.ThirdFrame.grid(column=10, row=30, sticky=(N, W, E, S)) self.ThirdFrame.columnconfigure(0, weight=1) self.ThirdFrame.rowconfigure(0, weight=1) self.name_servers = "" def pcap_analyse(self): if os.path.exists(self.pcap_file.get()): self.progressbar.start() result = Queue.Queue() packet_read = threading.Thread(target=pcapReader.pcapReader,args=(self.pcap_file.get(),result)) packet_read.start() while packet_read.is_alive(): self.progressbar.update() packet_read.join() self.progressbar.stop() #packet_read.join() self.capture_read = result.get() reportThreadpcap = threading.Thread(target=reportGen.reportGen().packetDetails,args=(self.capture_read,)) reportThreadpcap.start() #self.option.set("Tor") self.option.trace("w",self.map_select) #self.option.set("Tor") self.name_servers = "" else: tkMessageBox.showerror("Error","File Not Found !") def generate_graph(self): if self.name_servers == "": result = Queue.Queue() t = threading.Thread(target=communicationDetailsFetch.trafficDetailsFetch,args=(self.capture_read,result)) t.start() self.progressbar.start() while t.is_alive(): self.progressbar.update() t.join() self.progressbar.stop() self.name_servers = result.get() reportThread = threading.Thread(target=reportGen.reportGen().communicationDetailsReport,args=(self.name_servers,)) reportThread.start() if not os.path.exists("Report/"+self.pcap_file.get().replace(".pcap","")+self.option.get()+".png"): t1 = threading.Thread(target=plotLanNetwork.plotLan, args=(self.capture_read, self.pcap_file.get().replace(".pcap",""),self.name_servers,self.option.get(),)) t1.start() self.progressbar.start() while t1.is_alive(): self.progressbar.update() t1.join() self.progressbar.stop() self.label.grid_forget() canvas = Canvas(self.ThirdFrame, width=700,height=600, bd=0, bg="navy", xscrollcommand=self.xscrollbar.set, yscrollcommand=self.yscrollbar.set) canvas.grid(row=0, column=0, sticky=N + S + E + W) self.img = ImageTk.PhotoImage(Image.open("Report/"+self.pcap_file.get().replace(".pcap","")+self.option.get()+".png").resize((900,900),Image.ANTIALIAS).convert('RGB')) canvas.create_image(0,0, image=self.img) canvas.config(scrollregion=canvas.bbox(ALL)) self.xscrollbar.config(command=canvas.xview) self.yscrollbar.config(command=canvas.yview) def map_select(self, *args): print self.option.get() self.generate_graph() def main(): base = Tk() pcapXrayGui(base) base.mainloop() #main()
util.py
import os import re import sys import time import json import requests from datetime import datetime from subprocess import run, PIPE, DEVNULL from multiprocessing import Process from urllib.parse import quote from config import ( IS_TTY, OUTPUT_PERMISSIONS, REPO_DIR, SOURCES_DIR, OUTPUT_DIR, ARCHIVE_DIR, TIMEOUT, TERM_WIDTH, SHOW_PROGRESS, ANSI, CHROME_BINARY, FETCH_WGET, FETCH_PDF, FETCH_SCREENSHOT, FETCH_DOM, FETCH_FAVICON, FETCH_AUDIO, FETCH_VIDEO, SUBMIT_ARCHIVE_DOT_ORG, ) # URL helpers without_scheme = lambda url: url.replace('http://', '').replace('https://', '').replace('ftp://', '') without_query = lambda url: url.split('?', 1)[0] without_hash = lambda url: url.split('#', 1)[0] without_path = lambda url: url.split('/', 1)[0] domain = lambda url: without_hash(without_query(without_path(without_scheme(url)))) base_url = lambda url: without_scheme(url) # uniq base url used to dedupe links short_ts = lambda ts: ts.split('.')[0] def check_dependencies(): """Check that all necessary dependencies are installed, and have valid versions""" python_vers = float('{}.{}'.format(sys.version_info.major, sys.version_info.minor)) if python_vers < 3.5: print('{}[X] Python version is not new enough: {} (>3.5 is required){}'.format(ANSI['red'], python_vers, ANSI['reset'])) print(' See https://github.com/pirate/bookmark-archiver#troubleshooting for help upgrading your Python installation.') raise SystemExit(1) if FETCH_PDF or FETCH_SCREENSHOT or FETCH_DOM: if run(['which', CHROME_BINARY], stdout=DEVNULL).returncode: print('{}[X] Missing dependency: {}{}'.format(ANSI['red'], CHROME_BINARY, ANSI['reset'])) print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(CHROME_BINARY)) print(' See https://github.com/pirate/bookmark-archiver for help.') raise SystemExit(1) # parse chrome --version e.g. 
Google Chrome 61.0.3114.0 canary / Chromium 59.0.3029.110 built on Ubuntu, running on Ubuntu 16.04 try: result = run([CHROME_BINARY, '--version'], stdout=PIPE) version_str = result.stdout.decode('utf-8') version_lines = re.sub("(Google Chrome|Chromium) (\\d+?)\\.(\\d+?)\\.(\\d+?).*?$", "\\2", version_str).split('\n') version = [l for l in version_lines if l.isdigit()][-1] if int(version) < 59: print(version_lines) print('{red}[X] Chrome version must be 59 or greater for headless PDF, screenshot, and DOM saving{reset}'.format(**ANSI)) print(' See https://github.com/pirate/bookmark-archiver for help.') raise SystemExit(1) except (IndexError, TypeError, OSError): print('{red}[X] Failed to parse Chrome version, is it installed properly?{reset}'.format(**ANSI)) print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(CHROME_BINARY)) print(' See https://github.com/pirate/bookmark-archiver for help.') raise SystemExit(1) if FETCH_WGET: if run(['which', 'wget'], stdout=DEVNULL).returncode or run(['wget', '--version'], stdout=DEVNULL).returncode: print('{red}[X] Missing dependency: wget{reset}'.format(**ANSI)) print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format('wget')) print(' See https://github.com/pirate/bookmark-archiver for help.') raise SystemExit(1) if FETCH_FAVICON or SUBMIT_ARCHIVE_DOT_ORG: if run(['which', 'curl'], stdout=DEVNULL).returncode or run(['curl', '--version'], stdout=DEVNULL).returncode: print('{red}[X] Missing dependency: curl{reset}'.format(**ANSI)) print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format('curl')) print(' See https://github.com/pirate/bookmark-archiver for help.') raise SystemExit(1) if FETCH_AUDIO or FETCH_VIDEO: if run(['which', 'youtube-dl'], stdout=DEVNULL).returncode or run(['youtube-dl', '--version'], stdout=DEVNULL).returncode: print('{red}[X] Missing dependency: youtube-dl{reset}'.format(**ANSI)) print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format('youtube-dl')) print(' See https://github.com/pirate/bookmark-archiver for help.') raise SystemExit(1) def chmod_file(path, cwd='.', permissions=OUTPUT_PERMISSIONS, timeout=30): """chmod -R <permissions> <cwd>/<path>""" if not os.path.exists(os.path.join(cwd, path)): raise Exception('Failed to chmod: {} does not exist (did the previous step fail?)'.format(path)) chmod_result = run(['chmod', '-R', permissions, path], cwd=cwd, stdout=DEVNULL, stderr=PIPE, timeout=timeout) if chmod_result.returncode == 1: print(' ', chmod_result.stderr.decode()) raise Exception('Failed to chmod {}/{}'.format(cwd, path)) def progress(seconds=TIMEOUT, prefix=''): """Show a (subprocess-controlled) progress bar with a <seconds> timeout, returns end() function to instantly finish the progress """ if not SHOW_PROGRESS: return lambda: None chunk = '█' if sys.stdout.encoding == 'UTF-8' else '#' chunks = TERM_WIDTH - len(prefix) - 20 # number of progress chunks to show (aka max bar width) def progress_bar(seconds=seconds, prefix=prefix): """show timer in the form of progress bar, with percentage and seconds remaining""" try: for s in range(seconds * chunks): progress = s / chunks / seconds * 100 bar_width = round(progress/(100/chunks)) # ████████████████████ 0.9% (1/60sec) sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format( prefix, ANSI['green'], (chunk * bar_width).ljust(chunks), ANSI['reset'], round(progress, 1), round(s/chunks), seconds, )) sys.stdout.flush() time.sleep(1 / chunks) # ██████████████████████████████████ 
100.0% (60/60sec) sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)\n'.format( prefix, ANSI['red'], chunk * chunks, ANSI['reset'], 100.0, seconds, seconds, )) sys.stdout.flush() except KeyboardInterrupt: print() pass p = Process(target=progress_bar) p.start() def end(): """immediately finish progress and clear the progressbar line""" p.terminate() sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH), ANSI['reset'])) # clear whole terminal line sys.stdout.flush() return end def pretty_path(path): """convert paths like .../bookmark-archiver/archiver/../output/abc into output/abc""" return path.replace(REPO_DIR + '/', '') def download_url(url): """download a given url's content into downloads/domain.txt""" if not os.path.exists(SOURCES_DIR): os.makedirs(SOURCES_DIR) ts = str(datetime.now().timestamp()).split('.', 1)[0] source_path = os.path.join(SOURCES_DIR, '{}-{}.txt'.format(domain(url), ts)) print('[*] [{}] Downloading {} > {}'.format( datetime.now().strftime('%Y-%m-%d %H:%M:%S'), url, pretty_path(source_path), )) end = progress(TIMEOUT, prefix=' ') try: downloaded_xml = requests.get(url).content.decode() end() except Exception as e: end() print('[!] Failed to download {}\n'.format(url)) print(' ', e) raise SystemExit(1) with open(source_path, 'w', encoding='utf-8') as f: f.write(downloaded_xml) return source_path def str_between(string, start, end=None): """(<abc>12345</def>, <abc>, </def>) -> 12345""" content = string.split(start, 1)[-1] if end is not None: content = content.rsplit(end, 1)[0] return content def get_link_type(link): """Certain types of links need to be handled specially, this figures out when that's the case""" if link['base_url'].endswith('.pdf'): return 'PDF' elif link['base_url'].rsplit('.', 1) in ('pdf', 'png', 'jpg', 'jpeg', 'svg', 'bmp', 'gif', 'tiff', 'webp'): return 'image' elif 'wikipedia.org' in link['domain']: return 'wiki' elif 'youtube.com' in link['domain']: return 'youtube' elif 'soundcloud.com' in link['domain']: return 'soundcloud' elif 'youku.com' in link['domain']: return 'youku' elif 'vimeo.com' in link['domain']: return 'vimeo' return None def merge_links(a, b): """deterministially merge two links, favoring longer field values over shorter, and "cleaner" values over worse ones. 
""" longer = lambda key: a[key] if len(a[key]) > len(b[key]) else b[key] earlier = lambda key: a[key] if a[key] < b[key] else b[key] url = longer('url') longest_title = longer('title') cleanest_title = a['title'] if '://' not in a['title'] else b['title'] link = { 'timestamp': earlier('timestamp'), 'url': url, 'domain': domain(url), 'base_url': base_url(url), 'tags': longer('tags'), 'title': longest_title if '://' not in longest_title else cleanest_title, 'sources': list(set(a.get('sources', []) + b.get('sources', []))), } link['type'] = get_link_type(link) return link def find_link(folder, links): """for a given archive folder, find the corresponding link object in links""" url = parse_url(folder) if url: for link in links: if (link['base_url'] in url) or (url in link['url']): return link timestamp = folder.split('.')[0] for link in links: if link['timestamp'].startswith(timestamp): if link['domain'] in os.listdir(os.path.join(ARCHIVE_DIR, folder)): return link # careful now, this isn't safe for most ppl if link['domain'] in parse_url(folder): return link return None def parse_url(folder): """for a given archive folder, figure out what url it's for""" link_json = os.path.join(ARCHIVE_DIR, folder, 'index.json') if os.path.exists(link_json): with open(link_json, 'r') as f: try: link_json = f.read().strip() if link_json: link = json.loads(link_json) return link['base_url'] except ValueError: print('File contains invalid JSON: {}!'.format(link_json)) archive_org_txt = os.path.join(ARCHIVE_DIR, folder, 'archive.org.txt') if os.path.exists(archive_org_txt): with open(archive_org_txt, 'r') as f: original_link = f.read().strip().split('/http', 1)[-1] with_scheme = 'http{}'.format(original_link) return with_scheme return '' def manually_merge_folders(source, target): """prompt for user input to resolve a conflict between two archive folders""" if not IS_TTY: return fname = lambda path: path.split('/')[-1] print(' {} and {} have conflicting files, which do you want to keep?'.format(fname(source), fname(target))) print(' - [enter]: do nothing (keep both)') print(' - a: prefer files from {}'.format(source)) print(' - b: prefer files from {}'.format(target)) print(' - q: quit and resolve the conflict manually') try: answer = input('> ').strip().lower() except KeyboardInterrupt: answer = 'q' assert answer in ('', 'a', 'b', 'q'), 'Invalid choice.' 
if answer == 'q': print('\nJust run Bookmark Archiver again to pick up where you left off.') raise SystemExit(0) elif answer == '': return files_in_source = set(os.listdir(source)) files_in_target = set(os.listdir(target)) for file in files_in_source: if file in files_in_target: to_delete = target if answer == 'a' else source run(['rm', '-Rf', os.path.join(to_delete, file)]) run(['mv', os.path.join(source, file), os.path.join(target, file)]) if not set(os.listdir(source)): run(['rm', '-Rf', source]) def fix_folder_path(archive_path, link_folder, link): """given a folder, merge it to the canonical 'correct' path for the given link object""" source = os.path.join(archive_path, link_folder) target = os.path.join(archive_path, link['timestamp']) url_in_folder = parse_url(source) if not (url_in_folder in link['base_url'] or link['base_url'] in url_in_folder): raise ValueError('The link does not match the url for this folder.') if not os.path.exists(target): # target doesn't exist so nothing needs merging, simply move A to B run(['mv', source, target]) else: # target folder exists, check for conflicting files and attempt manual merge files_in_source = set(os.listdir(source)) files_in_target = set(os.listdir(target)) conflicting_files = files_in_source & files_in_target if not conflicting_files: for file in files_in_source: run(['mv', os.path.join(source, file), os.path.join(target, file)]) if os.path.exists(source): files_in_source = set(os.listdir(source)) if files_in_source: manually_merge_folders(source, target) else: run(['rm', '-R', source]) def migrate_data(): # migrate old folder to new OUTPUT folder old_dir = os.path.join(REPO_DIR, 'html') if os.path.exists(old_dir): print('[!] WARNING: Moved old output folder "html" to new location: {}'.format(OUTPUT_DIR)) run(['mv', old_dir, OUTPUT_DIR], timeout=10) def cleanup_archive(archive_path, links): """move any incorrectly named folders to their canonical locations""" # for each folder that exists, see if we can match it up with a known good link # if we can, then merge the two folders (TODO: if not, move it to lost & found) unmatched = [] bad_folders = [] if not os.path.exists(archive_path): return for folder in os.listdir(archive_path): try: files = os.listdir(os.path.join(archive_path, folder)) except NotADirectoryError: continue if files: link = find_link(folder, links) if link is None: unmatched.append(folder) continue if folder != link['timestamp']: bad_folders.append((folder, link)) else: # delete empty folders run(['rm', '-R', os.path.join(archive_path, folder)]) if bad_folders and IS_TTY and input('[!] Cleanup archive? y/[n]: ') == 'y': print('[!] Fixing {} improperly named folders in archive...'.format(len(bad_folders))) for folder, link in bad_folders: fix_folder_path(archive_path, folder, link) elif bad_folders: print('[!] Warning! {} folders need to be merged, fix by running bookmark archiver.'.format(len(bad_folders))) if unmatched: print('[!] Warning! {} unrecognized folders in html/archive/'.format(len(unmatched))) print(' '+ '\n '.join(unmatched)) def wget_output_path(link, look_in=None): """calculate the path to the wgetted .html file, since wget may adjust some paths to be different than the base_url path. 
See docs on wget --adjust-extension (-E) """ # if we have it stored, always prefer the actual output path to computed one if link.get('latest', {}).get('wget'): return link['latest']['wget'] urlencode = lambda s: quote(s, encoding='utf-8', errors='replace') if link['type'] in ('PDF', 'image'): return urlencode(link['base_url']) # Since the wget algorithm to for -E (appending .html) is incredibly complex # instead of trying to emulate it here, we just look in the output folder # to see what html file wget actually created as the output wget_folder = link['base_url'].rsplit('/', 1)[0].split('/') look_in = os.path.join(ARCHIVE_DIR, link['timestamp'], *wget_folder) if look_in and os.path.exists(look_in): html_files = [ f for f in os.listdir(look_in) if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", f, re.I | re.M) ] if html_files: return urlencode(os.path.join(*wget_folder, html_files[0])) return None # If finding the actual output file didn't work, fall back to the buggy # implementation of the wget .html appending algorithm # split_url = link['url'].split('#', 1) # query = ('%3F' + link['url'].split('?', 1)[-1]) if '?' in link['url'] else '' # if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", split_url[0], re.I | re.M): # # already ends in .html # return urlencode(link['base_url']) # else: # # .html needs to be appended # without_scheme = split_url[0].split('://', 1)[-1].split('?', 1)[0] # if without_scheme.endswith('/'): # if query: # return urlencode('#'.join([without_scheme + 'index.html' + query + '.html', *split_url[1:]])) # return urlencode('#'.join([without_scheme + 'index.html', *split_url[1:]])) # else: # if query: # return urlencode('#'.join([without_scheme + '/index.html' + query + '.html', *split_url[1:]])) # elif '/' in without_scheme: # return urlencode('#'.join([without_scheme + '.html', *split_url[1:]])) # return urlencode(link['base_url'] + '/index.html') def derived_link_info(link): """extend link info with the archive urls and other derived data""" link_info = { **link, 'date': datetime.fromtimestamp(float(link['timestamp'])).strftime('%Y-%m-%d %H:%M'), 'google_favicon_url': 'https://www.google.com/s2/favicons?domain={domain}'.format(**link), 'favicon_url': 'archive/{timestamp}/favicon.ico'.format(**link), 'files_url': 'archive/{timestamp}/index.html'.format(**link), 'archive_url': 'archive/{}/{}'.format(link['timestamp'], wget_output_path(link) or 'index.html'), 'pdf_link': 'archive/{timestamp}/output.pdf'.format(**link), 'screenshot_link': 'archive/{timestamp}/screenshot.png'.format(**link), 'dom_link': 'archive/{timestamp}/output.html'.format(**link), 'archive_org_url': 'https://web.archive.org/web/{base_url}'.format(**link), } # PDF and images are handled slightly differently # wget, screenshot, & pdf urls all point to the same file if link['type'] in ('PDF', 'image'): link_info.update({ 'archive_url': 'archive/{timestamp}/{base_url}'.format(**link), 'pdf_link': 'archive/{timestamp}/{base_url}'.format(**link), 'screenshot_link': 'archive/{timestamp}/{base_url}'.format(**link), 'dom_link': 'archive/{timestamp}/{base_url}'.format(**link), 'title': '{title} ({type})'.format(**link), }) return link_info
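
# --- Illustrative only (not part of the module above): how two saved copies of
# the same bookmark are merged. The URLs, tags and timestamps are made up; note
# that the longer field wins, the earlier timestamp wins, and a title that is
# just a URL loses to a human-readable one.
if __name__ == '__main__':
    a = {'timestamp': '1500000000.0', 'url': 'https://example.com/post?id=1',
         'title': 'Example post', 'tags': 'blog', 'sources': ['pocket_export.html']}
    b = {'timestamp': '1500000100.0', 'url': 'https://example.com/post?id=1',
         'title': 'https://example.com/post?id=1', 'tags': 'blog,python',
         'sources': ['pinboard_export.json']}
    print(json.dumps(merge_links(a, b), indent=4))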
parallel.py
from __future__ import absolute_import from __future__ import unicode_literals import logging import operator import sys from threading import Thread from docker.errors import APIError from six.moves import _thread as thread from six.moves.queue import Empty from six.moves.queue import Queue from compose.cli.signals import ShutdownException from compose.utils import get_output_stream log = logging.getLogger(__name__) STOP = object() def parallel_execute(objects, func, get_name, msg, get_deps=None): """Runs func on objects in parallel while ensuring that func is ran on object only after it is ran on all its dependencies. get_deps called on object must return a collection with its dependencies. get_name called on object must return its name. """ objects = list(objects) stream = get_output_stream(sys.stderr) writer = ParallelStreamWriter(stream, msg) for obj in objects: writer.initialize(get_name(obj)) events = parallel_execute_iter(objects, func, get_deps) errors = {} results = [] error_to_reraise = None for obj, result, exception in events: if exception is None: writer.write(get_name(obj), 'done') results.append(result) elif isinstance(exception, APIError): errors[get_name(obj)] = exception.explanation writer.write(get_name(obj), 'error') elif isinstance(exception, UpstreamError): writer.write(get_name(obj), 'error') else: errors[get_name(obj)] = exception error_to_reraise = exception for obj_name, error in errors.items(): stream.write("\nERROR: for {} {}\n".format(obj_name, error)) if error_to_reraise: raise error_to_reraise return results def _no_deps(x): return [] class State(object): """ Holds the state of a partially-complete parallel operation. state.started: objects being processed state.finished: objects which have been processed state.failed: objects which either failed or whose dependencies failed """ def __init__(self, objects): self.objects = objects self.started = set() self.finished = set() self.failed = set() def is_done(self): return len(self.finished) + len(self.failed) >= len(self.objects) def pending(self): return set(self.objects) - self.started - self.finished - self.failed def parallel_execute_iter(objects, func, get_deps): """ Runs func on objects in parallel while ensuring that func is ran on object only after it is ran on all its dependencies. Returns an iterator of tuples which look like: # if func returned normally when run on object (object, result, None) # if func raised an exception when run on object (object, None, exception) # if func raised an exception when run on one of object's dependencies (object, None, UpstreamError()) """ if get_deps is None: get_deps = _no_deps results = Queue() state = State(objects) while True: feed_queue(objects, func, get_deps, results, state) try: event = results.get(timeout=0.1) except Empty: continue # See https://github.com/docker/compose/issues/189 except thread.error: raise ShutdownException() if event is STOP: break obj, _, exception = event if exception is None: log.debug('Finished processing: {}'.format(obj)) state.finished.add(obj) else: log.debug('Failed: {}'.format(obj)) state.failed.add(obj) yield event def producer(obj, func, results): """ The entry point for a producer thread which runs func on a single object. Places a tuple on the results queue once func has either returned or raised. 
""" try: result = func(obj) results.put((obj, result, None)) except Exception as e: results.put((obj, None, e)) def feed_queue(objects, func, get_deps, results, state): """ Starts producer threads for any objects which are ready to be processed (i.e. they have no dependencies which haven't been successfully processed). Shortcuts any objects whose dependencies have failed and places an (object, None, UpstreamError()) tuple on the results queue. """ pending = state.pending() log.debug('Pending: {}'.format(pending)) for obj in pending: deps = get_deps(obj) if any(dep in state.failed for dep in deps): log.debug('{} has upstream errors - not processing'.format(obj)) results.put((obj, None, UpstreamError())) state.failed.add(obj) elif all( dep not in objects or dep in state.finished for dep in deps ): log.debug('Starting producer thread for {}'.format(obj)) t = Thread(target=producer, args=(obj, func, results)) t.daemon = True t.start() state.started.add(obj) if state.is_done(): results.put(STOP) class UpstreamError(Exception): pass class ParallelStreamWriter(object): """Write out messages for operations happening in parallel. Each operation has it's own line, and ANSI code characters are used to jump to the correct line, and write over the line. """ def __init__(self, stream, msg): self.stream = stream self.msg = msg self.lines = [] def initialize(self, obj_index): if self.msg is None: return self.lines.append(obj_index) self.stream.write("{} {} ... \r\n".format(self.msg, obj_index)) self.stream.flush() def write(self, obj_index, status): if self.msg is None: return position = self.lines.index(obj_index) diff = len(self.lines) - position # move up self.stream.write("%c[%dA" % (27, diff)) # erase self.stream.write("%c[2K\r" % 27) self.stream.write("{} {} ... {}\r".format(self.msg, obj_index, status)) # move back down self.stream.write("%c[%dB" % (27, diff)) self.stream.flush() def parallel_operation(containers, operation, options, message): parallel_execute( containers, operator.methodcaller(operation, **options), operator.attrgetter('name'), message) def parallel_remove(containers, options): stopped_containers = [c for c in containers if not c.is_running] parallel_operation(stopped_containers, 'remove', options, 'Removing') def parallel_start(containers, options): parallel_operation(containers, 'start', options, 'Starting') def parallel_pause(containers, options): parallel_operation(containers, 'pause', options, 'Pausing') def parallel_unpause(containers, options): parallel_operation(containers, 'unpause', options, 'Unpausing') def parallel_kill(containers, options): parallel_operation(containers, 'kill', options, 'Killing') def parallel_restart(containers, options): parallel_operation(containers, 'restart', options, 'Restarting')
log_consumer.py
"""Log consumers are responsible for fetching chia logs and propagating them to subscribers for further handling. This abstraction should provide an easy ability to switch between local file reader and fetching logs from a remote machine. The latter has not been implemented yet. Feel free to add it. """ # std import logging import subprocess from abc import ABC, abstractmethod from pathlib import Path, PurePosixPath, PureWindowsPath, PurePath from threading import Thread from typing import List, Optional, Tuple # project from src.config import check_keys, is_win_platform from src.util import OS # lib import paramiko class LogConsumerSubscriber(ABC): """Interface for log consumer subscribers (i.e. handlers)""" @abstractmethod def consume_logs(self, logs: str): """This method will be called when new logs are available""" pass class LogConsumer(ABC): """Abstract class providing common interface for log consumers""" def __init__(self): self._subscribers: List[LogConsumerSubscriber] = [] @abstractmethod def stop(self): pass def subscribe(self, subscriber: LogConsumerSubscriber): self._subscribers.append(subscriber) def _notify_subscribers(self, logs: str): for subscriber in self._subscribers: subscriber.consume_logs(logs) class FileLogConsumer(LogConsumer): """Specific implementation for a simple file consumer""" def __init__(self, log_path: Path): logging.info("Enabled file log consumer.") super().__init__() self._log_path = log_path self._is_running = True self._thread = Thread(target=self._consume_loop) self._thread.start() def stop(self): logging.info("Stopping") self._is_running = False def _consume_loop(self): expanded_user_log_path = str(self._log_path.expanduser()) logging.info(f"Consuming log file from {expanded_user_log_path}") if is_win_platform(): consume_command_args = ["powershell.exe", "get-content", expanded_user_log_path, "-tail", "1", "-wait"] else: consume_command_args = ["tail", "-F", expanded_user_log_path] f = subprocess.Popen(consume_command_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) while self._is_running: log_line = f.stdout.readline().decode(encoding="utf-8") self._notify_subscribers(log_line) class NetworkLogConsumer(LogConsumer): """Consume logs over the network""" def __init__( self, remote_log_path: PurePath, remote_user: str, remote_host: str, remote_port: int, remote_platform: OS ): logging.info("Enabled network log consumer.") super().__init__() self._remote_user = remote_user self._remote_host = remote_host self._remote_port = remote_port self._remote_log_path = remote_log_path self._remote_platform = remote_platform self._ssh_client = paramiko.client.SSHClient() self._ssh_client.load_system_host_keys() self._ssh_client.connect(hostname=self._remote_host, username=self._remote_user, port=self._remote_port) # Start thread self._is_running = True self._thread = Thread(target=self._consume_loop) self._thread.start() def stop(self): logging.info("Stopping") self._is_running = False def _consume_loop(self): logging.info( f"Consuming remote log file {self._remote_log_path}" + f" from {self._remote_host}:{self._remote_port} ({self._remote_platform})" ) if self._remote_platform == OS.WINDOWS: stdin, stdout, stderr = self._ssh_client.exec_command( f"powershell.exe Get-Content {self._remote_log_path} -Wait -Tail 1" ) else: stdin, stdout, stderr = self._ssh_client.exec_command(f"tail -F {self._remote_log_path}") while self._is_running: log_line = stdout.readline() self._notify_subscribers(log_line) def get_host_info(host: str, user: str, path: str, port: int) 
-> Tuple[OS, PurePath]: client = paramiko.client.SSHClient() client.load_system_host_keys() client.connect(hostname=host, username=user, port=port) stdin, stdout, stderr = client.exec_command("uname -a") fout: str = stdout.readline().lower() ferr: str = stderr.readline().lower() if "linux" in fout: return OS.LINUX, PurePosixPath(path) elif "darwin" in fout: return OS.MACOS, PurePosixPath(path) elif "not recognized" in ferr: return OS.WINDOWS, PureWindowsPath(path) else: logging.error("Found unsupported platform on remote host, assuming Linux and hope for the best.") return OS.LINUX, PurePosixPath(path) def create_log_consumer_from_config(config: dict) -> Optional[LogConsumer]: enabled_consumer = None for consumer in config.keys(): if config[consumer]["enable"]: if enabled_consumer: logging.error("Detected multiple enabled consumers. This is unsupported configuration!") return None enabled_consumer = consumer if enabled_consumer is None: logging.error("Couldn't find enabled log consumer in config.yaml") return None enabled_consumer_config = config[enabled_consumer] if enabled_consumer == "file_log_consumer": if not check_keys(required_keys=["file_path"], config=enabled_consumer_config): return None return FileLogConsumer(log_path=Path(enabled_consumer_config["file_path"])) if enabled_consumer == "network_log_consumer": if not check_keys( required_keys=["remote_file_path", "remote_host", "remote_user"], config=enabled_consumer_config, ): return None # default SSH Port : 22 remote_port = enabled_consumer_config.get("remote_port", 22) platform, path = get_host_info( enabled_consumer_config["remote_host"], enabled_consumer_config["remote_user"], enabled_consumer_config["remote_file_path"], remote_port, ) return NetworkLogConsumer( remote_log_path=path, remote_host=enabled_consumer_config["remote_host"], remote_user=enabled_consumer_config["remote_user"], remote_port=remote_port, remote_platform=platform, ) logging.error("Unhandled consumer type") return None
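
# --- Illustrative only (not part of the module above): a minimal subscriber
# sketch wired to a file log consumer. The config dict and log path below are
# assumptions; real deployments build this from config.yaml.
class _EchoSubscriber(LogConsumerSubscriber):
    """Toy handler that simply echoes every log line it receives."""

    def consume_logs(self, logs: str):
        print(logs, end="")


def _demo_wire_up():
    # Note: constructing the consumer starts the tailing thread immediately.
    consumer = create_log_consumer_from_config(
        {"file_log_consumer": {"enable": True, "file_path": "~/.chia/mainnet/log/debug.log"}}
    )
    if consumer is not None:
        consumer.subscribe(_EchoSubscriber())
    return consumer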
app.py
# encoding: utf-8 ''' A REST API for Salt =================== .. versionadded:: 2014.7.0 .. py:currentmodule:: salt.netapi.rest_cherrypy.app :depends: - CherryPy Python module :optdepends: - ws4py Python module for websockets support. :configuration: All authentication is done through Salt's :ref:`external auth <acl-eauth>` system which requires additional configuration not described here. Example production-ready configuration; add to the Salt master config file: .. code-block:: yaml rest_cherrypy: port: 8000 ssl_crt: /etc/pki/tls/certs/localhost.crt ssl_key: /etc/pki/tls/certs/localhost.key Using only a secure HTTPS connection is strongly recommended since Salt authentication credentials will be sent over the wire. A self-signed certificate can be generated using the :py:func:`~salt.modules.tls.create_self_signed_cert` function in Salt (note the dependencies for this module). .. code-block:: bash salt-call tls.create_self_signed_cert All available configuration options are detailed below. These settings configure the CherryPy HTTP server and do not apply when using an external server such as Apache or Nginx. port **Required** The port for the webserver to listen on. host : ``0.0.0.0`` The socket interface for the HTTP server to listen on. debug : ``False`` Starts the web server in development mode. It will reload itself when the underlying code is changed and will output more debugging info. ssl_crt The path to a SSL certificate. (See below) ssl_key The path to the private key for your SSL certificate. (See below) disable_ssl A flag to disable SSL. Warning: your Salt authentication credentials will be sent in the clear! webhook_disable_auth : False The :py:class:`Webhook` URL requires authentication by default but external services cannot always be configured to send authentication. See the Webhook documentation for suggestions on securing this interface. webhook_url : /hook Configure the URL endpoint for the :py:class:`Webhook` entry point. thread_pool : ``100`` The number of worker threads to start up in the pool. socket_queue_size : ``30`` Specify the maximum number of HTTP connections to queue. expire_responses : True Whether to check for and kill HTTP responses that have exceeded the default timeout. max_request_body_size : ``1048576`` Maximum size for the HTTP request body. collect_stats : False Collect and report statistics about the CherryPy server Reports are available via the :py:class:`Stats` URL. static A filesystem path to static HTML/JavaScript/CSS/image assets. static_path : ``/static`` The URL prefix to use when serving static assets out of the directory specified in the ``static`` setting. app A filesystem path to an HTML file that will be served as a static file. This is useful for bootstrapping a single-page JavaScript app. app_path : ``/app`` The URL prefix to use for serving the HTML file specified in the ``app`` setting. This should be a simple name containing no slashes. Any path information after the specified path is ignored; this is useful for apps that utilize the HTML5 history API. root_prefix : ``/`` A URL path to the main entry point for the application. This is useful for serving multiple applications from the same URL. .. _rest_cherrypy-auth: Authentication -------------- Authentication is performed by passing a session token with each request. Tokens are generated via the :py:class:`Login` URL. The token may be sent in one of two ways: * Include a custom header named :mailheader:`X-Auth-Token`. * Sent via a cookie. 
This option is a convenience for HTTP clients that automatically handle cookie support (such as browsers). .. seealso:: You can bypass the session handling via the :py:class:`Run` URL. Usage ----- Commands are sent to a running Salt master via this module by sending HTTP requests to the URLs detailed below. .. admonition:: Content negotiation This REST interface is flexible in what data formats it will accept as well as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded). * Specify the format of data in the request body by including the :mailheader:`Content-Type` header. * Specify the desired data format for the response body with the :mailheader:`Accept` header. Data sent in :http:method:`post` and :http:method:`put` requests must be in the format of a list of lowstate dictionaries. This allows multiple commands to be executed in a single HTTP request. .. glossary:: lowstate A dictionary containing various keys that instruct Salt which command to run, where that command lives, any parameters for that command, any authentication credentials, what returner to use, etc. Salt uses the lowstate data format internally in many places to pass command data between functions. Salt also uses lowstate for the :ref:`LocalClient() <python-api>` Python API interface. The following example (in JSON format) causes Salt to execute two commands:: [{ "client": "local", "tgt": "*", "fun": "test.fib", "arg": ["10"] }, { "client": "runner", "fun": "jobs.lookup_jid", "jid": "20130603122505459265" }] .. admonition:: x-www-form-urlencoded Sending JSON or YAML in the request body is simple and most flexible, however sending data in urlencoded format is also supported with the caveats below. It is the default format for HTML forms, many JavaScript libraries, and the :command:`curl` command. For example, the equivalent to running ``salt '*' test.ping`` is sending ``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body. Caveats: * Only a single command may be sent per HTTP request. * Repeating the ``arg`` parameter multiple times will cause those parameters to be combined into a single list. Note, some popular frameworks and languages (notably jQuery, PHP, and Ruby on Rails) will automatically append empty brackets onto repeated parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``, ``arg[]=two``. This is not supported; send JSON or YAML instead. .. |req_token| replace:: a session token from :py:class:`~Login`. .. |req_accept| replace:: the desired response format. .. |req_ct| replace:: the format of the request body. .. |res_ct| replace:: the format of the response body; depends on the :mailheader:`Accept` request header. .. |200| replace:: success .. |401| replace:: authentication required .. |406| replace:: requested Content-Type not available ''' # We need a custom pylintrc here... # pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613 from __future__ import absolute_import # Import Python libs import collections import itertools import functools import logging import json import StringIO import tarfile import time from multiprocessing import Process, Pipe # Import third-party libs import cherrypy from cherrypy.lib import cpstats import yaml # Import Salt libs import salt import salt.auth import salt.utils.event # Import salt-api libs import salt.netapi logger = logging.getLogger(__name__) # Imports related to websocket try: from .tools import websockets from . 
import event_processor HAS_WEBSOCKETS = True except ImportError: websockets = type('websockets', (object,), { 'SynchronizingWebsocket': None, }) HAS_WEBSOCKETS = False def salt_token_tool(): ''' If the custom authentication header is supplied, put it in the cookie dict so the rest of the session-based auth works as intended ''' x_auth = cherrypy.request.headers.get('X-Auth-Token', None) # X-Auth-Token header trumps session cookie if x_auth: cherrypy.request.cookie['session_id'] = x_auth def salt_ip_verify_tool(): ''' If there is a list of restricted IPs, verify current client is coming from one of those IPs. ''' # This is overly cumbersome and crude, # But, it's also safe... ish... salt_config = cherrypy.config.get('saltopts', None) if salt_config: cherrypy_conf = salt_config.get('rest_cherrypy', None) if cherrypy_conf: auth_ip_list = cherrypy_conf.get('authorized_ips', None) if auth_ip_list: logger.debug("Found IP list: {0}".format(auth_ip_list)) rem_ip = cherrypy.request.headers.get('Remote-Addr', None) logger.debug("Request from IP: {0}".format(rem_ip)) if rem_ip not in auth_ip_list: logger.error("Blocked IP: {0}".format(rem_ip)) cherrypy.response.status = 403 return { 'status': cherrypy.response.status, 'return': "Bad IP", } def salt_auth_tool(): ''' Redirect all unauthenticated requests to the login page ''' # Redirect to the login page if the session hasn't been authed if 'token' not in cherrypy.session: # pylint: disable=W8601 raise cherrypy.HTTPError(401) # Session is authenticated; inform caches cherrypy.response.headers['Cache-Control'] = 'private' def cors_handler(*args, **kwargs): ''' Check a CORS preflight request and return a valid response ''' req_head = cherrypy.request.headers resp_head = cherrypy.response.headers ac_method = req_head.get('Access-Control-Request-Method', None) allowed_methods = ['GET', 'POST'] allowed_headers = ['X-Auth-Token', 'Content-Type'] if ac_method and ac_method in allowed_methods: resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods) resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers) resp_head['Connection'] = 'keep-alive' resp_head['Access-Control-Max-Age'] = '1400' return {} def cors_tool(): ''' Handle both simple and complex CORS requests Add CORS headers to each response. If the request is a CORS preflight request swap out the default handler with a simple, single-purpose handler that verifies the request and provides a valid CORS response. ''' req_head = cherrypy.request.headers resp_head = cherrypy.response.headers # Always set response headers necessary for 'simple' CORS. resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*') resp_head['Access-Control-Expose-Headers'] = 'GET, POST' resp_head['Access-Control-Allow-Credentials'] = 'true' # If this is a non-simple CORS preflight request swap out the handler. if cherrypy.request.method == 'OPTIONS': cherrypy.serving.request.handler = cors_handler # Be conservative in what you send # Maps Content-Type to serialization functions; this is a tuple of tuples to # preserve order of preference. ct_out_map = ( ('application/json', json.dumps), ('application/x-yaml', functools.partial( yaml.safe_dump, default_flow_style=False)), ) def hypermedia_handler(*args, **kwargs): ''' Determine the best output format based on the Accept header, execute the regular handler, and transform the output to the request content type (even if it's an error). 
:param args: Pass args through to the main handler :param kwargs: Pass kwargs through to the main handler ''' # Execute the real handler. Handle or pass-through any errors we know how # to handle (auth & HTTP errors). Reformat any errors we don't know how to # handle as a data structure. try: cherrypy.response.processors = dict(ct_out_map) ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs) except salt.exceptions.EauthAuthenticationError: raise cherrypy.HTTPError(401) except cherrypy.CherryPyException: raise except Exception as exc: import traceback logger.debug("Error while processing request for: %s", cherrypy.request.path_info, exc_info=True) cherrypy.response.status = 500 ret = { 'status': cherrypy.response.status, 'return': '{0}'.format(traceback.format_exc(exc)) if cherrypy.config['debug'] else "An unexpected error occurred"} # Raises 406 if requested content-type is not supported best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map]) # Transform the output from the handler into the requested output format cherrypy.response.headers['Content-Type'] = best out = cherrypy.response.processors[best] return out(ret) def hypermedia_out(): ''' Determine the best handler for the requested content type Wrap the normal handler and transform the output from that handler into the requested content type ''' request = cherrypy.serving.request request._hypermedia_inner_handler = request.handler request.handler = hypermedia_handler @functools.wraps def process_request_body(fn): ''' A decorator to skip a processor function if process_request_body is False ''' def wrapped(*args, **kwargs): # pylint: disable=C0111 if cherrypy.request.process_request_body is not False: fn(*args, **kwargs) return wrapped def urlencoded_processor(entity): ''' Accept x-www-form-urlencoded data (run through CherryPy's formatter) and reformat it into a Low State data structure. Since we can't easily represent complicated data structures with key-value pairs, any more complicated requirements (e.g. compound commands) must instead be delivered via JSON or YAML. For example:: .. code-block:: bash curl -si localhost:8000 -d client=local -d tgt='*' \\ -d fun='test.kwarg' -d arg='one=1' -d arg='two=2' :param entity: raw POST data ''' # First call out to CherryPy's default processor cherrypy._cpreqbody.process_urlencoded(entity) cherrypy.serving.request.unserialized_data = entity.params cherrypy.serving.request.raw_body = '' @process_request_body def json_processor(entity): ''' Unserialize raw POST data in JSON format to a Python data structure. :param entity: raw POST data ''' body = entity.fp.read() try: cherrypy.serving.request.unserialized_data = json.loads(body) except ValueError: raise cherrypy.HTTPError(400, 'Invalid JSON document') cherrypy.serving.request.raw_body = body @process_request_body def yaml_processor(entity): ''' Unserialize raw POST data in YAML format to a Python data structure. :param entity: raw POST data ''' body = entity.fp.read() try: cherrypy.serving.request.unserialized_data = yaml.safe_load(body) except ValueError: raise cherrypy.HTTPError(400, 'Invalid YAML document') cherrypy.serving.request.raw_body = body @process_request_body def text_processor(entity): ''' Attempt to unserialize plain text as JSON Some large services still send JSON with a text/plain Content-Type. Those services are bad and should feel bad. 
:param entity: raw POST data ''' body = entity.fp.read() try: cherrypy.serving.request.unserialized_data = json.loads(body) except ValueError: cherrypy.serving.request.unserialized_data = body cherrypy.serving.request.raw_body = body def hypermedia_in(): ''' Unserialize POST/PUT data of a specified Content-Type. The following custom processors all are intended to format Low State data and will place that data structure into the request object. :raises HTTPError: if the request contains a Content-Type that we do not have a processor for ''' # Be liberal in what you accept ct_in_map = { 'application/x-www-form-urlencoded': urlencoded_processor, 'application/json': json_processor, 'application/x-yaml': yaml_processor, 'text/yaml': yaml_processor, 'text/plain': text_processor, } # Do not process the body for POST requests that have specified no content # or have not specified Content-Length if (cherrypy.request.method.upper() == 'POST' and cherrypy.request.headers.get('Content-Length', '0') == '0'): cherrypy.request.process_request_body = False cherrypy.request.body.processors.clear() cherrypy.request.body.default_proc = cherrypy.HTTPError( 406, 'Content type not supported') cherrypy.request.body.processors = ct_in_map def lowdata_fmt(): ''' Validate and format lowdata from incoming unserialized request data This tool requires that the hypermedia_in tool has already been run. ''' if cherrypy.request.method.upper() != 'POST': return data = cherrypy.request.unserialized_data # if the data was sent as urlencoded, we need to make it a list. # this is a very forgiving implementation as different clients set different # headers for form encoded data (including charset or something similar) if not isinstance(data, list): # Make the 'arg' param a list if not already if 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] # Finally, make a Low State and put it in request cherrypy.request.lowstate = [data] else: cherrypy.serving.request.lowstate = data cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource', salt_token_tool, priority=55) cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body', salt_auth_tool, priority=60) cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body', hypermedia_in) cherrypy.tools.cors_tool = cherrypy.Tool('before_handler', cors_tool, priority=30) cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler', lowdata_fmt, priority=40) cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler', hypermedia_out) cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler', salt_ip_verify_tool) ############################################################################### class LowDataAdapter(object): ''' The primary entry point to Salt's REST API ''' exposed = True _cp_config = { 'tools.sessions.on': True, 'tools.sessions.timeout': 60 * 10, # 10 hours # 'tools.autovary.on': True, 'tools.hypermedia_out.on': True, 'tools.hypermedia_in.on': True, 'tools.lowdata_fmt.on': True, 'tools.salt_ip_verify.on': True, } def __init__(self): self.opts = cherrypy.config['saltopts'] self.api = salt.netapi.NetapiClient(self.opts) def exec_lowstate(self, client=None, token=None): ''' Pull a Low State data structure from request and execute the low-data chunks through Salt. The low-data chunks will be updated to include the authorization token for the current session. ''' lowstate = cherrypy.request.lowstate # Release the session lock before executing any potentially # long-running Salt commands. 
This allows different threads to execute # Salt commands concurrently without blocking. if cherrypy.request.config.get('tools.sessions.on', False): cherrypy.session.release_lock() # if the lowstate loaded isn't a list, lets notify the client if not isinstance(lowstate, list): raise cherrypy.HTTPError(400, 'Lowstates must be a list') # Make any requested additions or modifications to each lowstate, then # execute each one and yield the result. for chunk in lowstate: if token: chunk['token'] = token if client: chunk['client'] = client # Make any 'arg' params a list if not already. # This is largely to fix a deficiency in the urlencoded format. if 'arg' in chunk and not isinstance(chunk['arg'], list): chunk['arg'] = [chunk['arg']] ret = self.api.run(chunk) # Sometimes Salt gives us a return and sometimes an iterator if isinstance(ret, collections.Iterator): for i in ret: yield i else: yield ret def GET(self): ''' An explanation of the API with links of where to go next .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: http GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Type: application/json ''' import inspect # Grab all available client interfaces clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient, predicate=inspect.ismethod) if not name.startswith('__')] clients.remove('run') # run method calls client interfaces return { 'return': "Welcome", 'clients': clients, } @cherrypy.tools.salt_token() @cherrypy.tools.salt_auth() def POST(self, **kwargs): ''' Send one or more Salt commands in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -si https://localhost:8000 \\ -H "Accept: application/x-yaml" \\ -H "X-Auth-Token: d40d1e1e" \\ -d client=local \\ -d tgt='*' \\ -d fun='test.ping' \\ -d arg .. code-block:: http POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&arg&client=local&tgt=* **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true ''' return { 'return': list(self.exec_lowstate( token=cherrypy.session.get('token'))) } class Minions(LowDataAdapter): ''' Convenience URLs for working with minions ''' _cp_config = dict(LowDataAdapter._cp_config, **{ 'tools.salt_token.on': True, 'tools.salt_auth.on': True, }) def GET(self, mid=None): ''' A convenience URL for getting lists of minions or getting minion details .. http:get:: /minions/(mid) :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/minions/ms-3 .. code-block:: http GET /minions/ms-3 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 129005 Content-Type: application/x-yaml return: - ms-3: grains.items: ... 
''' cherrypy.request.lowstate = [{ 'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items', }] return { 'return': list(self.exec_lowstate( token=cherrypy.session.get('token'))), } def POST(self, **kwargs): ''' Start an execution command and immediately return the job id .. http:post:: /minions :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. The ``client`` option will be set to :py:meth:`~salt.client.LocalClient.local_async`. **Example request:** .. code-block:: bash curl -sSi localhost:8000/minions \\ -H "Accept: application/x-yaml" \\ -d tgt='*' \\ -d fun='status.diskusage' .. code-block:: http POST /minions HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 26 Content-Type: application/x-www-form-urlencoded tgt=*&fun=status.diskusage **Example response:** .. code-block:: http HTTP/1.1 202 Accepted Content-Length: 86 Content-Type: application/x-yaml return: - jid: '20130603122505459265' minions: [ms-4, ms-3, ms-2, ms-1, ms-0] _links: jobs: - href: /jobs/20130603122505459265 ''' job_data = list(self.exec_lowstate(client='local_async', token=cherrypy.session.get('token'))) cherrypy.response.status = 202 return { 'return': job_data, '_links': { 'jobs': [{'href': '/jobs/{0}'.format(i['jid'])} for i in job_data if i], }, } class Jobs(LowDataAdapter): _cp_config = dict(LowDataAdapter._cp_config, **{ 'tools.salt_token.on': True, 'tools.salt_auth.on': True, }) def GET(self, jid=None): ''' A convenience URL for getting lists of previously run jobs or getting the return from a single job .. http:get:: /jobs/(jid) List jobs or show a single job from the job cache. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/jobs .. code-block:: http GET /jobs HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 165 Content-Type: application/x-yaml return: - '20121130104633606931': Arguments: - '3' Function: test.fib Start Time: 2012, Nov 30 10:46:33.606931 Target: jerry Target-type: glob **Example request:** .. code-block:: bash curl -i localhost:8000/jobs/20121130104633606931 .. code-block:: http GET /jobs/20121130104633606931 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml info: - Arguments: - '3' Function: test.fib Minions: - jerry Start Time: 2012, Nov 30 10:46:33.606931 Target: '*' Target-type: glob User: saltdev jid: '20121130104633606931' return: - jerry: - - 0 - 1 - 1 - 2 - 6.9141387939453125e-06 ''' lowstate = [{ 'client': 'runner', 'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs', 'jid': jid, }] if jid: lowstate.append({ 'client': 'runner', 'fun': 'jobs.list_job', 'jid': jid, }) cherrypy.request.lowstate = lowstate job_ret_info = list(self.exec_lowstate( token=cherrypy.session.get('token'))) ret = {} if jid: job_ret, job_info = job_ret_info ret['info'] = [job_info] else: job_ret = job_ret_info[0] ret['return'] = [job_ret] return ret class Keys(LowDataAdapter): def GET(self, mid=None): ''' A convenience URL for showing the list of minion keys or detail on a specific key .. 
http:get:: /keys/(mid) List all keys or show a specific key :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/keys .. code-block:: http GET /keys HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 165 Content-Type: application/x-yaml return: local: - master.pem - master.pub minions: - jerry minions_pre: [] minions_rejected: [] **Example request:** .. code-block:: bash curl -i localhost:8000/keys/jerry .. code-block:: http GET /keys/jerry HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml return: minions: jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b ''' self._cp_config['tools.salt_token.on'] = True if mid: lowstate = [{ 'client': 'wheel', 'fun': 'key.finger', 'match': mid, }] else: lowstate = [{ 'client': 'wheel', 'fun': 'key.list_all', }] cherrypy.request.lowstate = lowstate result = self.exec_lowstate(token=cherrypy.session.get('token')) return {'return': next(result, {}).get('data', {}).get('return', {})} def POST(self, mid, keysize=None, force=None, **kwargs): r''' Easily generate keys for a minion and auto-accept the new key Example partial kickstart script to bootstrap a new minion: .. code-block:: text %post mkdir -p /etc/salt/pki/minion curl -sS http://localhost:8000/keys \ -d mid=jerry \ -d username=kickstart \ -d password=kickstart \ -d eauth=pam \ | tar -C /etc/salt/pki/minion -xf - mkdir -p /etc/salt/minion.d printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf %end .. http:post:: /keys Generate a public and private key and return both as a tarball Authentication credentials must be passed in the request. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -sS http://localhost:8000/keys \ -d mid=jerry \ -d username=kickstart \ -d password=kickstart \ -d eauth=pam \ -o jerry-salt-keys.tar .. code-block:: http POST /keys HTTP/1.1 Host: localhost:8000 **Example response:** .. 
code-block:: http HTTP/1.1 200 OK Content-Length: 10240 Content-Disposition: attachment; filename="saltkeys-jerry.tar" Content-Type: application/x-tar jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000 ''' self._cp_config['tools.hypermedia_out.on'] = False self._cp_config['tools.sessions.on'] = False lowstate = [{ 'client': 'wheel', 'fun': 'key.gen_accept', 'id_': mid, }] if keysize: lowstate[0]['keysize'] = keysize if force: lowstate[0]['force'] = force lowstate[0].update(kwargs) cherrypy.request.lowstate = lowstate result = self.exec_lowstate() ret = next(result, {}).get('data', {}).get('return', {}) pub_key = ret.get('pub', '') pub_key_file = tarfile.TarInfo('minion.pub') pub_key_file.size = len(pub_key) priv_key = ret.get('priv', '') priv_key_file = tarfile.TarInfo('minion.pem') priv_key_file.size = len(priv_key) fileobj = StringIO.StringIO() tarball = tarfile.open(fileobj=fileobj, mode='w') tarball.addfile(pub_key_file, StringIO.StringIO(pub_key)) tarball.addfile(priv_key_file, StringIO.StringIO(priv_key)) tarball.close() headers = cherrypy.response.headers headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(mid) headers['Content-Type'] = 'application/x-tar' headers['Content-Length'] = fileobj.len headers['Cache-Control'] = 'no-cache' fileobj.seek(0) return fileobj class Login(LowDataAdapter): ''' Log in to receive a session token :ref:`Authentication information <rest_cherrypy-auth>`. ''' def __init__(self, *args, **kwargs): super(Login, self).__init__(*args, **kwargs) self.auth = salt.auth.Resolver(self.opts) def GET(self): ''' Present the login interface .. http:get:: /login An explanation of how to log in. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/login .. code-block:: http GET /login HTTP/1.1 Host: localhost:8000 Accept: text/html **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Type: text/html ''' cherrypy.response.headers['WWW-Authenticate'] = 'Session' return { 'status': cherrypy.response.status, 'return': "Please log in", } def POST(self, **kwargs): ''' :ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system .. http:post:: /login :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :form eauth: the eauth backend configured for the user :form username: username :form password: password :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -si localhost:8000/login \\ -H "Accept: application/json" \\ -d username='saltuser' \\ -d password='saltpass' \\ -d eauth='pam' .. code-block:: http POST / HTTP/1.1 Host: localhost:8000 Content-Length: 42 Content-Type: application/x-www-form-urlencoded Accept: application/json username=saltuser&password=saltpass&eauth=pam **Example response:** .. 
code-block:: http HTTP/1.1 200 OK Content-Type: application/json Content-Length: 206 X-Auth-Token: 6d1b722e Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/ {"return": { "token": "6d1b722e", "start": 1363805943.776223, "expire": 1363849143.776224, "user": "saltuser", "eauth": "pam", "perms": [ "grains.*", "status.*", "sys.*", "test.*" ] }} ''' # the urlencoded_processor will wrap this in a list if isinstance(cherrypy.serving.request.lowstate, list): creds = cherrypy.serving.request.lowstate[0] else: creds = cherrypy.serving.request.lowstate token = self.auth.mk_token(creds) if 'token' not in token: raise cherrypy.HTTPError(401, 'Could not authenticate using provided credentials') cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id cherrypy.session['token'] = token['token'] cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60 # Grab eauth config for the current backend for the current user try: eauth = self.opts.get('external_auth', {}).get(token['eauth'], {}) perms = eauth.get(token['name'], eauth.get('*')) if perms is None: raise ValueError("Eauth permission list not found.") except (AttributeError, IndexError, KeyError, ValueError): logger.debug("Configuration for external_auth malformed for " "eauth '{0}', and user '{1}'." .format(token.get('eauth'), token.get('name')), exc_info=True) raise cherrypy.HTTPError(500, 'Configuration for external_auth could not be read.') return {'return': [{ 'token': cherrypy.session.id, 'expire': token['expire'], 'start': token['start'], 'user': token['name'], 'eauth': token['eauth'], 'perms': perms, }]} class Logout(LowDataAdapter): ''' Class to remove or invalidate sessions ''' _cp_config = dict(LowDataAdapter._cp_config, **{ 'tools.salt_token.on': True, 'tools.salt_auth.on': True, 'tools.lowdata_fmt.on': False, }) def POST(self): ''' Destroy the currently active session and expire the session cookie ''' cherrypy.lib.sessions.expire() # set client-side to expire cherrypy.session.regenerate() # replace server-side with new return {'return': "Your token has been cleared"} class Run(LowDataAdapter): ''' Class to run commands without normal session handling ''' _cp_config = dict(LowDataAdapter._cp_config, **{ 'tools.sessions.on': False, }) def POST(self, **kwargs): ''' Run commands bypassing the :ref:`normal session handling <rest_cherrypy-auth>` .. http:post:: /run This entry point is primarily for "one-off" commands. Each request must pass full Salt authentication credentials. Otherwise this URL is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`. :term:`lowstate` data describing Salt commands must be sent in the request body. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -sS localhost:8000/run \\ -H 'Accept: application/x-yaml' \\ -d client='local' \\ -d tgt='*' \\ -d fun='test.ping' \\ -d username='saltdev' \\ -d password='saltdev' \\ -d eauth='pam' .. code-block:: http POST /run HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 75 Content-Type: application/x-www-form-urlencoded client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true The /run enpoint can also be used to issue commands using the salt-ssh subsystem. When using salt-ssh, eauth credentials should not be supplied. 
Instad, authentication should be handled by the SSH layer itself. The use of the salt-ssh client does not require a salt master to be running. Instead, only a roster file must be present in the salt configuration directory. All SSH client requests are synchronous. ** Example SSH client request:** .. code-block:: bash curl -sS localhost:8000/run \\ -H 'Accept: application/x-yaml' \\ -d client='ssh' \\ -d tgt='*' \\ -d fun='test.ping' .. code-block:: http POST /run HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 75 Content-Type: application/x-www-form-urlencoded client=ssh&tgt=*&fun=test.ping **Example SSH response:** .. code-block:: http return: - silver: fun: test.ping fun_args: [] id: silver jid: '20141203103525666185' retcode: 0 return: true success: true ''' return { 'return': list(self.exec_lowstate()), } class Events(object): ''' Expose the Salt event bus The event bus on the Salt master exposes a large variety of things, notably when executions are started on the master and also when minions ultimately return their results. This URL provides a real-time window into a running Salt infrastructure. .. seealso:: :ref:`events` ''' exposed = True _cp_config = dict(LowDataAdapter._cp_config, **{ 'response.stream': True, 'tools.encode.encoding': 'utf-8', # Auth handled manually below 'tools.salt_token.on': True, 'tools.salt_auth.on': False, 'tools.hypermedia_in.on': False, 'tools.hypermedia_out.on': False, }) def __init__(self): self.opts = cherrypy.config['saltopts'] self.auth = salt.auth.LoadAuth(self.opts) self.resolver = salt.auth.Resolver(self.opts) def _is_valid_salt_token(self, salt_token): ''' Check if this is a valid salt master token More on salt master token generation can be found at http://docs.saltstack.com/en/latest/topics/eauth/index.html#tokens Returns True if this token is a valid salt token False otherwise ''' if salt_token and self.resolver.get_token(salt_token): return True return False def _is_valid_salt_api_token(self, salt_api_token): ''' Check if this is a valid salt api token Salt API tokens are generated on Login Returns True if this token is a valid salt api token False otherwise ''' if not salt_api_token: return False # Pulling the session token from an URL param is a workaround for # browsers not supporting CORS in the EventSource API. if salt_api_token: orig_sesion, _ = cherrypy.session.cache.get(salt_api_token, ({}, None)) salt_token = orig_sesion.get('token') else: salt_token = cherrypy.session.get('token') # Manually verify the token if salt_token and self.auth.get_tok(salt_token): return True return False def GET(self, token=None, salt_token=None): r''' An HTTP stream of the Salt master event bus This stream is formatted per the Server Sent Events (SSE) spec. Each event is formatted as JSON. .. http:get:: /events :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035 Or you can pass the token sent by cherrypy's `/login` endpoint (these are different tokens). :ref:`salt-token-generation` describes the process of obtaining a Salt token. .. code-block:: bash curl -NsS localhost:8000/events?token=308650dbd728d8405a32ac9c2b2c1ed7705222bc .. code-block:: http GET /events HTTP/1.1 Host: localhost:8000 **Example response:** .. 
code-block:: http HTTP/1.1 200 OK Connection: keep-alive Cache-Control: no-cache Content-Type: text/event-stream;charset=utf-8 retry: 400 data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}} data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}} The event stream can be easily consumed via JavaScript: .. code-block:: javascript var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75'); // Salt token works as well! // var source = new EventSource('/events?salt_token=307427657b16a70aed360a46c5370035'); source.onopen = function() { console.debug('opening') }; source.onerror = function(e) { console.debug('error!', e) }; source.onmessage = function(e) { console.debug(e.data) }; Or using CORS: .. code-block:: javascript var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true}); // You can supply the salt token as well var source = new EventSource('/events?salt_token=307427657b16a70aed360a46c5370035', {withCredentials: true}); Some browser clients lack CORS support for the ``EventSource()`` API. Such clients may instead pass the :mailheader:`X-Auth-Token` value as an URL parameter: .. code-block:: bash curl -NsS localhost:8000/events/6d1b722e It is also possible to consume the stream via the shell. Records are separated by blank lines; the ``data:`` and ``tag:`` prefixes will need to be removed manually before attempting to unserialize the JSON. curl's ``-N`` flag turns off input buffering which is required to process the stream incrementally. Here is a basic example of printing each event as it comes in: .. code-block:: bash curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035 |\ while IFS= read -r line ; do echo $line done Here is an example of using awk to filter events based on tag: .. 
code-block:: bash curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035 |\ awk ' BEGIN { RS=""; FS="\\n" } $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 } ' tag: salt/job/20140112010149808995/new data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}} tag: 20140112010149808995 data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}} ''' if (not (self._is_valid_salt_api_token(token) or self._is_valid_salt_token(salt_token))): raise cherrypy.HTTPError(401) # Release the session lock before starting the long-running response cherrypy.session.release_lock() cherrypy.response.headers['Content-Type'] = 'text/event-stream' cherrypy.response.headers['Cache-Control'] = 'no-cache' cherrypy.response.headers['Connection'] = 'keep-alive' def listen(): ''' An iterator to yield Salt events ''' event = salt.utils.event.get_event( 'master', sock_dir=self.opts['sock_dir'], transport=self.opts['transport'], opts=self.opts) stream = event.iter_events(full=True) yield u'retry: {0}\n'.format(400) while True: data = next(stream) yield u'tag: {0}\n'.format(data.get('tag', '')) yield u'data: {0}\n\n'.format(json.dumps(data)) return listen() class WebsocketEndpoint(object): ''' Open a WebSocket connection to Salt's event bus The event bus on the Salt master exposes a large variety of things, notably when executions are started on the master and also when minions ultimately return their results. This URL provides a real-time window into a running Salt infrastructure. Uses websocket as the transport mechanism. .. seealso:: :ref:`events` ''' exposed = True _cp_config = dict(LowDataAdapter._cp_config, **{ 'response.stream': True, 'tools.encode.encoding': 'utf-8', # Auth handled manually below 'tools.salt_token.on': True, 'tools.salt_auth.on': False, 'tools.hypermedia_in.on': False, 'tools.hypermedia_out.on': False, 'tools.websocket.on': True, 'tools.websocket.handler_cls': websockets.SynchronizingWebsocket, }) def __init__(self): self.opts = cherrypy.config['saltopts'] self.auth = salt.auth.LoadAuth(self.opts) def GET(self, token=None, **kwargs): ''' Return a websocket connection of Salt's event stream .. http:get:: /ws/(token) :query format_events: The event stream will undergo server-side formatting if the ``format_events`` URL parameter is included in the request. This can be useful to avoid formatting on the client-side: .. code-block:: bash curl -NsS <...snip...> localhost:8000/ws?format_events :reqheader X-Auth-Token: an authentication token from :py:class:`~Login`. :status 101: switching to the websockets protocol :status 401: |401| :status 406: |406| **Example request:** curl -NsS \\ -H 'X-Auth-Token: ffedf49d' \\ -H 'Host: localhost:8000' \\ -H 'Connection: Upgrade' \\ -H 'Upgrade: websocket' \\ -H 'Origin: http://localhost:8000' \\ -H 'Sec-WebSocket-Version: 13' \\ -H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\ localhost:8000/ws .. code-block:: http GET /ws HTTP/1.1 Connection: Upgrade Upgrade: websocket Host: localhost:8000 Origin: http://localhost:8000 Sec-WebSocket-Version: 13 Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA== X-Auth-Token: ffedf49d **Example response**: .. 
code-block:: http HTTP/1.1 101 Switching Protocols Upgrade: websocket Connection: Upgrade Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE= Sec-WebSocket-Version: 13 An authentication token **may optionally** be passed as part of the URL for browsers that cannot be configured to send the authentication header or cookie: .. code-block:: bash curl -NsS <...snip...> localhost:8000/ws/ffedf49d The event stream can be easily consumed via JavaScript: .. code-block:: javascript // Note, you must be authenticated! var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a'); source.onerror = function(e) { console.debug('error!', e); }; source.onmessage = function(e) { console.debug(e.data); }; source.send('websocket client ready') source.close(); Or via Python, using the Python module `websocket-client <https://pypi.python.org/pypi/websocket-client/>`_ for example. .. code-block:: python # Note, you must be authenticated! from websocket import create_connection ws = create_connection('ws://localhost:8000/ws/d0ce6c1a') ws.send('websocket client ready') # Look at https://pypi.python.org/pypi/websocket-client/ for more # examples. while listening_to_events: print ws.recv() ws.close() Above examples show how to establish a websocket connection to Salt and activating real time updates from Salt's event stream by signaling ``websocket client ready``. ''' # Pulling the session token from an URL param is a workaround for # browsers not supporting CORS in the EventSource API. if token: orig_sesion, _ = cherrypy.session.cache.get(token, ({}, None)) salt_token = orig_sesion.get('token') else: salt_token = cherrypy.session.get('token') # Manually verify the token if not salt_token or not self.auth.get_tok(salt_token): raise cherrypy.HTTPError(401) # Release the session lock before starting the long-running response cherrypy.session.release_lock() # A handler is the server side end of the websocket connection. Each # request spawns a new instance of this handler handler = cherrypy.request.ws_handler def event_stream(handler, pipe): ''' An iterator to return Salt events (and optionally format them) ''' # blocks until send is called on the parent end of this pipe. pipe.recv() event = salt.utils.event.get_event( 'master', sock_dir=self.opts['sock_dir'], transport=self.opts['transport'], opts=self.opts) stream = event.iter_events(full=True) SaltInfo = event_processor.SaltInfo(handler) while True: data = next(stream) if data: try: # work around try to decode catch unicode errors if 'format_events' in kwargs: SaltInfo.process(data, salt_token, self.opts) else: handler.send('data: {0}\n\n'.format( json.dumps(data)), False) except UnicodeDecodeError: logger.error( "Error: Salt event has non UTF-8 data:\n{0}" .format(data)) time.sleep(0.1) parent_pipe, child_pipe = Pipe() handler.pipe = parent_pipe handler.opts = self.opts # Process to handle async push to a client. # Each GET request causes a process to be kicked off. proc = Process(target=event_stream, args=(handler, child_pipe)) proc.start() class Webhook(object): ''' A generic web hook entry point that fires an event on Salt's event bus External services can POST data to this URL to trigger an event in Salt. For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks. .. note:: Be mindful of security Salt's Reactor can run any code. A Reactor SLS that responds to a hook event is responsible for validating that the event came from a trusted source and contains valid data. 
**This is a generic interface and securing it is up to you!** This URL requires authentication however not all external services can be configured to authenticate. For this reason authentication can be selectively disabled for this URL. Follow best practices -- always use SSL, pass a secret key, configure the firewall to only allow traffic from a known source, etc. The event data is taken from the request body. The :mailheader:`Content-Type` header is respected for the payload. The event tag is prefixed with ``salt/netapi/hook`` and the URL path is appended to the end. For example, a ``POST`` request sent to ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag ``salt/netapi/hook/mycompany/myapp/mydata``. The following is an example ``.travis.yml`` file to send notifications to Salt of successful test runs: .. code-block:: yaml language: python script: python -m unittest tests after_success: - 'curl -sS http://saltapi-url.example.com:8000/hook/travis/build/success -d branch="${TRAVIS_BRANCH}" -d commit="${TRAVIS_COMMIT}"' .. seealso:: :ref:`events`, :ref:`reactor` ''' exposed = True tag_base = ['salt', 'netapi', 'hook'] _cp_config = dict(LowDataAdapter._cp_config, **{ # Don't do any lowdata processing on the POST data 'tools.lowdata_fmt.on': True, # Auth can be overridden in __init__(). 'tools.salt_token.on': True, 'tools.salt_auth.on': True, }) def __init__(self): self.opts = cherrypy.config['saltopts'] self.event = salt.utils.event.get_event( 'master', sock_dir=self.opts['sock_dir'], transport=self.opts['transport'], opts=self.opts, listen=False) if cherrypy.config['apiopts'].get('webhook_disable_auth'): self._cp_config['tools.salt_token.on'] = False self._cp_config['tools.salt_auth.on'] = False def POST(self, *args, **kwargs): ''' Fire an event in Salt with a custom event tag and data .. http:post:: /hook :status 200: |200| :status 401: |401| :status 406: |406| :status 413: request body is too large **Example request:** .. code-block:: bash curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!' .. code-block:: http POST /hook HTTP/1.1 Host: localhost:8000 Content-Length: 16 Content-Type: application/x-www-form-urlencoded foo=Foo&bar=Bar! **Example response**: .. code-block:: http HTTP/1.1 200 OK Content-Length: 14 Content-Type: application/json {"success": true} As a practical example, an internal continuous-integration build server could send an HTTP POST request to the URL ``http://localhost:8000/hook/mycompany/build/success`` which contains the result of a build and the SHA of the version that was built as JSON. That would then produce the following event in Salt that could be used to kick off a deployment via Salt's Reactor:: Event fired at Fri Feb 14 17:40:11 2014 ************************* Tag: salt/netapi/hook/mycompany/build/success Data: {'_stamp': '2014-02-14_17:40:11.440996', 'headers': { 'X-My-Secret-Key': 'F0fAgoQjIT@W', 'Content-Length': '37', 'Content-Type': 'application/json', 'Host': 'localhost:8000', 'Remote-Addr': '127.0.0.1'}, 'post': {'revision': 'aa22a3c4b2e7', 'result': True}} Salt's Reactor could listen for the event: .. code-block:: yaml reactor: - 'salt/netapi/hook/mycompany/build/*': - /srv/reactor/react_ci_builds.sls And finally deploy the new build: .. 
code-block:: yaml {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %} {% set build = data.get('post', {}) %} {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %} deploy_my_app: cmd.state.sls: - tgt: 'application*' - arg: - myapp.deploy - kwarg: pillar: revision: {{ revision }} {% endif %} ''' tag = '/'.join(itertools.chain(self.tag_base, args)) data = cherrypy.serving.request.unserialized_data raw_body = cherrypy.serving.request.raw_body headers = dict(cherrypy.request.headers) ret = self.event.fire_event({ 'body': raw_body, 'post': data, 'headers': headers, }, tag) return {'success': ret} class Stats(object): ''' Expose statistics on the running CherryPy server ''' exposed = True _cp_config = dict(LowDataAdapter._cp_config, **{ 'tools.salt_token.on': True, 'tools.salt_auth.on': True, }) def GET(self): ''' Return a dump of statistics collected from the CherryPy server .. http:get:: /stats :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| ''' if hasattr(logging, 'statistics'): return cpstats.extrapolate_statistics(logging.statistics) return {} class App(object): ''' Class to serve HTML5 apps ''' exposed = True def GET(self, *args): ''' Serve a single static file ignoring the remaining path This is useful in combination with a browser-based app using the HTML5 history API. .. http::get:: /app :reqheader X-Auth-Token: |req_token| :status 200: |200| :status 401: |401| ''' apiopts = cherrypy.config['apiopts'] return cherrypy.lib.static.serve_file(apiopts['app']) class API(object): ''' Collect configuration and URL map for building the CherryPy app ''' url_map = { 'index': LowDataAdapter, 'login': Login, 'logout': Logout, 'minions': Minions, 'run': Run, 'jobs': Jobs, 'keys': Keys, 'events': Events, 'stats': Stats, } def _setattr_url_map(self): ''' Set an attribute on the local instance for each key/val in url_map CherryPy uses class attributes to resolve URLs. ''' for url, cls in self.url_map.items(): setattr(self, url, cls()) def _update_url_map(self): ''' Assemble any dynamic or configurable URLs ''' if HAS_WEBSOCKETS: self.url_map.update({ 'ws': WebsocketEndpoint, }) # Allow the Webhook URL to be overridden from the conf. self.url_map.update({ self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook, }) # Enable the single-page JS app URL. 
if 'app' in self.apiopts: self.url_map.update({ self.apiopts.get('app_path', 'app').lstrip('/'): App, }) def __init__(self): self.opts = cherrypy.config['saltopts'] self.apiopts = cherrypy.config['apiopts'] self._update_url_map() self._setattr_url_map() def get_conf(self): ''' Combine the CherryPy configuration with the rest_cherrypy config values pulled from the master config and return the CherryPy configuration ''' conf = { 'global': { 'server.socket_host': self.apiopts.get('host', '0.0.0.0'), 'server.socket_port': self.apiopts.get('port', 8000), 'server.thread_pool': self.apiopts.get('thread_pool', 100), 'server.socket_queue_size': self.apiopts.get('queue_size', 30), 'engine.timeout_monitor.on': self.apiopts.get( 'expire_responses', True), 'max_request_body_size': self.apiopts.get( 'max_request_body_size', 1048576), 'debug': self.apiopts.get('debug', False), }, '/': { 'request.dispatch': cherrypy.dispatch.MethodDispatcher(), 'tools.trailing_slash.on': True, 'tools.gzip.on': True, 'tools.cpstats.on': self.apiopts.get('collect_stats', False), 'tools.cors_tool.on': True, }, } if self.apiopts.get('debug', False) is False: conf['global']['environment'] = 'production' # Serve static media if the directory has been set in the configuration if 'static' in self.apiopts: conf[self.apiopts.get('static_path', '/static')] = { 'tools.staticdir.on': True, 'tools.staticdir.dir': self.apiopts['static'], } # Add to global config cherrypy.config.update(conf['global']) return conf def get_app(opts): ''' Returns a WSGI app and a configuration dictionary ''' apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts # Add Salt and salt-api config options to the main CherryPy config dict cherrypy.config['saltopts'] = opts cherrypy.config['apiopts'] = apiopts root = API() # cherrypy app cpyopts = root.get_conf() # cherrypy app opts return root, apiopts, cpyopts
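The rest_cherrypy module above documents the login-then-lowstate flow at length in its docstrings (the /login and root-URL examples). Purely as a client-side illustration, a minimal sketch using the third-party requests library might look like the following; the localhost:8000 address, the saltdev/pam credentials, and the use of requests itself are assumptions taken from the documentation examples, not part of the module.

# Client-side sketch only. Assumes a salt-api instance at localhost:8000
# with pam eauth configured for a 'saltdev' user, as in the docstring
# examples above; `requests` is not a dependency of the module itself.
import requests

API = 'http://localhost:8000'

# 1. Authenticate against /login; the session token is returned both in
#    the X-Auth-Token header and in the JSON body.
login = requests.post(
    API + '/login',
    json={'username': 'saltdev', 'password': 'saltdev', 'eauth': 'pam'},
    headers={'Accept': 'application/json'},
)
login.raise_for_status()
token = login.json()['return'][0]['token']

# 2. POST a list of lowstate chunks to the root URL; several commands
#    can be batched into one request, as described in the Usage section.
resp = requests.post(
    API + '/',
    json=[{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}],
    headers={'X-Auth-Token': token, 'Accept': 'application/json'},
)
print(resp.json())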
test_http.py
import contextlib import asyncio import os import pytest from http.server import BaseHTTPRequestHandler, HTTPServer import sys import threading import fsspec requests = pytest.importorskip("requests") port = 9898 data = b"\n".join([b"some test data"] * 1000) realfile = "http://localhost:%i/index/realfile" % port index = b'<a href="%s">Link</a>' % realfile.encode() win = os.name == "nt" class HTTPTestHandler(BaseHTTPRequestHandler): def _respond(self, code=200, headers=None, data=b""): headers = headers or {} headers.update({"User-Agent": "test"}) self.send_response(code) for k, v in headers.items(): self.send_header(k, str(v)) self.end_headers() if data: self.wfile.write(data) def do_GET(self): if self.path.rstrip("/") not in [ "/index/realfile", "/index/otherfile", "/index", ]: self._respond(404) return d = data if self.path in ["/index/realfile", "/index/otherfile"] else index if "Range" in self.headers: ran = self.headers["Range"] b, ran = ran.split("=") start, end = ran.split("-") d = d[int(start) : int(end) + 1] if "give_length" in self.headers: response_headers = {"Content-Length": len(d)} self._respond(200, response_headers, d) elif "give_range" in self.headers: self._respond(200, {"Content-Range": "0-%i/%i" % (len(d) - 1, len(d))}, d) else: self._respond(200, data=d) def do_HEAD(self): if "head_ok" not in self.headers: self._respond(405) return d = data if self.path == "/index/realfile" else index if self.path.rstrip("/") not in ["/index/realfile", "/index"]: self._respond(404) elif "give_length" in self.headers: response_headers = {"Content-Length": len(d)} if "zero_length" in self.headers: response_headers["Content-Length"] = 0 self._respond(200, response_headers) elif "give_range" in self.headers: self._respond(200, {"Content-Range": "0-%i/%i" % (len(d) - 1, len(d))}) else: self._respond(200) # OK response, but no useful info @contextlib.contextmanager def serve(): server_address = ("", port) httpd = HTTPServer(server_address, HTTPTestHandler) th = threading.Thread(target=httpd.serve_forever) th.daemon = True th.start() try: yield "http://localhost:%i" % port finally: httpd.socket.close() httpd.shutdown() th.join() @pytest.fixture(scope="module") def server(): with serve() as s: yield s def test_list(server): h = fsspec.filesystem("http") out = h.glob(server + "/index/*") assert out == [server + "/index/realfile"] def test_isdir(server): h = fsspec.filesystem("http") assert h.isdir(server + "/index/") assert not h.isdir(server + "/index/realfile") def test_policy_arg(server): h = fsspec.filesystem("http", size_policy="get") out = h.glob(server + "/index/*") assert out == [server + "/index/realfile"] def test_exists(server): h = fsspec.filesystem("http") assert not h.exists(server + "/notafile") with pytest.raises(FileNotFoundError): h.cat(server + "/notafile") def test_read(server): h = fsspec.filesystem("http") out = server + "/index/realfile" with h.open(out, "rb") as f: assert f.read() == data with h.open(out, "rb", block_size=0) as f: assert f.read() == data with h.open(out, "rb") as f: assert f.read(100) + f.read() == data def test_file_pickle(server): import pickle # via HTTPFile h = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true"}) out = server + "/index/realfile" with h.open(out, "rb") as f: pic = pickle.dumps(f) assert f.read() == data with pickle.loads(pic) as f: assert f.read() == data # via HTTPStreamFile h = fsspec.filesystem("http") out = server + "/index/realfile" with h.open(out, "rb") as f: out = pickle.dumps(f) assert f.read() == 
data with pickle.loads(out) as f: assert f.read() == data def test_methods(server): h = fsspec.filesystem("http") url = server + "/index/realfile" assert h.exists(url) assert h.cat(url) == data @pytest.mark.parametrize( "headers", [ {}, {"give_length": "true"}, {"give_length": "true", "head_ok": "true"}, {"give_range": "true"}, ], ) def test_random_access(server, headers): h = fsspec.filesystem("http", headers=headers) url = server + "/index/realfile" with h.open(url, "rb") as f: if headers: assert f.size == len(data) assert f.read(5) == data[:5] if headers: f.seek(5, 1) assert f.read(5) == data[10:15] else: with pytest.raises(ValueError): f.seek(5, 1) def test_mapper_url(server): h = fsspec.filesystem("http") mapper = h.get_mapper(server + "/index/") assert mapper.root.startswith("http:") assert list(mapper) mapper2 = fsspec.get_mapper(server + "/index/") assert mapper2.root.startswith("http:") assert list(mapper) == list(mapper2) def test_content_length_zero(server): h = fsspec.filesystem( "http", headers={"give_length": "true", "zero_length": "true"} ) url = server + "/index/realfile" with h.open(url, "rb") as f: assert f.read() == data def test_download(server, tmpdir): h = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true "}) url = server + "/index/realfile" fn = os.path.join(tmpdir, "afile") h.get(url, fn) assert open(fn, "rb").read() == data def test_multi_download(server, tmpdir): h = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true "}) urla = server + "/index/realfile" urlb = server + "/index/otherfile" fna = os.path.join(tmpdir, "afile") fnb = os.path.join(tmpdir, "bfile") h.get([urla, urlb], [fna, fnb]) assert open(fna, "rb").read() == data assert open(fnb, "rb").read() == data def test_mcat(server): h = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true "}) urla = server + "/index/realfile" urlb = server + "/index/otherfile" out = h.cat([urla, urlb]) assert out == {urla: data, urlb: data} def test_mcat_cache(server): urla = server + "/index/realfile" urlb = server + "/index/otherfile" fs = fsspec.filesystem("simplecache", target_protocol="http") assert fs.cat([urla, urlb]) == {urla: data, urlb: data} def test_mcat_expand(server): h = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true "}) out = h.cat(server + "/index/*") assert out == {server + "/index/realfile": data} @pytest.mark.xfail( condition=sys.flags.optimize > 1, reason="no docstrings when optimised" ) def test_docstring(): h = fsspec.filesystem("http") # most methods have empty docstrings and draw from base class, but this one # is generated assert h.pipe.__doc__ def test_async_other_thread(server): import threading loop = asyncio.get_event_loop() th = threading.Thread(target=loop.run_forever) th.daemon = True th.start() fs = fsspec.filesystem("http", asynchronous=False, loop=loop) cor = fs._cat([server + "/index/realfile"]) fut = asyncio.run_coroutine_threadsafe(cor, loop=loop) assert fut.result() == [data] @pytest.mark.skipif(sys.version_info < (3, 7), reason="no asyncio.run in py36") def test_async_this_thread(server): async def _(): loop = asyncio.get_event_loop() fs = fsspec.filesystem("http", asynchronous=True, loop=loop) # fails because client creation has not yet been awaited assert isinstance( (await fs._cat([server + "/index/realfile"]))[0], RuntimeError ) with pytest.raises(RuntimeError): fs.cat([server + "/index/realfile"]) await fs.set_session() # creates client out = await fs._cat([server + 
"/index/realfile"]) del fs assert out == [data] asyncio.run(_()) def _inner_pass(fs, q, fn): # pass the s3 instance, but don't use it; in new process, the instance # cache should be skipped to make a new instance fs = fsspec.filesystem("http") q.put(fs.cat(fn)) @pytest.mark.skipif( bool(os.environ.get("CIRUN", "")), reason="CI runs are weird in many ways" ) @pytest.mark.parametrize("method", ["spawn", "forkserver", "fork"]) def test_processes(server, method): import multiprocessing as mp if win and method != "spawn": pytest.skip("Windows can only spawn") ctx = mp.get_context(method) fn = server + "/index/realfile" fs = fsspec.filesystem("http") q = ctx.Queue() p = ctx.Process(target=_inner_pass, args=(fs, q, fn)) p.start() assert q.get() == fs.cat(fn) p.join()
online.py
from ...objects import dp, MySignalEvent, DB
from ... import utils
from threading import Thread
import time
from vkapi import VkApi
import typing
import logging

logger = logging.getLogger(__name__)

online_thread: typing.Optional[Thread] = None
stop_thread = False


def set_online(v):
    global stop_thread
    db = DB()
    stop_thread = v
    if not v:
        api = VkApi(db.online_token)
        afa_thread = Thread(target=online_th, args=(api, lambda: stop_thread),
                            name='Online Thread', daemon=True)
        afa_thread.start()


def online_th(api: VkApi, stop: typing.Callable):
    """Re-assert online status roughly every five minutes until stop() is true."""
    is_stop = False
    while True:
        try:
            if is_stop:
                break
            logger.info("Setting online status")
            api('account.setOnline', voip=0)
            for _ in range(60):
                is_stop = stop()
                if is_stop:
                    break
                time.sleep(5)
        except Exception as e:
            logger.info(f"Error in online: {e}")


@dp.my_signal_event_handle('-онлайн')
def off_online(event: MySignalEvent):
    global online_thread
    global stop_thread
    logger.info("Online disabled")
    if online_thread is None or not online_thread.is_alive():
        utils.new_message(event.api, event.chat.peer_id,
                          message="❗ Perpetual online is not running")
        return "ok"
    stop_thread = True
    # online_thread.join()
    utils.new_message(event.api, event.chat.peer_id,
                      message="✅ Perpetual online stopped")
    return "ok"


@dp.my_signal_event_handle('+онлайн')
def on_online(event: MySignalEvent):
    global online_thread
    global stop_thread
    logger.info("Online enabled")
    stop_thread = False
    token = event.db.online_token
    if token is None:
        utils.new_message(event.api, event.chat.peer_id,
                          message=f"❗ Token is not set.\n You can set it in the admin panel at https://{event.db.host}")
        return "ok"
    if online_thread is not None and online_thread.is_alive():
        utils.new_message(event.api, event.chat.peer_id,
                          message="✅ Perpetual online is already running")
        return "ok"
    api_ = VkApi(token)
    online_thread = Thread(target=online_th, args=(api_, lambda: stop_thread),
                           name='Online Thread', daemon=True)
    online_thread.start()
    utils.new_message(event.api, event.chat.peer_id,
                      message="✅ Perpetual online started")
    return "ok"


@dp.my_signal_event_handle('онлайн')
def check_online(event: MySignalEvent):
    global online_thread
    if online_thread is not None and online_thread.is_alive():
        utils.new_message(event.api, event.chat.peer_id,
                          message="✅ Perpetual online is running")
        return "ok"
    else:
        utils.new_message(event.api, event.chat.peer_id,
                          message="✅ Perpetual online is not running")
        return "ok"
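The worker above polls a lambda over a module-level stop_thread flag every five seconds to decide when to exit. As a design note, the same shutdown behaviour can be expressed with threading.Event, whose wait() doubles as an interruptible sleep; the sketch below is illustrative only and is not how the handlers above are wired.

# Illustrative alternative to the module-level stop_thread flag above.
# Not used by the handlers in this module.
import threading

stop_event = threading.Event()

def online_worker(api, stop: threading.Event, interval: int = 300):
    # Re-assert online status roughly every `interval` seconds until
    # the event is set; wait() returns early as soon as set() is called.
    while not stop.is_set():
        api('account.setOnline', voip=0)
        stop.wait(interval)

# worker = threading.Thread(target=online_worker, args=(api, stop_event),
#                           name='Online Thread', daemon=True)
# worker.start()
# ...
# stop_event.set()  # the worker exits within one wait() interval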
mplog.py
# Adapted from https://gist.github.com/schlamar/7003737
from __future__ import unicode_literals

import contextlib
import multiprocessing
import logging
import threading


def daemon(log_queue):
    while True:
        try:
            record_data = log_queue.get()
            if record_data is None:
                break
            record = logging.makeLogRecord(record_data)
            logger = logging.getLogger(record.name)
            if logger.isEnabledFor(record.levelno):
                logger.handle(record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except EOFError:
            break
        except:
            logging.exception('Error in log handler.')


class MPLogger(logging.Logger):
    log_queue = None

    def isEnabledFor(self, level):
        return True

    def handle(self, record):
        ei = record.exc_info
        if ei:
            # to get traceback text into record.exc_text
            logging._defaultFormatter.format(record)
            record.exc_info = None  # not needed any more
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        self.log_queue.put(d)


def logged_call(log_queue, func, *args, **kwargs):
    MPLogger.log_queue = log_queue
    logging.setLoggerClass(MPLogger)
    # monkey patch root logger and already defined loggers
    logging.root.__class__ = MPLogger
    for logger in logging.Logger.manager.loggerDict.values():
        if not isinstance(logger, logging.PlaceHolder):
            logger.__class__ = MPLogger
    return func(*args, **kwargs)


@contextlib.contextmanager
def open_queue():
    log_queue = multiprocessing.Queue()
    daemon_thread = threading.Thread(target=daemon, args=(log_queue,))
    daemon_thread.daemon = True
    daemon_thread.start()
    yield log_queue
    log_queue.put(None)
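A short usage sketch for the helpers above (not part of the original gist): the parent process opens the queue and configures logging normally, while the child runs its target through logged_call so its records are shipped back over the queue and handled by the parent's configuration. The work() function and the 'worker' logger name are made up for illustration.

# Usage sketch for open_queue()/logged_call().
import logging
import multiprocessing


def work():
    logging.getLogger('worker').info('hello from the child process')


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    with open_queue() as queue:
        proc = multiprocessing.Process(target=logged_call, args=(queue, work))
        proc.start()
        proc.join()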
test_exchange.py
import helper
import logging
import pika
import pytest
import threading
import time

LOG = logging.getLogger()

message_received = threading.Condition()


def declare_exchange_and_queue(conn, exchange, exchange_type, queue):
    channel = conn.channel(channel_number=1)
    channel.exchange_declare(exchange=exchange,
                             exchange_type=exchange_type,
                             passive=False,
                             durable=False,
                             auto_delete=False,
                             internal=False)
    channel.queue_declare(queue)
    channel.queue_bind(queue, exchange)
    return channel


def publish_message(channel, exchange, routing_key):
    LOG.info('Publishing message')
    channel.basic_publish(exchange, routing_key,
                          'Hey, receiver! How are you?',
                          pika.BasicProperties(content_type='text/plain',
                                               delivery_mode=1))


def cleanup(channel, exchange, queue):
    channel.queue_unbind(queue, exchange)
    channel.queue_delete(queue)
    channel.exchange_delete(exchange)


def consume_message(conn, queue):
    channel = conn.channel(channel_number=1)
    channel.basic_consume(queue, on_message_callback=on_consumer_receive)
    channel.start_consuming()


def on_consumer_receive(channel, method, properties, body):
    LOG.info('Got message %s', body)
    channel.basic_ack(delivery_tag=method.delivery_tag, multiple=False)
    channel.stop_consuming()
    with message_received:
        message_received.notify()


def test_basic_publish(caplog):
    """
    Send a message through a declared topic exchange and the other user
    will get it.
    """
    sender = helper.connect()
    channel = declare_exchange_and_queue(sender, 'my-exchange', 'topic', 'my-queue')

    receiver = helper.connect()
    threading.Thread(target=consume_message, args=(receiver, 'my-queue')).start()
    # Give the consumer a chance to start and go into its waiting state
    time.sleep(0.5)

    publish_message(channel, 'my-exchange', 'my-queue')

    with message_received:
        message_received.wait()

    cleanup(channel, 'my-exchange', 'my-queue')

    receiver.close()
    sender.close()


def test_exchange_mandatory_error(caplog):
    """
    A Basic.Return should be sent back if the message is non-routable and
    mandatory is true.
    """
    client = helper.connect()

    channel = client.channel(channel_number=4)
    channel.confirm_delivery()
    channel.exchange_declare(exchange='not-routed', exchange_type='topic')

    with pytest.raises(pika.exceptions.UnroutableError) as exp:
        channel.basic_publish('not-routed', 'any', 'body', mandatory=True)

    channel.exchange_delete('not-routed')
    channel.close()
    client.close()
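test_basic_publish above waits on the condition with no timeout, so a lost message hangs the suite rather than failing it. A bounded variant of the same handshake might look like the sketch below; it is illustrative only and simply reuses the helpers defined above.

# Sketch: same publish/consume handshake, but with a bounded wait so a
# missing delivery fails fast instead of blocking forever. Acquiring the
# condition before publishing also closes the notify-before-wait race.
def test_basic_publish_bounded_wait():
    sender = helper.connect()
    channel = declare_exchange_and_queue(sender, 'my-exchange', 'topic', 'my-queue')

    receiver = helper.connect()
    consumer = threading.Thread(target=consume_message, args=(receiver, 'my-queue'))
    consumer.start()
    time.sleep(0.5)  # let the consumer reach start_consuming()

    with message_received:
        publish_message(channel, 'my-exchange', 'my-queue')
        assert message_received.wait(timeout=5), 'message was never delivered'

    consumer.join(timeout=5)
    cleanup(channel, 'my-exchange', 'my-queue')
    receiver.close()
    sender.close()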
multiprocessing_daemon_join.py
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Daemon vs. non-daemon processes.
"""
#end_pymotw_header

import multiprocessing
import time


def daemon():
    name = multiprocessing.current_process().name
    print('Starting:', name)
    time.sleep(2)
    print('Exiting :', name)


def non_daemon():
    name = multiprocessing.current_process().name
    print('Starting:', name)
    print('Exiting :', name)


if __name__ == '__main__':
    d = multiprocessing.Process(name='daemon', target=daemon)
    d.daemon = True

    n = multiprocessing.Process(name='non-daemon', target=non_daemon)
    n.daemon = False

    d.start()
    time.sleep(1)
    n.start()

    d.join()
    n.join()
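As a companion to the example above (not part of the original PyMOTW script), joining the daemon with a timeout shows that the parent can stop waiting before the daemon's two-second sleep finishes, leaving the daemon to be terminated at interpreter exit.

# Companion sketch: join() with a timeout returns before the daemon's
# 2-second sleep completes, and is_alive() still reports True.
if __name__ == '__main__':
    d2 = multiprocessing.Process(name='daemon-timeout', target=daemon)
    d2.daemon = True
    d2.start()
    d2.join(timeout=1)
    print('d2.is_alive():', d2.is_alive())  # True: abandoned when the parent exits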
communication.py
# BSD-3-Clause License # # Copyright 2017 Orange # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import json import logging import socket from collections import namedtuple, defaultdict from http.server import HTTPServer, BaseHTTPRequestHandler from json import JSONDecodeError from queue import Empty, PriorityQueue from threading import Thread from time import perf_counter, sleep from typing import Tuple, Dict, Optional import requests from requests.exceptions import ConnectionError from pydcop.infrastructure.discovery import UnknownComputation, UnknownAgent from pydcop.utils.simple_repr import simple_repr, from_repr logger = logging.getLogger("infrastructure.communication") ComputationMessage = namedtuple( "ComputationMessage", ["src_comp", "dest_comp", "msg", "msg_type"] ) # FIXME: this should be an ABC, not an object class. class CommunicationLayer(object): """ Base class for CommunicationLayer objects. CommunicationLayer objects are used to sent messages from one agent to another. Each agent should have it's own CommunicationLayer instance. The behavior on message sending failure can be specified when building the instance and overridden for a specific message when calling `send_msg`, with the `òn_error` parameter, which accept the following values: * 'ignore': `send_msg` always return True even if the message could not be delivered * 'fail': if the target agent could not be found, raise a `UnknownAgent` exception. If the target agent does not host the computation for the message (and he answered the request with the corresponding error code), raise an `UnknownComputation` exception. * 'retry': `send_msg` returns `True` only if the message was delivered successfully and the target agent host the computation. Otherwise the message is kept and the `CommunicationLayer` will try to send it latter. This new attempt will be done when calling `register` or `retry` for the target agent. Notes ----- To be usable a Communication Layer needs a Discovery instance in order to have access to other agents address. 
This instance is not given directly in the constructor as it depend on the agent that will be using the CommunicationLayer instance, even though the CommunicationLayer is usually built before the Agent instance. So you always need something like this, which is automatically done by the agent when passing it a communication layer: self.discovery = aDiscovery Additionally a communication layer also requires a Messaging instance in order to be able to post messages in the local message queue. This association is automatically done when creating a Messaging instance. comm1 = InProcessCommunicationLayer() messaging = Messaging(name, comm) Parameters ---------- on_error: str on_error behavior, must be refactored, not really used ATM. """ def __init__(self, on_error=None) -> None: self._on_error = on_error self.discovery = None self.messaging = None self._failed_msg = defaultdict(lambda: []) @property def address(self): """ An address that can be used to sent messages to this communication layer. The concrete type of object returned depends on the class implementing the CommunicationLayer protocol. """ raise NotImplementedError("Protocol class") def send_msg( self, src_agent: str, dest_agent: str, msg: ComputationMessage, on_error=None, from_retry=False, ): """ Parameters ---------- src_agent: str name of the sender agent dest_agent: str name of the target agent msg: ComputationMessage the message on_error: error handling mode, overrides the default mode set when creating the CommunicationLayer instance from_retry: internal arg, do NOT use. """ raise NotImplementedError("Protocol class") def shutdown(self): raise NotImplementedError("Protocol class") def _on_send_error(self, src_agent, dest_agent, msg, on_error, exception): if on_error == "fail": raise exception( "Error when sending message {} -> {} : {}".format( src_agent, dest_agent, msg ) ) elif on_error == "ignore": logger.warning( "could not send message from %s to %s, ignoring : " "%s", src_agent, dest_agent, msg, ) return True elif on_error == "retry": logger.warning( "could not send message from %s to %s, will retry " "later : %s", src_agent, dest_agent, msg, ) self._failed_msg[dest_agent].append((src_agent, dest_agent, msg, on_error)) return False else: logger.warning( "could not send message from %s to %s, " "and no on_erro policy : ignoring : " "%s", src_agent, dest_agent, msg, ) return False def retry(self, dest_agent: str): """ Attempt to send all failed messages for this agent. :param dest_agent: :return: """ for src, dest, msg, on_error in self._failed_msg[dest_agent]: logger.warning( "retrying delivery of message from %s to %s : " "%s", src, dest, msg ) self.send_msg(src, dest, msg, on_error, from_retry=True) class UnreachableAgent(Exception): pass class InProcessCommunicationLayer(CommunicationLayer): """ Implements communication for several thread-based agents in the same process. For in process communication, we don't really have an address, instead we directly use InProcessCommunicationLayer instances as addresses. """ def __init__(self, on_error=None): super().__init__(on_error) @property def address(self): """ For in-process communication, we use the object itself as the address. :return: """ return self def send_msg( self, src_agent: str, dest_agent: str, msg: ComputationMessage, on_error=None, from_retry=False, ): """ Send a message to an agent. 
:param src_agent: name of the source agent :param dest_agent: name of the agent :param msg: the message, can be any python object (only with InProcess communication) :param on_error: how to handle failure when sending the message. When used, this parameter overrides the behavior set when building the CommunicationLayer. """ on_error = on_error if on_error is not None else self._on_error try: address = self.discovery.agent_address(dest_agent) address.receive_msg(src_agent, dest_agent, msg) except UnknownAgent: logger.warning( f"Sending message from {src_agent} to unknown agent {dest_agent} :" f" {msg} " ) return self._on_send_error( src_agent, dest_agent, msg, on_error, UnknownAgent ) return True def receive_msg(self, src_agent: str, dest_agent: str, msg: ComputationMessage): """ Called when receiving a message. :param src_agent: name of the source agent :param dest_agent: name of the agent :param msg: the message, must be an iterable containing src_computation, dest_computation, message obejct (which can be any python object with InProcess communication) """ src_computation, dest_computation, msg_obj, msg_type = msg self.messaging.post_msg(src_computation, dest_computation, msg_obj, msg_type) def shutdown(self): # There's no resources to release for InProcessCommunicationLayer as # message passing is implemented as simple function calls. pass # def force_get_address(self, agt_name: str): # # FIXME : horrible hack until We implment a proper discovery method # # This only works for in-process communication and it only works # # because the InProcessCommLayer is used as the address. # return self.discovery.agent_address('orchestrator')\ # .discovery.agent_address(agt_name) def __str__(self): return "InProcessCommunicationLayer({})".format(self.messaging) def __repr__(self): return "Comm({})".format(self.messaging) def find_local_ip(): # from https://stackoverflow.com/a/28950776/261821 # public domain/free for any use as stated in comments s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: # doesn't even have to be reachable s.connect(("10.255.255.255", 1)) IP = s.getsockname()[0] except: IP = "127.0.0.1" finally: s.close() return IP class HttpCommunicationLayer(CommunicationLayer): """ This class implements the CommunicationLayer protocol. It uses an http server and client to send and receive messages. Parameters ---------- address_port: optional tuple (str, int) The IP address and port this HttpCommunicationLayer will be listening on. If the ip address or the port are not given ,we try to use the primary IP address (i.e. the one with a default route) and listen on port 9000. 
on_error: str Indicates how error when sending a message will be handled, possible value are 'ignore', 'retry', 'fail' """ def __init__( self, address_port: Optional[Tuple[str, int]] = None, on_error: Optional[str] = "ignore", ): super().__init__(on_error) if not address_port: self._address = find_local_ip(), 9000 else: ip_addr, port = address_port ip_addr = ip_addr if ip_addr else find_local_ip() ip_addr = ip_addr if ip_addr else "0.0.0.0" port = port if port else 9000 self._address = ip_addr, port self.logger = logging.getLogger( "infrastructure.communication.HttpCommunicationLayer" ) self._start_server() def shutdown(self): self.logger.info("Shutting down HttpCommunicationLayer " "on %s", self.address) self.httpd.shutdown() self.httpd.server_close() def _start_server(self): # start a server listening for messages self.logger.info( "Starting http server for HttpCommunicationLayer " "on %s", self.address ) try: _, port = self._address self.httpd = HTTPServer(("0.0.0.0", port), MPCHttpHandler) except OSError: self.logger.error( "Cannot bind http server on adress {}".format(self.address) ) raise self.httpd.comm = self t = Thread(name="http_thread", target=self.httpd.serve_forever, daemon=True) t.start() def on_post_message(self, path, sender, dest, msg: ComputationMessage): self.logger.debug("Http message received %s - %s %s", path, sender, dest) self.messaging.post_msg(msg.src_comp, msg.dest_comp, msg.msg, msg.msg_type) @property def address(self) -> Tuple[str, int]: """ An address that can be used to sent messages to this communication layer. :return the address as a (ip, port) tuple """ return self._address def send_msg( self, src_agent: str, dest_agent: str, msg: ComputationMessage, on_error=None ): """ Send msg from src_agent to dest_agent. :param src_agent: :param dest_agent: :param msg: the message to send :param on_error: how to handle failure when sending the message. When used, this parameter overrides the behavior set when building the HttpCommunicationLayer. 
:return: """ on_error = on_error if on_error is not None else self._on_error try: server, port = self.discovery.agent_address(dest_agent) except UnknownAgent: return self._on_send_error( src_agent, dest_agent, msg, on_error, UnknownAgent ) dest_address = "http://{}:{}/pydcop".format(server, port) msg_repr = simple_repr(msg.msg) try: r = requests.post( dest_address, headers={ "sender-agent": src_agent, "dest-agent": dest_agent, "sender-comp": msg.src_comp, "dest-comp": msg.dest_comp, "type": str(msg.msg_type), }, json=msg_repr, timeout=0.5, ) except ConnectionError: # Could not reach the target agent: connection refused or name # or service not known return self._on_send_error( src_agent, dest_agent, msg, on_error, UnreachableAgent ) if r is not None and r.status_code == 404: # It seems that the target computation of this message is not # hosted on the agent return self._on_send_error( src_agent, dest_agent, msg, on_error, UnknownComputation ) return True def __str__(self): return "HttpCommunicationLayer({}:{})".format(*self._address) class MPCHttpHandler(BaseHTTPRequestHandler): def do_POST(self): sender, dest = None, None type = MSG_ALGO if "sender-agent" in self.headers: sender = self.headers["sender-agent"] if "dest-agent" in self.headers: dest = self.headers["dest-agent"] src_comp, dest_comp = None, None if "sender-comp" in self.headers: src_comp = self.headers["sender-comp"] if "dest-comp" in self.headers: dest_comp = self.headers["dest-comp"] if "type" in self.headers: type = self.headers["type"] content_length = int(self.headers["Content-Length"]) post_data = self.rfile.read(content_length) try: content = json.loads(str(post_data, "utf-8")) except JSONDecodeError as jde: print(jde) print(post_data) raise jde comp_msg = ComputationMessage( src_comp, dest_comp, from_repr(content), int(type) ) try: self.server.comm.on_post_message(self.path, sender, dest, comp_msg) # Always answer 200, as the actual message is not processed yet by # the target computation. self.send_response(200) self.send_header("Content-type", "text/plain") self.end_headers() except UnknownComputation as e: # if the requested computation is not hosted here self.send_response(404, str(e)) self.send_header("Content-type", "text/plain") self.end_headers() def log_request(self, code="-", size="-"): # Avoid logging all requests to stdout pass MSG_MGT = 10 MSG_VALUE = 15 MSG_ALGO = 20 class Messaging(object): """ A `Messaging` instance is responsible for all messaged-based communication (sending and receiving messages) for an agent. Received messages a stored in a queue and can be fetched using `next_msg`. When sending messages, using `post_msg`, messages are dispatched either internally (directly to the queue) when the target is registered on this Messaging instance, or the actual sending is delegated to a CommunicationLayer instance (which implement a network communication protocol). Also accumulates metrics on messages sending. Parameters ---------- agent_name: str name of the agent this Messaging instance will send message for. comm: CommunicationLayer a concrete implementation of the CommunicationLayer protocol, it will be used to send messages to other agents. delay: int an optional delay between message delivery, in second. This delay only applies to algorithm's messages and is useful when you want to observe (for example with the GUI) the behavior of the algorithm at runtime. 
""" def __init__(self, agent_name: str, comm: CommunicationLayer, delay: float = None): self._queue = PriorityQueue() self._local_agent = agent_name self.discovery = comm.discovery self._comm = comm self._comm.messaging = self self._delay = delay self.logger = logging.getLogger(f"infrastructure.communication.{agent_name}") # Keep track of failer messages to retry later self._failed = [] # Containers for metrics on sent messages: self.count_ext_msg = defaultdict(lambda: 0) # type: Dict[str, int] self.size_ext_msg = defaultdict(lambda: 0) # type: Dict[str, int] self.last_msg_time = 0 self.msg_queue_count = 0 self._shutdown = False @property def communication(self) -> CommunicationLayer: return self._comm @property def local_agent(self) -> str: """ The name of the local agent. Returns ------- The name of the agent this Messaging instance is sending messages for. """ return self._local_agent @property def count_all_ext_msg(self) -> int: """ Count of all non-management external messages sent. :return: """ return sum(v for v in self.count_ext_msg.values()) @property def size_all_ext_msg(self) -> int: """ Size of all non-management external messages sent. :return: """ return sum(v for v in self.size_ext_msg.values()) def next_msg(self, timeout: float = 0): try: msg_type, _, t, full_msg = self._queue.get(block=True, timeout=timeout) if self._delay and msg_type == MSG_ALGO: sleep(self._delay) return full_msg, t except Empty: return None, None def post_msg( self, src_computation: str, dest_computation: str, msg, msg_type: int = MSG_ALGO, on_error=None, ): """ Send a message `msg` from computation `src_computation` to computation `dest_computation`. Messages can be sent * either to one of our local computations * or to a computation hosted on another agent If the message is for a local computation, deliver it directly, otherwise, we delegate to he communication layer. Notes ----- priority level : messages are sent with a priority level. If the agent hosting the target computation is not known, we will retry sending as soon as the hosting is registered. There is currently no time limit for this, meaning that a message can stay in fail state forever and never been delivered if the corresponding computation is never registered. TODO: implement some kind of timeout mechanism to report an error if message stay in the failed stay for too long. Parameters ---------- src_computation: str name of the computation sending the messages dest_computation: str name of the computation the message is sent to. msg: the message msg_type: int the type of the message, like MSG_ADM or MSG_ALGO. Defaults to MSG_ALGO. Used to send messages with an higher priority first. on_error: ?? 
""" if self._shutdown: return msg_type = MSG_ALGO if msg_type is None else msg_type try: dest_agent = self.discovery.computation_agent(dest_computation) except UnknownComputation: if self.logger.isEnabledFor(logging.WARNING): self.logger.warning( f"Cannot send msg from {src_computation} to unknown " f"comp {dest_computation}, will retry later : {msg}" ) self.discovery.subscribe_computation( dest_computation, self._on_computation_registration, one_shot=True ) self._failed.append( (src_computation, dest_computation, msg, msg_type, on_error) ) return full_msg = ComputationMessage(src_computation, dest_computation, msg, msg_type) if dest_agent == self._local_agent: if self.logger.isEnabledFor(logging.DEBUG): self.logger.debug( f"Posting local message {src_computation} -> " f"{dest_computation} : {msg}" ) now = perf_counter() if msg_type != MSG_MGT: self.last_msg_time = now # When putting the message in the queue we add the type, # a monotonic msg counter and the time of reception. As the queue # is a priority queue, putting type and counter first ensure # that the # tuple will always be orderable. The time is # useful to measure the delay between reception and handling # of a message. self.msg_queue_count += 1 self._queue.put((msg_type, self.msg_queue_count, now, full_msg)) else: if self.logger.isEnabledFor(logging.DEBUG): self.logger.debug( f"Posting remote message {src_computation} -> " f"{dest_computation} : {msg}" ) # If the destination is on another agent, it means that the # message source must be one of our local computation and we # should know about it. # NOTE: the computation might have been removed, but that's considered as a # bug, a computation should not send message once removed try: self.discovery.computation_agent(src_computation) except: self.logger.error(f"Could not find src computation {src_computation} " f" when posting msg {msg} to {dest_computation} " f"{dest_agent}, {self._local_agent}) ") raise # send using Communication Layer if msg_type != MSG_MGT: self.count_ext_msg[src_computation] += 1 self.size_ext_msg[src_computation] += msg.size self._comm.send_msg( self._local_agent, dest_agent, full_msg, on_error=on_error ) def shutdown(self): """Shutdown messaging No new message will be sent and any new message posted will be silently dropped. However it is still possible to call ``next_msg`` to empty the queue and handle all message received before ``shutdown` was called. """ self._shutdown = True def _on_computation_registration(self, evt: str, computation: str, agent: str): """ Callback for DeploymentInfo on computatino registration. Called when a new computation-agent is registered. """ if evt == "computation_added": for failed in self._failed[:]: src, dest, msg, msg_type, on_error = failed if dest != computation: continue self.logger.info( "Retrying failed message to %s on %s : %s", dest, agent, msg ) self.post_msg(src, dest, msg, msg_type, on_error) self._failed.remove(failed) def __str__(self): return "Messaging({})".format(self._local_agent)
main.py
import websockets.exceptions
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from .redis_manager import RedisManager
from .user_instance import UserInstance
from .message_executors import UserMsgExecutor, ServerMsgExecutor
from .consts import WAITING_CHANNEL
from queue import Queue
from . import auth
import threading
import asyncio
import json

app = FastAPI()
rd_manager = RedisManager()  # Redis-related helpers
ume = UserMsgExecutor(rd_manager)  # parses and executes user messages
sme = ServerMsgExecutor(rd_manager)  # parses and executes server messages
message_queue = Queue()
players_dict = {}  # players connected to this worker process


# On startup, start the message-listen and message-process threads
@app.on_event('startup')
async def on_startup():
    ml = threading.Thread(target=server_message_listen, daemon=True)
    ml.start()  # start the message listener thread
    loop = asyncio.get_running_loop()  # async event loop to hand over to server_message_process
    mp = threading.Thread(target=server_message_process, args=(loop,), daemon=True)  # message processing thread
    mp.start()  # start the message processing thread


# Put messages received from Redis into the queue. Intended to run in a thread.
def server_message_listen():
    for msg in rd_manager.msg_pubsub.listen():
        if msg.get('type') == 'message':
            message_queue.put(msg)


# Pull messages off the queue and process them. Intended to run in a thread.
def server_message_process(loop):
    asyncio.set_event_loop(loop)  # set the event loop passed in as an argument
    while True:
        msg = message_queue.get()  # blocking queue
        # schedule the coroutine on the main event loop; run_coroutine_threadsafe
        # is used because this runs in a worker thread, where loop.create_task
        # is not thread-safe
        asyncio.run_coroutine_threadsafe(server_message_exec(msg), loop)


# Execute the command from a message received via the message broker,
# resolving the target UserInstance and passing it along.
async def server_message_exec(msg):
    msg_type = msg.get('type')
    channel: str = msg.get('channel')  # the channel is the user_id
    if channel != WAITING_CHANNEL:
        try:
            user: UserInstance = players_dict[channel]  # player connection object mapped to this user_id
            if msg_type == 'message':
                await sme.server_msg_exec(user, msg)
        except KeyError:
            print('player connection object does not exist')
    else:
        for user in players_dict.values():
            await sme.send_waiters(user)


@app.websocket("/ws")
async def websocket_connection(websocket: WebSocket):
    await websocket.accept()  # accept the connection
    player_auth_req: dict = await websocket.receive_json()
    is_valid_player: bool = await init_auth(player_auth_req)
    if is_valid_player:
        user: UserInstance = await user_instance_create(websocket, player_auth_req['id'])  # create and return the player connection object for this connection
        await user_message_receive(websocket, user)  # receive data from the client until the connection ends
    else:
        await websocket.close()


async def init_auth(player_auth_json: dict) -> bool:
    # try:
    is_valid_player: bool = await auth.is_jwt_valid(player_auth_json.get('id'), player_auth_json.get('jwt'))
    # except auth.ValidateError:
    #     is_valid_player: bool = False
    return is_valid_player


async def user_instance_create(websocket: WebSocket, player_id) -> UserInstance:
    rd_manager.msg_pubsub.subscribe(player_id)  # subscribe to the Redis player_id channel
    user_instance = UserInstance(player_id=player_id, websocket=websocket)
    players_dict[player_id] = user_instance
    rd_manager.msg_broker.publish(WAITING_CHANNEL, '')  # update the waiting list
    return user_instance


async def user_message_receive(websocket, user: UserInstance):
    try:
        while True:
            try:
                msg: dict = await websocket.receive_json()  # received JSON data, automatically converted to a dict
                await ume.user_msg_exec(user, msg)  # handle the message
            except json.decoder.JSONDecodeError:
                print('not json type data')
    except (WebSocketDisconnect, websockets.exceptions.ConnectionClosedError):  # on connection close; a tuple is needed to catch both exception types
        await on_connection_lost(user)


async def on_connection_lost(user: UserInstance):
    print(f'player {user.player_id} disconnected')
    rd_manager.msg_pubsub.unsubscribe(user.player_id)  # unsubscribe from the player's channel
    await rd_manager.user_connection_closed(user.player_id)
    if user.approached_to is not None:
        await rd_manager.approacher_del(user.player_id, user.approached_to)
    players_dict.pop(user.player_id)  # remove the user object from the dict; it will be garbage-collected after this method returns
    rd_manager.msg_broker.publish(channel=WAITING_CHANNEL, message='')  # refresh the client waiting list
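# Hedged sketch: a minimal client for the /ws endpoint above, written with the
# third-party `websockets` package.  The host/port, the id/jwt payload and the
# message schema are assumptions; auth.is_jwt_valid and UserMsgExecutor decide
# what is actually accepted.
async def _example_client():
    import websockets
    async with websockets.connect('ws://localhost:8000/ws') as ws:
        await ws.send(json.dumps({'id': 'player-1', 'jwt': '<jwt token>'}))
        await ws.send(json.dumps({'type': 'chat', 'body': 'hello'}))

# asyncio.run(_example_client())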
gdbclientutils.py
import os
import os.path
import subprocess
import threading
import socket
import lldb
from lldbsuite.support import seven
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbtest_config


def checksum(message):
    """
    Calculate the GDB server protocol checksum of the message.

    The GDB server protocol uses a simple modulo 256 sum.
    """
    check = 0
    for c in message:
        check += ord(c)
    return check % 256


def frame_packet(message):
    """
    Create a framed packet that's ready to send over the GDB connection
    channel.

    Framing includes surrounding the message between $ and #, and appending
    a two character hex checksum.
    """
    return "$%s#%02x" % (message, checksum(message))


def escape_binary(message):
    """
    Escape the binary message using the process described in the GDB server
    protocol documentation.

    Most bytes are sent through as-is, but $, #, and } are escaped by writing
    a } followed by the original byte XORed with 0x20.
    """
    out = ""
    for c in message:
        d = ord(c)
        if d in (0x23, 0x24, 0x7d):
            out += chr(0x7d)
            out += chr(d ^ 0x20)
        else:
            out += c
    return out


def hex_encode_bytes(message):
    """
    Encode the binary message by converting each byte into a two-character
    hex string.
    """
    out = ""
    for c in message:
        out += "%02x" % ord(c)
    return out


def hex_decode_bytes(hex_bytes):
    """
    Decode the hex string into a binary message by converting each
    two-character hex string into a single output byte.
    """
    out = ""
    i = 0
    hex_len = len(hex_bytes)
    while i < hex_len - 1:
        out += chr(int(hex_bytes[i:i + 2], 16))
        i += 2
    return out


class MockGDBServerResponder:
    """
    A base class for handling client packets and issuing server responses for
    GDB tests.

    This handles many typical situations, while still allowing subclasses to
    completely customize their responses.

    Most subclasses will be interested in overriding the other() method, which
    handles any packet not recognized in the common packet handling code.
    """

    registerCount = 40
    packetLog = None

    def __init__(self):
        self.packetLog = []

    def respond(self, packet):
        """
        Return the unframed packet data that the server should issue in
        response to the given packet received from the client.
""" self.packetLog.append(packet) if packet is MockGDBServer.PACKET_INTERRUPT: return self.interrupt() if packet == "c": return self.cont() if packet.startswith("vCont;c"): return self.vCont(packet) if packet[0] == "g": return self.readRegisters() if packet[0] == "G": return self.writeRegisters(packet[1:]) if packet[0] == "p": regnum = packet[1:].split(';')[0] return self.readRegister(int(regnum, 16)) if packet[0] == "P": register, value = packet[1:].split("=") return self.readRegister(int(register, 16), value) if packet[0] == "m": addr, length = [int(x, 16) for x in packet[1:].split(',')] return self.readMemory(addr, length) if packet[0] == "M": location, encoded_data = packet[1:].split(":") addr, length = [int(x, 16) for x in location.split(',')] return self.writeMemory(addr, encoded_data) if packet[0:7] == "qSymbol": return self.qSymbol(packet[8:]) if packet[0:10] == "qSupported": return self.qSupported(packet[11:].split(";")) if packet == "qfThreadInfo": return self.qfThreadInfo() if packet == "qsThreadInfo": return self.qsThreadInfo() if packet == "qC": return self.qC() if packet == "QEnableErrorStrings": return self.QEnableErrorStrings() if packet == "?": return self.haltReason() if packet == "s": return self.haltReason() if packet[0] == "H": return self.selectThread(packet[1], int(packet[2:], 16)) if packet[0:6] == "qXfer:": obj, read, annex, location = packet[6:].split(":") offset, length = [int(x, 16) for x in location.split(',')] data, has_more = self.qXferRead(obj, annex, offset, length) if data is not None: return self._qXferResponse(data, has_more) return "" if packet.startswith("vAttach;"): pid = packet.partition(';')[2] return self.vAttach(int(pid, 16)) if packet[0] == "Z": return self.setBreakpoint(packet) if packet.startswith("qThreadStopInfo"): threadnum = int (packet[15:], 16) return self.threadStopInfo(threadnum) if packet == "QThreadSuffixSupported": return self.QThreadSuffixSupported() if packet == "QListThreadsInStopReply": return self.QListThreadsInStopReply() if packet.startswith("qMemoryRegionInfo:"): return self.qMemoryRegionInfo() return self.other(packet) def interrupt(self): raise self.UnexpectedPacketException() def cont(self): raise self.UnexpectedPacketException() def vCont(self, packet): raise self.UnexpectedPacketException() def readRegisters(self): return "00000000" * self.registerCount def readRegister(self, register): return "00000000" def writeRegisters(self, registers_hex): return "OK" def writeRegister(self, register, value_hex): return "OK" def readMemory(self, addr, length): return "00" * length def writeMemory(self, addr, data_hex): return "OK" def qSymbol(self, symbol_args): return "OK" def qSupported(self, client_supported): return "qXfer:features:read+;PacketSize=3fff;QStartNoAckMode+" def qfThreadInfo(self): return "l" def qsThreadInfo(self): return "l" def qC(self): return "QC0" def QEnableErrorStrings(self): return "OK" def haltReason(self): # SIGINT is 2, return type is 2 digit hex string return "S02" def qXferRead(self, obj, annex, offset, length): return None, False def _qXferResponse(self, data, has_more): return "%s%s" % ("m" if has_more else "l", escape_binary(data)) def vAttach(self, pid): raise self.UnexpectedPacketException() def selectThread(self, op, thread_id): return "OK" def setBreakpoint(self, packet): raise self.UnexpectedPacketException() def threadStopInfo(self, threadnum): return "" def other(self, packet): # empty string means unsupported return "" def QThreadSuffixSupported(self): return "" def 
QListThreadsInStopReply(self): return "" def qMemoryRegionInfo(self): return "" """ Raised when we receive a packet for which there is no default action. Override the responder class to implement behavior suitable for the test at hand. """ class UnexpectedPacketException(Exception): pass class MockGDBServer: """ A simple TCP-based GDB server that can test client behavior by receiving commands and issuing custom-tailored responses. Responses are generated via the .responder property, which should be an instance of a class based on MockGDBServerResponder. """ responder = None port = 0 _socket = None _client = None _thread = None _receivedData = None _receivedDataOffset = None _shouldSendAck = True def __init__(self, port = 0): self.responder = MockGDBServerResponder() self.port = port self._socket = socket.socket() def start(self): # Block until the socket is up, so self.port is available immediately. # Then start a thread that waits for a client connection. addr = ("127.0.0.1", self.port) self._socket.bind(addr) self.port = self._socket.getsockname()[1] self._socket.listen(1) self._thread = threading.Thread(target=self._run) self._thread.start() def stop(self): self._socket.close() self._thread.join() self._thread = None def _run(self): # For testing purposes, we only need to worry about one client # connecting just one time. try: # accept() is stubborn and won't fail even when the socket is # shutdown, so we'll use a timeout self._socket.settimeout(2.0) client, client_addr = self._socket.accept() self._client = client # The connected client inherits its timeout from self._socket, # but we'll use a blocking socket for the client self._client.settimeout(None) except: return self._shouldSendAck = True self._receivedData = "" self._receivedDataOffset = 0 data = None while True: try: data = seven.bitcast_to_string(self._client.recv(4096)) if data is None or len(data) == 0: break self._receive(data) except Exception as e: self._client.close() break def _receive(self, data): """ Collects data, parses and responds to as many packets as exist. Any leftover data is kept for parsing the next time around. """ self._receivedData += data try: packet = self._parsePacket() while packet is not None: self._handlePacket(packet) packet = self._parsePacket() except self.InvalidPacketException: self._client.close() def _parsePacket(self): """ Reads bytes from self._receivedData, returning: - a packet's contents if a valid packet is found - the PACKET_ACK unique object if we got an ack - None if we only have a partial packet Raises an InvalidPacketException if unexpected data is received or if checksums fail. Once a complete packet is found at the front of self._receivedData, its data is removed form self._receivedData. """ data = self._receivedData i = self._receivedDataOffset data_len = len(data) if data_len == 0: return None if i == 0: # If we're looking at the start of the received data, that means # we're looking for the start of a new packet, denoted by a $. # It's also possible we'll see an ACK here, denoted by a + if data[0] == '+': self._receivedData = data[1:] return self.PACKET_ACK if ord(data[0]) == 3: self._receivedData = data[1:] return self.PACKET_INTERRUPT if data[0] == '$': i += 1 else: raise self.InvalidPacketException( "Unexpected leading byte: %s" % data[0]) # If we're looking beyond the start of the received data, then we're # looking for the end of the packet content, denoted by a #. 
# Note that we pick up searching from where we left off last time while i < data_len and data[i] != '#': i += 1 # If there isn't enough data left for a checksum, just remember where # we left off so we can pick up there the next time around if i > data_len - 3: self._receivedDataOffset = i return None # If we have enough data remaining for the checksum, extract it and # compare to the packet contents packet = data[1:i] i += 1 try: check = int(data[i:i + 2], 16) except ValueError: raise self.InvalidPacketException("Checksum is not valid hex") i += 2 if check != checksum(packet): raise self.InvalidPacketException( "Checksum %02x does not match content %02x" % (check, checksum(packet))) # remove parsed bytes from _receivedData and reset offset so parsing # can start on the next packet the next time around self._receivedData = data[i:] self._receivedDataOffset = 0 return packet def _handlePacket(self, packet): if packet is self.PACKET_ACK: # Ignore ACKs from the client. For the future, we can consider # adding validation code to make sure the client only sends ACKs # when it's supposed to. return response = "" # We'll handle the ack stuff here since it's not something any of the # tests will be concerned about, and it'll get turned off quickly anyway. if self._shouldSendAck: self._client.sendall(seven.bitcast_to_bytes('+')) if packet == "QStartNoAckMode": self._shouldSendAck = False response = "OK" elif self.responder is not None: # Delegate everything else to our responder response = self.responder.respond(packet) # Handle packet framing since we don't want to bother tests with it. if response is not None: framed = frame_packet(response) self._client.sendall(seven.bitcast_to_bytes(framed)) PACKET_ACK = object() PACKET_INTERRUPT = object() class InvalidPacketException(Exception): pass class GDBRemoteTestBase(TestBase): """ Base class for GDB client tests. This class will setup and start a mock GDB server for the test to use. It also provides assertPacketLogContains, which simplifies the checking of packets sent by the client. """ NO_DEBUG_INFO_TESTCASE = True mydir = TestBase.compute_mydir(__file__) server = None def setUp(self): TestBase.setUp(self) self.server = MockGDBServer() self.server.start() def tearDown(self): # TestBase.tearDown will kill the process, but we need to kill it early # so its client connection closes and we can stop the server before # finally calling the base tearDown. if self.process() is not None: self.process().Kill() self.server.stop() TestBase.tearDown(self) def createTarget(self, yaml_path): """ Create a target by auto-generating the object based on the given yaml instructions. This will track the generated object so it can be automatically removed during tearDown. """ yaml_base, ext = os.path.splitext(yaml_path) obj_path = self.getBuildArtifact(yaml_base) self.yaml2obj(yaml_path, obj_path) return self.dbg.CreateTarget(obj_path) def connect(self, target): """ Create a process by connecting to the mock GDB server. Includes assertions that the process was successfully created. """ listener = self.dbg.GetListener() error = lldb.SBError() url = "connect://localhost:%d" % self.server.port process = target.ConnectRemote(listener, url, "gdb-remote", error) self.assertTrue(error.Success(), error.description) self.assertTrue(process, PROCESS_IS_VALID) return process def assertPacketLogContains(self, packets): """ Assert that the mock server's packet log contains the given packets. The packet log includes all packets sent by the client and received by the server. 
        This function makes it easy to verify that the client sent the
        expected packets to the server.  The check does not require that the
        packets be consecutive, but does require that they are ordered in the
        log as they are ordered in the arg.
        """
        i = 0
        j = 0
        log = self.server.responder.packetLog
        while i < len(packets) and j < len(log):
            if log[j] == packets[i]:
                i += 1
            j += 1
        if i < len(packets):
            self.fail(u"Did not receive: %s\nLast 10 packets:\n\t%s" %
                      (packets[i], u'\n\t'.join(log)))
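# Hedged sketch: customising server behaviour in a test, as suggested by the
# MockGDBServerResponder docstring.  The packets handled below and the
# target.xml payload are illustrative only.
class _ExampleResponder(MockGDBServerResponder):

    def qXferRead(self, obj, annex, offset, length):
        # serve a minimal (dummy) target description
        if obj == "features" and annex == "target.xml":
            return '<target version="1.0"></target>', False
        return None, False

    def other(self, packet):
        # anything not recognised by the base class ends up here
        if packet.startswith("qEcho:"):
            return packet
        return ""

# Inside a GDBRemoteTestBase-derived test this is typically used as:
#     self.server.responder = _ExampleResponder()
#     target = self.createTarget("a.yaml")
#     process = self.connect(target)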
utilities.py
""" Utility functions """ from __future__ import absolute_import import glob import socket import os import logging import uuid import datetime import shlex import re import sys import threading import time from subprocess import Popen, PIPE, STDOUT import yaml try: from yaml import CDumper as Dumper except ImportError: from yaml import Dumper from .. import package_info from .constants import InsightsConstants as constants from .collection_rules import InsightsUploadConf, load_yaml try: from insights_client.constants import InsightsConstants as wrapper_constants except ImportError: wrapper_constants = None logger = logging.getLogger(__name__) def determine_hostname(display_name=None): """ Find fqdn if we can """ if display_name: # if display_name is provided, just return the given name return display_name else: socket_gethostname = socket.gethostname() socket_fqdn = socket.getfqdn() try: socket_ex = socket.gethostbyname_ex(socket_gethostname)[0] except (LookupError, socket.gaierror): socket_ex = '' gethostname_len = len(socket_gethostname) fqdn_len = len(socket_fqdn) ex_len = len(socket_ex) if fqdn_len > gethostname_len or ex_len > gethostname_len: if "localhost" not in socket_ex and len(socket_ex): return socket_ex if "localhost" not in socket_fqdn: return socket_fqdn return socket_gethostname def get_time(): return datetime.datetime.isoformat(datetime.datetime.now()) def write_registered_file(): delete_unregistered_file() for f in constants.registered_files: if os.path.lexists(f): if os.path.islink(f): # kill symlinks and regenerate os.remove(f) write_to_disk(f) else: write_to_disk(f) def write_unregistered_file(date=None): """ Write .unregistered out to disk """ delete_registered_file() if date is None: date = get_time() for f in constants.unregistered_files: if os.path.lexists(f): if os.path.islink(f): # kill symlinks and regenerate os.remove(f) write_to_disk(f, content=str(date)) else: write_to_disk(f, content=str(date)) def delete_registered_file(): for f in constants.registered_files: write_to_disk(f, delete=True) def delete_unregistered_file(): for f in constants.unregistered_files: write_to_disk(f, delete=True) def delete_cache_files(): for f in glob.glob(os.path.join(constants.insights_core_lib_dir, "*.json")): os.remove(f) def write_to_disk(filename, delete=False, content=get_time()): """ Write filename out to disk """ if not os.path.exists(os.path.dirname(filename)): return if delete: if os.path.lexists(filename): os.remove(filename) else: with open(filename, 'wb') as f: f.write(content.encode('utf-8')) def generate_machine_id(new=False, destination_file=constants.machine_id_file): """ Generate a machine-id if /etc/insights-client/machine-id does not exist """ machine_id = None machine_id_file = None logging_name = 'machine-id' if os.path.isfile(destination_file) and not new: logger.debug('Found %s', destination_file) with open(destination_file, 'r') as machine_id_file: machine_id = machine_id_file.read() else: logger.debug('Could not find %s file, creating', logging_name) machine_id = str(uuid.uuid4()) logger.debug("Creating %s", destination_file) write_to_disk(destination_file, content=machine_id) machine_id = str(machine_id).strip() try: uuid.UUID(machine_id, version=4) return machine_id except ValueError as e: logger.error("Invalid machine ID: %s", machine_id) logger.error("Error details: %s", str(e)) logger.error("Remove %s and a new one will be generated.\nRerun the client with --register", destination_file) sys.exit(constants.sig_kill_bad) def _expand_paths(path): """ 
Expand wildcarded paths """ dir_name = os.path.dirname(path) paths = [] logger.debug("Attempting to expand %s", path) if os.path.isdir(dir_name): files = os.listdir(dir_name) match = os.path.basename(path) for file_path in files: if re.match(match, file_path): expanded_path = os.path.join(dir_name, file_path) paths.append(expanded_path) logger.debug("Expanded paths %s", paths) return paths else: logger.debug("Could not expand %s", path) def validate_remove_file(config): """ Validate the remove file and tags file """ return InsightsUploadConf(config).validate() def write_data_to_file(data, filepath): ''' Write data to file ''' try: os.makedirs(os.path.dirname(filepath), 0o700) except OSError: pass write_to_disk(filepath, content=data) def magic_plan_b(filename): ''' Use this in instances where python-magic is MIA and can't be installed for whatever reason ''' cmd = shlex.split('file --mime-type --mime-encoding ' + filename) stdout, stderr = Popen(cmd, stdout=PIPE).communicate() stdout = stdout.decode("utf-8") mime_str = stdout.split(filename + ': ')[1].strip() return mime_str def run_command_get_output(cmd): proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=STDOUT) stdout, stderr = proc.communicate() return { 'status': proc.returncode, 'output': stdout.decode('utf-8', 'ignore') } def modify_config_file(updates): ''' Update the config file with certain things ''' cmd = '/bin/sed ' for key in updates: cmd = cmd + '-e \'s/^#*{key}.*=.*$/{key}={value}/\' '.format(key=key, value=updates[key]) cmd = cmd + constants.default_conf_file status = run_command_get_output(cmd) write_to_disk(constants.default_conf_file, content=status['output']) def get_version_info(): ''' Get the insights client and core versions for archival ''' try: client_version = wrapper_constants.version except AttributeError: # wrapper_constants is None or has no attribute "version" client_version = None version_info = {} version_info['core_version'] = '%s-%s' % (package_info['VERSION'], package_info['RELEASE']) version_info['client_version'] = client_version return version_info def print_egg_versions(): ''' Log all available eggs' versions ''' versions = get_version_info() logger.debug('Client version: %s', versions['client_version']) logger.debug('Core version: %s', versions['core_version']) logger.debug('All egg versions:') eggs = [ os.getenv('EGG'), '/var/lib/insights/newest.egg', '/var/lib/insights/last_stable.egg', '/etc/insights-client/rpm.egg', ] if not sys.executable: logger.debug('Python executable not found.') return for egg in eggs: if egg is None: logger.debug('ENV egg not defined.') continue if not os.path.exists(egg): logger.debug('%s not found.', egg) continue try: proc = Popen([sys.executable, '-c', 'from insights import package_info; print(\'%s-%s\' % (package_info[\'VERSION\'], package_info[\'RELEASE\']))'], env={'PYTHONPATH': egg, 'PATH': os.getenv('PATH')}, stdout=PIPE, stderr=STDOUT) except OSError: logger.debug('Could not start python.') return stdout, stderr = proc.communicate() version = stdout.decode('utf-8', 'ignore').strip() logger.debug('%s: %s', egg, version) def read_pidfile(): ''' Read the pidfile we wrote at launch ''' pid = None try: with open(constants.pidfile) as pidfile: pid = pidfile.read() except IOError: logger.debug('Could not open pidfile for reading.') return pid def _systemd_notify(pid): ''' Ping the systemd watchdog with the main PID so that the watchdog doesn't kill the process ''' try: proc = Popen(['/usr/bin/systemd-notify', '--pid=' + str(pid), 'WATCHDOG=1']) except OSError as e: 
logger.debug('Could not launch systemd-notify: %s', str(e)) return False stdout, stderr = proc.communicate() if proc.returncode != 0: logger.debug('systemd-notify returned %s', proc.returncode) return False return True def systemd_notify_init_thread(): ''' Use a thread to periodically ping systemd instead of calling it on a per-command basis ''' pid = read_pidfile() if not pid: logger.debug('No PID specified.') return if not os.getenv('NOTIFY_SOCKET'): # running standalone, not via systemd job return if not os.path.exists('/usr/bin/systemd-notify'): # RHEL 6, no systemd return def _sdnotify_loop(): while True: # run sdnotify every 30 seconds if not _systemd_notify(pid): # end the loop if something goes wrong break time.sleep(30) sdnotify_thread = threading.Thread(target=_sdnotify_loop, args=()) sdnotify_thread.daemon = True sdnotify_thread.start() def get_tags(tags_file_path=constants.default_tags_file): ''' Load tag data from the tags file. Returns: a dict containing tags defined on the host. ''' tags = None if os.path.isfile(tags_file_path): try: tags = load_yaml(tags_file_path) except RuntimeError: logger.error("Invalid YAML. Unable to load %s", tags_file_path) return None else: logger.debug("%s does not exist", tags_file_path) return tags def write_tags(tags, tags_file_path=constants.default_tags_file): """ Writes tags to tags_file_path Arguments: - tags (dict): the tags to write - tags_file_path (string): path to which tag data will be written Returns: None """ with open(tags_file_path, mode="w+") as f: data = yaml.dump(tags, Dumper=Dumper, default_flow_style=False) f.write(data) def migrate_tags(): ''' We initially released the tags feature with the tags file set as tags.conf, but soon after switched it over to tags.yaml. There may be installations out there with tags.conf files, so rename the files. ''' tags_conf = os.path.join(constants.default_conf_dir, 'tags.conf') tags_yaml = os.path.join(constants.default_conf_dir, 'tags.yaml') if os.path.exists(tags_yaml): # current default file exists, do nothing return if os.path.exists(tags_conf): # old file exists and current does not logger.info('Tags file %s detected. This filename is deprecated; please use %s. The file will be renamed automatically.', tags_conf, tags_yaml) try: os.rename(tags_conf, tags_yaml) except OSError as e: logger.error(e)
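# Hedged sketch: round-tripping host tags through write_tags()/get_tags()
# defined above.  The tag keys/values and the temporary path are illustrative
# only.
def _tags_roundtrip_example(path='/tmp/example-tags.yaml'):
    write_tags({'group': 'eastern-sap', 'owner': 'webops'}, tags_file_path=path)
    return get_tags(tags_file_path=path)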
client.py
# Copyright 2017 D-Wave Systems Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ D-Wave API clients handle communications with :term:`solver` resources: problem submittal, monitoring, samples retrieval, etc. Examples: This example creates a client using the local system's default D-Wave Cloud Client configuration file, which is configured to access a D-Wave 2000Q QPU, submits a :term:`QUBO` problem (a Boolean NOT gate represented by a penalty model), and samples 5 times. >>> from dwave.cloud import Client >>> Q = {(0, 0): -1, (0, 4): 0, (4, 0): 2, (4, 4): -1} >>> with Client.from_config() as client: # doctest: +SKIP ... solver = client.get_solver() ... computation = solver.sample_qubo(Q, num_reads=5) ... >>> for i in range(5): # doctest: +SKIP ... print(computation.samples[i][0], computation.samples[i][4]) ... (1, 0) (1, 0) (0, 1) (0, 1) (0, 1) """ from __future__ import division, absolute_import import re import sys import time import json import logging import threading import requests import warnings import operator import collections import base64 import hashlib import codecs import concurrent.futures from itertools import chain from functools import partial, wraps from concurrent.futures import ThreadPoolExecutor from dateutil.parser import parse as parse_datetime from plucky import pluck from six.moves import queue, range import six from dwave.cloud.package_info import __packagename__, __version__ from dwave.cloud.exceptions import * from dwave.cloud.computation import Future from dwave.cloud.config import load_config, legacy_load_config, parse_float from dwave.cloud.solver import Solver, available_solvers from dwave.cloud.concurrency import PriorityThreadPoolExecutor from dwave.cloud.upload import ChunkedData from dwave.cloud.events import dispatch_event from dwave.cloud.utils import ( TimeoutingHTTPAdapter, BaseUrlSession, user_agent, datetime_to_timestamp, utcnow, epochnow, cached, retried) __all__ = ['Client'] logger = logging.getLogger(__name__) class Client(object): """ Base client class for all D-Wave API clients. Used by QPU and software :term:`sampler` classes. Manages workers and handles thread pools for submitting problems, cancelling tasks, polling problem status, and retrieving results. Args: endpoint (str): D-Wave API endpoint URL. token (str): Authentication token for the D-Wave API. solver (dict/str): Default solver features (or simply solver name). proxy (str): Proxy URL to be used for accessing the D-Wave API. permissive_ssl (bool, default=False): Disables SSL verification. request_timeout (float, default=60): Connect and read timeout (in seconds) for all requests to the D-Wave API. polling_timeout (float, default=None): Problem status polling timeout (in seconds), after which polling is aborted. connection_close (bool, default=False): Force HTTP(S) connection close after each request. headers (dict/str): Additional HTTP headers. 
Other Parameters: Unrecognized keys (str): All unrecognized keys are passed through to the appropriate client class constructor as string keyword arguments. An explicit key value overrides an identical user-defined key value loaded from a configuration file. Examples: This example directly initializes a :class:`~dwave.cloud.client.Client`. Direct initialization uses class constructor arguments, the minimum being a value for `token`. >>> from dwave.cloud import Client >>> client = Client(token='secret') # doctest: +SKIP >>> # code that uses client >>> client.close() # doctest: +SKIP """ # The status flags that a problem can have STATUS_IN_PROGRESS = 'IN_PROGRESS' STATUS_PENDING = 'PENDING' STATUS_COMPLETE = 'COMPLETED' STATUS_FAILED = 'FAILED' STATUS_CANCELLED = 'CANCELLED' # Default API endpoint DEFAULT_API_ENDPOINT = 'https://cloud.dwavesys.com/sapi/' # Cases when multiple status flags qualify ANY_STATUS_ONGOING = [STATUS_IN_PROGRESS, STATUS_PENDING] ANY_STATUS_NO_RESULT = [STATUS_FAILED, STATUS_CANCELLED] # Number of problems to include in a submit/status query _SUBMIT_BATCH_SIZE = 20 _STATUS_QUERY_SIZE = 100 # Number of worker threads for each problem processing task _SUBMISSION_THREAD_COUNT = 5 _UPLOAD_PROBLEM_THREAD_COUNT = 1 _UPLOAD_PART_THREAD_COUNT = 10 _ENCODE_PROBLEM_THREAD_COUNT = _UPLOAD_PROBLEM_THREAD_COUNT _CANCEL_THREAD_COUNT = 1 _POLL_THREAD_COUNT = 2 _LOAD_THREAD_COUNT = 5 # Poll back-off interval [sec] _POLL_BACKOFF_MIN = 1 _POLL_BACKOFF_MAX = 60 # Tolerance for server-client clocks difference (approx) [sec] _CLOCK_DIFF_MAX = 1 # Poll grouping time frame; two scheduled polls are grouped if closer than [sec]: _POLL_GROUP_TIMEFRAME = 2 # Downloaded solver definition cache maxage [sec] _SOLVERS_CACHE_MAXAGE = 300 # Multipart upload parameters _UPLOAD_PART_SIZE_BYTES = 5 * 1024 * 1024 _UPLOAD_PART_RETRIES = 2 _UPLOAD_REQUEST_RETRIES = 2 _UPLOAD_RETRIES_BACKOFF = lambda retry: 2 ** retry @classmethod def from_config(cls, config_file=None, profile=None, client=None, endpoint=None, token=None, solver=None, proxy=None, headers=None, legacy_config_fallback=False, **kwargs): """Client factory method to instantiate a client instance from configuration. Configuration values can be specified in multiple ways, ranked in the following order (with 1 the highest ranked): 1. Values specified as keyword arguments in :func:`from_config()` 2. Values specified as environment variables 3. Values specified in the configuration file Configuration-file format is described in :mod:`dwave.cloud.config`. If the location of the configuration file is not specified, auto-detection searches for existing configuration files in the standard directories of :func:`get_configfile_paths`. If a configuration file explicitly specified, via an argument or environment variable, does not exist or is unreadable, loading fails with :exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is readable but invalid as a configuration file. Similarly, if a profile explicitly specified, via an argument or environment variable, is not present in the loaded configuration, loading fails with :exc:`ValueError`. Explicit profile selection also fails if the configuration file is not explicitly specified, detected on the system, or defined via an environment variable. 
Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``, ``DWAVE_API_HEADERS``. Environment variables are described in :mod:`dwave.cloud.config`. Args: config_file (str/[str]/None/False/True, default=None): Path to configuration file. If ``None``, the value is taken from ``DWAVE_CONFIG_FILE`` environment variable if defined. If the environment variable is undefined or empty, auto-detection searches for existing configuration files in the standard directories of :func:`get_configfile_paths`. If ``False``, loading from file is skipped; if ``True``, forces auto-detection (regardless of the ``DWAVE_CONFIG_FILE`` environment variable). profile (str, default=None): Profile name (name of the profile section in the configuration file). If undefined, inferred from ``DWAVE_PROFILE`` environment variable if defined. If the environment variable is undefined or empty, a profile is selected in the following order: 1. From the default section if it includes a profile key. 2. The first section (after the default section). 3. If no other section is defined besides ``[defaults]``, the defaults section is promoted and selected. client (str, default=None): Client type used for accessing the API. Supported values are ``qpu`` for :class:`dwave.cloud.qpu.Client` and ``sw`` for :class:`dwave.cloud.sw.Client`. endpoint (str, default=None): API endpoint URL. token (str, default=None): API authorization token. solver (dict/str, default=None): Default :term:`solver` features to use in :meth:`~dwave.cloud.client.Client.get_solver`. Defined via dictionary of solver feature constraints (see :meth:`~dwave.cloud.client.Client.get_solvers`). For backward compatibility, a solver name, as a string, is also accepted and converted to ``{"name": <solver name>}``. If undefined, :meth:`~dwave.cloud.client.Client.get_solver` uses a solver definition from environment variables, a configuration file, or falls back to the first available online solver. proxy (str, default=None): URL for proxy to use in connections to D-Wave API. Can include username/password, port, scheme, etc. If undefined, client uses the system-level proxy, if defined, or connects directly to the API. headers (dict/str, default=None): Newline-separated additional HTTP headers to include with each API request, or a dictionary of (key, value) pairs. legacy_config_fallback (bool, default=False): If True and loading from a standard D-Wave Cloud Client configuration file (``dwave.conf``) fails, tries loading a legacy configuration file (``~/.dwrc``). Other Parameters: Unrecognized keys (str): All unrecognized keys are passed through to the appropriate client class constructor as string keyword arguments. An explicit key value overrides an identical user-defined key value loaded from a configuration file. Returns: :class:`~dwave.cloud.client.Client` (:class:`dwave.cloud.qpu.Client` or :class:`dwave.cloud.sw.Client`, default=:class:`dwave.cloud.qpu.Client`): Appropriate instance of a QPU or software client. Raises: :exc:`~dwave.cloud.exceptions.ConfigFileReadError`: Config file specified or detected could not be opened or read. :exc:`~dwave.cloud.exceptions.ConfigFileParseError`: Config file parse failed. Examples: A variety of examples are given in :mod:`dwave.cloud.config`. 
This example initializes :class:`~dwave.cloud.client.Client` from an explicitly specified configuration file, "~/jane/my_path_to_config/my_cloud_conf.conf":: >>> from dwave.cloud import Client >>> client = Client.from_config(config_file='~/jane/my_path_to_config/my_cloud_conf.conf') # doctest: +SKIP >>> # code that uses client >>> client.close() # doctest: +SKIP """ # try loading configuration from a preferred new config subsystem # (`./dwave.conf`, `~/.config/dwave/dwave.conf`, etc) config = load_config( config_file=config_file, profile=profile, client=client, endpoint=endpoint, token=token, solver=solver, proxy=proxy, headers=headers) logger.debug("Config loaded: %r", config) # fallback to legacy `.dwrc` if key variables missing if legacy_config_fallback: warnings.warn( "'legacy_config_fallback' is deprecated, and it will be removed " "in 0.7.0. please convert your legacy .dwrc file to the new " "config format.", DeprecationWarning) if not config.get('token'): config = legacy_load_config( profile=profile, client=client, endpoint=endpoint, token=token, solver=solver, proxy=proxy, headers=headers) logger.debug("Legacy config loaded: %r", config) # manual override of other (client-custom) arguments config.update(kwargs) from dwave.cloud import qpu, sw _clients = {'qpu': qpu.Client, 'sw': sw.Client, 'base': cls} _client = config.pop('client', None) or 'base' logger.debug("Final config used for %s.Client(): %r", _client, config) return _clients[_client](**config) def __init__(self, endpoint=None, token=None, solver=None, proxy=None, permissive_ssl=False, request_timeout=60, polling_timeout=None, connection_close=False, headers=None, **kwargs): """To setup the connection a pipeline of queues/workers is constructed. There are five interactions with the server the connection manages: 1. Downloading solver information. 2. Submitting problem data. 3. Polling problem status. 4. Downloading problem results. 5. Canceling problems Loading solver information is done synchronously. The other four tasks are performed by asynchronously workers. For 2, 3, and 5 the workers gather tasks in batches. 
""" args = dict( endpoint=endpoint, token=token, solver=solver, proxy=proxy, permissive_ssl=permissive_ssl, request_timeout=request_timeout, polling_timeout=polling_timeout, connection_close=connection_close, headers=headers, kwargs=kwargs) dispatch_event('before_client_init', obj=self, args=args) if not endpoint: endpoint = self.DEFAULT_API_ENDPOINT if not token: raise ValueError("API token not defined") logger.debug( "Creating a client for (endpoint=%r, token=%r, solver=%r, proxy=%r, " "permissive_ssl=%r, request_timeout=%r, polling_timeout=%r, " "connection_close=%r, headers=%r, **kwargs=%r)", endpoint, token, solver, proxy, permissive_ssl, request_timeout, polling_timeout, connection_close, headers, kwargs ) # parse solver if not solver: solver_def = {} elif isinstance(solver, collections.Mapping): solver_def = solver elif isinstance(solver, six.string_types): # support features dict encoded as JSON in our config INI file # TODO: push this decoding to the config module, once we switch to a # richer config format (JSON or YAML) try: solver_def = json.loads(solver) except Exception: # unparseable json, assume string name for solver # we'll deprecate this eventually, but for now just convert it to # features dict (equality constraint on full solver name) logger.debug("Invalid solver JSON, assuming string name: %r", solver) solver_def = dict(name__eq=solver) else: raise ValueError("Expecting a features dictionary or a string name for 'solver'") logger.debug("Parsed solver=%r", solver_def) # parse headers if not headers: headers_dict = {} elif isinstance(headers, collections.Mapping): headers_dict = headers elif isinstance(headers, six.string_types): try: # valid headers = "Field-1: value-1\nField-2: value-2" headers_dict = {key.strip(): val.strip() for key, val in [line.split(':') for line in headers.strip().split('\n')]} except Exception as e: logger.debug("Invalid headers: %r", headers) headers_dict = {} else: raise ValueError("HTTP headers expected in a dict, or a string") logger.debug("Parsed headers=%r", headers_dict) # Store connection/session parameters self.endpoint = endpoint self.default_solver = solver_def self.token = token self.request_timeout = parse_float(request_timeout) self.polling_timeout = parse_float(polling_timeout) self.proxy = proxy self.headers = headers_dict self.permissive_ssl = permissive_ssl self.connection_close = connection_close # Create session for main thread only self.session = self.create_session() # Build the problem submission queue, start its workers self._submission_queue = queue.Queue() self._submission_workers = [] for _ in range(self._SUBMISSION_THREAD_COUNT): worker = threading.Thread(target=self._do_submit_problems) worker.daemon = True worker.start() self._submission_workers.append(worker) # Build the cancel problem queue, start its workers self._cancel_queue = queue.Queue() self._cancel_workers = [] for _ in range(self._CANCEL_THREAD_COUNT): worker = threading.Thread(target=self._do_cancel_problems) worker.daemon = True worker.start() self._cancel_workers.append(worker) # Build the problem status polling queue, start its workers self._poll_queue = queue.PriorityQueue() self._poll_workers = [] for _ in range(self._POLL_THREAD_COUNT): worker = threading.Thread(target=self._do_poll_problems) worker.daemon = True worker.start() self._poll_workers.append(worker) # Build the result loading queue, start its workers self._load_queue = queue.Queue() self._load_workers = [] for _ in range(self._LOAD_THREAD_COUNT): worker = 
threading.Thread(target=self._do_load_results) worker.daemon = True worker.start() self._load_workers.append(worker) # Setup multipart upload executors self._upload_problem_executor = \ ThreadPoolExecutor(self._UPLOAD_PROBLEM_THREAD_COUNT) self._upload_part_executor = \ PriorityThreadPoolExecutor(self._UPLOAD_PART_THREAD_COUNT) self._encode_problem_executor = \ ThreadPoolExecutor(self._ENCODE_PROBLEM_THREAD_COUNT) dispatch_event( 'after_client_init', obj=self, args=args, return_value=None) def create_session(self): """Create a new requests session based on client's (self) params. Note: since `requests.Session` is NOT thread-safe, every thread should create and use an isolated session. """ # allow endpoint path to not end with / endpoint = self.endpoint if not endpoint.endswith('/'): endpoint += '/' session = BaseUrlSession(base_url=endpoint) session.mount('http://', TimeoutingHTTPAdapter(timeout=self.request_timeout)) session.mount('https://', TimeoutingHTTPAdapter(timeout=self.request_timeout)) if self.headers: session.headers.update(self.headers) session.headers.update({'X-Auth-Token': self.token, 'User-Agent': user_agent(__packagename__, __version__)}) session.proxies = {'http': self.proxy, 'https': self.proxy} if self.permissive_ssl: session.verify = False if self.connection_close: session.headers.update({'Connection': 'close'}) # Debug-log headers logger.debug("create_session(session.headers=%r)", session.headers) return session def close(self): """Perform a clean shutdown. Waits for all the currently scheduled work to finish, kills the workers, and closes the connection pool. .. note:: Ensure your code does not submit new work while the connection is closing. Where possible, it is recommended you use a context manager (a :code:`with Client.from_config(...) as` construct) to ensure your code properly closes all resources. Examples: This example creates a client (based on an auto-detected configuration file), executes some code (represented by a placeholder comment), and then closes the client. 
>>> from dwave.cloud import Client >>> client = Client.from_config() # doctest: +SKIP >>> # code that uses client >>> client.close() # doctest: +SKIP """ # Finish all the work that requires the connection logger.debug("Joining submission queue") self._submission_queue.join() logger.debug("Joining cancel queue") self._cancel_queue.join() logger.debug("Joining poll queue") self._poll_queue.join() logger.debug("Joining load queue") self._load_queue.join() logger.debug("Shutting down problem upload executor") self._upload_problem_executor.shutdown() logger.debug("Shutting down problem part upload executor") self._upload_part_executor.shutdown() logger.debug("Shutting down problem encoder executor") self._encode_problem_executor.shutdown() # Send kill-task to all worker threads # Note: threads can't be 'killed' in Python, they have to die by # natural causes for _ in self._submission_workers: self._submission_queue.put(None) for _ in self._cancel_workers: self._cancel_queue.put(None) for _ in self._poll_workers: self._poll_queue.put((-1, None)) for _ in self._load_workers: self._load_queue.put(None) # Wait for threads to die for worker in chain(self._submission_workers, self._cancel_workers, self._poll_workers, self._load_workers): worker.join() # Close the main thread's session self.session.close() def __enter__(self): """Let connections be used in with blocks.""" return self def __exit__(self, *args): """At the end of a with block perform a clean shutdown of the connection.""" self.close() return False @staticmethod def is_solver_handled(solver): """Determine if the specified solver should be handled by this client. Default implementation accepts all solvers (always returns True). Override this predicate function with a subclass if you want to specialize your client for a particular type of solvers. Examples: This function accepts only solvers named "My_Solver_*". .. code:: python @staticmethod def is_solver_handled(solver): return solver and solver.id.startswith('My_Solver_') """ return True @cached(maxage=_SOLVERS_CACHE_MAXAGE) def _fetch_solvers(self, name=None): if name is not None: logger.info("Fetching definition of a solver with name=%r", name) url = 'solvers/remote/{}/'.format(name) else: logger.info("Fetching definitions of all available solvers") url = 'solvers/remote/' try: response = self.session.get(url) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError if name is not None and response.status_code == 404: raise SolverNotFoundError("No solver with name={!r} available".format(name)) response.raise_for_status() data = response.json() if name is not None: data = [data] logger.debug("Received solver data for %d solver(s).", len(data)) logger.trace("Solver data received for solver name=%r: %r", name, data) solvers = [] for solver_desc in data: for solver_class in available_solvers: try: logger.debug("Trying to instantiate %r", solver_class.__name__) solver = solver_class(self, solver_desc) if self.is_solver_handled(solver): solvers.append(solver) logger.info("Adding solver %r", solver) break else: logger.debug("Skipping solver %r (not handled by this client)", solver) except UnsupportedSolverError as e: logger.debug("Skipping solver due to %r", e) # propagate all other/decoding errors, like InvalidAPIResponseError, etc. return solvers def retrieve_answer(self, id_): """Retrieve a problem by id. Args: id_ (str): As returned by :attr:`Future.id`. 
Returns: :class:`Future` """ future = Future(None, id_) self._load(future) return future def get_solvers(self, refresh=False, order_by='avg_load', **filters): """Return a filtered list of solvers handled by this client. Args: refresh (bool, default=False): Force refresh of cached list of solvers/properties. order_by (callable/str/None, default='avg_load'): Solver sorting key function (or :class:`Solver` attribute/item dot-separated path). By default, solvers are sorted by average load. To explicitly not sort the solvers (and use the API-returned order), set ``order_by=None``. Signature of the `key` `callable` is:: key :: (Solver s, Ord k) => s -> k Basic structure of the `key` string path is:: "-"? (attr|item) ( "." (attr|item) )* For example, to use solver property named ``max_anneal_schedule_points``, available in ``Solver.properties`` dict, you can either specify a callable `key`:: key=lambda solver: solver.properties['max_anneal_schedule_points'] or, you can use a short string path based key:: key='properties.max_anneal_schedule_points' Solver derived properties, available as :class:`Solver` properties can also be used (e.g. ``num_active_qubits``, ``online``, ``avg_load``, etc). Ascending sort order is implied, unless the key string path does not start with ``-``, in which case descending sort is used. Note: the sort used for ordering solvers by `key` is **stable**, meaning that if multiple solvers have the same value for the key, their relative order is preserved, and effectively they are in the same order as returned by the API. Note: solvers with ``None`` for key appear last in the list of solvers. When providing a key callable, ensure all values returned are of the same type (particularly in Python 3). For solvers with undefined key value, return ``None``. **filters: See `Filtering forms` and `Operators` below. Solver filters are defined, similarly to Django QuerySet filters, with keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`. Each `<operator>` is a predicate (boolean) function that acts on two arguments: value of feature `<name>` (described with keys path `<key1.key2...keyN>`) and the required `<value>`. Feature `<name>` can be: 1) a derived solver property, available as an identically named :class:`Solver`'s property (`name`, `qpu`, `software`, `online`, `num_active_qubits`, `avg_load`) 2) a solver parameter, available in :obj:`Solver.parameters` 3) a solver property, available in :obj:`Solver.properties` 4) a path describing a property in nested dictionaries Filtering forms are: * <derived_property>__<operator> (object <value>) * <derived_property> (bool) This form ensures the value of solver's property bound to `derived_property`, after applying `operator` equals the `value`. The default operator is `eq`. For example:: >>> client.get_solvers(avg_load__gt=0.5) but also:: >>> client.get_solvers(online=True) >>> # identical to: >>> client.get_solvers(online__eq=True) * <parameter>__<operator> (object <value>) * <parameter> (bool) This form ensures that the solver supports `parameter`. General operator form can be used but usually does not make sense for parameters, since values are human-readable descriptions. The default operator is `available`. Example:: >>> client.get_solvers(flux_biases=True) >>> # identical to: >>> client.get_solvers(flux_biases__available=True) * <property>__<operator> (object <value>) * <property> (bool) This form ensures the value of the solver's `property`, after applying `operator` equals the righthand side `value`. 
The default operator is `eq`. Note: if a non-existing parameter/property name/key given, the default operator is `eq`. Operators are: * `available` (<name>: str, <value>: bool): Test availability of <name> feature. * `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any): Standard relational operators that compare feature <name> value with <value>. * `regex` (<name>: str, <value>: str): Test regular expression matching feature value. * `covers` (<name>: str, <value>: single value or range expressed as 2-tuple/list): Test feature <name> value (which should be a *range*) covers a given value or a subrange. * `within` (<name>: str, <value>: range expressed as 2-tuple/list): Test feature <name> value (which can be a *single value* or a *range*) is within a given range. * `in` (<name>: str, <value>: container type): Test feature <name> value is *in* <value> container. * `contains` (<name>: str, <value>: any): Test feature <name> value (container type) *contains* <value>. * `issubset` (<name>: str, <value>: container type): Test feature <name> value (container type) is a subset of <value>. * `issuperset` (<name>: str, <value>: container type): Test feature <name> value (container type) is a superset of <value>. Derived properies are: * `name` (str): Solver name/id. * `qpu` (bool): Solver is a QPU? * `software` (bool): Solver is a software solver? * `online` (bool, default=True): Is solver online? * `num_active_qubits` (int): Number of active qubits. Less then or equal to `num_qubits`. * `avg_load` (float): Solver's average load (similar to Unix load average). Common solver parameters are: * `flux_biases`: Should solver accept flux biases? * `anneal_schedule`: Should solver accept anneal schedule? Common solver properties are: * `num_qubits` (int): Number of qubits available. * `vfyc` (bool): Should solver work on "virtual full-yield chip"? * `max_anneal_schedule_points` (int): Piecewise linear annealing schedule points. * `h_range` ([int,int]), j_range ([int,int]): Biases/couplings values range. * `num_reads_range` ([int,int]): Range of allowed values for `num_reads` parameter. Returns: list[Solver]: List of all solvers that satisfy the conditions. Note: Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or :class:`dwave.cloud.sw.Client`) already filter solvers by resource type, so for `qpu` and `software` filters to have effect, call :meth:`.get_solvers` on base class :class:`~dwave.cloud.client.Client`. Examples:: client.get_solvers( num_qubits__gt=2000, # we need more than 2000 qubits num_qubits__lt=4000, # ... 
but fewer than 4000 qubits num_qubits__within=(2000, 4000), # an alternative to the previous two lines num_active_qubits=1089, # we want a particular number of active qubits vfyc=True, # we require a fully yielded Chimera vfyc__in=[False, None], # inverse of the previous filter vfyc__available=False, # we want solvers that do not advertize the vfyc property anneal_schedule=True, # we need support for custom anneal schedule max_anneal_schedule_points__gte=4, # we need at least 4 points for our anneal schedule num_reads_range__covers=1000, # our solver must support returning 1000 reads extended_j_range__covers=[-2, 2], # we need extended J range to contain subrange [-2,2] couplers__contains=[0, 128], # coupler (edge between) qubits (0,128) must exist couplers__issuperset=[[0,128], [0,4]], # two couplers required: (0,128) and (0,4) qubits__issuperset={0, 4, 215}, # qubits 0, 4 and 215 must exist supported_problem_types__issubset={'ising', 'qubo'}, # require Ising, QUBO or both to be supported name='DW_2000Q_5', # full solver name/ID match name__regex='.*2000.*', # partial/regex-based solver name match chip_id__regex='DW_.*', # chip ID prefix must be DW_ topology__type__eq="chimera" # topology.type must be chimera ) """ args = dict(refresh=refresh, order_by=order_by, filters=filters) dispatch_event('before_get_solvers', obj=self, args=args) def covers_op(prop, val): """Does LHS `prop` (range) fully cover RHS `val` (range or item)?""" # `prop` must be a 2-element list/tuple range. if not isinstance(prop, (list, tuple)) or not len(prop) == 2: raise ValueError("2-element list/tuple range required for LHS value") llo, lhi = min(prop), max(prop) # `val` can be a single value, or a range (2-list/2-tuple). if isinstance(val, (list, tuple)) and len(val) == 2: # val range within prop range? rlo, rhi = min(val), max(val) return llo <= rlo and lhi >= rhi else: # val item within prop range? return llo <= val <= lhi def within_op(prop, val): """Is LHS `prop` (range or item) fully covered by RHS `val` (range)?""" try: return covers_op(val, prop) except ValueError: raise ValueError("2-element list/tuple range required for RHS value") def _set(iterable): """Like set(iterable), but works for lists as items in iterable. Before constructing a set, lists are converted to tuples. 
""" first = next(iter(iterable)) if isinstance(first, list): return set(tuple(x) for x in iterable) return set(iterable) def with_valid_lhs(op): @wraps(op) def _wrapper(prop, val): if prop is None: return False return op(prop, val) return _wrapper # available filtering operators ops = { 'lt': with_valid_lhs(operator.lt), 'lte': with_valid_lhs(operator.le), 'gt': with_valid_lhs(operator.gt), 'gte': with_valid_lhs(operator.ge), 'eq': operator.eq, 'available': lambda prop, val: prop is not None if val else prop is None, 'regex': with_valid_lhs(lambda prop, val: re.match("^{}$".format(val), prop)), # range operations 'covers': with_valid_lhs(covers_op), 'within': with_valid_lhs(within_op), # membership tests 'in': lambda prop, val: prop in val, 'contains': with_valid_lhs(lambda prop, val: val in prop), # set tests 'issubset': with_valid_lhs(lambda prop, val: _set(prop).issubset(_set(val))), 'issuperset': with_valid_lhs(lambda prop, val: _set(prop).issuperset(_set(val))), } def predicate(solver, query, val): # needs to handle kwargs like these: # key=val # key__op=val # key__key=val # key__key__op=val # LHS is split on __ in `query` assert len(query) >= 1 potential_path, potential_op_name = query[:-1], query[-1] if potential_op_name in ops: # op is explicit, and potential path is correct op_name = potential_op_name else: # op is implied and depends on property type, path is the whole query op_name = None potential_path = query path = '.'.join(potential_path) if path in solver.derived_properties: op = ops[op_name or 'eq'] return op(getattr(solver, path), val) elif pluck(solver.parameters, path, None) is not None: op = ops[op_name or 'available'] return op(pluck(solver.parameters, path), val) elif pluck(solver.properties, path, None) is not None: op = ops[op_name or 'eq'] return op(pluck(solver.properties, path), val) else: op = ops[op_name or 'eq'] return op(None, val) # param validation sort_reverse = False if not order_by: sort_key = None elif isinstance(order_by, six.string_types): if order_by[0] == '-': sort_reverse = True order_by = order_by[1:] if not order_by: sort_key = None else: sort_key = lambda solver: pluck(solver, order_by, None) elif callable(order_by): sort_key = order_by else: raise TypeError("expected string or callable for 'order_by'") # default filters: filters.setdefault('online', True) predicates = [] for lhs, val in filters.items(): query = lhs.split('__') predicates.append(partial(predicate, query=query, val=val)) logger.debug("Filtering solvers with predicates=%r", predicates) # optimization for case when exact solver name/id is known: # we can fetch only that solver # NOTE: in future, complete feature-based filtering will be on server-side query = dict(refresh_=refresh) if 'name' in filters: query['name'] = filters['name'] if 'name__eq' in filters: query['name'] = filters['name__eq'] # filter solvers = self._fetch_solvers(**query) solvers = [s for s in solvers if all(p(s) for p in predicates)] # sort: undefined (None) key values go last if sort_key is not None: solvers_with_keys = [(sort_key(solver), solver) for solver in solvers] solvers_with_invalid_keys = [(key, solver) for key, solver in solvers_with_keys if key is None] solvers_with_valid_keys = [(key, solver) for key, solver in solvers_with_keys if key is not None] solvers_with_valid_keys.sort(key=operator.itemgetter(0)) solvers = [solver for key, solver in chain(solvers_with_valid_keys, solvers_with_invalid_keys)] # reverse if necessary (as a separate step from sorting, so it works for invalid keys # and plain 
list reverse without sorting) if sort_reverse: solvers.reverse() dispatch_event( 'after_get_solvers', obj=self, args=args, return_value=solvers) return solvers def solvers(self, refresh=False, **filters): """Deprecated in favor of :meth:`.get_solvers`.""" warnings.warn("'solvers' is deprecated in favor of 'get_solvers'.", DeprecationWarning) return self.get_solvers(refresh=refresh, **filters) def get_solver(self, name=None, refresh=False, **filters): """Load the configuration for a single solver. Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}` is a URL configured for the client, and returns a :class:`.Solver` instance that can be used to submit sampling problems to the D-Wave API and retrieve results. Args: name (str): ID of the requested solver. ``None`` returns the default solver. If default solver is not configured, ``None`` returns the first available solver in ``Client``'s class (QPU/software/base). **filters (keyword arguments, optional): Dictionary of filters over features this solver has to have. For a list of feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`. order_by (callable/str, default='id'): Solver sorting key function (or :class:`Solver` attribute name). By default, solvers are sorted by ID/name. refresh (bool): Return solver from cache (if cached with ``get_solvers()``), unless set to ``True``. Returns: :class:`.Solver` Examples: This example creates two solvers for a client instantiated from a local system's auto-detected default configuration file, which configures a connection to a D-Wave resource that provides two solvers. The first uses the default solver, the second explicitly selects another solver. >>> from dwave.cloud import Client >>> client = Client.from_config() # doctest: +SKIP >>> client.get_solvers() # doctest: +SKIP [Solver(id='2000Q_ONLINE_SOLVER1'), Solver(id='2000Q_ONLINE_SOLVER2')] >>> solver1 = client.get_solver() # doctest: +SKIP >>> solver2 = client.get_solver(name='2000Q_ONLINE_SOLVER2') # doctest: +SKIP >>> solver1.id # doctest: +SKIP '2000Q_ONLINE_SOLVER1' >>> solver2.id # doctest: +SKIP '2000Q_ONLINE_SOLVER2' >>> # code that uses client >>> client.close() # doctest: +SKIP """ logger.debug("Requested a solver that best matches feature filters=%r", filters) # backward compatibility: name as the first feature if name is not None: filters.setdefault('name', name) # in absence of other filters, config/env solver filters/name are used if not filters and self.default_solver: filters = self.default_solver # get the first solver that satisfies all filters try: logger.debug("Fetching solvers according to filters=%r", filters) return self.get_solvers(refresh=refresh, **filters)[0] except IndexError: raise SolverNotFoundError("Solver with the requested features not available") def _submit(self, body, future): """Enqueue a problem for submission to the server. This method is thread safe. """ self._submission_queue.put(self._submit.Message(body, future)) _submit.Message = collections.namedtuple('Message', ['body', 'future']) def _do_submit_problems(self): """Pull problems from the submission queue and submit them. Note: This method is always run inside of a daemon thread. """ def task_done(): self._submission_queue.task_done() def filter_ready(item): """Pass-through ready (encoded) problems, re-enqueue ones for which the encoding is in progress, and fail the ones for which encoding failed. 
""" # body is a `concurrent.futures.Future`, so make sure # it's ready for submitting if item.body.done(): exc = item.body.exception() if exc: # encoding failed, submit should fail as well logger.info("Problem encoding prior to submit " "failed with: %r", exc) item.future._set_error(exc) task_done() else: # problem ready for submit return [item] else: # body not ready, return the item to queue self._submission_queue.put(item) task_done() return [] session = self.create_session() try: while True: # Pull as many problems as we can, block on the first one, # but once we have one problem, switch to non-blocking then # submit without blocking again. # `None` task is used to signal thread termination item = self._submission_queue.get() if item is None: task_done() break ready_problems = filter_ready(item) while len(ready_problems) < self._SUBMIT_BATCH_SIZE: try: item = self._submission_queue.get_nowait() except queue.Empty: break ready_problems.extend(filter_ready(item)) if not ready_problems: continue # Submit the problems logger.debug("Submitting %d problems", len(ready_problems)) try: body = '[' + ','.join(mess.body.result() for mess in ready_problems) + ']' try: response = session.post('problems/', body) localtime_of_response = epochnow() except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() response.raise_for_status() message = response.json() logger.debug("Finished submitting %d problems", len(ready_problems)) except BaseException as exception: logger.debug("Submit failed for %d problems", len(ready_problems)) if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) for mess in ready_problems: mess.future._set_error(exception, sys.exc_info()) task_done() continue # Pass on the information for submission, res in zip(ready_problems, message): submission.future._set_clock_diff(response, localtime_of_response) self._handle_problem_status(res, submission.future) task_done() # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except BaseException as err: logger.exception(err) finally: session.close() def _handle_problem_status(self, message, future): """Handle the results of a problem submission or results request. This method checks the status of the problem and puts it in the correct queue. Args: message (dict): Update message from the SAPI server wrt. this problem. future (:class:`dwave.cloud.computation.Future`: future corresponding to the problem Note: This method is always run inside of a daemon thread. 
""" try: logger.trace("Handling response: %r", message) logger.debug("Handling response for %s with status %s", message.get('id'), message.get('status')) # Handle errors in batch mode if 'error_code' in message and 'error_msg' in message: raise SolverFailureError(message['error_msg']) if 'status' not in message: raise InvalidAPIResponseError("'status' missing in problem description response") if 'id' not in message: raise InvalidAPIResponseError("'id' missing in problem description response") future.id = message['id'] future.remote_status = status = message['status'] # The future may not have the ID set yet with future._single_cancel_lock: # This handles the case where cancel has been called on a future # before that future received the problem id if future._cancel_requested: if not future._cancel_sent and status == self.STATUS_PENDING: # The problem has been canceled but the status says its still in queue # try to cancel it self._cancel(message['id'], future) # If a cancel request could meaningfully be sent it has been now future._cancel_sent = True if not future.time_received and message.get('submitted_on'): future.time_received = parse_datetime(message['submitted_on']) if not future.time_solved and message.get('solved_on'): future.time_solved = parse_datetime(message['solved_on']) if not future.eta_min and message.get('earliest_estimated_completion'): future.eta_min = parse_datetime(message['earliest_estimated_completion']) if not future.eta_max and message.get('latest_estimated_completion'): future.eta_max = parse_datetime(message['latest_estimated_completion']) if status == self.STATUS_COMPLETE: # TODO: find a better way to differentiate between # `completed-on-submit` and `completed-on-poll`. # Loading should happen only once, not every time when response # doesn't contain 'answer'. # If the message is complete, forward it to the future object if 'answer' in message: # If the future does not know which solver it's associated # with, we get it from the info provided from the server. # An alternative to making this call here would be to pass # self in with the message if future.solver is None: future.solver = self.get_solver(name=message['solver']) future._set_message(message) # If the problem is complete, but we don't have the result data # put the problem in the queue for loading results. else: self._load(future) elif status in self.ANY_STATUS_ONGOING: # If the response is pending add it to the queue. self._poll(future) elif status == self.STATUS_CANCELLED: # If canceled return error raise CanceledFutureError() else: # Return an error to the future object errmsg = message.get('error_message', 'An unknown error has occurred.') if 'solver is offline' in errmsg.lower(): raise SolverOfflineError(errmsg) else: raise SolverFailureError(errmsg) except Exception as error: # If there were any unhandled errors we need to release the # lock in the future, otherwise deadlock occurs. future._set_error(error, sys.exc_info()) def _cancel(self, id_, future): """Enqueue a problem to be canceled. This method is thread safe. """ self._cancel_queue.put((id_, future)) def _do_cancel_problems(self): """Pull ids from the cancel queue and submit them. Note: This method is always run inside of a daemon thread. """ session = self.create_session() try: while True: # Pull as many problems as we can, block when none are available. 
# `None` task is used to signal thread termination item = self._cancel_queue.get() if item is None: break item_list = [item] while True: try: item_list.append(self._cancel_queue.get_nowait()) except queue.Empty: break # Submit the problems, attach the ids as a json list in the # body of the delete query. try: body = [item[0] for item in item_list] try: session.delete('problems/', json=body) except requests.exceptions.Timeout: raise RequestTimeout except Exception as err: for _, future in item_list: if future is not None: future._set_error(err, sys.exc_info()) # Mark all the ids as processed regardless of success or failure. [self._cancel_queue.task_done() for _ in item_list] # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except Exception as err: logger.exception(err) finally: session.close() def _is_clock_diff_acceptable(self, future): if not future or future.clock_diff is None: return False logger.debug("Detected (server,client) clock offset: approx. %.2f sec. " "Acceptable offset is: %.2f sec", future.clock_diff, self._CLOCK_DIFF_MAX) return future.clock_diff <= self._CLOCK_DIFF_MAX def _poll(self, future): """Enqueue a problem to poll the server for status.""" if future._poll_backoff is None: # on first poll, start with minimal back-off future._poll_backoff = self._POLL_BACKOFF_MIN else: # on subsequent polls, do exponential back-off, clipped to a range future._poll_backoff = \ max(self._POLL_BACKOFF_MIN, min(future._poll_backoff * 2, self._POLL_BACKOFF_MAX)) # for poll priority we use timestamp of next scheduled poll at = time.time() + future._poll_backoff now = utcnow() future_age = (now - future.time_created).total_seconds() logger.debug("Polling scheduled at %.2f with %.2f sec new back-off for: %s (future's age: %.2f sec)", at, future._poll_backoff, future.id, future_age) # don't enqueue for next poll if polling_timeout is exceeded by then future_age_on_next_poll = future_age + (at - datetime_to_timestamp(now)) if self.polling_timeout is not None and future_age_on_next_poll > self.polling_timeout: logger.debug("Polling timeout exceeded before next poll: %.2f sec > %.2f sec, aborting polling!", future_age_on_next_poll, self.polling_timeout) raise PollingTimeout self._poll_queue.put((at, future)) def _do_poll_problems(self): """Poll the server for the status of a set of problems. Note: This method is always run inside of a daemon thread. """ session = self.create_session() try: # grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME) frame_futures = {} def task_done(): self._poll_queue.task_done() def add(future): # add future to query frame_futures # returns: worker lives on? 
# `None` task signifies thread termination if future is None: task_done() return False if future.id not in frame_futures and not future.done(): frame_futures[future.id] = future else: task_done() return True while True: frame_futures.clear() # blocking add first scheduled frame_earliest, future = self._poll_queue.get() if not add(future): return # try grouping if scheduled within grouping timeframe while len(frame_futures) < self._STATUS_QUERY_SIZE: try: task = self._poll_queue.get_nowait() except queue.Empty: break at, future = task if at - frame_earliest <= self._POLL_GROUP_TIMEFRAME: if not add(future): return else: task_done() self._poll_queue.put(task) break # build a query string with ids of all futures in this frame ids = [future.id for future in frame_futures.values()] logger.debug("Polling for status of futures: %s", ids) query_string = 'problems/?id=' + ','.join(ids) # if futures were cancelled while `add`ing, skip empty frame if not ids: continue # wait until `frame_earliest` before polling delay = frame_earliest - time.time() if delay > 0: logger.debug("Pausing polling %.2f sec for futures: %s", delay, ids) time.sleep(delay) else: logger.trace("Skipping non-positive delay of %.2f sec", delay) # execute and handle the polling request try: logger.trace("Executing poll API request") try: response = session.get(query_string) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() # assume 5xx errors are transient, and don't abort polling if 500 <= response.status_code < 600: logger.warning( "Received an internal server error response on " "problem status polling request (%s). Assuming " "error is transient, and resuming polling.", response.status_code) # add all futures in this frame back to the polling queue # XXX: logic split between `_handle_problem_status` and here for future in frame_futures.values(): self._poll(future) else: # otherwise, fail response.raise_for_status() # or handle a successful request statuses = response.json() for status in statuses: self._handle_problem_status(status, frame_futures[status['id']]) except BaseException as exception: if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) for id_ in frame_futures.keys(): frame_futures[id_]._set_error(IOError(exception), sys.exc_info()) for id_ in frame_futures.keys(): task_done() time.sleep(0) except Exception as err: logger.exception(err) finally: session.close() def _load(self, future): """Enqueue a problem to download results from the server. Args: future (:class:`~dwave.cloud.computation.Future`): Future object corresponding to the remote computation. This method is thread-safe. """ self._load_queue.put(future) def _do_load_results(self): """Submit a query asking for the results for a particular problem. To request the results of a problem: ``GET /problems/{problem_id}/`` Note: This method is always run inside of a daemon thread. 
""" session = self.create_session() try: while True: # Select a problem future = self._load_queue.get() # `None` task signifies thread termination if future is None: break logger.debug("Loading results of: %s", future.id) # Submit the query query_string = 'problems/{}/'.format(future.id) try: try: response = session.get(query_string) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() response.raise_for_status() message = response.json() except BaseException as exception: if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) future._set_error(IOError(exception), sys.exc_info()) continue # Dispatch the results, mark the task complete self._handle_problem_status(message, future) self._load_queue.task_done() # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except Exception as err: logger.error('Load result error: ' + str(err)) finally: session.close() def upload_problem_encoded(self, problem, problem_id=None): """Initiate multipart problem upload, returning the Problem ID in a :class:`~concurrent.futures.Future`. Args: problem (bytes-like/file-like): Encoded problem data to upload. problem_id (str, optional): Problem ID. If provided, problem will be re-uploaded. Previously uploaded parts, with a matching checksum, are skipped. Returns: :class:`concurrent.futures.Future`[str]: Problem ID in a Future. Problem ID can be used to submit problems by reference. Note: For a higher-level interface, use upload/submit solver methods. """ return self._upload_problem_executor.submit( self._upload_problem_worker, problem=problem, problem_id=problem_id) @staticmethod @retried(_UPLOAD_REQUEST_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF) def _initiate_multipart_upload(session, size): """Sync http request using `session`.""" logger.debug("Initiating problem multipart upload (size=%r)", size) path = 'bqm/multipart' body = dict(size=size) logger.trace("session.post(path=%r, json=%r)", path, body) try: response = session.post(path, json=body) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() else: logger.trace("Multipart upload initiate response: %r", response.text) response.raise_for_status() try: problem_id = response.json()['id'] except KeyError: raise InvalidAPIResponseError("problem ID missing") logger.debug("Multipart upload initiated (problem_id=%r)", problem_id) return problem_id @staticmethod def _digest(data): # data: bytes => md5(data): bytes return hashlib.md5(data).digest() @staticmethod def _checksum_b64(digest): # digest: bytes => base64(digest): str return base64.b64encode(digest).decode('ascii') @staticmethod def _checksum_hex(digest): # digest: bytes => hex(digest): str return codecs.encode(digest, 'hex').decode('ascii') @staticmethod def _combined_checksum(checksums): # TODO: drop this requirement server-side # checksums: Dict[int, str] => hex(md5(cat(digests))): str combined = ''.join(h for _, h in sorted(checksums.items())) digest = codecs.decode(combined, 'hex') return Client._checksum_hex(Client._digest(digest)) @staticmethod @retried(_UPLOAD_PART_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF) def _upload_multipart_part(session, problem_id, part_id, part_stream, uploaded_part_checksum=None): """Upload one problem part. Sync http request. Args: session (:class:`requests.Session`): Session used for all API requests. problem_id (str): Problem id. part_id (int): Part number/id. 
part_stream (:class:`io.BufferedIOBase`/binary-stream-like): Problem part data container that supports `read` operation. uploaded_part_checksum (str/None): Checksum of previously uploaded part. Optional, but if specified checksum is verified, and part is uploaded only if checksums don't match. Returns: Hex digest of part data MD5 checksum. """ logger.debug("Uploading part_id=%r of problem_id=%r", part_id, problem_id) # TODO: work-around to get a checksum of a binary stream (avoid 2x read) data = part_stream.read() digest = Client._digest(data) b64digest = Client._checksum_b64(digest) hexdigest = Client._checksum_hex(digest) del data if uploaded_part_checksum is not None: if hexdigest == uploaded_part_checksum: logger.debug("Uploaded part checksum matches. " "Skipping upload for part_id=%r.", part_id) return hexdigest else: logger.debug("Uploaded part checksum does not match. " "Re-uploading part_id=%r.", part_id) # rewind the stream after read part_stream.seek(0) path = 'bqm/multipart/{problem_id}/part/{part_id}'.format( problem_id=problem_id, part_id=part_id) headers = { 'Content-MD5': b64digest, 'Content-Type': 'application/octet-stream', } logger.trace("session.put(path=%r, data=%r, headers=%r)", path, part_stream, headers) try: response = session.put(path, data=part_stream, headers=headers) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() else: logger.trace("Part upload response: %r", response.text) response.raise_for_status() logger.debug("Uploaded part_id=%r of problem_id=%r", part_id, problem_id) return hexdigest @staticmethod @retried(_UPLOAD_REQUEST_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF) def _get_multipart_upload_status(session, problem_id): logger.debug("Checking upload status of problem_id=%r", problem_id) path = 'bqm/multipart/{problem_id}/status'.format(problem_id=problem_id) logger.trace("session.get(path=%r)", path) try: response = session.get(path) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() else: logger.trace("Upload status response: %r", response.text) response.raise_for_status() try: problem_status = response.json() problem_status['status'] problem_status['parts'] except KeyError: raise InvalidAPIResponseError("'status' and/or 'parts' missing") logger.debug("Got upload status=%r for problem_id=%r", problem_status['status'], problem_id) return problem_status @staticmethod def _failsafe_get_multipart_upload_status(session, problem_id): try: return Client._get_multipart_upload_status(session, problem_id) except Exception as e: logger.debug("Upload status check failed with %r", e) return {"status": "UNDEFINED", "parts": []} @staticmethod @retried(_UPLOAD_REQUEST_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF) def _combine_uploaded_parts(session, problem_id, checksum): logger.debug("Combining uploaded parts of problem_id=%r", problem_id) path = 'bqm/multipart/{problem_id}/combine'.format(problem_id=problem_id) body = dict(checksum=checksum) logger.trace("session.post(path=%r, json=%r)", path, body) try: response = session.post(path, json=body) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() else: logger.trace("Combine parts response: %r", response.text) response.raise_for_status() logger.debug("Issued a combine command for problem_id=%r", problem_id) @staticmethod def _uploaded_parts_from_problem_status(problem_status): uploaded_parts = {} if 
problem_status.get('status') == 'UPLOAD_IN_PROGRESS': for part in problem_status.get('parts', ()): part_no = part.get('part_number') checksum = part.get('checksum', '').strip('"') # fix double-quoting bug uploaded_parts[part_no] = checksum return uploaded_parts def _upload_part_worker(self, problem_id, part_no, chunk_stream, uploaded_part_checksum=None): with self.create_session() as session: part_checksum = self._upload_multipart_part( session, problem_id, part_id=part_no, part_stream=chunk_stream, uploaded_part_checksum=uploaded_part_checksum) return part_no, part_checksum def _upload_problem_worker(self, problem, problem_id=None): """Upload a problem to SAPI using multipart upload interface. Args: problem (bytes/str/file-like): Problem description. problem_id (str, optional): Problem ID under which to upload the problem. If omitted, a new problem is created. """ # in python 3.7+ we could create the session once, on thread init, # via executor initializer with self.create_session() as session: chunks = ChunkedData(problem, chunk_size=self._UPLOAD_PART_SIZE_BYTES) size = len(chunks.view) if problem_id is None: try: problem_id = self._initiate_multipart_upload(session, size) except Exception as e: errmsg = ("Multipart upload initialization failed " "with {!r}.".format(e)) logger.error(errmsg) raise ProblemUploadError(errmsg) # check problem status, so we only upload parts missing or invalid problem_status = \ self._failsafe_get_multipart_upload_status(session, problem_id) if problem_status.get('status') == 'UPLOAD_COMPLETED': logger.debug("Problem already uploaded.") return problem_id uploaded_parts = \ self._uploaded_parts_from_problem_status(problem_status) # enqueue all parts, worker skips if checksum matches parts = {} streams = collections.OrderedDict(enumerate(chunks)) for chunk_no, chunk_stream in streams.items(): part_no = chunk_no + 1 part_future = self._upload_part_executor.submit( self._upload_part_worker, problem_id, part_no, chunk_stream, uploaded_part_checksum=uploaded_parts.get(part_no)) parts[part_no] = part_future # wait for parts to upload/fail concurrent.futures.wait(parts.values()) # verify all parts uploaded without error for part_no, part_future in parts.items(): try: part_future.result() except Exception as e: errmsg = ("Multipart upload of problem_id={!r} failed for " "part_no={!r} with {!r}.".format(problem_id, part_no, e)) logger.error(errmsg) raise ProblemUploadError(errmsg) # verify all parts uploaded via status call # (check remote checksum matches the local one) final_problem_status = \ self._failsafe_get_multipart_upload_status(session, problem_id) final_uploaded_parts = \ self._uploaded_parts_from_problem_status(final_problem_status) if len(final_uploaded_parts) != len(parts): errmsg = "Multipart upload unexpectedly failed for some parts." 
logger.error(errmsg) logger.debug("problem_id=%r, expected_parts=%r, uploaded_parts=%r", problem_id, parts.keys(), final_uploaded_parts.keys()) raise ProblemUploadError(errmsg) for part_no, part_future in parts.items(): _, part_checksum = part_future.result() remote_checksum = final_uploaded_parts[part_no] if part_checksum != remote_checksum: errmsg = ("Checksum mismatch for part_no={!r} " "(local {!r} != remote {!r})".format( part_no, part_checksum, remote_checksum)) logger.error(errmsg) raise ProblemUploadError(errmsg) # send parts combine request combine_checksum = Client._combined_checksum(final_uploaded_parts) try: self._combine_uploaded_parts(session, problem_id, combine_checksum) except Exception as e: errmsg = ("Multipart upload of problem_id={!r} failed on parts " "combine with {!r}".format(problem_id, e)) logger.error(errmsg) raise ProblemUploadError(errmsg) return problem_id
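# ---------------------------------------------------------------------------
# Illustrative usage sketch (documentation only, not called by the library).
# A minimal, hedged example of the public API documented above: build a
# client from a configuration file, filter solvers by features as described
# in ``get_solvers``, and close the connection via the context manager.
# The filter values below are hypothetical examples.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # from_config() resolves: explicit kwargs > environment variables > config file
    with Client.from_config() as client:
        # Online QPU solvers with more than 2000 qubits, sorted by avg_load.
        for solver in client.get_solvers(qpu=True, online=True, num_qubits__gt=2000):
            print(solver)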
LidarTestPlot.py
import threading
import PyLidar3
import matplotlib.pyplot as plt
import math
import time


def draw():
    global is_plot
    while is_plot:
        plt.figure(1)
        plt.cla()
        plt.ylim(-9000, 9000)
        plt.xlim(-9000, 9000)
        plt.scatter(x, y, c='r', s=8)
        plt.pause(0.001)
    plt.close("all")


is_plot = True

x = []
y = []
for _ in range(360):
    x.append(0)
    y.append(0)

port = input("Enter port name which lidar is connected:")  # windows
Obj = PyLidar3.YdLidarX4(port)  # PyLidar3.your_version_of_lidar(port,chunk_size)
threading.Thread(target=draw).start()

if Obj.Connect():
    print(Obj.GetDeviceInfo())
    gen = Obj.StartScanning()
    t = time.time()  # start time
    while (time.time() - t) < 30:  # scan for 30 seconds
        data = next(gen)
        for angle in range(0, 360):
            if data[angle] > 1000:
                x[angle] = data[angle] * math.cos(math.radians(angle))
                y[angle] = data[angle] * math.sin(math.radians(angle))
    is_plot = False
    Obj.StopScanning()
    Obj.Disconnect()
else:
    print("Error connecting to device")
device.py
from time import sleep from flask import Flask, Response, jsonify, render_template, request from actuator import Actuator from sensor import Sensor import json import pika from enum import Enum from threading import Thread import os def start_all(): if (type == DeviceType.BOTH): sensor.start() actuator.start() elif (type == DeviceType.SENSOR): sensor.start() else: actuator.start() def stop_all(): if (type == DeviceType.BOTH): sensor.stop() actuator.stop() elif (type == DeviceType.SENSOR): sensor.stop() else: actuator.stop() def get_status(): if (type == DeviceType.BOTH): return {"name": name, "description": description, "sensor": sensor.get_last_sample(), "actuator": actuator.get_status()} elif (type == DeviceType.SENSOR): return {"name": name, "description": description, "sensor": sensor.get_last_sample()} else: return {"name": name, "description": description, "actuator": actuator.get_status()} def get_status_sensor(): return {"name": name, "description": description, "sensor": sensor.get_last_sample()} def get_status_actuator(): return {"name": name, "description": description, "actuator": actuator.get_status()} def connect_to_server(name): connection = pika.BlockingConnection( pika.ConnectionParameters(host='rabbitmq')) channel = connection.channel() channel.queue_declare(queue=name, durable=True) return channel def try_to_connect_to_server(name): try: channel = connect_to_server(name) return channel except: sleep(1) channel = try_to_connect_to_server(name) return channel def send_update_to_server(): info = json.dumps(get_status()) connection = pika.BlockingConnection( pika.ConnectionParameters(host='rabbitmq')) channel = connection.channel() channel.queue_declare(queue='device_queue', durable=True) channel.basic_publish( exchange='', routing_key='device_queue', body=info, properties=pika.BasicProperties( delivery_mode=2, )) connection.close() def subscriber_thread_function(channel): channel.basic_qos(prefetch_count=1) channel.basic_consume(queue=name, on_message_callback=server_message_callback) channel.start_consuming() def server_message_callback(ch, method, properties, body): cmd = body.decode() print("Received from server: %s" % cmd) ch.basic_ack(delivery_tag=method.delivery_tag) class DeviceType(Enum): SENSOR = 1 ACTUATOR = 2 BOTH = 3 app = Flask(__name__) # D_NAME name = os.getenv("D_NAME") if name == None: raise RuntimeError("Name not set") # D_TYPE type = os.getenv("D_TYPE", DeviceType.BOTH) if (type != DeviceType.BOTH): if (type == "1"): type = DeviceType.SENSOR elif (type == "2"): type = DeviceType.ACTUATOR else: type = DeviceType.BOTH # D_DESC description = os.getenv("D_DESC", "") print("Name: " + name + ", Type: " + str(type) + ", Description: " + description) print("Connecting to queue...") channel = try_to_connect_to_server(name) print("Connected to queue") # start thread that listens to commands from the server subscriber_thread = Thread(target=subscriber_thread_function, args=(channel,)) subscriber_thread.start() sensor = Sensor() actuator = Actuator() start_all() send_update_to_server() @app.route("/") def hello_world(): return "<p>Hello, World!</p>" @app.route("/start", methods=['POST']) def start(): print("Starting component(s) of device " + name) start_all() send_update_to_server() return jsonify(get_status()) @app.route("/stop", methods=['POST']) def stop(): print("Stopping component(s) of device " + name) stop_all() send_update_to_server() return jsonify(get_status()) @app.route("/start_sensor", methods=['POST']) def start_sensor(): print("Starting sensor of 
device" + name) sensor.start() send_update_to_server() return jsonify(get_status()) @app.route("/stop_sensor", methods=['POST']) def stop_sensor(): print("Stopping sensor of device " + name) sensor.stop() send_update_to_server() return jsonify(get_status()) @app.route("/start_actuator", methods=['POST']) def start_actuator(): print("Starting actuator of device " + name) actuator.start() send_update_to_server() return jsonify(get_status()) @app.route("/stop_actuator", methods=['POST']) def stop_actuator(): print("Stopping actuator of device " + name) actuator.stop() send_update_to_server() return jsonify(get_status()) @app.route("/status") def status(): return jsonify(get_status()) @app.route("/status_view") def status_view(): if (sensor.active == True): sensor_status = "active" else: sensor_status = "inactive" if (actuator.active == True): actuator_status = "active" else: actuator_status = "inactive" sample = sensor.get_last_sample() if (type == DeviceType.BOTH): return render_template("status.html", name=name, description=description, has_sensor=True, sensor_name=sensor.name, sensor_status=sensor_status, sensor_timestamp=sample.get("timestamp"), sensor_metric=sensor.metric.title(), sensor_value=sample.get(sensor.metric), sensor_unit=sample.get("unit"), has_actuator=True, actuator_name=actuator.name, actuator_status=actuator_status, actuator_state=actuator.state, actuator_unit=actuator.unit ) elif (type == DeviceType.SENSOR): return render_template("status.html", name=name, description=description, has_sensor=True, sensor_name=sensor.name, sensor_status=sensor_status, sensor_timestamp=sample.get("timestamp"), sensor_metric=sensor.metric.title(), sensor_value=sample.get(sensor.metric), sensor_unit=sample.get("unit"), has_actuator=False ) else: return render_template("status.html", name=name, description=description, has_sensor=False, has_actuator=True, actuator_name=actuator.name, actuator_status=actuator_status, actuator_state=actuator.state, actuator_unit=actuator.unit ) @app.route("/status_sensor") def status_sensor(): return jsonify(get_status_sensor()) @app.route("/status_actuator") def status_actuator(): return jsonify(get_status_actuator()) @app.route("/set_actuator", methods=['POST']) def set_actuator(): state = request.form["state"] if (actuator.active == False): return Response(json.dumps({"error": "actuator not active"}), status=400, mimetype='application/json') if (actuator.set_state(state) == False): return Response(json.dumps({"error": "invalid state parameter"}), status=400, mimetype='application/json') send_update_to_server() return jsonify(get_status_actuator()) if __name__ == "__main__": app.run(host="0.0.0.0")
pipeclient.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Automate Audacity via mod-script-pipe. Pipe Client may be used as a command-line script to send commands to Audacity via the mod-script-pipe interface, or loaded as a module. Requires Python 2.7 or later. Python 3 strongly recommended. ====================== Command Line Interface ====================== usage: pipeclient.py [-h] [-t] [-s ] [-d] Arguments --------- -h,--help: optional show short help and exit -t, --timeout: float, optional timeout for reply in seconds (default: 10) -s, --show-time: bool, optional show command execution time (default: True) -d, --docs: optional show this documentation and exit Example ------- $ python3 pipeclient.py -t 20 -s False Launches command line interface with 20 second time-out for returned message, and don't show the execution time. When prompted, enter the command to send (not quoted), or 'Q' to quit. $ Enter command or 'Q' to quit: GetInfo: Type=Tracks Format=LISP ============ Module Usage ============ Note that on a critical error (such as broken pipe), the module just exits. If a more graceful shutdown is required, replace the sys.exit()'s with exceptions. Example ------- # Import the module: >>> import pipeclient # Create a client instance: >>> client = pipeclient.PipeClient() # Send a command: >>> client.write("Command", timer=True) # Read the last reply: >>> print(client.read()) See Also -------- PipeClient.write : Write a command to _write_pipe. PipeClient.read : Read Audacity's reply from pipe. Copyright Steve Daulton 2018 Released under terms of the GNU General Public License version 2: <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html /> """ import os import sys import threading import time import errno import argparse if sys.version_info[0] < 3 and sys.version_info[1] < 7: sys.exit('PipeClient Error: Python 2.7 or later required') # Platform specific constants if sys.platform == 'win32': WRITE_NAME = '\\\\.\\pipe\\ToSrvPipe' READ_NAME = '\\\\.\\pipe\\FromSrvPipe' EOL = '\r\n\0' else: # Linux or Mac PIPE_BASE = '/tmp/audacity_script_pipe.' WRITE_NAME = PIPE_BASE + 'to.' + str(os.getuid()) READ_NAME = PIPE_BASE + 'from.' + str(os.getuid()) EOL = '\n' class PipeClient(): """Write / read client access to Audacity via named pipes. Normally there should be just one instance of this class. If more instances are created, they all share the same state. __init__ calls _write_thread_start() and _read_thread_start() on first instantiation. Parameters ---------- None Attributes ---------- reader_pipe_broken : event object Set if pipe reader fails. Audacity may have crashed reply_ready : event object flag cleared when command sent and set when response received timer : bool When true, time the command execution (default False) reply : string message received when Audacity completes the command See Also -------- write : Write a command to _write_pipe. read : Read Audacity's reply from pipe. """ reader_pipe_broken = threading.Event() reply_ready = threading.Event() _shared_state = {} def __new__(cls, *p, **k): self = object.__new__(cls, *p, **k) self.__dict__ = cls._shared_state return self def __init__(self): self.timer = False self._start_time = 0 self._write_pipe = None self.reply = '' if not self._write_pipe: self._write_thread_start() self._read_thread_start() def _write_thread_start(self): """Start _write_pipe thread""" # Pipe is opened in a new thread so that we don't # freeze if Audacity is not running. 
write_thread = threading.Thread(target=self._write_pipe_open) write_thread.daemon = True write_thread.start() # Allow a little time for connection to be made. time.sleep(0.1) if not self._write_pipe: sys.exit('PipeClientError: Write pipe cannot be opened.') def _write_pipe_open(self): """Open _write_pipe.""" self._write_pipe = open(WRITE_NAME, 'w') def _read_thread_start(self): """Start read_pipe thread.""" read_thread = threading.Thread(target=self._reader) read_thread.daemon = True read_thread.start() def write(self, command, timer=False): """Write a command to _write_pipe. Parameters ---------- command : string The command to send to Audacity timer : bool, optional If true, time the execution of the command Example ------- write("GetInfo: Type=Labels", timer=True): """ self.timer = timer print(('Sending command:', command)) self._write_pipe.write(command + EOL) # Check that read pipe is alive if PipeClient.reader_pipe_broken.isSet(): sys.exit('PipeClient: Read-pipe error.') try: self._write_pipe.flush() if self.timer: self._start_time = time.time() self.reply = '' PipeClient.reply_ready.clear() except IOError as err: if err.errno == errno.EPIPE: sys.exit('PipeClient: Write-pipe error.') else: raise def _reader(self): """Read FIFO in worker thread.""" # Thread will wait at this read until it connects. # Connection should occur as soon as _write_pipe has connected. read_pipe = open(READ_NAME, 'r') message = '' pipe_ok = True while pipe_ok: line = read_pipe.readline() # Stop timer as soon as we get first line of response. stop_time = time.time() while pipe_ok and line != '\n': message += line line = read_pipe.readline() if line == '': # No data in read_pipe indicates that the pipe is broken # (Audacity may have crashed). PipeClient.reader_pipe_broken.set() pipe_ok = False if self.timer: xtime = (stop_time - self._start_time) * 1000 message += 'Execution time: {0:.2f}ms'.format(xtime) self.reply = message PipeClient.reply_ready.set() message = '' read_pipe.close() def read(self): """Read Audacity's reply from pipe. Returns ------- string The reply from the last command sent to Audacity, or null string if reply not received. Null string usually indicates that Audacity is still processing the last command. 
""" if not PipeClient.reply_ready.isSet(): return '' return self.reply def bool_from_string(strval): """Return boolean value from string""" if strval.lower() in ('true', 't', '1', 'yes', 'y'): return True if strval.lower() in ('false', 'f', '0', 'no', 'n'): return False raise argparse.ArgumentTypeError('Boolean value expected.') def main(): """Interactive command-line for PipeClient""" parser = argparse.ArgumentParser() parser.add_argument('-t', '--timeout', type=float, metavar='', default=10, help="timeout for reply in seconds (default: 10") parser.add_argument('-s', '--show-time', metavar='True/False', nargs='?', type=bool_from_string, const='t', default='t', dest='show', help='show command execution time (default: True)') parser.add_argument('-d', '--docs', action='store_true', help='show documentation and exit') args = parser.parse_args() if args.docs: print(__doc__) sys.exit(0) client = PipeClient() while True: reply = '' if sys.version_info[0] < 3: message = eval(input("\nEnter command or 'Q' to quit: ")) else: message = eval(input( "\nEnter command or 'Q' to quit: ")) start = time.time() if message.upper() == 'Q': sys.exit(0) elif message == '': pass else: client.write(message, timer=args.show) while reply == '': time.sleep(0.1) # allow time for reply if time.time() - start > args.timeout: reply = 'PipeClient: Reply timed-out.' else: reply = client.read() print(reply) if __name__ == '__main__': main()
build.py
import itertools import multiprocessing import os import re import shutil import sys import time from glob import glob from multiprocessing import Pool from subprocess import PIPE, Popen, check_call from pp.components import component_factory from pp.config import CONFIG, logging from pp.doe import load_does def run_python(filename): """ Run a python script and keep track of some context """ logging.debug("Running `{}`.".format(filename)) command = ["python", filename] # Run the process t = time.time() process = Popen(command, stdout=PIPE, stderr=PIPE) stdout, _ = process.communicate() total_time = time.time() - t if process.returncode == 0: logging.info("v {} ({:.1f}s)".format(os.path.relpath(filename), total_time)) else: logging.info( "! Error in {} {:.1f}s)".format(os.path.relpath(filename), total_time) ) # message = "! Error in `{}`".format(basename(filename)) # logging.error(message, exc_info=(Exception, stderr.strip(), None)) if len(stdout.decode().strip()) > 0: logging.debug("Output of python {}:\n{}".format(filename, stdout.strip())) return filename, process.returncode def build_devices(regex=".*", overwrite=True): """ Builds all the python files in devices/ """ # Avoid accidentally rebuilding devices if ( os.path.isdir(CONFIG["gds_directory"]) and os.listdir(CONFIG["gds_directory"]) and not overwrite ): print("Run `make clean` to remove already built devices.") sys.exit(0) # Collect all the files to run. all_files = [ os.path.join(dp, f) for dp, dn, filenames in os.walk(CONFIG["devices_directory"]) for f in filenames if os.path.splitext(f)[1] == ".py" ] all_files = sorted(all_files) all_files = [f for f in all_files if re.search(regex, f)] # Notify user logging.info( "Building splits on {} threads. {} files to run.".format( multiprocessing.cpu_count(), len(all_files) ) ) logging.info( "Debug information at {}".format( os.path.relpath(os.path.join(CONFIG["log_directory"], "debug.log")) ) ) # Now run all the files in batches of $CPU_SIZE. with Pool(processes=multiprocessing.cpu_count()) as pool: for filename, rc in pool.imap_unordered(run_python, all_files): logging.debug("Finished {} {}".format(filename, rc)) # Report on what we did. devices = glob(os.path.join(CONFIG["gds_directory"], "*.gds")) countmsg = "There are now {} GDS files in {}.".format( len(devices), os.path.relpath(CONFIG["gds_directory"]) ) logging.info("Finished building devices. {}".format(countmsg)) def build_clean(): """ Cleans generated files such as build/. 
""" target = CONFIG["build_directory"] if os.path.exists(target): shutil.rmtree(target) print(("Deleted {}".format(os.path.abspath(target)))) def build_cache_pull(): """ Pull devices from the cache """ if CONFIG.get("cache_url"): logging.info("Loading devices from cache...") check_call( [ "rsync", "-rv", "--delete", CONFIG["cache_url"], CONFIG["build_directory"] + "/", ] ) def build_cache_push(): """ Push devices to the cache """ if not os.listdir(CONFIG["build_directory"]): logging.info("Nothing to push") return if CONFIG.get("cache_url"): logging.info("Uploading devices to cache...") check_call( [ "rsync", "-rv", CONFIG["build_directory"] + "/", CONFIG["cache_url"], "--delete", ] ) def _build_doe(doe_name, config, component_factory=component_factory): from pp.write_doe import write_doe doe = config["does"][doe_name] component_type = doe.get("component") component_function = component_factory[component_type] write_doe( component_type=component_function, doe_name=doe_name, do_permutations=doe.get("do_permutations", True), list_settings=doe.get("settings"), description=doe.get("description"), analysis=doe.get("analysis"), test=doe.get("test"), functions=doe.get("functions"), ) def build_does(filepath, component_factory=component_factory): """ this function is depreacted Writes DOE settings from config.yml file and writes GDS into build_directory If you want to use cache use pp.generate_does instead Write For each DOE: - GDS - json metadata - ports CSV - markdown report, with DOE settings """ does = load_does(filepath) doe_names = does.keys() doe_params = zip( doe_names, itertools.repeat(filepath), itertools.repeat(component_factory) ) p = multiprocessing.Pool(multiprocessing.cpu_count()) p.starmap(_build_doe, doe_params) # for doe_name in doe_names: # p = multiprocessing.Process(target=_build_doe, args=(doe_name, config, component_factory=component_factory)) # p.start() if __name__ == "__main__": does_path = CONFIG["samples_path"] / "mask" / "does.yml" build_does(does_path) # run_python("name.py")
test.py
import argparse import json import os from pathlib import Path from threading import Thread import numpy as np import torch import yaml from tqdm import tqdm from models.experimental import attempt_load from utils.datasets import create_dataloader from utils.general import ( coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr, ) from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt from utils.torch_utils import select_device, time_synchronized def test( data, weights=None, batch_size=32, imgsz=1024, conf_thres=0.007, iou_thres=0.4, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(""), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, wandb_logger=None, compute_loss=None, is_coco=False, ): # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path( increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) ) # increment run (save_dir / "labels" if save_txt else save_dir).mkdir( parents=True, exist_ok=True ) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check img_size # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 # if device.type != 'cpu' and torch.cuda.device_count() > 1: # model = nn.DataParallel(model) # Half half = device.type != "cpu" # half precision only supported on CUDA if half: model.half() # Configure model.eval() if isinstance(data, str): is_coco = data.endswith("coco.yaml") with open(data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data["nc"]) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Logging log_imgs = 0 if wandb_logger and wandb_logger.wandb: log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != "cpu": model( torch.zeros(1, 3, imgsz, imgsz) .to(device) .type_as(next(model.parameters())) ) # run once task = ( opt.task if opt.task in ("train", "val", "test") else "val" ) # path to train/val/test images dataloader = create_dataloader( data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr(f"{task}: "), )[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = { k: v for k, v in enumerate( model.names if hasattr(model, "names") else model.module.names ) } coco91class = coco80_to_coco91_class() s = ("%20s" + "%12s" * 6) % ( "Class", "Images", "Labels", "P", "R", "mAP@.5", "mAP@.5:.95", ) p, r, f1, mp, mr, map50, map, t0, t1 = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 
255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model( img, augment=augment ) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][ :3 ] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to( device ) # to pixels lb = ( [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] ) # for autolabelling t = time_synchronized() out = non_max_suppression( out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True, # merge=opt.merge, ) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append( ( torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls, ) ) continue # Predictions predn = pred.clone() scale_coords( img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1] ) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[ [1, 0, 1, 0] ] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = ( (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn) .view(-1) .tolist() ) # normalized xywh line = ( (cls, *xywh, conf) if save_conf else (cls, *xywh) ) # label format with open(save_dir / "labels" / (path.stem + ".txt"), "a") as f: f.write(("%g " * len(line)).rstrip() % line + "\n") # W&B logging - Media Panel Plots if ( len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0 ): # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [ { "position": { "minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3], }, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel", } for *xyxy, conf, cls in pred.tolist() ] boxes = { "predictions": {"box_data": box_data, "class_labels": names} } # inference-space wandb_images.append( wandb_logger.wandb.Image( img[si], boxes=boxes, caption=path.name ) ) wandb_logger.log_training_progress( predn, path, names ) # logs dsviz tables # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append( { "image_id": image_id, "category_id": coco91class[int(p[5])] if is_coco else int(p[5]), "bbox": [round(x, 3) for x in b], "score": round(p[4], 5), } ) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords( img[si].shape[1:], tbox, shapes[si][0], shapes[si][1] ) # native-space labels if plots: confusion_matrix.process_batch( predn, torch.cat((labels[:, 0:1], tbox), 1) ) # Per target class for cls in torch.unique(tcls_tensor): ti = ( (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) ) # prediction indices pi = ( (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) ) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max( 1 ) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if ( len(detected) == nl ): # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f"test_batch{batch_i}_labels.jpg" # labels Thread( target=plot_images, args=(img, targets, paths, f, names), daemon=True ).start() f = save_dir / f"test_batch{batch_i}_pred.jpg" # predictions Thread( target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True, ).start() # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): p, r, ap, f1, ap_class = ap_per_class( *stats, plot=plots, save_dir=save_dir, names=names ) ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() nt = np.bincount( stats[3].astype(np.int64), minlength=nc ) # number of targets per class else: nt = torch.zeros(1) # Print results pf = "%20s" + "%12i" * 2 + "%12.3g" * 4 # print format print(pf % ("all", seen, nt.sum(), mp, mr, map50, map)) # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(ap_class): print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds t = tuple(x / seen * 1e3 for x in (t0, t1, t0 + t1)) + ( imgsz, imgsz, batch_size, ) # tuple if not training: print( "Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g" % t ) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) if wandb_logger and wandb_logger.wandb: val_batches = [ wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob("test*.jpg")) ] wandb_logger.log({"Validation": val_batches}) if wandb_images: wandb_logger.log({"Bounding Box Debugger/Images": wandb_images}) # Save JSON if save_json and len(jdict): w = ( Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else "" ) # weights anno_json = "../coco/annotations/instances_val2017.json" # annotations json pred_json = str(save_dir / 
f"{w}_predictions.json") # predictions json print("\nEvaluating pycocotools mAP... saving %s..." % pred_json) with open(pred_json, "w") as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api eval = COCOeval(anno, pred, "bbox") if is_coco: eval.params.imgIds = [ int(Path(x).stem) for x in dataloader.dataset.img_files ] # image IDs to evaluate eval.evaluate() eval.accumulate() eval.summarize() map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) except Exception as e: print(f"pycocotools unable to run: {e}") # Return results model.float() # for training if not training: s = ( f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else "" ) print(f"Results saved to {save_dir}{s}") maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t if __name__ == "__main__": parser = argparse.ArgumentParser(prog="test.py") parser.add_argument( "--weights", nargs="+", type=str, default="yolov5s.pt", help="model.pt path(s)" ) parser.add_argument( "--data", type=str, default="/data/minki/kaggle/vinbigdata-cxr/yolov5", help="*.data path", ) # 'data/coco128.yaml' # FIXME: parser.add_argument( "--fold", type=str, required=True, help="0..6 data folds", ) # 'data/coco128.yaml' parser.add_argument( "--batch-size", type=int, default=32, help="size of each image batch" ) parser.add_argument( "--img-size", type=int, default=1024, help="inference size (pixels)" ) parser.add_argument( "--conf-thres", type=float, default=0.007, help="object confidence threshold" ) parser.add_argument( "--iou-thres", type=float, default=0.4, help="IOU threshold for NMS" ) parser.add_argument( "--task", default="val", help="train, val, test, speed or study" ) parser.add_argument( "--device", default="", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu" ) parser.add_argument( "--single-cls", action="store_true", help="treat as single-class dataset" ) parser.add_argument("--augment", action="store_true", help="augmented inference") parser.add_argument("--verbose", action="store_true", help="report mAP by class") parser.add_argument("--save-txt", action="store_true", help="save results to *.txt") parser.add_argument( "--save-hybrid", action="store_true", help="save label+prediction hybrid results to *.txt", ) parser.add_argument( "--save-conf", action="store_true", help="save confidences in --save-txt labels" ) parser.add_argument( "--save-json", action="store_true", help="save a cocoapi-compatible JSON results file", ) parser.add_argument( "--project", default="/data/minki/kaggle/vinbigdata-cxr/yolov5/runs/test", help="save to project/name", ) parser.add_argument("--name", default="exp", help="save to project/name") parser.add_argument( "--exist-ok", action="store_true", help="existing project/name ok, do not increment", ) # parser.add_argument( # "--merge", # action="store_true", # help="nms + wbf", # ) opt = parser.parse_args() opt.save_json |= opt.data.endswith("coco.yaml") # FIXME: opt.data = f"{opt.data}/config{opt.fold}.yaml" opt.data = check_file(opt.data) # check file print(opt) check_requirements() if opt.task in ("train", "val", "test"): # run normally test( opt.data, opt.weights, opt.batch_size, opt.img_size, opt.conf_thres, opt.iou_thres, opt.save_json, opt.single_cls, opt.augment, opt.verbose, save_txt=opt.save_txt | opt.save_hybrid, save_hybrid=opt.save_hybrid, save_conf=opt.save_conf, ) elif opt.task == "speed": # speed benchmarks for w in opt.weights: test( opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, ) elif opt.task == "study": # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) for w in opt.weights: f = f"study_{Path(opt.data).stem}_{Path(w).stem}.txt" # filename to save to y = [] # y axis for i in x: # img-size print(f"\nRunning {f} point {i}...") r, _, t = test( opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, plots=False, ) y.append(r + t) # results and times np.savetxt(f, y, fmt="%10.4g") # save os.system("zip -r study.zip study_*.txt") plot_study_txt(x=x) # plot
data_util.py
''' this file is modified from keras implemention of data process multi-threading, see https://github.com/fchollet/keras/blob/master/keras/utils/data_utils.py ''' import time import numpy as np import threading import multiprocessing try: import queue except ImportError: import Queue as queue class GeneratorEnqueuer(): """Builds a queue out of a data generator. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. # Arguments generator: a generator function which endlessly yields data use_multiprocessing: use multiprocessing if True, otherwise threading wait_time: time to sleep in-between calls to `put()` random_seed: Initial seed for workers, will be incremented by one for each workers. """ def __init__(self, generator, use_multiprocessing=False, wait_time=0.05, random_seed=None): self.wait_time = wait_time self._generator = generator self._use_multiprocessing = use_multiprocessing self._threads = [] self._stop_event = None self.queue = None self.random_seed = random_seed def start(self, workers=1, max_queue_size=10): """Kicks off threads which add data from the generator into the queue. # Arguments workers: number of worker threads max_queue_size: queue size (when full, threads could block on `put()`) """ def data_generator_task(): while not self._stop_event.is_set(): try: if self._use_multiprocessing or self.queue.qsize() < max_queue_size: generator_output = next(self._generator) self.queue.put(generator_output) else: time.sleep(self.wait_time) except Exception: self._stop_event.set() raise try: if self._use_multiprocessing: self.queue = multiprocessing.Queue(maxsize=max_queue_size) self._stop_event = multiprocessing.Event() else: self.queue = queue.Queue() self._stop_event = threading.Event() for _ in range(workers): if self._use_multiprocessing: # Reset random seed else all children processes # share the same seed np.random.seed(self.random_seed) thread = multiprocessing.Process(target=data_generator_task) thread.daemon = True if self.random_seed is not None: self.random_seed += 1 else: thread = threading.Thread(target=data_generator_task) self._threads.append(thread) thread.start() except: self.stop() raise def is_running(self): return self._stop_event is not None and not self._stop_event.is_set() def stop(self, timeout=None): """Stops running threads and wait for them to exit, if necessary. Should be called by the same thread which called `start()`. # Arguments timeout: maximum time to wait on `thread.join()`. """ if self.is_running(): self._stop_event.set() for thread in self._threads: if thread.is_alive(): if self._use_multiprocessing: thread.terminate() else: thread.join(timeout) if self._use_multiprocessing: if self.queue is not None: self.queue.close() self._threads = [] self._stop_event = None self.queue = None def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. # Returns A generator """ while self.is_running(): if not self.queue.empty(): inputs = self.queue.get() if inputs is not None: yield inputs else: time.sleep(self.wait_time)
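A small usage sketch for the GeneratorEnqueuer defined above, assuming it runs in the same module (or after importing the class); the toy generator, single worker and queue size are arbitrary illustration choices.

import itertools

def toy_batches():
    # GeneratorEnqueuer expects an endless generator.
    for i in itertools.count():
        yield i

enqueuer = GeneratorEnqueuer(toy_batches(), use_multiprocessing=False)
enqueuer.start(workers=1, max_queue_size=10)
output_generator = enqueuer.get()

for _ in range(5):
    print(next(output_generator))

enqueuer.stop()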
socket_client.py
### RPGOnline
### A Synergy Studios Project

import socket
from packet import Packet
from threading import Thread
from datetime import datetime


class SocketClient:
    """The socket client which provides a base for sending and loading data."""

    def __init__(self, host, port):
        self.SERVER_HOST = host
        self.SERVER_PORT = port
        self.MY_IP = socket.gethostbyname(socket.gethostname())
        self.st = '%'
        self.format = 'UTF-8'
        self.s = socket.socket()
        self.log = {}
        self.state = '~init'

    def add_log(self, l):
        """Adds a specific value to the log."""
        time = datetime.now()
        self.log[f'{time.hour}:{time.minute}:{time.second}'] = l
        print(str(list(self.log.keys())[-1]) + ': ' + str(list(self.log.values())[-1]))

    def change_state(self, sta):
        """Changes the state of the client to the selected state, and adds a log."""
        self.state = sta
        self.add_log(f'[/] <Client> state changed to {sta}')

    def start_client(self):
        """Starts the connection to the host."""
        self.add_log(f'[*] <Client> Obj @ {self} initialised successfully')
        self.change_state('~setup')
        try:
            self.add_log(f'[*] <Client> attempting connection to {self.SERVER_HOST}:{self.SERVER_PORT}')
            self.s.connect((self.SERVER_HOST, self.SERVER_PORT))
        except BaseException as e:
            self.exit_client(e)
        self.add_log(f'[+] Successfully connected to {self.SERVER_HOST}:{self.SERVER_PORT}!')
        self.loop()

    def exit_client(self, error=None):
        """Exits the connection to the server, with an optional error."""
        self.add_log(f'[!] <Client> Socket shutdown, opt error: {error}')
        # Close this client's own socket rather than the module-level instance.
        self.s.close()

    def send(self, packet):
        """Sends a packet of data to the server, as encoded in the specified format."""
        self.s.send(packet.unwrap(self.st).encode(self.format))

    def listen_for_messages(self):
        """Listens for messages from the host/server."""
        while True:
            try:
                msg = self.s.recv(1024).decode(self.format)
                self.add_log(f'[>] Received message {msg} from the server.')
            except BaseException as e:
                self.exit_client(e)
                break

    def loop(self):
        """The main loop for the client."""
        self.change_state('~running')
        t = Thread(target=self.listen_for_messages)
        t.daemon = True
        t.start()
        while True:
            if self.state == '~running':
                pass


test_userid = 'Toblobs#1234'
test_sessionid = 'ewhfe_jefefe!'

s = SocketClient('127.0.0.1', 4545)
s.start_client()
test_profile.py
import pytest import sys import time from tlz import first import threading from distributed.compatibility import WINDOWS from distributed import metrics from distributed.profile import ( process, merge, create, call_stack, identifier, watch, llprocess, ll_get_stack, plot_data, ) def test_basic(): def test_g(): time.sleep(0.01) def test_h(): time.sleep(0.02) def test_f(): for i in range(100): test_g() test_h() thread = threading.Thread(target=test_f) thread.daemon = True thread.start() state = create() for i in range(100): time.sleep(0.02) frame = sys._current_frames()[thread.ident] process(frame, None, state) assert state["count"] == 100 d = state while len(d["children"]) == 1: d = first(d["children"].values()) assert d["count"] == 100 assert "test_f" in str(d["description"]) g = [c for c in d["children"].values() if "test_g" in str(c["description"])][0] h = [c for c in d["children"].values() if "test_h" in str(c["description"])][0] assert g["count"] < h["count"] assert 95 < g["count"] + h["count"] <= 100 pd = plot_data(state) assert len(set(map(len, pd.values()))) == 1 # all same length assert len(set(pd["color"])) > 1 # different colors @pytest.mark.skipif( WINDOWS, reason="no low-level profiler support for Windows available" ) def test_basic_low_level(): pytest.importorskip("stacktrace") state = create() for i in range(100): time.sleep(0.02) frame = sys._current_frames()[threading.get_ident()] llframes = {threading.get_ident(): ll_get_stack(threading.get_ident())} for f in llframes.values(): if f is not None: llprocess(f, None, state) assert state["count"] == 100 children = state.get("children") assert children expected = "<low-level>" for k, v in zip(children.keys(), children.values()): desc = v.get("description") assert desc filename = desc.get("filename") assert expected in k and filename == expected def test_merge(): a1 = { "count": 5, "identifier": "root", "description": "a", "children": { "b": { "count": 3, "description": "b-func", "identifier": "b", "children": {}, }, "c": { "count": 2, "description": "c-func", "identifier": "c", "children": {}, }, }, } a2 = { "count": 4, "description": "a", "identifier": "root", "children": { "d": { "count": 2, "description": "d-func", "children": {}, "identifier": "d", }, "c": { "count": 2, "description": "c-func", "children": {}, "identifier": "c", }, }, } expected = { "count": 9, "identifier": "root", "description": "a", "children": { "b": { "count": 3, "description": "b-func", "identifier": "b", "children": {}, }, "d": { "count": 2, "description": "d-func", "identifier": "d", "children": {}, }, "c": { "count": 4, "description": "c-func", "identifier": "c", "children": {}, }, }, } assert merge(a1, a2) == expected def test_merge_empty(): assert merge() == create() assert merge(create()) == create() assert merge(create(), create()) == create() def test_call_stack(): frame = sys._current_frames()[threading.get_ident()] L = call_stack(frame) assert isinstance(L, list) assert all(isinstance(s, str) for s in L) assert "test_call_stack" in str(L[-1]) def test_identifier(): frame = sys._current_frames()[threading.get_ident()] assert identifier(frame) == identifier(frame) assert identifier(None) == identifier(None) def test_watch(): start = metrics.time() def stop(): return metrics.time() > start + 0.500 start_threads = threading.active_count() log = watch(interval="10ms", cycle="50ms", stop=stop) start = metrics.time() # wait until thread starts up while threading.active_count() <= start_threads: assert metrics.time() < start + 2 time.sleep(0.01) 
time.sleep(0.5) assert 1 < len(log) < 10 start = metrics.time() while threading.active_count() > start_threads: assert metrics.time() < start + 2 time.sleep(0.01)
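Outside the test suite, the same watch() helper can be driven directly; a minimal sketch, with the sampling interval, cycle and 1-second stop condition being arbitrary values.

import time
from distributed import metrics
from distributed.profile import watch

start = metrics.time()
# Sample this process every 10ms and snapshot the aggregate every 50ms
# until the stop() callable returns True (about one second here).
log = watch(interval="10ms", cycle="50ms", stop=lambda: metrics.time() > start + 1)

time.sleep(1.5)  # give the background sampling thread time to run and finish
print(len(log), "profile snapshots collected")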
convert_tfrecords.py
# Copyright 2018 Changan Wang # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime import os import random import sys import threading import xml.etree.ElementTree as xml_tree import numpy as np import six import tensorflow as tf import dataset_common '''How to organize your dataset folder: VOCROOT/ |->VOC2007/ | |->Annotations/ | |->ImageSets/ | |->... |->VOC2012/ | |->Annotations/ | |->ImageSets/ | |->... |->VOC2007TEST/ | |->Annotations/ | |->... ''' tf.app.flags.DEFINE_string('dataset_directory', '/media/rs/7A0EE8880EE83EAF/Detections/PASCAL/VOC', 'All datas directory') tf.app.flags.DEFINE_string('train_splits', 'VOC2007, VOC2012', 'Comma-separated list of the training data sub-directory') tf.app.flags.DEFINE_string('validation_splits', 'VOC2007TEST', 'Comma-separated list of the validation data sub-directory') tf.app.flags.DEFINE_string('output_directory', '/media/rs/7A0EE8880EE83EAF/Detections/SSD/dataset/tfrecords', 'Output data directory') tf.app.flags.DEFINE_integer('train_shards', 16, 'Number of shards in training TFRecord files.') tf.app.flags.DEFINE_integer('validation_shards', 16, 'Number of shards in validation TFRecord files.') tf.app.flags.DEFINE_integer('num_threads', 8, 'Number of threads to preprocess the images.') RANDOM_SEED = 180428 FLAGS = tf.app.flags.FLAGS def _int64_feature(value): """Wrapper for inserting int64 features into Example proto.""" if not isinstance(value, list): value = [value] return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) def _float_feature(value): """Wrapper for inserting float features into Example proto.""" if not isinstance(value, list): value = [value] return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def _bytes_list_feature(value): """Wrapper for inserting a list of bytes features into Example proto. """ if not isinstance(value, list): value = [value] return tf.train.Feature(bytes_list=tf.train.BytesList(value=value)) def _bytes_feature(value): """Wrapper for inserting bytes features into Example proto.""" if isinstance(value, six.string_types): value = six.binary_type(value, encoding='utf-8') return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _convert_to_example(filename, image_name, image_buffer, bboxes, labels, labels_text, difficult, truncated, height, width): """Build an Example proto for an example. 
Args: filename: string, path to an image file, e.g., '/path/to/example.JPG' image_buffer: string, JPEG encoding of RGB image bboxes: List of bounding boxes for each image labels: List of labels for bounding box labels_text: List of labels' name for bounding box difficult: List of ints indicate the difficulty of that bounding box truncated: List of ints indicate the truncation of that bounding box height: integer, image height in pixels width: integer, image width in pixels Returns: Example proto """ ymin = [] xmin = [] ymax = [] xmax = [] for b in bboxes: assert len(b) == 4 # pylint: disable=expression-not-assigned [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)] # pylint: enable=expression-not-assigned channels = 3 image_format = 'JPEG' example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': _int64_feature(height), 'image/width': _int64_feature(width), 'image/channels': _int64_feature(channels), 'image/shape': _int64_feature([height, width, channels]), 'image/object/bbox/xmin': _float_feature(xmin), 'image/object/bbox/xmax': _float_feature(xmax), 'image/object/bbox/ymin': _float_feature(ymin), 'image/object/bbox/ymax': _float_feature(ymax), 'image/object/bbox/label': _int64_feature(labels), 'image/object/bbox/label_text': _bytes_list_feature(labels_text), 'image/object/bbox/difficult': _int64_feature(difficult), 'image/object/bbox/truncated': _int64_feature(truncated), 'image/format': _bytes_feature(image_format), 'image/filename': _bytes_feature(image_name.encode('utf8')), 'image/encoded': _bytes_feature(image_buffer)})) return example class ImageCoder(object): """Helper class that provides TensorFlow image coding utilities.""" def __init__(self): # Create a single Session to run all image coding calls. self._sess = tf.Session() # Initializes function that converts PNG to JPEG data. self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that converts CMYK JPEG data to RGB JPEG data. self._cmyk_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that decodes RGB JPEG data. self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) == 3 assert image.shape[2] == 3 return image def _process_image(filename, coder): """Process a single image file. Args: filename: string, path to an image file e.g., '/path/to/example.JPG'. coder: instance of ImageCoder to provide TensorFlow image coding utils. Returns: image_buffer: string, JPEG encoding of RGB image. height: integer, image height in pixels. width: integer, image width in pixels. """ # Read the image file. with tf.gfile.FastGFile(filename, 'rb') as f: image_data = f.read() # Decode the RGB JPEG. 
image = coder.decode_jpeg(image_data) # Check that image converted to RGB assert len(image.shape) == 3 height = image.shape[0] width = image.shape[1] assert image.shape[2] == 3 return image_data, height, width def _find_image_bounding_boxes(directory, cur_record): """Find the bounding boxes for a given image file. Args: directory: string; the path of all datas. cur_record: list of strings; the first of which is the sub-directory of cur_record, the second is the image filename. Returns: bboxes: List of bounding boxes for each image. labels: List of labels for bounding box. labels_text: List of labels' name for bounding box. difficult: List of ints indicate the difficulty of that bounding box. truncated: List of ints indicate the truncation of that bounding box. """ anna_file = os.path.join(directory, cur_record[0], 'Annotations', cur_record[1].replace('jpg', 'xml')) tree = xml_tree.parse(anna_file) root = tree.getroot() # Image shape. size = root.find('size') shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)] # Find annotations. bboxes = [] labels = [] labels_text = [] difficult = [] truncated = [] for obj in root.findall('object'): label = obj.find('name').text labels.append(int(dataset_common.VOC_LABELS[label][0])) labels_text.append(label.encode('ascii')) isdifficult = obj.find('difficult') if isdifficult is not None: difficult.append(int(isdifficult.text)) else: difficult.append(0) istruncated = obj.find('truncated') if istruncated is not None: truncated.append(int(istruncated.text)) else: truncated.append(0) bbox = obj.find('bndbox') bboxes.append((float(bbox.find('ymin').text) / shape[0], float(bbox.find('xmin').text) / shape[1], float(bbox.find('ymax').text) / shape[0], float(bbox.find('xmax').text) / shape[1] )) return bboxes, labels, labels_text, difficult, truncated def _process_image_files_batch(coder, thread_index, ranges, name, directory, all_records, num_shards): """Processes and saves list of images as TFRecord in 1 thread. Args: coder: instance of ImageCoder to provide TensorFlow image coding utils. thread_index: integer, unique batch to run index is within [0, len(ranges)). ranges: list of pairs of integers specifying ranges of each batches to analyze in parallel. name: string, unique identifier specifying the data set directory: string; the path of all datas all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename. num_shards: integer number of shards for this data set. """ # Each thread produces N shards where N = int(num_shards / num_threads). # For instance, if num_shards = 128, and the num_threads = 2, then the first # thread would produce shards [0, 64). num_threads = len(ranges) assert not num_shards % num_threads num_shards_per_batch = int(num_shards / num_threads) shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1], num_shards_per_batch + 1).astype(int) num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0] counter = 0 for s in range(num_shards_per_batch): # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010' shard = thread_index * num_shards_per_batch + s output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards) output_file = os.path.join(FLAGS.output_directory, output_filename) writer = tf.python_io.TFRecordWriter(output_file) shard_counter = 0 files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) for i in files_in_shard: cur_record = all_records[i] filename = os.path.join(directory, cur_record[0], 'JPEGImages', cur_record[1]) bboxes, labels, labels_text, difficult, truncated = _find_image_bounding_boxes(directory, cur_record) image_buffer, height, width = _process_image(filename, coder) example = _convert_to_example(filename, cur_record[1], image_buffer, bboxes, labels, labels_text, difficult, truncated, height, width) writer.write(example.SerializeToString()) shard_counter += 1 counter += 1 if not counter % 1000: print('%s [thread %d]: Processed %d of %d images in thread batch.' % (datetime.now(), thread_index, counter, num_files_in_thread)) sys.stdout.flush() writer.close() print('%s [thread %d]: Wrote %d images to %s' % (datetime.now(), thread_index, shard_counter, output_file)) sys.stdout.flush() shard_counter = 0 print('%s [thread %d]: Wrote %d images to %d shards.' % (datetime.now(), thread_index, counter, num_files_in_thread)) sys.stdout.flush() def _process_image_files(name, directory, all_records, num_shards): """Process and save list of images as TFRecord of Example protos. Args: name: string, unique identifier specifying the data set directory: string; the path of all datas all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename. num_shards: integer number of shards for this data set. """ # Break all images into batches with a [ranges[i][0], ranges[i][1]]. spacing = np.linspace(0, len(all_records), FLAGS.num_threads + 1).astype(np.int) ranges = [] threads = [] for i in range(len(spacing) - 1): ranges.append([spacing[i], spacing[i + 1]]) # Launch a thread for each batch. print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)) sys.stdout.flush() # Create a mechanism for monitoring when all threads are finished. coord = tf.train.Coordinator() # Create a generic TensorFlow-based utility for converting all image codings. coder = ImageCoder() threads = [] for thread_index in range(len(ranges)): args = (coder, thread_index, ranges, name, directory, all_records, num_shards) t = threading.Thread(target=_process_image_files_batch, args=args) t.start() threads.append(t) # Wait for all the threads to terminate. coord.join(threads) print('%s: Finished writing all %d images in data set.' % (datetime.now(), len(all_records))) sys.stdout.flush() def _process_dataset(name, directory, all_splits, num_shards): """Process a complete data set and save it as a TFRecord. Args: name: string, unique identifier specifying the data set. directory: string, root path to the data set. all_splits: list of strings, sub-path to the data set. num_shards: integer number of shards for this data set. 
""" all_records = [] for split in all_splits: jpeg_file_path = os.path.join(directory, split, 'JPEGImages') images = tf.gfile.ListDirectory(jpeg_file_path) jpegs = [im_name for im_name in images if im_name.strip()[-3:]=='jpg'] all_records.extend(list(zip([split] * len(jpegs), jpegs))) shuffled_index = list(range(len(all_records))) random.seed(RANDOM_SEED) random.shuffle(shuffled_index) all_records = [all_records[i] for i in shuffled_index] _process_image_files(name, directory, all_records, num_shards) def parse_comma_list(args): return [s.strip() for s in args.split(',')] def main(unused_argv): assert not FLAGS.train_shards % FLAGS.num_threads, ( 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards') assert not FLAGS.validation_shards % FLAGS.num_threads, ( 'Please make the FLAGS.num_threads commensurate with ' 'FLAGS.validation_shards') print('Saving results to %s' % FLAGS.output_directory) # Run it! _process_dataset('val', FLAGS.dataset_directory, parse_comma_list(FLAGS.validation_splits), FLAGS.validation_shards) _process_dataset('train', FLAGS.dataset_directory, parse_comma_list(FLAGS.train_splits), FLAGS.train_shards) if __name__ == '__main__': tf.app.run()
threads_init.py
import threading
import time

num_threads = 4


def thread_message(message):
    global num_threads
    num_threads -= 1
    print('Message from thread %s\n' % message)


while num_threads > 0:
    print("I am the %s thread" % num_threads)
    # Pass the callable and its argument separately; calling thread_message()
    # here would run it in the main thread and hand Thread() its return value
    # (None) as the target.
    threading.Thread(target=thread_message,
                     args=("I am the %s thread" % num_threads,)).start()
    time.sleep(0.1)
sync_config.py
import importlib import os import sys import time import datetime import threading import json from xosconfig import Config config_file = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + '/lbaas_config.yaml') Config.init(config_file, 'synchronizer-config-schema.yaml') sys.path.insert(0, "/opt/xos") import lbaas_log as slog from synchronizers.new_base.modelaccessor import * def update_lb_vip_addr(instance_id, vip_address): try: lb = Loadbalancer.objects.get(instance_id=instance_id) lb.vip_address = vip_address lb.save() except Exception as err: slog.error("%s" % str(err)) try: config = LBconfig.objects.get(instance_id=instance_id) config.ansible_update=True config.save() except Exception as err: slog.error("%s" % str(err)) for idx in range(1, 180, 1): config = LBconfig.objects.get(instance_id=instance_id) if config.ansible_update: ins = ServiceInstance.objects.get(id=instance_id) if ins.updated <= ins.enacted: ins.updated = time.time() slog.info("[idx=%s] update time(%s) of instance_id(%s)" % (idx, ins.updated, lb.instance_id)) ins.save() else: break time.sleep(1) slog.info("[Thread] lb.vip_address = %s" % lb.vip_address) def check_lb_vip_address(): while True: time.sleep(5) lbs_list = [] ports_list = [] lbs = Loadbalancer.objects.all() slog.info("[Thread] lbs.count = %s" % len(lbs)) for lb in lbs: lb_info = {} lb_info['id'] = lb.id lb_info['instance_id'] = lb.id lb_info['vip_address'] = lb.vip_address slog.info("[Thread] [Loadbalancer] lb.id=%s, lb.instance_id=%s, lb.vip=%s" \ % (lb.id, lb.instance_id, lb.vip_address)) lbs_list.append(lb_info) if len(lbs) == 0: continue ports = Port.objects.all() slog.info("[Thread] ports.count = %s" % len(ports)) for port in ports: port_info = {} port_info['instance_id'] = port.instance_id port_info['ip'] = port.ip slog.info("[Thread] [Port] port.instance_id=%s, port.ip=%s" % (port.instance_id, port.ip)) ports_list.append(port_info) for lb in lbs_list: for port in ports_list: if lb['instance_id'] == port['instance_id'] and lb['vip_address'] != port['ip']: slog.info("[Thread] instance_id=%s, lb.vip_address=%s, port.ip=%s" \ % (lb['instance_id'], lb['vip_address'], port['ip'])) update_lb_vip_addr(lb['instance_id'], port['ip']) def check_instance_status(): while True: time.sleep(5) instances = Instance.objects.all() slog.info("[Thread] instances.count = %s" % len(instances)) for ins in instances: tag="" provisioning_status="" try: tag = Tag.objects.get(object_id=ins.id, name="chk_container_status") except Exception as err: slog.error("[Thread] Error: object_id(%s) does not exist in Tag table (%s)" % (ins.id, str(err))) continue if ins.backend_status == "0 - Provisioning in progress": provisioning_status="PENDING_UPDATE" elif ins.backend_status == "1 - OK": if tag.value == "": provisioning_status="PENDING_UPDATE" else: try: userData = json.loads(tag.value) create_timestamp = time.mktime(datetime.datetime.strptime(userData['create_date'], "%Y-%m-%d %H:%M:%S").timetuple()) update_timestamp = time.mktime(datetime.datetime.strptime(userData['update_date'], "%Y-%m-%d %H:%M:%S").timetuple()) if userData['result'] == "Initialized": provisioning_status="PENDING_UPDATE" elif userData['expected_result'] != userData['result'] and (float(update_timestamp)-float(create_timestamp)) > 30: provisioning_status="ERROR" else: provisioning_status="ACTIVE" except Exception as err: slog.error("[Thread] Error: json.loads() failed (%s)" % str(err)) else: try: userData = json.loads(tag.value) create_timestamp = 
time.mktime(datetime.datetime.strptime(userData['create_date'], "%Y-%m-%d %H:%M:%S").timetuple()) update_timestamp = time.mktime(datetime.datetime.strptime(userData['update_date'], "%Y-%m-%d %H:%M:%S").timetuple()) if (float(update_timestamp)-float(create_timestamp)) < 30: provisioning_status="PENDING_UPDATE" else: provisioning_status="ERROR" except Exception as err: slog.error("[Thread] Error: json.loads() failed (%s)" % str(err)) try: lb = Loadbalancer.objects.get(tenantwithcontainer_ptr_id=ins.id) lb.provisioning_status = provisioning_status lb.save() slog.info("[Thread] id=%s, instance_name=%s, lb.provisioning_status=%s" % (ins.id, ins.instance_name, lb.provisioning_status)) except Exception as err: slog.error("[Thread] Error: id(%s) does not exist in Loadbalancer table (%s)" % (ins.id, str(err))) if __name__ == "__main__": models_active = False wait = False while not models_active: try: first_controller = Controller.objects.first() slog.debug("one of controller set: %s" % first_controller.name) first_image = Image.objects.first() slog.debug("one of image set : %s" % first_image.name) models_active = True except Exception,e: slog.info(str(e)) slog.info('Waiting for data model to come up before starting...') time.sleep(3) wait = True slog.debug("Data Model is active (first_controller: %s)" % first_controller) if (wait): time.sleep(5) # Safety factor, seeing that we stumbled waiting for the data model to come up. lb_thr = threading.Thread(target=check_lb_vip_address) lb_thr.start() ins_thr = threading.Thread(target=check_instance_status) ins_thr.start()
pyGBFsummon.py
# 批量下载GBF游戏资源-召唤石 from queue import Queue import os import time import threading import urllib.request import urllib.error import datetime import sys sys.path.append(".") import pyDownload as download dirname = os.getcwd() print_lock = threading.Lock() data_q = Queue() SAVELINK = False DEBUG = False # chara[R/SR/SSR/skin] quest[r/sr/ssr/extra] summon[n/r/sr/ssr] zoom[r/sr/ssr/skin] mypage[r/sr/ssr/skin] class cover bg chara[extra] zoom[extra] groupstack = [-1,-1,-1,-1] grouptop = [0,0,0,0] prefix1 = "http://game-a.granbluefantasy.jp/assets/img/sp/assets/summon/b/" groupstr = ["201","202","203","204"] # chara[R/SR/SSR/skin] quest[r/sr/ssr/extra] summon[n/r/sr/ssr] zoom[r/sr/ssr/skin] mypage[r/sr/ssr/skin] class cover groupdir = ["img\\summon\\N","img\\summon\\R","img\\summon\\SR","img\\summon\\SSR"] #quest extra needs big step groupstep = [20,20,20,20] grouplink = ["link\\smm-n.txt","link\\smm-r.txt","link\\smm-sr.txt","link\\smm-ssr.txt"] MaxThread = 40 def smmimglist(groupid): list = [] # 3040001000 for index in range(groupstack[groupid]+1, groupstack[groupid]+1+groupstep[groupid]): list.append(imgName(index, groupid, 2)) return list def mkdir(path): tmppath = os.getcwd()+"\\"+path try: os.makedirs(tmppath) except: pass return tmppath class imgName: id = 0 groupid = 0 suffix = 1 def __init__(self, id, groupid, suffix = 1): self.id = id self.groupid = groupid #self.dir = dir self.suffix = suffix def __str__(self): thisstr = "["+str(self.id)+","+str(self.groupid)+"]" return thisstr def saveIndex(imgData): time.sleep(0.1) with print_lock: imgName = groupstr[imgData.groupid] + str(imgData.id).zfill(4)+"000" dir = groupdir[imgData.groupid] count = 0 try: url = prefix1 + imgName +".png" #print(url) if(download.saveImg(url,dir)): count+=1 if(SAVELINK): with open(grouplink[imgData.groupid],"a") as linkfile: linkfile.write(url+"\n") except: pass #update logic if(count >0): if(imgData.id > groupstack[imgData.groupid]): print("update list " + groupdir[imgData.groupid]) groupstack[imgData.groupid] += groupstep[imgData.groupid] simglist = [] simglist = smmimglist(imgData.groupid) for iimg in simglist: data_q.put(iimg) simglist.clear() if(imgData.id>grouptop[imgData.groupid]): grouptop[imgData.groupid] = imgData.id def worker(): while True: imgData1 = data_q.get() #print(imgData1) saveIndex(imgData1) data_q.task_done() def main(): #socket.setdefaulttimeout(10) if(sys.version_info.major != 3): print("This script only works for python3") return try: logdata = "" with open("img\\summon\\log.txt") as logfile: lines = logfile.readlines() logdata = lines[1] if (logdata != ""): data = logdata.split(',') numgroup = len(groupstack) + 1 if (len(data) == numgroup): print("download start from latest") for i in range(0, numgroup): groupstack[i] = int(data[i]) grouptop[i] = int(data[i]) except: pass for x in range(MaxThread): t = threading.Thread(target = worker) t.daemon = True t.start() for idir in groupdir: mkdir(idir) mkdir("link") start = time.time() simglist = [] # summon stone for index in range(0,4): simglist = smmimglist(index) for iimg in simglist: data_q.put(iimg) simglist.clear() data_q.join() print("entire job took:", time.time()-start) # today = str(datetime.date.today()) with open("img\\summon\\log.txt", "w", encoding='utf-8') as logfile: istr = "summon[n/r/sr/ssr]\n" logfile.write(istr) for ilog in grouptop: istr = str(ilog)+"," logfile.write(istr) logfile.write("\n") if __name__ == '__main__': main() os.system("pause") #appendix #image set #character origin zoom #skin #3710001000 # 
http://game-a.granbluefantasy.jp/assets/img/sp/assets/npc/zoom/3040010000_01.png #http://game-a.granbluefantasy.jp/assets/img/sp/assets/npc/b/3030007000_01.png #class #http://game-a1.granbluefantasy.jp/assets/img/sp/assets/leader/job_change/120001_wa_1_01.png #http://game-a1.granbluefantasy.jp/assets/img/sp/cjs/job_release_180001_1_c.png #quest character 2 3 4 99 #http://game-a1.granbluefantasy.jp/assets/img/sp/quest/scene/character/body/3040022000.png #summon 1 2 3 4 #http://game-a.granbluefantasy.jp/assets/img/sp/assets/summon/b/2030011000.png #mypage class&sr #http://game-a1.granbluefantasy.jp/assets/img/sp/assets/npc/my/3040058000_02.png #http://game-a1.granbluefantasy.jp/assets/img/sp/assets/leader/my/140201_kn_1_01.png #not used #http://game-a1.granbluefantasy.jp/assets/img/sp/assets/npc/npc_evolution/main/3040071000_02.png
client_length_mt_fixed_char.py
#!/usr/bin/python import getopt import socket import sys import datetime import threading MAX_LENGTH = 4096 #HOST = '1.1.2.20' #HOST = '1.1.1.1' #HOST = '172.16.0.14' def length_test(hostIPAddr, threadId): HOST = hostIPAddr PORT = 5001 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect( (HOST, PORT) ) str_send = '' char = str( threadId % 10 ) for i in range(535): #print 'Thread', threadId, 'Testing length', i #generating outgoing string str_send += char #if char == 9: # char = 0 #else: # char += 1 start = datetime.datetime.now() #print( str_send ) s.sendall( str_send ) #receiving data recv_bytes = 0 recv_data = '' data = '' while recv_bytes != i+1: data = s.recv(MAX_LENGTH) recv_bytes += len( data ) recv_data += data if recv_bytes > i+1: print 'Thread %d ERROR: Receiving unexpected data, i = %d, recv_bytes = %d' % (threadId, i, recv_bytes) s.close() sys.exit() #verifying data if recv_data != str_send: print 'Thread %d ERROR: Data error in length %d: \nstr_send:\n%s \nrecv_data: \n%s' % (threadId, i, str_send, recv_data) s.close() sys.exit() stop = datetime.datetime.now() diff = stop - start #print diff.microseconds, 'us' print 'Thread %d Exiting...' % (threadId) s.close() def usage(): print str(sys.argv[0]), '[--ip=1.1.1.1] [--numThread=2] [-h]' def main(): try: opts, args = getopt.getopt( sys.argv[1:], 'h', ["ip=", "numThread="] ) except getopt.GetoptError as err: print str(err) usage() sys.exit(2) hostIPAddr = '1.1.1.1' numThread = 2 for (o, a) in opts: if o == "--ip": hostIPAddr = a elif o == "--numThread": numThread = int(a) elif o == "-h": usage() sys.exit() # while 1: #print hostIPAddr, numThread threads = [] for i in range(numThread): t = threading.Thread(target=length_test, args=(hostIPAddr, i) ) threads.append(t) start = datetime.datetime.now() for i in range(numThread): threads[i].start() for i in range(numThread): threads[i].join() stop = datetime.datetime.now() diff = stop - start print numThread, 'threads run for', diff.seconds, 's' if __name__ == '__main__': main()
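The client above assumes a peer on port 5001 that echoes every byte back; for local testing, a minimal Python 3 echo-server sketch along those lines (the port and buffer size mirror the client's hard-coded values, everything else is an assumption).

import socket
import threading

def handle(conn):
    with conn:
        while True:
            data = conn.recv(4096)
            if not data:
                break
            conn.sendall(data)  # echo everything straight back

def serve(host='0.0.0.0', port=5001):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind((host, port))
        srv.listen()
        while True:
            conn, _ = srv.accept()
            threading.Thread(target=handle, args=(conn,), daemon=True).start()

if __name__ == '__main__':
    serve()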
test_ssl.py
# Test the support for SSL and sockets import sys import unittest import unittest.mock from test import support import socket import select import time import datetime import gc import os import errno import pprint import urllib.request import threading import traceback import asyncore import weakref import platform import sysconfig import functools try: import ctypes except ImportError: ctypes = None ssl = support.import_module("ssl") from ssl import TLSVersion, _TLSContentType, _TLSMessageType PROTOCOLS = sorted(ssl._PROTOCOL_NAMES) HOST = support.HOST IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL') IS_OPENSSL_1_1_0 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0) IS_OPENSSL_1_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1) PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS') PROTOCOL_TO_TLS_VERSION = {} for proto, ver in ( ("PROTOCOL_SSLv23", "SSLv3"), ("PROTOCOL_TLSv1", "TLSv1"), ("PROTOCOL_TLSv1_1", "TLSv1_1"), ): try: proto = getattr(ssl, proto) ver = getattr(ssl.TLSVersion, ver) except AttributeError: continue PROTOCOL_TO_TLS_VERSION[proto] = ver def data_file(*name): return os.path.join(os.path.dirname(__file__), *name) # The custom key and certificate files used in test_ssl are generated # using Lib/test/make_ssl_certs.py. # Other certificates are simply fetched from the Internet servers they # are meant to authenticate. CERTFILE = data_file("keycert.pem") BYTES_CERTFILE = os.fsencode(CERTFILE) ONLYCERT = data_file("ssl_cert.pem") ONLYKEY = data_file("ssl_key.pem") BYTES_ONLYCERT = os.fsencode(ONLYCERT) BYTES_ONLYKEY = os.fsencode(ONLYKEY) CERTFILE_PROTECTED = data_file("keycert.passwd.pem") ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem") KEY_PASSWORD = "somepass" CAPATH = data_file("capath") BYTES_CAPATH = os.fsencode(CAPATH) CAFILE_NEURONIO = data_file("capath", "4e1295a3.0") CAFILE_CACERT = data_file("capath", "5ed36f99.0") CERTFILE_INFO = { 'issuer': ((('countryName', 'XY'),), (('localityName', 'Castle Anthrax'),), (('organizationName', 'Python Software Foundation'),), (('commonName', 'localhost'),)), 'notAfter': 'Aug 26 14:23:15 2028 GMT', 'notBefore': 'Aug 29 14:23:15 2018 GMT', 'serialNumber': '98A7CF88C74A32ED', 'subject': ((('countryName', 'XY'),), (('localityName', 'Castle Anthrax'),), (('organizationName', 'Python Software Foundation'),), (('commonName', 'localhost'),)), 'subjectAltName': (('DNS', 'localhost'),), 'version': 3 } # empty CRL CRLFILE = data_file("revocation.crl") # Two keys and certs signed by the same CA (for SNI tests) SIGNED_CERTFILE = data_file("keycert3.pem") SIGNED_CERTFILE_HOSTNAME = 'localhost' SIGNED_CERTFILE_INFO = { 'OCSP': ('http://testca.pythontest.net/testca/ocsp/',), 'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',), 'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',), 'issuer': ((('countryName', 'XY'),), (('organizationName', 'Python Software Foundation CA'),), (('commonName', 'our-ca-server'),)), 'notAfter': 'Jul 7 14:23:16 2028 GMT', 'notBefore': 'Aug 29 14:23:16 2018 GMT', 'serialNumber': 'CB2D80995A69525C', 'subject': ((('countryName', 'XY'),), (('localityName', 'Castle Anthrax'),), (('organizationName', 'Python Software Foundation'),), (('commonName', 'localhost'),)), 'subjectAltName': (('DNS', 'localhost'),), 'version': 3 } SIGNED_CERTFILE2 = data_file("keycert4.pem") SIGNED_CERTFILE2_HOSTNAME = 'fakehostname' SIGNED_CERTFILE_ECC = data_file("keycertecc.pem") SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc' # Same certificate as 
pycacert.pem, but without extra text in file SIGNING_CA = data_file("capath", "ceff1710.0") # cert with all kinds of subject alt names ALLSANFILE = data_file("allsans.pem") IDNSANSFILE = data_file("idnsans.pem") REMOTE_HOST = "self-signed.pythontest.net" EMPTYCERT = data_file("nullcert.pem") BADCERT = data_file("badcert.pem") NONEXISTINGCERT = data_file("XXXnonexisting.pem") BADKEY = data_file("badkey.pem") NOKIACERT = data_file("nokia.pem") NULLBYTECERT = data_file("nullbytecert.pem") TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem") DHFILE = data_file("ffdh3072.pem") BYTES_DHFILE = os.fsencode(DHFILE) # Not defined in all versions of OpenSSL OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0) OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0) OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0) OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0) OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0) def has_tls_protocol(protocol): """Check if a TLS protocol is available and enabled :param protocol: enum ssl._SSLMethod member or name :return: bool """ if isinstance(protocol, str): assert protocol.startswith('PROTOCOL_') protocol = getattr(ssl, protocol, None) if protocol is None: return False if protocol in { ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT }: # auto-negotiate protocols are always available return True name = protocol.name return has_tls_version(name[len('PROTOCOL_'):]) @functools.lru_cache def has_tls_version(version): """Check if a TLS/SSL version is enabled :param version: TLS version name or ssl.TLSVersion member :return: bool """ if version == "SSLv2": # never supported and not even in TLSVersion enum return False if isinstance(version, str): version = ssl.TLSVersion.__members__[version] # check compile time flags like ssl.HAS_TLSv1_2 if not getattr(ssl, f'HAS_{version.name}'): return False # check runtime and dynamic crypto policy settings. A TLS version may # be compiled in but disabled by a policy or config option. 
ctx = ssl.SSLContext() if ( hasattr(ctx, 'minimum_version') and ctx.minimum_version != ssl.TLSVersion.MINIMUM_SUPPORTED and version < ctx.minimum_version ): return False if ( hasattr(ctx, 'maximum_version') and ctx.maximum_version != ssl.TLSVersion.MAXIMUM_SUPPORTED and version > ctx.maximum_version ): return False return True def requires_tls_version(version): """Decorator to skip tests when a required TLS version is not available :param version: TLS version name or ssl.TLSVersion member :return: """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kw): if not has_tls_version(version): raise unittest.SkipTest(f"{version} is not available.") else: return func(*args, **kw) return wrapper return decorator requires_minimum_version = unittest.skipUnless( hasattr(ssl.SSLContext, 'minimum_version'), "required OpenSSL >= 1.1.0g" ) def handle_error(prefix): exc_format = ' '.join(traceback.format_exception(*sys.exc_info())) if support.verbose: sys.stdout.write(prefix + exc_format) def can_clear_options(): # 0.9.8m or higher return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15) def no_sslv2_implies_sslv3_hello(): # 0.9.7h or higher return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15) def have_verify_flags(): # 0.9.8 or higher return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15) def _have_secp_curves(): if not ssl.HAS_ECDH: return False ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) try: ctx.set_ecdh_curve("secp384r1") except ValueError: return False else: return True HAVE_SECP_CURVES = _have_secp_curves() def utc_offset(): #NOTE: ignore issues like #1647654 # local time = utc time + utc offset if time.daylight and time.localtime().tm_isdst > 0: return -time.altzone # seconds return -time.timezone def asn1time(cert_time): # Some versions of OpenSSL ignore seconds, see #18207 # 0.9.8.i if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15): fmt = "%b %d %H:%M:%S %Y GMT" dt = datetime.datetime.strptime(cert_time, fmt) dt = dt.replace(second=0) cert_time = dt.strftime(fmt) # %d adds leading zero but ASN1_TIME_print() uses leading space if cert_time[4] == "0": cert_time = cert_time[:4] + " " + cert_time[5:] return cert_time needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test") def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *, cert_reqs=ssl.CERT_NONE, ca_certs=None, ciphers=None, certfile=None, keyfile=None, **kwargs): context = ssl.SSLContext(ssl_version) if cert_reqs is not None: if cert_reqs == ssl.CERT_NONE: context.check_hostname = False context.verify_mode = cert_reqs if ca_certs is not None: context.load_verify_locations(ca_certs) if certfile is not None or keyfile is not None: context.load_cert_chain(certfile, keyfile) if ciphers is not None: context.set_ciphers(ciphers) return context.wrap_socket(sock, **kwargs) def testing_context(server_cert=SIGNED_CERTFILE): """Create context client_context, server_context, hostname = testing_context() """ if server_cert == SIGNED_CERTFILE: hostname = SIGNED_CERTFILE_HOSTNAME elif server_cert == SIGNED_CERTFILE2: hostname = SIGNED_CERTFILE2_HOSTNAME else: raise ValueError(server_cert) client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) client_context.load_verify_locations(SIGNING_CA) server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) server_context.load_cert_chain(server_cert) server_context.load_verify_locations(SIGNING_CA) return client_context, server_context, hostname class BasicSocketTests(unittest.TestCase): def test_constants(self): ssl.CERT_NONE ssl.CERT_OPTIONAL ssl.CERT_REQUIRED 
ssl.OP_CIPHER_SERVER_PREFERENCE ssl.OP_SINGLE_DH_USE if ssl.HAS_ECDH: ssl.OP_SINGLE_ECDH_USE if ssl.OPENSSL_VERSION_INFO >= (1, 0): ssl.OP_NO_COMPRESSION self.assertIn(ssl.HAS_SNI, {True, False}) self.assertIn(ssl.HAS_ECDH, {True, False}) ssl.OP_NO_SSLv2 ssl.OP_NO_SSLv3 ssl.OP_NO_TLSv1 ssl.OP_NO_TLSv1_3 if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1): ssl.OP_NO_TLSv1_1 ssl.OP_NO_TLSv1_2 self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23) def test_private_init(self): with self.assertRaisesRegex(TypeError, "public constructor"): with socket.socket() as s: ssl.SSLSocket(s) def test_str_for_enums(self): # Make sure that the PROTOCOL_* constants have enum-like string # reprs. proto = ssl.PROTOCOL_TLS self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS') ctx = ssl.SSLContext(proto) self.assertIs(ctx.protocol, proto) def test_random(self): v = ssl.RAND_status() if support.verbose: sys.stdout.write("\n RAND_status is %d (%s)\n" % (v, (v and "sufficient randomness") or "insufficient randomness")) data, is_cryptographic = ssl.RAND_pseudo_bytes(16) self.assertEqual(len(data), 16) self.assertEqual(is_cryptographic, v == 1) if v: data = ssl.RAND_bytes(16) self.assertEqual(len(data), 16) else: self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16) # negative num is invalid self.assertRaises(ValueError, ssl.RAND_bytes, -5) self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5) if hasattr(ssl, 'RAND_egd'): self.assertRaises(TypeError, ssl.RAND_egd, 1) self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1) ssl.RAND_add("this is a random string", 75.0) ssl.RAND_add(b"this is a random bytes object", 75.0) ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0) @unittest.skipUnless(os.name == 'posix', 'requires posix') def test_random_fork(self): status = ssl.RAND_status() if not status: self.fail("OpenSSL's PRNG has insufficient randomness") rfd, wfd = os.pipe() pid = os.fork() if pid == 0: try: os.close(rfd) child_random = ssl.RAND_pseudo_bytes(16)[0] self.assertEqual(len(child_random), 16) os.write(wfd, child_random) os.close(wfd) except BaseException: os._exit(1) else: os._exit(0) else: os.close(wfd) self.addCleanup(os.close, rfd) _, status = os.waitpid(pid, 0) self.assertEqual(status, 0) child_random = os.read(rfd, 16) self.assertEqual(len(child_random), 16) parent_random = ssl.RAND_pseudo_bytes(16)[0] self.assertEqual(len(parent_random), 16) self.assertNotEqual(child_random, parent_random) maxDiff = None def test_parse_cert(self): # note that this uses an 'unofficial' function in _ssl.c, # provided solely for this test, to exercise the certificate # parsing code self.assertEqual( ssl._ssl._test_decode_cert(CERTFILE), CERTFILE_INFO ) self.assertEqual( ssl._ssl._test_decode_cert(SIGNED_CERTFILE), SIGNED_CERTFILE_INFO ) # Issue #13034: the subjectAltName in some certificates # (notably projects.developer.nokia.com:443) wasn't parsed p = ssl._ssl._test_decode_cert(NOKIACERT) if support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['subjectAltName'], (('DNS', 'projects.developer.nokia.com'), ('DNS', 'projects.forum.nokia.com')) ) # extra OCSP and AIA fields self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',)) self.assertEqual(p['caIssuers'], ('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',)) self.assertEqual(p['crlDistributionPoints'], ('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',)) def test_parse_cert_CVE_2019_5010(self): p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP) if support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") 
self.assertEqual( p, { 'issuer': ( (('countryName', 'UK'),), (('commonName', 'cody-ca'),)), 'notAfter': 'Jun 14 18:00:58 2028 GMT', 'notBefore': 'Jun 18 18:00:58 2018 GMT', 'serialNumber': '02', 'subject': ((('countryName', 'UK'),), (('commonName', 'codenomicon-vm-2.test.lal.cisco.com'),)), 'subjectAltName': ( ('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),), 'version': 3 } ) def test_parse_cert_CVE_2013_4238(self): p = ssl._ssl._test_decode_cert(NULLBYTECERT) if support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") subject = ((('countryName', 'US'),), (('stateOrProvinceName', 'Oregon'),), (('localityName', 'Beaverton'),), (('organizationName', 'Python Software Foundation'),), (('organizationalUnitName', 'Python Core Development'),), (('commonName', 'null.python.org\x00example.org'),), (('emailAddress', 'python-dev@python.org'),)) self.assertEqual(p['subject'], subject) self.assertEqual(p['issuer'], subject) if ssl._OPENSSL_API_VERSION >= (0, 9, 8): san = (('DNS', 'altnull.python.org\x00example.com'), ('email', 'null@python.org\x00user@example.org'), ('URI', 'http://null.python.org\x00http://example.org'), ('IP Address', '192.0.2.1'), ('IP Address', '2001:DB8:0:0:0:0:0:1\n')) else: # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName san = (('DNS', 'altnull.python.org\x00example.com'), ('email', 'null@python.org\x00user@example.org'), ('URI', 'http://null.python.org\x00http://example.org'), ('IP Address', '192.0.2.1'), ('IP Address', '<invalid>')) self.assertEqual(p['subjectAltName'], san) def test_parse_all_sans(self): p = ssl._ssl._test_decode_cert(ALLSANFILE) self.assertEqual(p['subjectAltName'], ( ('DNS', 'allsans'), ('othername', '<unsupported>'), ('othername', '<unsupported>'), ('email', 'user@example.org'), ('DNS', 'www.example.org'), ('DirName', ((('countryName', 'XY'),), (('localityName', 'Castle Anthrax'),), (('organizationName', 'Python Software Foundation'),), (('commonName', 'dirname example'),))), ('URI', 'https://www.python.org/'), ('IP Address', '127.0.0.1'), ('IP Address', '0:0:0:0:0:0:0:1\n'), ('Registered ID', '1.2.3.4.5') ) ) def test_DER_to_PEM(self): with open(CAFILE_CACERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) d2 = ssl.PEM_cert_to_DER_cert(p2) self.assertEqual(d1, d2) if not p2.startswith(ssl.PEM_HEADER + '\n'): self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2) if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'): self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2) def test_openssl_version(self): n = ssl.OPENSSL_VERSION_NUMBER t = ssl.OPENSSL_VERSION_INFO s = ssl.OPENSSL_VERSION self.assertIsInstance(n, int) self.assertIsInstance(t, tuple) self.assertIsInstance(s, str) # Some sanity checks follow # >= 0.9 self.assertGreaterEqual(n, 0x900000) # < 3.0 self.assertLess(n, 0x30000000) major, minor, fix, patch, status = t self.assertGreaterEqual(major, 0) self.assertLess(major, 3) self.assertGreaterEqual(minor, 0) self.assertLess(minor, 256) self.assertGreaterEqual(fix, 0) self.assertLess(fix, 256) self.assertGreaterEqual(patch, 0) self.assertLessEqual(patch, 63) self.assertGreaterEqual(status, 0) self.assertLessEqual(status, 15) # Version string as returned by {Open,Libre}SSL, the format might change if IS_LIBRESSL: self.assertTrue(s.startswith("LibreSSL {:d}".format(major)), (s, t, hex(n))) else: self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)), (s, t, hex(n))) @support.cpython_only def test_refcycle(self): # Issue #7943: an SSL 
object doesn't create reference cycles with # itself. s = socket.socket(socket.AF_INET) ss = test_wrap_socket(s) wr = weakref.ref(ss) with support.check_warnings(("", ResourceWarning)): del ss self.assertEqual(wr(), None) def test_wrapped_unconnected(self): # Methods on an unconnected SSLSocket propagate the original # OSError raise by the underlying socket object. s = socket.socket(socket.AF_INET) with test_wrap_socket(s) as ss: self.assertRaises(OSError, ss.recv, 1) self.assertRaises(OSError, ss.recv_into, bytearray(b'x')) self.assertRaises(OSError, ss.recvfrom, 1) self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1) self.assertRaises(OSError, ss.send, b'x') self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0)) self.assertRaises(NotImplementedError, ss.dup) self.assertRaises(NotImplementedError, ss.sendmsg, [b'x'], (), 0, ('0.0.0.0', 0)) self.assertRaises(NotImplementedError, ss.recvmsg, 100) self.assertRaises(NotImplementedError, ss.recvmsg_into, [bytearray(100)]) def test_timeout(self): # Issue #8524: when creating an SSL socket, the timeout of the # original socket should be retained. for timeout in (None, 0.0, 5.0): s = socket.socket(socket.AF_INET) s.settimeout(timeout) with test_wrap_socket(s) as ss: self.assertEqual(timeout, ss.gettimeout()) def test_errors_sslwrap(self): sock = socket.socket() self.assertRaisesRegex(ValueError, "certfile must be specified", ssl.wrap_socket, sock, keyfile=CERTFILE) self.assertRaisesRegex(ValueError, "certfile must be specified for server-side operations", ssl.wrap_socket, sock, server_side=True) self.assertRaisesRegex(ValueError, "certfile must be specified for server-side operations", ssl.wrap_socket, sock, server_side=True, certfile="") with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s: self.assertRaisesRegex(ValueError, "can't connect in server-side mode", s.connect, (HOST, 8080)) with self.assertRaises(OSError) as cm: with socket.socket() as sock: ssl.wrap_socket(sock, certfile=NONEXISTINGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaises(OSError) as cm: with socket.socket() as sock: ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=NONEXISTINGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaises(OSError) as cm: with socket.socket() as sock: ssl.wrap_socket(sock, certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) def bad_cert_test(self, certfile): """Check that trying to use the given client certificate fails""" certfile = os.path.join(os.path.dirname(__file__) or os.curdir, certfile) sock = socket.socket() self.addCleanup(sock.close) with self.assertRaises(ssl.SSLError): test_wrap_socket(sock, certfile=certfile) def test_empty_cert(self): """Wrapping with an empty cert file""" self.bad_cert_test("nullcert.pem") def test_malformed_cert(self): """Wrapping with a badly formatted certificate (syntax error)""" self.bad_cert_test("badcert.pem") def test_malformed_key(self): """Wrapping with a badly formatted key (syntax error)""" self.bad_cert_test("badkey.pem") def test_match_hostname(self): def ok(cert, hostname): ssl.match_hostname(cert, hostname) def fail(cert, hostname): self.assertRaises(ssl.CertificateError, ssl.match_hostname, cert, hostname) # -- Hostname matching -- cert = {'subject': ((('commonName', 'example.com'),),)} ok(cert, 'example.com') ok(cert, 'ExAmple.cOm') fail(cert, 'www.example.com') fail(cert, '.example.com') fail(cert, 'example.org') fail(cert, 'exampleXcom') cert = {'subject': 
((('commonName', '*.a.com'),),)} ok(cert, 'foo.a.com') fail(cert, 'bar.foo.a.com') fail(cert, 'a.com') fail(cert, 'Xa.com') fail(cert, '.a.com') # only match wildcards when they are the only thing # in left-most segment cert = {'subject': ((('commonName', 'f*.com'),),)} fail(cert, 'foo.com') fail(cert, 'f.com') fail(cert, 'bar.com') fail(cert, 'foo.a.com') fail(cert, 'bar.foo.com') # NULL bytes are bad, CVE-2013-4073 cert = {'subject': ((('commonName', 'null.python.org\x00example.org'),),)} ok(cert, 'null.python.org\x00example.org') # or raise an error? fail(cert, 'example.org') fail(cert, 'null.python.org') # error cases with wildcards cert = {'subject': ((('commonName', '*.*.a.com'),),)} fail(cert, 'bar.foo.a.com') fail(cert, 'a.com') fail(cert, 'Xa.com') fail(cert, '.a.com') cert = {'subject': ((('commonName', 'a.*.com'),),)} fail(cert, 'a.foo.com') fail(cert, 'a..com') fail(cert, 'a.com') # wildcard doesn't match IDNA prefix 'xn--' idna = 'püthon.python.org'.encode("idna").decode("ascii") cert = {'subject': ((('commonName', idna),),)} ok(cert, idna) cert = {'subject': ((('commonName', 'x*.python.org'),),)} fail(cert, idna) cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)} fail(cert, idna) # wildcard in first fragment and IDNA A-labels in sequent fragments # are supported. idna = 'www*.pythön.org'.encode("idna").decode("ascii") cert = {'subject': ((('commonName', idna),),)} fail(cert, 'www.pythön.org'.encode("idna").decode("ascii")) fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii")) fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii")) fail(cert, 'pythön.org'.encode("idna").decode("ascii")) # Slightly fake real-world example cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT', 'subject': ((('commonName', 'linuxfrz.org'),),), 'subjectAltName': (('DNS', 'linuxfr.org'), ('DNS', 'linuxfr.com'), ('othername', '<unsupported>'))} ok(cert, 'linuxfr.org') ok(cert, 'linuxfr.com') # Not a "DNS" entry fail(cert, '<unsupported>') # When there is a subjectAltName, commonName isn't used fail(cert, 'linuxfrz.org') # A pristine real-world example cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT', 'subject': ((('countryName', 'US'),), (('stateOrProvinceName', 'California'),), (('localityName', 'Mountain View'),), (('organizationName', 'Google Inc'),), (('commonName', 'mail.google.com'),))} ok(cert, 'mail.google.com') fail(cert, 'gmail.com') # Only commonName is considered fail(cert, 'California') # -- IPv4 matching -- cert = {'subject': ((('commonName', 'example.com'),),), 'subjectAltName': (('DNS', 'example.com'), ('IP Address', '10.11.12.13'), ('IP Address', '14.15.16.17'), ('IP Address', '127.0.0.1'))} ok(cert, '10.11.12.13') ok(cert, '14.15.16.17') # socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1' fail(cert, '127.1') fail(cert, '14.15.16.17 ') fail(cert, '14.15.16.17 extra data') fail(cert, '14.15.16.18') fail(cert, 'example.net') # -- IPv6 matching -- if support.IPV6_ENABLED: cert = {'subject': ((('commonName', 'example.com'),),), 'subjectAltName': ( ('DNS', 'example.com'), ('IP Address', '2001:0:0:0:0:0:0:CAFE\n'), ('IP Address', '2003:0:0:0:0:0:0:BABA\n'))} ok(cert, '2001::cafe') ok(cert, '2003::baba') fail(cert, '2003::baba ') fail(cert, '2003::baba extra data') fail(cert, '2003::bebe') fail(cert, 'example.net') # -- Miscellaneous -- # Neither commonName nor subjectAltName cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT', 'subject': ((('countryName', 'US'),), (('stateOrProvinceName', 'California'),), (('localityName', 'Mountain View'),), (('organizationName', 'Google 
Inc'),))} fail(cert, 'mail.google.com') # No DNS entry in subjectAltName but a commonName cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT', 'subject': ((('countryName', 'US'),), (('stateOrProvinceName', 'California'),), (('localityName', 'Mountain View'),), (('commonName', 'mail.google.com'),)), 'subjectAltName': (('othername', 'blabla'), )} ok(cert, 'mail.google.com') # No DNS entry subjectAltName and no commonName cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT', 'subject': ((('countryName', 'US'),), (('stateOrProvinceName', 'California'),), (('localityName', 'Mountain View'),), (('organizationName', 'Google Inc'),)), 'subjectAltName': (('othername', 'blabla'),)} fail(cert, 'google.com') # Empty cert / no cert self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com') self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com') # Issue #17980: avoid denials of service by refusing more than one # wildcard per fragment. cert = {'subject': ((('commonName', 'a*b.example.com'),),)} with self.assertRaisesRegex( ssl.CertificateError, "partial wildcards in leftmost label are not supported"): ssl.match_hostname(cert, 'axxb.example.com') cert = {'subject': ((('commonName', 'www.*.example.com'),),)} with self.assertRaisesRegex( ssl.CertificateError, "wildcard can only be present in the leftmost label"): ssl.match_hostname(cert, 'www.sub.example.com') cert = {'subject': ((('commonName', 'a*b*.example.com'),),)} with self.assertRaisesRegex( ssl.CertificateError, "too many wildcards"): ssl.match_hostname(cert, 'axxbxxc.example.com') cert = {'subject': ((('commonName', '*'),),)} with self.assertRaisesRegex( ssl.CertificateError, "sole wildcard without additional labels are not support"): ssl.match_hostname(cert, 'host') cert = {'subject': ((('commonName', '*.com'),),)} with self.assertRaisesRegex( ssl.CertificateError, r"hostname 'com' doesn't match '\*.com'"): ssl.match_hostname(cert, 'com') # extra checks for _inet_paton() for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']: with self.assertRaises(ValueError): ssl._inet_paton(invalid) for ipaddr in ['127.0.0.1', '192.168.0.1']: self.assertTrue(ssl._inet_paton(ipaddr)) if support.IPV6_ENABLED: for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']: self.assertTrue(ssl._inet_paton(ipaddr)) def test_server_side(self): # server_hostname doesn't work for server sockets ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) with socket.socket() as sock: self.assertRaises(ValueError, ctx.wrap_socket, sock, True, server_hostname="some.hostname") def test_unknown_channel_binding(self): # should raise ValueError for unknown type s = socket.create_server(('127.0.0.1', 0)) c = socket.socket(socket.AF_INET) c.connect(s.getsockname()) with test_wrap_socket(c, do_handshake_on_connect=False) as ss: with self.assertRaises(ValueError): ss.get_channel_binding("unknown-type") s.close() @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES, "'tls-unique' channel binding not available") def test_tls_unique_channel_binding(self): # unconnected should return None for known type s = socket.socket(socket.AF_INET) with test_wrap_socket(s) as ss: self.assertIsNone(ss.get_channel_binding("tls-unique")) # the same for server-side s = socket.socket(socket.AF_INET) with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss: self.assertIsNone(ss.get_channel_binding("tls-unique")) def test_dealloc_warn(self): ss = test_wrap_socket(socket.socket(socket.AF_INET)) r = repr(ss) with self.assertWarns(ResourceWarning) as cm: ss = None support.gc_collect() 
self.assertIn(r, str(cm.warning.args[0])) def test_get_default_verify_paths(self): paths = ssl.get_default_verify_paths() self.assertEqual(len(paths), 6) self.assertIsInstance(paths, ssl.DefaultVerifyPaths) with support.EnvironmentVarGuard() as env: env["SSL_CERT_DIR"] = CAPATH env["SSL_CERT_FILE"] = CERTFILE paths = ssl.get_default_verify_paths() self.assertEqual(paths.cafile, CERTFILE) self.assertEqual(paths.capath, CAPATH) @unittest.skipUnless(sys.platform == "win32", "Windows specific") def test_enum_certificates(self): self.assertTrue(ssl.enum_certificates("CA")) self.assertTrue(ssl.enum_certificates("ROOT")) self.assertRaises(TypeError, ssl.enum_certificates) self.assertRaises(WindowsError, ssl.enum_certificates, "") trust_oids = set() for storename in ("CA", "ROOT"): store = ssl.enum_certificates(storename) self.assertIsInstance(store, list) for element in store: self.assertIsInstance(element, tuple) self.assertEqual(len(element), 3) cert, enc, trust = element self.assertIsInstance(cert, bytes) self.assertIn(enc, {"x509_asn", "pkcs_7_asn"}) self.assertIsInstance(trust, (frozenset, set, bool)) if isinstance(trust, (frozenset, set)): trust_oids.update(trust) serverAuth = "1.3.6.1.5.5.7.3.1" self.assertIn(serverAuth, trust_oids) @unittest.skipUnless(sys.platform == "win32", "Windows specific") def test_enum_crls(self): self.assertTrue(ssl.enum_crls("CA")) self.assertRaises(TypeError, ssl.enum_crls) self.assertRaises(WindowsError, ssl.enum_crls, "") crls = ssl.enum_crls("CA") self.assertIsInstance(crls, list) for element in crls: self.assertIsInstance(element, tuple) self.assertEqual(len(element), 2) self.assertIsInstance(element[0], bytes) self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"}) def test_asn1object(self): expected = (129, 'serverAuth', 'TLS Web Server Authentication', '1.3.6.1.5.5.7.3.1') val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1') self.assertEqual(val, expected) self.assertEqual(val.nid, 129) self.assertEqual(val.shortname, 'serverAuth') self.assertEqual(val.longname, 'TLS Web Server Authentication') self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1') self.assertIsInstance(val, ssl._ASN1Object) self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth') val = ssl._ASN1Object.fromnid(129) self.assertEqual(val, expected) self.assertIsInstance(val, ssl._ASN1Object) self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1) with self.assertRaisesRegex(ValueError, "unknown NID 100000"): ssl._ASN1Object.fromnid(100000) for i in range(1000): try: obj = ssl._ASN1Object.fromnid(i) except ValueError: pass else: self.assertIsInstance(obj.nid, int) self.assertIsInstance(obj.shortname, str) self.assertIsInstance(obj.longname, str) self.assertIsInstance(obj.oid, (str, type(None))) val = ssl._ASN1Object.fromname('TLS Web Server Authentication') self.assertEqual(val, expected) self.assertIsInstance(val, ssl._ASN1Object) self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected) self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'), expected) with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"): ssl._ASN1Object.fromname('serverauth') def test_purpose_enum(self): val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1') self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object) self.assertEqual(ssl.Purpose.SERVER_AUTH, val) self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129) self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth') self.assertEqual(ssl.Purpose.SERVER_AUTH.oid, '1.3.6.1.5.5.7.3.1') val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2') 
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object) self.assertEqual(ssl.Purpose.CLIENT_AUTH, val) self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130) self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth') self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid, '1.3.6.1.5.5.7.3.2') def test_unsupported_dtls(self): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.addCleanup(s.close) with self.assertRaises(NotImplementedError) as cx: test_wrap_socket(s, cert_reqs=ssl.CERT_NONE) self.assertEqual(str(cx.exception), "only stream sockets are supported") ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) with self.assertRaises(NotImplementedError) as cx: ctx.wrap_socket(s) self.assertEqual(str(cx.exception), "only stream sockets are supported") def cert_time_ok(self, timestring, timestamp): self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp) def cert_time_fail(self, timestring): with self.assertRaises(ValueError): ssl.cert_time_to_seconds(timestring) @unittest.skipUnless(utc_offset(), 'local time needs to be different from UTC') def test_cert_time_to_seconds_timezone(self): # Issue #19940: ssl.cert_time_to_seconds() returns wrong # results if local timezone is not UTC self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0) self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0) def test_cert_time_to_seconds(self): timestring = "Jan 5 09:34:43 2018 GMT" ts = 1515144883.0 self.cert_time_ok(timestring, ts) # accept keyword parameter, assert its name self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts) # accept both %e and %d (space or zero generated by strftime) self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts) # case-insensitive self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts) self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute newyear_ts = 1230768000.0 # leap seconds self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts) # same timestamp self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts) self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899) # allow 60th second (even if it is not a leap second) self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900) # allow 2nd leap second for compatibility with time.strptime() self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901) self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds # no special treatment for the special value: # 99991231235959Z (rfc 5280) self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0) @support.run_with_locale('LC_ALL', '') def test_cert_time_to_seconds_locale(self): # `cert_time_to_seconds()` should be locale independent def local_february_name(): return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0)) if local_february_name().lower() == 'feb': self.skipTest("locale-specific month name needs to be " "different from C locale") # locale-independent self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0) self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT") def test_connect_ex_error(self): server = socket.socket(socket.AF_INET) self.addCleanup(server.close) port = support.bind_port(server) # Reserve port but don't listen s = test_wrap_socket(socket.socket(socket.AF_INET), 
cert_reqs=ssl.CERT_REQUIRED) self.addCleanup(s.close) rc = s.connect_ex((HOST, port)) # Issue #19919: Windows machines or VMs hosted on Windows # machines sometimes return EWOULDBLOCK. errors = ( errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT, errno.EWOULDBLOCK, ) self.assertIn(rc, errors) class ContextTests(unittest.TestCase): def test_constructor(self): for protocol in PROTOCOLS: ssl.SSLContext(protocol) ctx = ssl.SSLContext() self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS) self.assertRaises(ValueError, ssl.SSLContext, -1) self.assertRaises(ValueError, ssl.SSLContext, 42) def test_protocol(self): for proto in PROTOCOLS: ctx = ssl.SSLContext(proto) self.assertEqual(ctx.protocol, proto) def test_ciphers(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.set_ciphers("ALL") ctx.set_ciphers("DEFAULT") with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"): ctx.set_ciphers("^$:,;?*'dorothyx") @unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1, "Test applies only to Python default ciphers") def test_python_ciphers(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ciphers = ctx.get_ciphers() for suite in ciphers: name = suite['name'] self.assertNotIn("PSK", name) self.assertNotIn("SRP", name) self.assertNotIn("MD5", name) self.assertNotIn("RC4", name) self.assertNotIn("3DES", name) @unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old') def test_get_ciphers(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.set_ciphers('AESGCM') names = set(d['name'] for d in ctx.get_ciphers()) self.assertIn('AES256-GCM-SHA384', names) self.assertIn('AES128-GCM-SHA256', names) def test_options(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) # OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3) # SSLContext also enables these by default default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE | OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE | OP_ENABLE_MIDDLEBOX_COMPAT) self.assertEqual(default, ctx.options) ctx.options |= ssl.OP_NO_TLSv1 self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options) if can_clear_options(): ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1) self.assertEqual(default, ctx.options) ctx.options = 0 # Ubuntu has OP_NO_SSLv3 forced on by default self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3) else: with self.assertRaises(ValueError): ctx.options = 0 def test_verify_mode_protocol(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS) # Default value self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) ctx.verify_mode = ssl.CERT_OPTIONAL self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL) ctx.verify_mode = ssl.CERT_REQUIRED self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) ctx.verify_mode = ssl.CERT_NONE self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) with self.assertRaises(TypeError): ctx.verify_mode = None with self.assertRaises(ValueError): ctx.verify_mode = 42 ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self.assertFalse(ctx.check_hostname) ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertTrue(ctx.check_hostname) def test_hostname_checks_common_name(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertTrue(ctx.hostname_checks_common_name) if ssl.HAS_NEVER_CHECK_COMMON_NAME: ctx.hostname_checks_common_name = True self.assertTrue(ctx.hostname_checks_common_name) ctx.hostname_checks_common_name = False self.assertFalse(ctx.hostname_checks_common_name) 
ctx.hostname_checks_common_name = True self.assertTrue(ctx.hostname_checks_common_name) else: with self.assertRaises(AttributeError): ctx.hostname_checks_common_name = True @requires_minimum_version @unittest.skipIf(IS_LIBRESSL, "see bpo-34001") def test_min_max_version(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # OpenSSL default is MINIMUM_SUPPORTED, however some vendors like # Fedora override the setting to TLS 1.0. minimum_range = { # stock OpenSSL ssl.TLSVersion.MINIMUM_SUPPORTED, # Fedora 29 uses TLS 1.0 by default ssl.TLSVersion.TLSv1, # RHEL 8 uses TLS 1.2 by default ssl.TLSVersion.TLSv1_2 } self.assertIn( ctx.minimum_version, minimum_range ) self.assertEqual( ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED ) ctx.minimum_version = ssl.TLSVersion.TLSv1_1 ctx.maximum_version = ssl.TLSVersion.TLSv1_2 self.assertEqual( ctx.minimum_version, ssl.TLSVersion.TLSv1_1 ) self.assertEqual( ctx.maximum_version, ssl.TLSVersion.TLSv1_2 ) ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED ctx.maximum_version = ssl.TLSVersion.TLSv1 self.assertEqual( ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED ) self.assertEqual( ctx.maximum_version, ssl.TLSVersion.TLSv1 ) ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED self.assertEqual( ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED ) ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED self.assertIn( ctx.maximum_version, {ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3} ) ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED self.assertIn( ctx.minimum_version, {ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3} ) with self.assertRaises(ValueError): ctx.minimum_version = 42 ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1) self.assertIn( ctx.minimum_version, minimum_range ) self.assertEqual( ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED ) with self.assertRaises(ValueError): ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED with self.assertRaises(ValueError): ctx.maximum_version = ssl.TLSVersion.TLSv1 @unittest.skipUnless(have_verify_flags(), "verify_flags need OpenSSL > 0.9.8") def test_verify_flags(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # default value tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0) self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf) ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF) ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN) ctx.verify_flags = ssl.VERIFY_DEFAULT self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT) # supports any value ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT) with self.assertRaises(TypeError): ctx.verify_flags = None def test_load_cert_chain(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # Combined key and cert in a single file ctx.load_cert_chain(CERTFILE, keyfile=None) ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE) self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE) with self.assertRaises(OSError) as cm: ctx.load_cert_chain(NONEXISTINGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(BADCERT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(EMPTYCERT) # Separate key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ctx.load_cert_chain(ONLYCERT, ONLYKEY) ctx.load_cert_chain(certfile=ONLYCERT, 
keyfile=ONLYKEY) ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(ONLYCERT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(ONLYKEY) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT) # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"): ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) ctx.load_cert_chain(CERTFILE_PROTECTED, password=bytearray(KEY_PASSWORD.encode())) ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD) ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode()) ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, bytearray(KEY_PASSWORD.encode())) with self.assertRaisesRegex(TypeError, "should be a string"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=True) with self.assertRaises(ssl.SSLError): ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass") with self.assertRaisesRegex(ValueError, "cannot be longer"): # openssl has a fixed limit on the password buffer. # PEM_BUFSIZE is generally set to 1kb. # Return a string larger than this. ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400) # Password callback def getpass_unicode(): return KEY_PASSWORD def getpass_bytes(): return KEY_PASSWORD.encode() def getpass_bytearray(): return bytearray(KEY_PASSWORD.encode()) def getpass_badpass(): return "badpass" def getpass_huge(): return b'a' * (1024 * 1024) def getpass_bad_type(): return 9 def getpass_exception(): raise Exception('getpass error') class GetPassCallable: def __call__(self): return KEY_PASSWORD def getpass(self): return KEY_PASSWORD ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode) ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes) ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray) ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable()) ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable().getpass) with self.assertRaises(ssl.SSLError): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass) with self.assertRaisesRegex(ValueError, "cannot be longer"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge) with self.assertRaisesRegex(TypeError, "must return a string"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type) with self.assertRaisesRegex(Exception, "getpass error"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception) # Make sure the password function isn't called if it isn't needed ctx.load_cert_chain(CERTFILE, password=getpass_exception) def test_load_verify_locations(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ctx.load_verify_locations(CERTFILE) ctx.load_verify_locations(cafile=CERTFILE, capath=None) ctx.load_verify_locations(BYTES_CERTFILE) ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None) self.assertRaises(TypeError, ctx.load_verify_locations) self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None) with self.assertRaises(OSError) as cm: ctx.load_verify_locations(NONEXISTINGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_verify_locations(BADCERT) 
ctx.load_verify_locations(CERTFILE, CAPATH) ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH) # Issue #10989: crash if the second argument type is invalid self.assertRaises(TypeError, ctx.load_verify_locations, None, True) def test_load_verify_cadata(self): # test cadata with open(CAFILE_CACERT) as f: cacert_pem = f.read() cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem) with open(CAFILE_NEURONIO) as f: neuronio_pem = f.read() neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem) # test PEM ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0) ctx.load_verify_locations(cadata=cacert_pem) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1) ctx.load_verify_locations(cadata=neuronio_pem) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # cert already in hash table ctx.load_verify_locations(cadata=neuronio_pem) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # combined ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) combined = "\n".join((cacert_pem, neuronio_pem)) ctx.load_verify_locations(cadata=combined) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # with junk around the certs ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) combined = ["head", cacert_pem, "other", neuronio_pem, "again", neuronio_pem, "tail"] ctx.load_verify_locations(cadata="\n".join(combined)) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # test DER ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_verify_locations(cadata=cacert_der) ctx.load_verify_locations(cadata=neuronio_der) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # cert already in hash table ctx.load_verify_locations(cadata=cacert_der) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # combined ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) combined = b"".join((cacert_der, neuronio_der)) ctx.load_verify_locations(cadata=combined) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # error cases ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object) with self.assertRaisesRegex(ssl.SSLError, "no start line"): ctx.load_verify_locations(cadata="broken") with self.assertRaisesRegex(ssl.SSLError, "not enough data"): ctx.load_verify_locations(cadata=b"broken") def test_load_dh_params(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ctx.load_dh_params(DHFILE) if os.name != 'nt': ctx.load_dh_params(BYTES_DHFILE) self.assertRaises(TypeError, ctx.load_dh_params) self.assertRaises(TypeError, ctx.load_dh_params, None) with self.assertRaises(FileNotFoundError) as cm: ctx.load_dh_params(NONEXISTINGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaises(ssl.SSLError) as cm: ctx.load_dh_params(CERTFILE) def test_session_stats(self): for proto in PROTOCOLS: ctx = ssl.SSLContext(proto) self.assertEqual(ctx.session_stats(), { 'number': 0, 'connect': 0, 'connect_good': 0, 'connect_renegotiate': 0, 'accept': 0, 'accept_good': 0, 'accept_renegotiate': 0, 'hits': 0, 'misses': 0, 'timeouts': 0, 'cache_full': 0, }) def test_set_default_verify_paths(self): # There's not much we can do to test that it acts as expected, # so just check it doesn't crash or raise an exception. 
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.set_default_verify_paths() @unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build") def test_set_ecdh_curve(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ctx.set_ecdh_curve("prime256v1") ctx.set_ecdh_curve(b"prime256v1") self.assertRaises(TypeError, ctx.set_ecdh_curve) self.assertRaises(TypeError, ctx.set_ecdh_curve, None) self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo") self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo") @needs_sni def test_sni_callback(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # set_servername_callback expects a callable, or None self.assertRaises(TypeError, ctx.set_servername_callback) self.assertRaises(TypeError, ctx.set_servername_callback, 4) self.assertRaises(TypeError, ctx.set_servername_callback, "") self.assertRaises(TypeError, ctx.set_servername_callback, ctx) def dummycallback(sock, servername, ctx): pass ctx.set_servername_callback(None) ctx.set_servername_callback(dummycallback) @needs_sni def test_sni_callback_refcycle(self): # Reference cycles through the servername callback are detected # and cleared. ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) def dummycallback(sock, servername, ctx, cycle=ctx): pass ctx.set_servername_callback(dummycallback) wr = weakref.ref(ctx) del ctx, dummycallback gc.collect() self.assertIs(wr(), None) def test_cert_store_stats(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 0}) ctx.load_cert_chain(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 0}) ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) def test_get_ca_certs(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.get_ca_certs(), []) # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) # but CAFILE_CACERT is a CA cert ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), (('commonName', 'CA Cert Signing Authority'),), (('emailAddress', 'support@cacert.org'),)), 'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'), 'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'), 'serialNumber': '00', 'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',), 'subject': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), (('commonName', 'CA Cert Signing Authority'),), (('emailAddress', 'support@cacert.org'),)), 'version': 3}]) with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) def test_load_default_certs(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_default_certs() ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_default_certs(ssl.Purpose.SERVER_AUTH) ctx.load_default_certs() ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH) ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertRaises(TypeError, ctx.load_default_certs, None) self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH') @unittest.skipIf(sys.platform == "win32", "not-Windows specific") 
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars") def test_load_default_certs_env(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) with support.EnvironmentVarGuard() as env: env["SSL_CERT_DIR"] = CAPATH env["SSL_CERT_FILE"] = CERTFILE ctx.load_default_certs() self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0}) @unittest.skipUnless(sys.platform == "win32", "Windows specific") @unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs") def test_load_default_certs_env_windows(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_default_certs() stats = ctx.cert_store_stats() ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) with support.EnvironmentVarGuard() as env: env["SSL_CERT_DIR"] = CAPATH env["SSL_CERT_FILE"] = CERTFILE ctx.load_default_certs() stats["x509"] += 1 self.assertEqual(ctx.cert_store_stats(), stats) def _assert_context_options(self, ctx): self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2) if OP_NO_COMPRESSION != 0: self.assertEqual(ctx.options & OP_NO_COMPRESSION, OP_NO_COMPRESSION) if OP_SINGLE_DH_USE != 0: self.assertEqual(ctx.options & OP_SINGLE_DH_USE, OP_SINGLE_DH_USE) if OP_SINGLE_ECDH_USE != 0: self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE, OP_SINGLE_ECDH_USE) if OP_CIPHER_SERVER_PREFERENCE != 0: self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE, OP_CIPHER_SERVER_PREFERENCE) def test_create_default_context(self): ctx = ssl.create_default_context() self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertTrue(ctx.check_hostname) self._assert_context_options(ctx) with open(SIGNING_CA) as f: cadata = f.read() ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH, cadata=cadata) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self._assert_context_options(ctx) ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self._assert_context_options(ctx) def test__create_stdlib_context(self): ctx = ssl._create_stdlib_context() self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self.assertFalse(ctx.check_hostname) self._assert_context_options(ctx) ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self._assert_context_options(ctx) ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1, cert_reqs=ssl.CERT_REQUIRED, check_hostname=True) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertTrue(ctx.check_hostname) self._assert_context_options(ctx) ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self._assert_context_options(ctx) def test_check_hostname(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS) self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) # Auto set CERT_REQUIRED ctx.check_hostname = True self.assertTrue(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) ctx.check_hostname = False ctx.verify_mode = ssl.CERT_REQUIRED self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) # Changing verify_mode does not affect check_hostname 
ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE ctx.check_hostname = False self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) # Auto set ctx.check_hostname = True self.assertTrue(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) ctx.check_hostname = False ctx.verify_mode = ssl.CERT_OPTIONAL ctx.check_hostname = False self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL) # keep CERT_OPTIONAL ctx.check_hostname = True self.assertTrue(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL) # Cannot set CERT_NONE with check_hostname enabled with self.assertRaises(ValueError): ctx.verify_mode = ssl.CERT_NONE ctx.check_hostname = False self.assertFalse(ctx.check_hostname) ctx.verify_mode = ssl.CERT_NONE self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) def test_context_client_server(self): # PROTOCOL_TLS_CLIENT has sane defaults ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertTrue(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) # PROTOCOL_TLS_SERVER has different but also sane defaults ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) def test_context_custom_class(self): class MySSLSocket(ssl.SSLSocket): pass class MySSLObject(ssl.SSLObject): pass ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ctx.sslsocket_class = MySSLSocket ctx.sslobject_class = MySSLObject with ctx.wrap_socket(socket.socket(), server_side=True) as sock: self.assertIsInstance(sock, MySSLSocket) obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO()) self.assertIsInstance(obj, MySSLObject) @unittest.skipUnless(IS_OPENSSL_1_1_1, "Test requires OpenSSL 1.1.1") def test_num_tickest(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) self.assertEqual(ctx.num_tickets, 2) ctx.num_tickets = 1 self.assertEqual(ctx.num_tickets, 1) ctx.num_tickets = 0 self.assertEqual(ctx.num_tickets, 0) with self.assertRaises(ValueError): ctx.num_tickets = -1 with self.assertRaises(TypeError): ctx.num_tickets = None ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.num_tickets, 2) with self.assertRaises(ValueError): ctx.num_tickets = 1 class SSLErrorTests(unittest.TestCase): def test_str(self): # The str() of a SSLError doesn't include the errno e = ssl.SSLError(1, "foo") self.assertEqual(str(e), "foo") self.assertEqual(e.errno, 1) # Same for a subclass e = ssl.SSLZeroReturnError(1, "foo") self.assertEqual(str(e), "foo") self.assertEqual(e.errno, 1) def test_lib_reason(self): # Test the library and reason attributes ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) with self.assertRaises(ssl.SSLError) as cm: ctx.load_dh_params(CERTFILE) self.assertEqual(cm.exception.library, 'PEM') self.assertEqual(cm.exception.reason, 'NO_START_LINE') s = str(cm.exception) self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s) def test_subclass(self): # Check that the appropriate SSLError subclass is raised # (this only tests one of them) ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE with socket.create_server(("127.0.0.1", 0)) as s: c = socket.create_connection(s.getsockname()) c.setblocking(False) with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c: with self.assertRaises(ssl.SSLWantReadError) as cm: c.do_handshake() s = str(cm.exception) self.assertTrue(s.startswith("The operation did not complete (read)"), s) # For compatibility 
                self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)

    def test_bad_server_hostname(self):
        ctx = ssl.create_default_context()
        with self.assertRaises(ValueError):
            ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
                         server_hostname="")
        with self.assertRaises(ValueError):
            ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
                         server_hostname=".example.org")
        with self.assertRaises(TypeError):
            ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
                         server_hostname="example.org\x00evil.com")


class MemoryBIOTests(unittest.TestCase):

    def test_read_write(self):
        bio = ssl.MemoryBIO()
        bio.write(b'foo')
        self.assertEqual(bio.read(), b'foo')
        self.assertEqual(bio.read(), b'')
        bio.write(b'foo')
        bio.write(b'bar')
        self.assertEqual(bio.read(), b'foobar')
        self.assertEqual(bio.read(), b'')
        bio.write(b'baz')
        self.assertEqual(bio.read(2), b'ba')
        self.assertEqual(bio.read(1), b'z')
        self.assertEqual(bio.read(1), b'')

    def test_eof(self):
        bio = ssl.MemoryBIO()
        self.assertFalse(bio.eof)
        self.assertEqual(bio.read(), b'')
        self.assertFalse(bio.eof)
        bio.write(b'foo')
        self.assertFalse(bio.eof)
        bio.write_eof()
        self.assertFalse(bio.eof)
        self.assertEqual(bio.read(2), b'fo')
        self.assertFalse(bio.eof)
        self.assertEqual(bio.read(1), b'o')
        self.assertTrue(bio.eof)
        self.assertEqual(bio.read(), b'')
        self.assertTrue(bio.eof)

    def test_pending(self):
        bio = ssl.MemoryBIO()
        self.assertEqual(bio.pending, 0)
        bio.write(b'foo')
        self.assertEqual(bio.pending, 3)
        for i in range(3):
            bio.read(1)
            self.assertEqual(bio.pending, 3-i-1)
        for i in range(3):
            bio.write(b'x')
            self.assertEqual(bio.pending, i+1)
        bio.read()
        self.assertEqual(bio.pending, 0)

    def test_buffer_types(self):
        bio = ssl.MemoryBIO()
        bio.write(b'foo')
        self.assertEqual(bio.read(), b'foo')
        bio.write(bytearray(b'bar'))
        self.assertEqual(bio.read(), b'bar')
        bio.write(memoryview(b'baz'))
        self.assertEqual(bio.read(), b'baz')

    def test_error_types(self):
        bio = ssl.MemoryBIO()
        self.assertRaises(TypeError, bio.write, 'foo')
        self.assertRaises(TypeError, bio.write, None)
        self.assertRaises(TypeError, bio.write, True)
        self.assertRaises(TypeError, bio.write, 1)


class SSLObjectTests(unittest.TestCase):

    def test_private_init(self):
        bio = ssl.MemoryBIO()
        with self.assertRaisesRegex(TypeError, "public constructor"):
            ssl.SSLObject(bio, bio)

    def test_unwrap(self):
        client_ctx, server_ctx, hostname = testing_context()
        c_in = ssl.MemoryBIO()
        c_out = ssl.MemoryBIO()
        s_in = ssl.MemoryBIO()
        s_out = ssl.MemoryBIO()
        client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
        server = server_ctx.wrap_bio(s_in, s_out, server_side=True)

        # Loop on the handshake for a bit to get it settled
        for _ in range(5):
            try:
                client.do_handshake()
            except ssl.SSLWantReadError:
                pass
            if c_out.pending:
                s_in.write(c_out.read())
            try:
                server.do_handshake()
            except ssl.SSLWantReadError:
                pass
            if s_out.pending:
                c_in.write(s_out.read())
        # Now the handshakes should be complete (don't raise WantReadError)
        client.do_handshake()
        server.do_handshake()

        # Now if we unwrap one side unilaterally, it should send close-notify
        # and raise WantReadError:
        with self.assertRaises(ssl.SSLWantReadError):
            client.unwrap()

        # But server.unwrap() does not raise, because it reads the client's
        # close-notify:
        s_in.write(c_out.read())
        server.unwrap()

        # And now that the client gets the server's close-notify, it doesn't
        # raise either.
        c_in.write(s_out.read())
        client.unwrap()


class SimpleBackgroundTests(unittest.TestCase):
    """Tests that connect to a simple server running in the background"""

    def setUp(self):
        server = ThreadedEchoServer(SIGNED_CERTFILE)
        self.server_addr = (HOST, server.port)
        server.__enter__()
        self.addCleanup(server.__exit__, None, None, None)

    def test_connect(self):
        with test_wrap_socket(socket.socket(socket.AF_INET),
                              cert_reqs=ssl.CERT_NONE) as s:
            s.connect(self.server_addr)
            self.assertEqual({}, s.getpeercert())
            self.assertFalse(s.server_side)

        # this should succeed because we specify the root cert
        with test_wrap_socket(socket.socket(socket.AF_INET),
                              cert_reqs=ssl.CERT_REQUIRED,
                              ca_certs=SIGNING_CA) as s:
            s.connect(self.server_addr)
            self.assertTrue(s.getpeercert())
            self.assertFalse(s.server_side)

    def test_connect_fail(self):
        # This should fail because we have no verification certs. Connection
        # failure crashes ThreadedEchoServer, so run this in an independent
        # test method.
        s = test_wrap_socket(socket.socket(socket.AF_INET),
                             cert_reqs=ssl.CERT_REQUIRED)
        self.addCleanup(s.close)
        self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                               s.connect, self.server_addr)

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        s = test_wrap_socket(socket.socket(socket.AF_INET),
                             cert_reqs=ssl.CERT_REQUIRED,
                             ca_certs=SIGNING_CA)
        self.addCleanup(s.close)
        self.assertEqual(0, s.connect_ex(self.server_addr))
        self.assertTrue(s.getpeercert())

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        s = test_wrap_socket(socket.socket(socket.AF_INET),
                             cert_reqs=ssl.CERT_REQUIRED,
                             ca_certs=SIGNING_CA,
                             do_handshake_on_connect=False)
        self.addCleanup(s.close)
        s.setblocking(False)
        rc = s.connect_ex(self.server_addr)
        # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
        self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
        # Wait for connect to finish
        select.select([], [s], [], 5.0)
        # Non-blocking handshake
        while True:
            try:
                s.do_handshake()
                break
            except ssl.SSLWantReadError:
                select.select([s], [], [], 5.0)
            except ssl.SSLWantWriteError:
                select.select([], [s], [], 5.0)
        # SSL established
        self.assertTrue(s.getpeercert())

    def test_connect_with_context(self):
        # Same as test_connect, but with a separately created context
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
        with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
            s.connect(self.server_addr)
            self.assertEqual({}, s.getpeercert())
        # Same with a server hostname
        with ctx.wrap_socket(socket.socket(socket.AF_INET),
                             server_hostname="dummy") as s:
            s.connect(self.server_addr)
        ctx.verify_mode = ssl.CERT_REQUIRED
        # This should succeed because we specify the root cert
        ctx.load_verify_locations(SIGNING_CA)
        with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)

    def test_connect_with_context_fail(self):
        # This should fail because we have no verification certs. Connection
        # failure crashes ThreadedEchoServer, so run this in an independent
        # test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS) ctx.verify_mode = ssl.CERT_REQUIRED s = ctx.wrap_socket(socket.socket(socket.AF_INET)) self.addCleanup(s.close) self.assertRaisesRegex(ssl.SSLError, "certificate verify failed", s.connect, self.server_addr) def test_connect_capath(self): # Verify server certificates using the `capath` argument # NOTE: the subject hashing algorithm has been changed between # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must # contain both versions of each certificate (same content, different # filename) for this test to be portable across OpenSSL releases. ctx = ssl.SSLContext(ssl.PROTOCOL_TLS) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s: s.connect(self.server_addr) cert = s.getpeercert() self.assertTrue(cert) # Same with a bytes `capath` argument ctx = ssl.SSLContext(ssl.PROTOCOL_TLS) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=BYTES_CAPATH) with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s: s.connect(self.server_addr) cert = s.getpeercert() self.assertTrue(cert) def test_connect_cadata(self): with open(SIGNING_CA) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) ctx = ssl.SSLContext(ssl.PROTOCOL_TLS) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=pem) with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s: s.connect(self.server_addr) cert = s.getpeercert() self.assertTrue(cert) # same with DER ctx = ssl.SSLContext(ssl.PROTOCOL_TLS) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=der) with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s: s.connect(self.server_addr) cert = s.getpeercert() self.assertTrue(cert) @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows") def test_makefile_close(self): # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). 
ss = test_wrap_socket(socket.socket(socket.AF_INET)) ss.connect(self.server_addr) fd = ss.fileno() f = ss.makefile() f.close() # The fd is still open os.read(fd, 0) # Closing the SSL socket should close the fd too ss.close() gc.collect() with self.assertRaises(OSError) as e: os.read(fd, 0) self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): s = socket.socket(socket.AF_INET) s.connect(self.server_addr) s.setblocking(False) s = test_wrap_socket(s, cert_reqs=ssl.CERT_NONE, do_handshake_on_connect=False) self.addCleanup(s.close) count = 0 while True: try: count += 1 s.do_handshake() break except ssl.SSLWantReadError: select.select([s], [], []) except ssl.SSLWantWriteError: select.select([], [s], []) if support.verbose: sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count) def test_get_server_certificate(self): _test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA) def test_get_server_certificate_fail(self): # Connection failure crashes ThreadedEchoServer, so run this in an # independent test method _test_get_server_certificate_fail(self, *self.server_addr) def test_ciphers(self): with test_wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s: s.connect(self.server_addr) with test_wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s: s.connect(self.server_addr) # Error checking can happen at instantiation or when connecting with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"): with socket.socket(socket.AF_INET) as sock: s = test_wrap_socket(sock, cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx") s.connect(self.server_addr) def test_get_ca_certs_capath(self): # capath certs are loaded on request ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) with ctx.wrap_socket(socket.socket(socket.AF_INET), server_hostname='localhost') as s: s.connect(self.server_addr) cert = s.getpeercert() self.assertTrue(cert) self.assertEqual(len(ctx.get_ca_certs()), 1) @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx1.load_verify_locations(capath=CAPATH) ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx2.load_verify_locations(capath=CAPATH) s = socket.socket(socket.AF_INET) with ctx1.wrap_socket(s, server_hostname='localhost') as ss: ss.connect(self.server_addr) self.assertIs(ss.context, ctx1) self.assertIs(ss._sslobj.context, ctx1) ss.context = ctx2 self.assertIs(ss.context, ctx2) self.assertIs(ss._sslobj.context, ctx2) def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs): # A simple IO loop. Call func(*args) depending on the error we get # (WANT_READ or WANT_WRITE) move data between the socket and the BIOs. timeout = kwargs.get('timeout', 10) deadline = time.monotonic() + timeout count = 0 while True: if time.monotonic() > deadline: self.fail("timeout") errno = None count += 1 try: ret = func(*args) except ssl.SSLError as e: if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): raise errno = e.errno # Get any data from the outgoing BIO irrespective of any error, and # send it to the socket. buf = outgoing.read() sock.sendall(buf) # If there's no error, we're done. For WANT_READ, we need to get # data from the socket and put it in the incoming BIO. 
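        # (For WANT_WRITE there is nothing extra to do here: the outgoing BIO has
        # already been drained into the socket above, so the loop simply retries
        # func() on the next iteration.)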
if errno is None: break elif errno == ssl.SSL_ERROR_WANT_READ: buf = sock.recv(32768) if buf: incoming.write(buf) else: incoming.write_eof() if support.verbose: sys.stdout.write("Needed %d calls to complete %s().\n" % (count, func.__name__)) return ret def test_bio_handshake(self): sock = socket.socket(socket.AF_INET) self.addCleanup(sock.close) sock.connect(self.server_addr) incoming = ssl.MemoryBIO() outgoing = ssl.MemoryBIO() ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertTrue(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) ctx.load_verify_locations(SIGNING_CA) sslobj = ctx.wrap_bio(incoming, outgoing, False, SIGNED_CERTFILE_HOSTNAME) self.assertIs(sslobj._sslobj.owner, sslobj) self.assertIsNone(sslobj.cipher()) self.assertIsNone(sslobj.version()) self.assertIsNotNone(sslobj.shared_ciphers()) self.assertRaises(ValueError, sslobj.getpeercert) if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES: self.assertIsNone(sslobj.get_channel_binding('tls-unique')) self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake) self.assertTrue(sslobj.cipher()) self.assertIsNotNone(sslobj.shared_ciphers()) self.assertIsNotNone(sslobj.version()) self.assertTrue(sslobj.getpeercert()) if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES: self.assertTrue(sslobj.get_channel_binding('tls-unique')) try: self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap) except ssl.SSLSyscallError: # If the server shuts down the TCP connection without sending a # secure shutdown message, this is reported as SSL_ERROR_SYSCALL pass self.assertRaises(ssl.SSLError, sslobj.write, b'foo') def test_bio_read_write_data(self): sock = socket.socket(socket.AF_INET) self.addCleanup(sock.close) sock.connect(self.server_addr) incoming = ssl.MemoryBIO() outgoing = ssl.MemoryBIO() ctx = ssl.SSLContext(ssl.PROTOCOL_TLS) ctx.verify_mode = ssl.CERT_NONE sslobj = ctx.wrap_bio(incoming, outgoing, False) self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake) req = b'FOO\n' self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req) buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024) self.assertEqual(buf, b'foo\n') self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap) class NetworkedTests(unittest.TestCase): def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). with support.transient_internet(REMOTE_HOST): s = test_wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, do_handshake_on_connect=False) self.addCleanup(s.close) s.settimeout(0.0000001) rc = s.connect_ex((REMOTE_HOST, 443)) if rc == 0: self.skipTest("REMOTE_HOST responded too quickly") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) @unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6') def test_get_server_certificate_ipv6(self): with support.transient_internet('ipv6.google.com'): _test_get_server_certificate(self, 'ipv6.google.com', 443) _test_get_server_certificate_fail(self, 'ipv6.google.com', 443) def _test_get_server_certificate(test, host, port, cert=None): pem = ssl.get_server_certificate((host, port)) if not pem: test.fail("No server certificate on %s:%s!" % (host, port)) pem = ssl.get_server_certificate((host, port), ca_certs=cert) if not pem: test.fail("No server certificate on %s:%s!" 
% (host, port)) if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) def _test_get_server_certificate_fail(test, host, port): try: pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE) except ssl.SSLError as x: #should fail if support.verbose: sys.stdout.write("%s\n" % x) else: test.fail("Got server certificate %s for %s:%s!" % (pem, host, port)) from test.ssl_servers import make_https_server class ThreadedEchoServer(threading.Thread): class ConnectionHandler(threading.Thread): """A mildly complicated class, because we want it to work both with and without the SSL wrapper around the socket connection, so that we can test the STARTTLS functionality.""" def __init__(self, server, connsock, addr): self.server = server self.running = False self.sock = connsock self.addr = addr self.sock.setblocking(1) self.sslconn = None threading.Thread.__init__(self) self.daemon = True def wrap_conn(self): try: self.sslconn = self.server.context.wrap_socket( self.sock, server_side=True) self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol()) self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol()) except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e: # We treat ConnectionResetError as though it were an # SSLError - OpenSSL on Ubuntu abruptly closes the # connection when asked to use an unsupported protocol. # # BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL # tries to send session tickets after handshake. # https://github.com/openssl/openssl/issues/6342 # # ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL # tries to send session tickets after handshake when using WinSock. self.server.conn_errors.append(str(e)) if self.server.chatty: handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n") self.running = False self.close() return False except (ssl.SSLError, OSError) as e: # OSError may occur with wrong protocols, e.g. both # sides use PROTOCOL_TLS_SERVER. # # XXX Various errors can have happened here, for example # a mismatching protocol version, an invalid certificate, # or a low-level bug. This should be made more discriminating. 
# # bpo-31323: Store the exception as string to prevent # a reference leak: server -> conn_errors -> exception # -> traceback -> self (ConnectionHandler) -> server self.server.conn_errors.append(str(e)) if self.server.chatty: handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n") self.running = False self.server.stop() self.close() return False else: self.server.shared_ciphers.append(self.sslconn.shared_ciphers()) if self.server.context.verify_mode == ssl.CERT_REQUIRED: cert = self.sslconn.getpeercert() if support.verbose and self.server.chatty: sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n") cert_binary = self.sslconn.getpeercert(True) if support.verbose and self.server.chatty: sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n") cipher = self.sslconn.cipher() if support.verbose and self.server.chatty: sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n") sys.stdout.write(" server: selected protocol is now " + str(self.sslconn.selected_npn_protocol()) + "\n") return True def read(self): if self.sslconn: return self.sslconn.read() else: return self.sock.recv(1024) def write(self, bytes): if self.sslconn: return self.sslconn.write(bytes) else: return self.sock.send(bytes) def close(self): if self.sslconn: self.sslconn.close() else: self.sock.close() def run(self): self.running = True if not self.server.starttls_server: if not self.wrap_conn(): return while self.running: try: msg = self.read() stripped = msg.strip() if not stripped: # eof, so quit this handler self.running = False try: self.sock = self.sslconn.unwrap() except OSError: # Many tests shut the TCP connection down # without an SSL shutdown. This causes # unwrap() to raise OSError with errno=0! pass else: self.sslconn = None self.close() elif stripped == b'over': if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: client closed connection\n") self.close() return elif (self.server.starttls_server and stripped == b'STARTTLS'): if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: read STARTTLS from client, sending OK...\n") self.write(b"OK\n") if not self.wrap_conn(): return elif (self.server.starttls_server and self.sslconn and stripped == b'ENDTLS'): if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: read ENDTLS from client, sending OK...\n") self.write(b"OK\n") self.sock = self.sslconn.unwrap() self.sslconn = None if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: connection is now unencrypted...\n") elif stripped == b'CB tls-unique': if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n") data = self.sslconn.get_channel_binding("tls-unique") self.write(repr(data).encode("us-ascii") + b"\n") elif stripped == b'PHA': if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: initiating post handshake auth\n") try: self.sslconn.verify_client_post_handshake() except ssl.SSLError as e: self.write(repr(e).encode("us-ascii") + b"\n") else: self.write(b"OK\n") elif stripped == b'HASCERT': if self.sslconn.getpeercert() is not None: self.write(b'TRUE\n') else: self.write(b'FALSE\n') elif stripped == b'GETCERT': cert = self.sslconn.getpeercert() self.write(repr(cert).encode("us-ascii") + b"\n") else: if (support.verbose and self.server.connectionchatty): ctype = (self.sslconn and "encrypted") or "unencrypted" sys.stdout.write(" server: read 
%r (%s), sending back %r (%s)...\n" % (msg, ctype, msg.lower(), ctype)) self.write(msg.lower()) except (ConnectionResetError, ConnectionAbortedError): # XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError # when connection is not shut down gracefully. if self.server.chatty and support.verbose: sys.stdout.write( " Connection reset by peer: {}\n".format( self.addr) ) self.close() self.running = False except ssl.SSLError as err: # On Windows sometimes test_pha_required_nocert receives the # PEER_DID_NOT_RETURN_A_CERTIFICATE exception # before the 'tlsv13 alert certificate required' exception. # If the server is stopped when PEER_DID_NOT_RETURN_A_CERTIFICATE # is received test_pha_required_nocert fails with ConnectionResetError # because the underlying socket is closed if 'PEER_DID_NOT_RETURN_A_CERTIFICATE' == err.reason: if self.server.chatty and support.verbose: sys.stdout.write(err.args[1]) # test_pha_required_nocert is expecting this exception raise ssl.SSLError('tlsv13 alert certificate required') except OSError: if self.server.chatty: handle_error("Test server failure:\n") self.close() self.running = False # normally, we'd just stop here, but for the test # harness, we want to stop the server self.server.stop() def __init__(self, certificate=None, ssl_version=None, certreqs=None, cacerts=None, chatty=True, connectionchatty=False, starttls_server=False, npn_protocols=None, alpn_protocols=None, ciphers=None, context=None): if context: self.context = context else: self.context = ssl.SSLContext(ssl_version if ssl_version is not None else ssl.PROTOCOL_TLS_SERVER) self.context.verify_mode = (certreqs if certreqs is not None else ssl.CERT_NONE) if cacerts: self.context.load_verify_locations(cacerts) if certificate: self.context.load_cert_chain(certificate) if npn_protocols: self.context.set_npn_protocols(npn_protocols) if alpn_protocols: self.context.set_alpn_protocols(alpn_protocols) if ciphers: self.context.set_ciphers(ciphers) self.chatty = chatty self.connectionchatty = connectionchatty self.starttls_server = starttls_server self.sock = socket.socket() self.port = support.bind_port(self.sock) self.flag = None self.active = False self.selected_npn_protocols = [] self.selected_alpn_protocols = [] self.shared_ciphers = [] self.conn_errors = [] threading.Thread.__init__(self) self.daemon = True def __enter__(self): self.start(threading.Event()) self.flag.wait() return self def __exit__(self, *args): self.stop() self.join() def start(self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): self.sock.settimeout(0.05) self.sock.listen() self.active = True if self.flag: # signal an event self.flag.set() while self.active: try: newconn, connaddr = self.sock.accept() if support.verbose and self.chatty: sys.stdout.write(' server: new connection from ' + repr(connaddr) + '\n') handler = self.ConnectionHandler(self, newconn, connaddr) handler.start() handler.join() except socket.timeout: pass except KeyboardInterrupt: self.stop() except BaseException as e: if support.verbose and self.chatty: sys.stdout.write( ' connection handling failed: ' + repr(e) + '\n') self.sock.close() def stop(self): self.active = False class AsyncoreEchoServer(threading.Thread): # this one's based on asyncore.dispatcher class EchoServer (asyncore.dispatcher): class ConnectionHandler(asyncore.dispatcher_with_send): def __init__(self, conn, certfile): self.socket = test_wrap_socket(conn, server_side=True, certfile=certfile, do_handshake_on_connect=False) asyncore.dispatcher_with_send.__init__(self, 
self.socket) self._ssl_accepting = True self._do_ssl_handshake() def readable(self): if isinstance(self.socket, ssl.SSLSocket): while self.socket.pending() > 0: self.handle_read_event() return True def _do_ssl_handshake(self): try: self.socket.do_handshake() except (ssl.SSLWantReadError, ssl.SSLWantWriteError): return except ssl.SSLEOFError: return self.handle_close() except ssl.SSLError: raise except OSError as err: if err.args[0] == errno.ECONNABORTED: return self.handle_close() else: self._ssl_accepting = False def handle_read(self): if self._ssl_accepting: self._do_ssl_handshake() else: data = self.recv(1024) if support.verbose: sys.stdout.write(" server: read %s from client\n" % repr(data)) if not data: self.close() else: self.send(data.lower()) def handle_close(self): self.close() if support.verbose: sys.stdout.write(" server: closed connection %s\n" % self.socket) def handle_error(self): raise def __init__(self, certfile): self.certfile = certfile sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.port = support.bind_port(sock, '') asyncore.dispatcher.__init__(self, sock) self.listen(5) def handle_accepted(self, sock_obj, addr): if support.verbose: sys.stdout.write(" server: new connection from %s:%s\n" %addr) self.ConnectionHandler(sock_obj, self.certfile) def handle_error(self): raise def __init__(self, certfile): self.flag = None self.active = False self.server = self.EchoServer(certfile) self.port = self.server.port threading.Thread.__init__(self) self.daemon = True def __str__(self): return "<%s %s>" % (self.__class__.__name__, self.server) def __enter__(self): self.start(threading.Event()) self.flag.wait() return self def __exit__(self, *args): if support.verbose: sys.stdout.write(" cleanup: stopping server.\n") self.stop() if support.verbose: sys.stdout.write(" cleanup: joining server thread.\n") self.join() if support.verbose: sys.stdout.write(" cleanup: successfully joined.\n") # make sure that ConnectionHandler is removed from socket_map asyncore.close_all(ignore_all=True) def start (self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): self.active = True if self.flag: self.flag.set() while self.active: try: asyncore.loop(1) except: pass def stop(self): self.active = False self.server.close() def server_params_test(client_context, server_context, indata=b"FOO\n", chatty=True, connectionchatty=False, sni_name=None, session=None): """ Launch a server, connect a client to it and try various reads and writes. 
""" stats = {} server = ThreadedEchoServer(context=server_context, chatty=chatty, connectionchatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=sni_name, session=session) as s: s.connect((HOST, server.port)) for arg in [indata, bytearray(indata), memoryview(indata)]: if connectionchatty: if support.verbose: sys.stdout.write( " client: sending %r...\n" % indata) s.write(arg) outdata = s.read() if connectionchatty: if support.verbose: sys.stdout.write(" client: read %r\n" % outdata) if outdata != indata.lower(): raise AssertionError( "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n" % (outdata[:20], len(outdata), indata[:20].lower(), len(indata))) s.write(b"over\n") if connectionchatty: if support.verbose: sys.stdout.write(" client: closing connection.\n") stats.update({ 'compression': s.compression(), 'cipher': s.cipher(), 'peercert': s.getpeercert(), 'client_alpn_protocol': s.selected_alpn_protocol(), 'client_npn_protocol': s.selected_npn_protocol(), 'version': s.version(), 'session_reused': s.session_reused, 'session': s.session, }) s.close() stats['server_alpn_protocols'] = server.selected_alpn_protocols stats['server_npn_protocols'] = server.selected_npn_protocols stats['server_shared_ciphers'] = server.shared_ciphers return stats def try_protocol_combo(server_protocol, client_protocol, expect_success, certsreqs=None, server_options=0, client_options=0): """ Try to SSL-connect using *client_protocol* to *server_protocol*. If *expect_success* is true, assert that the connection succeeds, if it's false, assert that the connection fails. Also, if *expect_success* is a string, assert that it is the protocol version actually used by the connection. """ if certsreqs is None: certsreqs = ssl.CERT_NONE certtype = { ssl.CERT_NONE: "CERT_NONE", ssl.CERT_OPTIONAL: "CERT_OPTIONAL", ssl.CERT_REQUIRED: "CERT_REQUIRED", }[certsreqs] if support.verbose: formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n" sys.stdout.write(formatstr % (ssl.get_protocol_name(client_protocol), ssl.get_protocol_name(server_protocol), certtype)) client_context = ssl.SSLContext(client_protocol) client_context.options |= client_options server_context = ssl.SSLContext(server_protocol) server_context.options |= server_options min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None) if (min_version is not None # SSLContext.minimum_version is only available on recent OpenSSL # (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1) and hasattr(server_context, 'minimum_version') and server_protocol == ssl.PROTOCOL_TLS and server_context.minimum_version > min_version): # If OpenSSL configuration is strict and requires more recent TLS # version, we have to change the minimum to test old TLS versions. server_context.minimum_version = min_version # NOTE: we must enable "ALL" ciphers on the client, otherwise an # SSLv23 client will send an SSLv3 hello (rather than SSLv2) # starting from OpenSSL 1.0.0 (see issue #8322). if client_context.protocol == ssl.PROTOCOL_TLS: client_context.set_ciphers("ALL") for ctx in (client_context, server_context): ctx.verify_mode = certsreqs ctx.load_cert_chain(SIGNED_CERTFILE) ctx.load_verify_locations(SIGNING_CA) try: stats = server_params_test(client_context, server_context, chatty=False, connectionchatty=False) # Protocol mismatch can result in either an SSLError, or a # "Connection reset by peer" error. 
except ssl.SSLError: if expect_success: raise except OSError as e: if expect_success or e.errno != errno.ECONNRESET: raise else: if not expect_success: raise AssertionError( "Client protocol %s succeeded with server protocol %s!" % (ssl.get_protocol_name(client_protocol), ssl.get_protocol_name(server_protocol))) elif (expect_success is not True and expect_success != stats['version']): raise AssertionError("version mismatch: expected %r, got %r" % (expect_success, stats['version'])) class ThreadedTests(unittest.TestCase): def test_echo(self): """Basic test of an SSL client connecting to a server""" if support.verbose: sys.stdout.write("\n") for protocol in PROTOCOLS: if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}: continue if not has_tls_protocol(protocol): continue with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]): context = ssl.SSLContext(protocol) context.load_cert_chain(CERTFILE) server_params_test(context, context, chatty=True, connectionchatty=True) client_context, server_context, hostname = testing_context() with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER): server_params_test(client_context=client_context, server_context=server_context, chatty=True, connectionchatty=True, sni_name=hostname) client_context.check_hostname = False with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT): with self.assertRaises(ssl.SSLError) as e: server_params_test(client_context=server_context, server_context=client_context, chatty=True, connectionchatty=True, sni_name=hostname) self.assertIn('called a function you should not call', str(e.exception)) with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER): with self.assertRaises(ssl.SSLError) as e: server_params_test(client_context=server_context, server_context=server_context, chatty=True, connectionchatty=True) self.assertIn('called a function you should not call', str(e.exception)) with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT): with self.assertRaises(ssl.SSLError) as e: server_params_test(client_context=server_context, server_context=client_context, chatty=True, connectionchatty=True) self.assertIn('called a function you should not call', str(e.exception)) def test_getpeercert(self): if support.verbose: sys.stdout.write("\n") client_context, server_context, hostname = testing_context() server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), do_handshake_on_connect=False, server_hostname=hostname) as s: s.connect((HOST, server.port)) # getpeercert() raise ValueError while the handshake isn't # done. with self.assertRaises(ValueError): s.getpeercert() s.do_handshake() cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") cipher = s.cipher() if support.verbose: sys.stdout.write(pprint.pformat(cert) + '\n') sys.stdout.write("Connection cipher is " + str(cipher) + '.\n') if 'subject' not in cert: self.fail("No subject field in certificate: %s." 
% pprint.pformat(cert)) if ((('organizationName', 'Python Software Foundation'),) not in cert['subject']): self.fail( "Missing or invalid 'organizationName' field in certificate subject; " "should be 'Python Software Foundation'.") self.assertIn('notBefore', cert) self.assertIn('notAfter', cert) before = ssl.cert_time_to_seconds(cert['notBefore']) after = ssl.cert_time_to_seconds(cert['notAfter']) self.assertLess(before, after) @unittest.skipUnless(have_verify_flags(), "verify_flags need OpenSSL > 0.9.8") def test_crl_check(self): if support.verbose: sys.stdout.write("\n") client_context, server_context, hostname = testing_context() tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0) self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf) # VERIFY_DEFAULT should pass server = ThreadedEchoServer(context=server_context, chatty=True) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF server = ThreadedEchoServer(context=server_context, chatty=True) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: with self.assertRaisesRegex(ssl.SSLError, "certificate verify failed"): s.connect((HOST, server.port)) # now load a CRL file. The CRL file is signed by the CA. client_context.load_verify_locations(CRLFILE) server = ThreadedEchoServer(context=server_context, chatty=True) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") def test_check_hostname(self): if support.verbose: sys.stdout.write("\n") client_context, server_context, hostname = testing_context() # correct hostname should verify server = ThreadedEchoServer(context=server_context, chatty=True) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") # incorrect hostname should raise an exception server = ThreadedEchoServer(context=server_context, chatty=True) with server: with client_context.wrap_socket(socket.socket(), server_hostname="invalid") as s: with self.assertRaisesRegex( ssl.CertificateError, "Hostname mismatch, certificate is not valid for 'invalid'."): s.connect((HOST, server.port)) # missing server_hostname arg should cause an exception, too server = ThreadedEchoServer(context=server_context, chatty=True) with server: with socket.socket() as s: with self.assertRaisesRegex(ValueError, "check_hostname requires server_hostname"): client_context.wrap_socket(s) def test_ecc_cert(self): client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) client_context.load_verify_locations(SIGNING_CA) client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA') hostname = SIGNED_CERTFILE_ECC_HOSTNAME server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # load ECC cert server_context.load_cert_chain(SIGNED_CERTFILE_ECC) # correct hostname should verify server = ThreadedEchoServer(context=server_context, chatty=True) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") cipher = s.cipher()[0].split('-') 
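                # cipher() returns a (name, protocol, secret_bits) tuple; a name
                # such as 'ECDHE-ECDSA-AES256-GCM-SHA384' (illustrative only, the
                # exact suite depends on the OpenSSL build) starts with the
                # key-exchange and authentication tokens inspected below.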
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA')) def test_dual_rsa_ecc(self): client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) client_context.load_verify_locations(SIGNING_CA) # TODO: fix TLSv1.3 once SSLContext can restrict signature # algorithms. client_context.options |= ssl.OP_NO_TLSv1_3 # only ECDSA certs client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA') hostname = SIGNED_CERTFILE_ECC_HOSTNAME server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # load ECC and RSA key/cert pairs server_context.load_cert_chain(SIGNED_CERTFILE_ECC) server_context.load_cert_chain(SIGNED_CERTFILE) # correct hostname should verify server = ThreadedEchoServer(context=server_context, chatty=True) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") cipher = s.cipher()[0].split('-') self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA')) def test_check_hostname_idn(self): if support.verbose: sys.stdout.write("\n") server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) server_context.load_cert_chain(IDNSANSFILE) context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) context.verify_mode = ssl.CERT_REQUIRED context.check_hostname = True context.load_verify_locations(SIGNING_CA) # correct hostname should verify, when specified in several # different ways idn_hostnames = [ ('könig.idn.pythontest.net', 'xn--knig-5qa.idn.pythontest.net'), ('xn--knig-5qa.idn.pythontest.net', 'xn--knig-5qa.idn.pythontest.net'), (b'xn--knig-5qa.idn.pythontest.net', 'xn--knig-5qa.idn.pythontest.net'), ('königsgäßchen.idna2003.pythontest.net', 'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'), ('xn--knigsgsschen-lcb0w.idna2003.pythontest.net', 'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'), (b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net', 'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'), # ('königsgäßchen.idna2008.pythontest.net', # 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'), ('xn--knigsgchen-b4a3dun.idna2008.pythontest.net', 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'), (b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net', 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'), ] for server_hostname, expected_hostname in idn_hostnames: server = ThreadedEchoServer(context=server_context, chatty=True) with server: with context.wrap_socket(socket.socket(), server_hostname=server_hostname) as s: self.assertEqual(s.server_hostname, expected_hostname) s.connect((HOST, server.port)) cert = s.getpeercert() self.assertEqual(s.server_hostname, expected_hostname) self.assertTrue(cert, "Can't get peer certificate.") # incorrect hostname should raise an exception server = ThreadedEchoServer(context=server_context, chatty=True) with server: with context.wrap_socket(socket.socket(), server_hostname="python.example.org") as s: with self.assertRaises(ssl.CertificateError): s.connect((HOST, server.port)) def test_wrong_cert_tls12(self): """Connecting when the server rejects the client's certificate Launch a server with CERT_REQUIRED, and check that trying to connect to it with a wrong client certificate fails. 
""" client_context, server_context, hostname = testing_context() # load client cert that is not signed by trusted CA client_context.load_cert_chain(CERTFILE) # require TLS client authentication server_context.verify_mode = ssl.CERT_REQUIRED # TLS 1.3 has different handshake client_context.maximum_version = ssl.TLSVersion.TLSv1_2 server = ThreadedEchoServer( context=server_context, chatty=True, connectionchatty=True, ) with server, \ client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: try: # Expect either an SSL error about the server rejecting # the connection, or a low-level connection reset (which # sometimes happens on Windows) s.connect((HOST, server.port)) except ssl.SSLError as e: if support.verbose: sys.stdout.write("\nSSLError is %r\n" % e) except OSError as e: if e.errno != errno.ECONNRESET: raise if support.verbose: sys.stdout.write("\nsocket.error is %r\n" % e) else: self.fail("Use of invalid cert should have failed!") @requires_tls_version('TLSv1_3') def test_wrong_cert_tls13(self): client_context, server_context, hostname = testing_context() # load client cert that is not signed by trusted CA client_context.load_cert_chain(CERTFILE) server_context.verify_mode = ssl.CERT_REQUIRED server_context.minimum_version = ssl.TLSVersion.TLSv1_3 client_context.minimum_version = ssl.TLSVersion.TLSv1_3 server = ThreadedEchoServer( context=server_context, chatty=True, connectionchatty=True, ) with server, \ client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: # TLS 1.3 perform client cert exchange after handshake s.connect((HOST, server.port)) try: s.write(b'data') s.read(4) except ssl.SSLError as e: if support.verbose: sys.stdout.write("\nSSLError is %r\n" % e) except OSError as e: if e.errno != errno.ECONNRESET: raise if support.verbose: sys.stdout.write("\nsocket.error is %r\n" % e) else: self.fail("Use of invalid cert should have failed!") def test_rude_shutdown(self): """A brutal shutdown of an SSL server should raise an OSError in the client when attempting handshake. """ listener_ready = threading.Event() listener_gone = threading.Event() s = socket.socket() port = support.bind_port(s, HOST) # `listener` runs in a thread. It sits in an accept() until # the main thread connects. Then it rudely closes the socket, # and sets Event `listener_gone` to let the main thread know # the socket is gone. 
def listener(): s.listen() listener_ready.set() newsock, addr = s.accept() newsock.close() s.close() listener_gone.set() def connector(): listener_ready.wait() with socket.socket() as c: c.connect((HOST, port)) listener_gone.wait() try: ssl_sock = test_wrap_socket(c) except OSError: pass else: self.fail('connecting to closed SSL socket should have failed') t = threading.Thread(target=listener) t.start() try: connector() finally: t.join() def test_ssl_cert_verify_error(self): if support.verbose: sys.stdout.write("\n") server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) server_context.load_cert_chain(SIGNED_CERTFILE) context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) server = ThreadedEchoServer(context=server_context, chatty=True) with server: with context.wrap_socket(socket.socket(), server_hostname=SIGNED_CERTFILE_HOSTNAME) as s: try: s.connect((HOST, server.port)) except ssl.SSLError as e: msg = 'unable to get local issuer certificate' self.assertIsInstance(e, ssl.SSLCertVerificationError) self.assertEqual(e.verify_code, 20) self.assertEqual(e.verify_message, msg) self.assertIn(msg, repr(e)) self.assertIn('certificate verify failed', repr(e)) @requires_tls_version('SSLv2') def test_protocol_sslv2(self): """Connecting to an SSLv2 server with various client options""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False) if has_tls_version('SSLv3'): try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) # SSLv23 client with specific SSL options if no_sslv2_implies_sslv3_hello(): # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False, client_options=ssl.OP_NO_SSLv2) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False, client_options=ssl.OP_NO_SSLv3) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False, client_options=ssl.OP_NO_TLSv1) def test_PROTOCOL_TLS(self): """Connecting to an SSLv23 server with various client options""" if support.verbose: sys.stdout.write("\n") if has_tls_version('SSLv2'): try: try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True) except OSError as x: # this fails on some older versions of OpenSSL (0.9.7l, for instance) if support.verbose: sys.stdout.write( " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n" % str(x)) if has_tls_version('SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True) if has_tls_version('TLSv1'): try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1') if has_tls_version('SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL) if has_tls_version('TLSv1'): try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL) if has_tls_version('SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED) if has_tls_version('TLSv1'): try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED) # Server with specific SSL options 
if has_tls_version('SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, server_options=ssl.OP_NO_SSLv3) # Will choose TLSv1 try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3) if has_tls_version('TLSv1'): try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False, server_options=ssl.OP_NO_TLSv1) @requires_tls_version('SSLv3') def test_protocol_sslv3(self): """Connecting to an SSLv3 server with various client options""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3') try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED) if has_tls_version('SSLv2'): try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False, client_options=ssl.OP_NO_SSLv3) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) if no_sslv2_implies_sslv3_hello(): # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False, client_options=ssl.OP_NO_SSLv2) @requires_tls_version('TLSv1') def test_protocol_tlsv1(self): """Connecting to a TLSv1 server with various client options""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1') try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED) if has_tls_version('SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) if has_tls_version('SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False, client_options=ssl.OP_NO_TLSv1) @requires_tls_version('TLSv1_1') def test_protocol_tlsv1_1(self): """Connecting to a TLSv1.1 server with various client options. Testing against older TLS versions.""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1') if has_tls_version('SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False) if has_tls_version('SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False, client_options=ssl.OP_NO_TLSv1_1) try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1') try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False) @requires_tls_version('TLSv1_2') def test_protocol_tlsv1_2(self): """Connecting to a TLSv1.2 server with various client options. 
Testing against older TLS versions.""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2', server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2, client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,) if has_tls_version('SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False) if has_tls_version('SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False, client_options=ssl.OP_NO_TLSv1_2) try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2') try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False) def test_starttls(self): """Switching from clear text to encrypted and back again.""" msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6") server = ThreadedEchoServer(CERTFILE, starttls_server=True, chatty=True, connectionchatty=True) wrapped = False with server: s = socket.socket() s.setblocking(1) s.connect((HOST, server.port)) if support.verbose: sys.stdout.write("\n") for indata in msgs: if support.verbose: sys.stdout.write( " client: sending %r...\n" % indata) if wrapped: conn.write(indata) outdata = conn.read() else: s.send(indata) outdata = s.recv(1024) msg = outdata.strip().lower() if indata == b"STARTTLS" and msg.startswith(b"ok"): # STARTTLS ok, switch to secure mode if support.verbose: sys.stdout.write( " client: read %r from server, starting TLS...\n" % msg) conn = test_wrap_socket(s) wrapped = True elif indata == b"ENDTLS" and msg.startswith(b"ok"): # ENDTLS ok, switch back to clear text if support.verbose: sys.stdout.write( " client: read %r from server, ending TLS...\n" % msg) s = conn.unwrap() wrapped = False else: if support.verbose: sys.stdout.write( " client: read %r from server\n" % msg) if support.verbose: sys.stdout.write(" client: closing connection.\n") if wrapped: conn.write(b"over\n") else: s.send(b"over\n") if wrapped: conn.close() else: s.close() def test_socketserver(self): """Using socketserver to create and manage SSL connections.""" server = make_https_server(self, certfile=SIGNED_CERTFILE) # try to connect if support.verbose: sys.stdout.write('\n') with open(CERTFILE, 'rb') as f: d1 = f.read() d2 = '' # now fetch the same data from the HTTPS server url = 'https://localhost:%d/%s' % ( server.port, os.path.split(CERTFILE)[1]) context = ssl.create_default_context(cafile=SIGNING_CA) f = urllib.request.urlopen(url, context=context) try: dlen = f.info().get("content-length") if dlen and (int(dlen) > 0): d2 = f.read(int(dlen)) if support.verbose: sys.stdout.write( " client: read %d bytes from remote server '%s'\n" % (len(d2), server)) finally: f.close() self.assertEqual(d1, d2) def test_asyncore_server(self): """Check the example asyncore integration.""" if support.verbose: sys.stdout.write("\n") indata = b"FOO\n" server = AsyncoreEchoServer(CERTFILE) with server: s = test_wrap_socket(socket.socket()) s.connect(('127.0.0.1', server.port)) if support.verbose: sys.stdout.write( " client: sending %r...\n" % indata) s.write(indata) outdata = s.read() if support.verbose: sys.stdout.write(" client: read %r\n" % outdata) if outdata != indata.lower(): self.fail( "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n" % (outdata[:20], len(outdata), 
indata[:20].lower(), len(indata))) s.write(b"over\n") if support.verbose: sys.stdout.write(" client: closing connection.\n") s.close() if support.verbose: sys.stdout.write(" client: connection closed.\n") def test_recv_send(self): """Test recv(), send() and friends.""" if support.verbose: sys.stdout.write("\n") server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLS_SERVER, cacerts=CERTFILE, chatty=True, connectionchatty=False) with server: s = test_wrap_socket(socket.socket(), server_side=False, certfile=CERTFILE, ca_certs=CERTFILE, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLS_CLIENT) s.connect((HOST, server.port)) # helper methods for standardising recv* method signatures def _recv_into(): b = bytearray(b"\0"*100) count = s.recv_into(b) return b[:count] def _recvfrom_into(): b = bytearray(b"\0"*100) count, addr = s.recvfrom_into(b) return b[:count] # (name, method, expect success?, *args, return value func) send_methods = [ ('send', s.send, True, [], len), ('sendto', s.sendto, False, ["some.address"], len), ('sendall', s.sendall, True, [], lambda x: None), ] # (name, method, whether to expect success, *args) recv_methods = [ ('recv', s.recv, True, []), ('recvfrom', s.recvfrom, False, ["some.address"]), ('recv_into', _recv_into, True, []), ('recvfrom_into', _recvfrom_into, False, []), ] data_prefix = "PREFIX_" for (meth_name, send_meth, expect_success, args, ret_val_meth) in send_methods: indata = (data_prefix + meth_name).encode('ascii') try: ret = send_meth(indata, *args) msg = "sending with {}".format(meth_name) self.assertEqual(ret, ret_val_meth(indata), msg=msg) outdata = s.read() if outdata != indata.lower(): self.fail( "While sending with <<{name:s}>> bad data " "<<{outdata:r}>> ({nout:d}) received; " "expected <<{indata:r}>> ({nin:d})\n".format( name=meth_name, outdata=outdata[:20], nout=len(outdata), indata=indata[:20], nin=len(indata) ) ) except ValueError as e: if expect_success: self.fail( "Failed to send with method <<{name:s}>>; " "expected to succeed.\n".format(name=meth_name) ) if not str(e).startswith(meth_name): self.fail( "Method <<{name:s}>> failed with unexpected " "exception message: {exp:s}\n".format( name=meth_name, exp=e ) ) for meth_name, recv_meth, expect_success, args in recv_methods: indata = (data_prefix + meth_name).encode('ascii') try: s.send(indata) outdata = recv_meth(*args) if outdata != indata.lower(): self.fail( "While receiving with <<{name:s}>> bad data " "<<{outdata:r}>> ({nout:d}) received; " "expected <<{indata:r}>> ({nin:d})\n".format( name=meth_name, outdata=outdata[:20], nout=len(outdata), indata=indata[:20], nin=len(indata) ) ) except ValueError as e: if expect_success: self.fail( "Failed to receive with method <<{name:s}>>; " "expected to succeed.\n".format(name=meth_name) ) if not str(e).startswith(meth_name): self.fail( "Method <<{name:s}>> failed with unexpected " "exception message: {exp:s}\n".format( name=meth_name, exp=e ) ) # consume data s.read() # read(-1, buffer) is supported, even though read(-1) is not data = b"data" s.send(data) buffer = bytearray(len(data)) self.assertEqual(s.read(-1, buffer), len(data)) self.assertEqual(buffer, data) # sendall accepts bytes-like objects if ctypes is not None: ubyte = ctypes.c_ubyte * len(data) byteslike = ubyte.from_buffer_copy(data) s.sendall(byteslike) self.assertEqual(s.read(), data) # Make sure sendmsg et al are disallowed to avoid # inadvertent disclosure of data and/or corruption # of the encrypted data stream 
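            # (dup(), sendmsg(), recvmsg() and recvmsg_into() would bypass the
            # TLS record layer, so SSLSocket rejects each of them with
            # NotImplementedError, as asserted below.)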
self.assertRaises(NotImplementedError, s.dup) self.assertRaises(NotImplementedError, s.sendmsg, [b"data"]) self.assertRaises(NotImplementedError, s.recvmsg, 100) self.assertRaises(NotImplementedError, s.recvmsg_into, [bytearray(100)]) s.write(b"over\n") self.assertRaises(ValueError, s.recv, -1) self.assertRaises(ValueError, s.read, -1) s.close() def test_recv_zero(self): server = ThreadedEchoServer(CERTFILE) server.__enter__() self.addCleanup(server.__exit__, None, None) s = socket.create_connection((HOST, server.port)) self.addCleanup(s.close) s = test_wrap_socket(s, suppress_ragged_eofs=False) self.addCleanup(s.close) # recv/read(0) should return no data s.send(b"data") self.assertEqual(s.recv(0), b"") self.assertEqual(s.read(0), b"") self.assertEqual(s.read(), b"data") # Should not block if the other end sends no data s.setblocking(False) self.assertEqual(s.recv(0), b"") self.assertEqual(s.recv_into(bytearray()), 0) def test_nonblocking_send(self): server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLS_SERVER, cacerts=CERTFILE, chatty=True, connectionchatty=False) with server: s = test_wrap_socket(socket.socket(), server_side=False, certfile=CERTFILE, ca_certs=CERTFILE, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLS_CLIENT) s.connect((HOST, server.port)) s.setblocking(False) # If we keep sending data, at some point the buffers # will be full and the call will block buf = bytearray(8192) def fill_buffer(): while True: s.send(buf) self.assertRaises((ssl.SSLWantWriteError, ssl.SSLWantReadError), fill_buffer) # Now read all the output and discard it s.setblocking(True) s.close() def test_handshake_timeout(self): # Issue #5103: SSL handshake must respect the socket timeout server = socket.socket(socket.AF_INET) host = "127.0.0.1" port = support.bind_port(server) started = threading.Event() finish = False def serve(): server.listen() started.set() conns = [] while not finish: r, w, e = select.select([server], [], [], 0.1) if server in r: # Let the socket hang around rather than having # it closed by garbage collection. conns.append(server.accept()[0]) for sock in conns: sock.close() t = threading.Thread(target=serve) t.start() started.wait() try: try: c = socket.socket(socket.AF_INET) c.settimeout(0.2) c.connect((host, port)) # Will attempt handshake and time out self.assertRaisesRegex(socket.timeout, "timed out", test_wrap_socket, c) finally: c.close() try: c = socket.socket(socket.AF_INET) c = test_wrap_socket(c) c.settimeout(0.2) # Will attempt handshake and time out self.assertRaisesRegex(socket.timeout, "timed out", c.connect, (host, port)) finally: c.close() finally: finish = True t.join() server.close() def test_server_accept(self): # Issue #16357: accept() on a SSLSocket created through # SSLContext.wrap_socket(). context = ssl.SSLContext(ssl.PROTOCOL_TLS) context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(SIGNING_CA) context.load_cert_chain(SIGNED_CERTFILE) server = socket.socket(socket.AF_INET) host = "127.0.0.1" port = support.bind_port(server) server = context.wrap_socket(server, server_side=True) self.assertTrue(server.server_side) evt = threading.Event() remote = None peer = None def serve(): nonlocal remote, peer server.listen() # Block on the accept and wait on the connection to close. evt.set() remote, peer = server.accept() remote.send(remote.recv(4)) t = threading.Thread(target=serve) t.start() # Client wait until server setup and perform a connect. 
evt.wait() client = context.wrap_socket(socket.socket()) client.connect((host, port)) client.send(b'data') client.recv() client_addr = client.getsockname() client.close() t.join() remote.close() server.close() # Sanity checks. self.assertIsInstance(remote, ssl.SSLSocket) self.assertEqual(peer, client_addr) def test_getpeercert_enotconn(self): context = ssl.SSLContext(ssl.PROTOCOL_TLS) with context.wrap_socket(socket.socket()) as sock: with self.assertRaises(OSError) as cm: sock.getpeercert() self.assertEqual(cm.exception.errno, errno.ENOTCONN) def test_do_handshake_enotconn(self): context = ssl.SSLContext(ssl.PROTOCOL_TLS) with context.wrap_socket(socket.socket()) as sock: with self.assertRaises(OSError) as cm: sock.do_handshake() self.assertEqual(cm.exception.errno, errno.ENOTCONN) def test_no_shared_ciphers(self): client_context, server_context, hostname = testing_context() # OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test client_context.options |= ssl.OP_NO_TLSv1_3 # Force different suites on client and server client_context.set_ciphers("AES128") server_context.set_ciphers("AES256") with ThreadedEchoServer(context=server_context) as server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: with self.assertRaises(OSError): s.connect((HOST, server.port)) self.assertIn("no shared cipher", server.conn_errors[0]) def test_version_basic(self): """ Basic tests for SSLSocket.version(). More tests are done in the test_protocol_*() methods. """ context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) context.check_hostname = False context.verify_mode = ssl.CERT_NONE with ThreadedEchoServer(CERTFILE, ssl_version=ssl.PROTOCOL_TLS_SERVER, chatty=False) as server: with context.wrap_socket(socket.socket()) as s: self.assertIs(s.version(), None) self.assertIs(s._sslobj, None) s.connect((HOST, server.port)) if IS_OPENSSL_1_1_1 and has_tls_version('TLSv1_3'): self.assertEqual(s.version(), 'TLSv1.3') elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2): self.assertEqual(s.version(), 'TLSv1.2') else: # 0.9.8 to 1.0.1 self.assertIn(s.version(), ('TLSv1', 'TLSv1.2')) self.assertIs(s._sslobj, None) self.assertIs(s.version(), None) @requires_tls_version('TLSv1_3') def test_tls1_3(self): context = ssl.SSLContext(ssl.PROTOCOL_TLS) context.load_cert_chain(CERTFILE) context.options |= ( ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2 ) with ThreadedEchoServer(context=context) as server: with context.wrap_socket(socket.socket()) as s: s.connect((HOST, server.port)) self.assertIn(s.cipher()[0], { 'TLS_AES_256_GCM_SHA384', 'TLS_CHACHA20_POLY1305_SHA256', 'TLS_AES_128_GCM_SHA256', }) self.assertEqual(s.version(), 'TLSv1.3') @requires_minimum_version @requires_tls_version('TLSv1_2') def test_min_max_version_tlsv1_2(self): client_context, server_context, hostname = testing_context() # client TLSv1.0 to 1.2 client_context.minimum_version = ssl.TLSVersion.TLSv1 client_context.maximum_version = ssl.TLSVersion.TLSv1_2 # server only TLSv1.2 server_context.minimum_version = ssl.TLSVersion.TLSv1_2 server_context.maximum_version = ssl.TLSVersion.TLSv1_2 with ThreadedEchoServer(context=server_context) as server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) self.assertEqual(s.version(), 'TLSv1.2') @requires_minimum_version @requires_tls_version('TLSv1_1') def test_min_max_version_tlsv1_1(self): client_context, server_context, hostname = testing_context() # client 1.0 to 1.2, server 1.0 to 1.1 client_context.minimum_version = 
ssl.TLSVersion.TLSv1 client_context.maximum_version = ssl.TLSVersion.TLSv1_2 server_context.minimum_version = ssl.TLSVersion.TLSv1 server_context.maximum_version = ssl.TLSVersion.TLSv1_1 with ThreadedEchoServer(context=server_context) as server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) self.assertEqual(s.version(), 'TLSv1.1') @requires_minimum_version @requires_tls_version('TLSv1_2') def test_min_max_version_mismatch(self): client_context, server_context, hostname = testing_context() # client 1.0, server 1.2 (mismatch) server_context.maximum_version = ssl.TLSVersion.TLSv1_2 server_context.minimum_version = ssl.TLSVersion.TLSv1_2 client_context.maximum_version = ssl.TLSVersion.TLSv1 client_context.minimum_version = ssl.TLSVersion.TLSv1 with ThreadedEchoServer(context=server_context) as server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: with self.assertRaises(ssl.SSLError) as e: s.connect((HOST, server.port)) self.assertIn("alert", str(e.exception)) @requires_minimum_version @requires_tls_version('SSLv3') def test_min_max_version_sslv3(self): client_context, server_context, hostname = testing_context() server_context.minimum_version = ssl.TLSVersion.SSLv3 client_context.minimum_version = ssl.TLSVersion.SSLv3 client_context.maximum_version = ssl.TLSVersion.SSLv3 with ThreadedEchoServer(context=server_context) as server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) self.assertEqual(s.version(), 'SSLv3') @unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL") def test_default_ecdh_curve(self): # Issue #21015: elliptic curve-based Diffie Hellman key exchange # should be enabled by default on SSL contexts. context = ssl.SSLContext(ssl.PROTOCOL_TLS) context.load_cert_chain(CERTFILE) # TLSv1.3 defaults to PFS key agreement and no longer has KEA in # cipher name. context.options |= ssl.OP_NO_TLSv1_3 # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled # explicitly using the 'ECCdraft' cipher alias. Otherwise, # our default cipher list should prefer ECDH-based ciphers # automatically. 
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0): context.set_ciphers("ECCdraft:ECDH") with ThreadedEchoServer(context=context) as server: with context.wrap_socket(socket.socket()) as s: s.connect((HOST, server.port)) self.assertIn("ECDH", s.cipher()[0]) @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES, "'tls-unique' channel binding not available") def test_tls_unique_channel_binding(self): """Test tls-unique channel binding.""" if support.verbose: sys.stdout.write("\n") client_context, server_context, hostname = testing_context() server = ThreadedEchoServer(context=server_context, chatty=True, connectionchatty=False) with server: with client_context.wrap_socket( socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) # get the data cb_data = s.get_channel_binding("tls-unique") if support.verbose: sys.stdout.write( " got channel binding data: {0!r}\n".format(cb_data)) # check if it is sane self.assertIsNotNone(cb_data) if s.version() == 'TLSv1.3': self.assertEqual(len(cb_data), 48) else: self.assertEqual(len(cb_data), 12) # True for TLSv1 # and compare with the peers version s.write(b"CB tls-unique\n") peer_data_repr = s.read().strip() self.assertEqual(peer_data_repr, repr(cb_data).encode("us-ascii")) # now, again with client_context.wrap_socket( socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) new_cb_data = s.get_channel_binding("tls-unique") if support.verbose: sys.stdout.write( "got another channel binding data: {0!r}\n".format( new_cb_data) ) # is it really unique self.assertNotEqual(cb_data, new_cb_data) self.assertIsNotNone(cb_data) if s.version() == 'TLSv1.3': self.assertEqual(len(cb_data), 48) else: self.assertEqual(len(cb_data), 12) # True for TLSv1 s.write(b"CB tls-unique\n") peer_data_repr = s.read().strip() self.assertEqual(peer_data_repr, repr(new_cb_data).encode("us-ascii")) def test_compression(self): client_context, server_context, hostname = testing_context() stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, sni_name=hostname) if support.verbose: sys.stdout.write(" got compression: {!r}\n".format(stats['compression'])) self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' }) @unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'), "ssl.OP_NO_COMPRESSION needed for this test") def test_compression_disabled(self): client_context, server_context, hostname = testing_context() client_context.options |= ssl.OP_NO_COMPRESSION server_context.options |= ssl.OP_NO_COMPRESSION stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, sni_name=hostname) self.assertIs(stats['compression'], None) def test_dh_params(self): # Check we can get a connection with ephemeral Diffie-Hellman client_context, server_context, hostname = testing_context() # test scenario needs TLS <= 1.2 client_context.options |= ssl.OP_NO_TLSv1_3 server_context.load_dh_params(DHFILE) server_context.set_ciphers("kEDH") server_context.options |= ssl.OP_NO_TLSv1_3 stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, sni_name=hostname) cipher = stats["cipher"][0] parts = cipher.split("-") if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts: self.fail("Non-DH cipher: " + cipher[0]) @unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support") @unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1") def test_ecdh_curve(self): # server secp384r1, client auto client_context, server_context, hostname 
= testing_context() server_context.set_ecdh_curve("secp384r1") server_context.set_ciphers("ECDHE:!eNULL:!aNULL") server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, sni_name=hostname) # server auto, client secp384r1 client_context, server_context, hostname = testing_context() client_context.set_ecdh_curve("secp384r1") server_context.set_ciphers("ECDHE:!eNULL:!aNULL") server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, sni_name=hostname) # server / client curve mismatch client_context, server_context, hostname = testing_context() client_context.set_ecdh_curve("prime256v1") server_context.set_ecdh_curve("secp384r1") server_context.set_ciphers("ECDHE:!eNULL:!aNULL") server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 try: stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, sni_name=hostname) except ssl.SSLError: pass else: # OpenSSL 1.0.2 does not fail although it should. if IS_OPENSSL_1_1_0: self.fail("mismatch curve did not fail") def test_selected_alpn_protocol(self): # selected_alpn_protocol() is None unless ALPN is used. client_context, server_context, hostname = testing_context() stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, sni_name=hostname) self.assertIs(stats['client_alpn_protocol'], None) @unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required") def test_selected_alpn_protocol_if_server_uses_alpn(self): # selected_alpn_protocol() is None unless ALPN is used by the client. client_context, server_context, hostname = testing_context() server_context.set_alpn_protocols(['foo', 'bar']) stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, sni_name=hostname) self.assertIs(stats['client_alpn_protocol'], None) @unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test") def test_alpn_protocols(self): server_protocols = ['foo', 'bar', 'milkshake'] protocol_tests = [ (['foo', 'bar'], 'foo'), (['bar', 'foo'], 'foo'), (['milkshake'], 'milkshake'), (['http/3.0', 'http/4.0'], None) ] for client_protocols, expected in protocol_tests: client_context, server_context, hostname = testing_context() server_context.set_alpn_protocols(server_protocols) client_context.set_alpn_protocols(client_protocols) try: stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, sni_name=hostname) except ssl.SSLError as e: stats = e if (expected is None and IS_OPENSSL_1_1_0 and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)): # OpenSSL 1.1.0 to 1.1.0e raises handshake error self.assertIsInstance(stats, ssl.SSLError) else: msg = "failed trying %s (s) and %s (c).\n" \ "was expecting %s, but got %%s from the %%s" \ % (str(server_protocols), str(client_protocols), str(expected)) client_result = stats['client_alpn_protocol'] self.assertEqual(client_result, expected, msg % (client_result, "client")) server_result = stats['server_alpn_protocols'][-1] \ if len(stats['server_alpn_protocols']) else 'nothing' self.assertEqual(server_result, expected, msg % (server_result, "server")) def test_selected_npn_protocol(self): # selected_npn_protocol() is None unless NPN is used client_context, server_context, hostname = testing_context() stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, 
sni_name=hostname) self.assertIs(stats['client_npn_protocol'], None) @unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test") def test_npn_protocols(self): server_protocols = ['http/1.1', 'spdy/2'] protocol_tests = [ (['http/1.1', 'spdy/2'], 'http/1.1'), (['spdy/2', 'http/1.1'], 'http/1.1'), (['spdy/2', 'test'], 'spdy/2'), (['abc', 'def'], 'abc') ] for client_protocols, expected in protocol_tests: client_context, server_context, hostname = testing_context() server_context.set_npn_protocols(server_protocols) client_context.set_npn_protocols(client_protocols) stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, sni_name=hostname) msg = "failed trying %s (s) and %s (c).\n" \ "was expecting %s, but got %%s from the %%s" \ % (str(server_protocols), str(client_protocols), str(expected)) client_result = stats['client_npn_protocol'] self.assertEqual(client_result, expected, msg % (client_result, "client")) server_result = stats['server_npn_protocols'][-1] \ if len(stats['server_npn_protocols']) else 'nothing' self.assertEqual(server_result, expected, msg % (server_result, "server")) def sni_contexts(self): server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) server_context.load_cert_chain(SIGNED_CERTFILE) other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) other_context.load_cert_chain(SIGNED_CERTFILE2) client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) client_context.load_verify_locations(SIGNING_CA) return server_context, other_context, client_context def check_common_name(self, stats, name): cert = stats['peercert'] self.assertIn((('commonName', name),), cert['subject']) @needs_sni def test_sni_callback(self): calls = [] server_context, other_context, client_context = self.sni_contexts() client_context.check_hostname = False def servername_cb(ssl_sock, server_name, initial_context): calls.append((server_name, initial_context)) if server_name is not None: ssl_sock.context = other_context server_context.set_servername_callback(servername_cb) stats = server_params_test(client_context, server_context, chatty=True, sni_name='supermessage') # The hostname was fetched properly, and the certificate was # changed for the connection. 
self.assertEqual(calls, [("supermessage", server_context)]) # CERTFILE4 was selected self.check_common_name(stats, 'fakehostname') calls = [] # The callback is called with server_name=None stats = server_params_test(client_context, server_context, chatty=True, sni_name=None) self.assertEqual(calls, [(None, server_context)]) self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME) # Check disabling the callback calls = [] server_context.set_servername_callback(None) stats = server_params_test(client_context, server_context, chatty=True, sni_name='notfunny') # Certificate didn't change self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME) self.assertEqual(calls, []) @needs_sni def test_sni_callback_alert(self): # Returning a TLS alert is reflected to the connecting client server_context, other_context, client_context = self.sni_contexts() def cb_returning_alert(ssl_sock, server_name, initial_context): return ssl.ALERT_DESCRIPTION_ACCESS_DENIED server_context.set_servername_callback(cb_returning_alert) with self.assertRaises(ssl.SSLError) as cm: stats = server_params_test(client_context, server_context, chatty=False, sni_name='supermessage') self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED') @needs_sni def test_sni_callback_raising(self): # Raising fails the connection with a TLS handshake failure alert. server_context, other_context, client_context = self.sni_contexts() def cb_raising(ssl_sock, server_name, initial_context): 1/0 server_context.set_servername_callback(cb_raising) with support.catch_unraisable_exception() as catch: with self.assertRaises(ssl.SSLError) as cm: stats = server_params_test(client_context, server_context, chatty=False, sni_name='supermessage') self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE') self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError) @needs_sni def test_sni_callback_wrong_return_type(self): # Returning the wrong return type terminates the TLS connection # with an internal error alert. 
server_context, other_context, client_context = self.sni_contexts() def cb_wrong_return_type(ssl_sock, server_name, initial_context): return "foo" server_context.set_servername_callback(cb_wrong_return_type) with support.catch_unraisable_exception() as catch: with self.assertRaises(ssl.SSLError) as cm: stats = server_params_test(client_context, server_context, chatty=False, sni_name='supermessage') self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR') self.assertEqual(catch.unraisable.exc_type, TypeError) def test_shared_ciphers(self): client_context, server_context, hostname = testing_context() client_context.set_ciphers("AES128:AES256") server_context.set_ciphers("AES256") expected_algs = [ "AES256", "AES-256", # TLS 1.3 ciphers are always enabled "TLS_CHACHA20", "TLS_AES", ] stats = server_params_test(client_context, server_context, sni_name=hostname) ciphers = stats['server_shared_ciphers'][0] self.assertGreater(len(ciphers), 0) for name, tls_version, bits in ciphers: if not any(alg in name for alg in expected_algs): self.fail(name) def test_read_write_after_close_raises_valuerror(self): client_context, server_context, hostname = testing_context() server = ThreadedEchoServer(context=server_context, chatty=False) with server: s = client_context.wrap_socket(socket.socket(), server_hostname=hostname) s.connect((HOST, server.port)) s.close() self.assertRaises(ValueError, s.read, 1024) self.assertRaises(ValueError, s.write, b'hello') def test_sendfile(self): TEST_DATA = b"x" * 512 with open(support.TESTFN, 'wb') as f: f.write(TEST_DATA) self.addCleanup(support.unlink, support.TESTFN) context = ssl.SSLContext(ssl.PROTOCOL_TLS) context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(SIGNING_CA) context.load_cert_chain(SIGNED_CERTFILE) server = ThreadedEchoServer(context=context, chatty=False) with server: with context.wrap_socket(socket.socket()) as s: s.connect((HOST, server.port)) with open(support.TESTFN, 'rb') as file: s.sendfile(file) self.assertEqual(s.recv(1024), TEST_DATA) def test_session(self): client_context, server_context, hostname = testing_context() # TODO: sessions aren't compatible with TLSv1.3 yet client_context.options |= ssl.OP_NO_TLSv1_3 # first connection without session stats = server_params_test(client_context, server_context, sni_name=hostname) session = stats['session'] self.assertTrue(session.id) self.assertGreater(session.time, 0) self.assertGreater(session.timeout, 0) self.assertTrue(session.has_ticket) if ssl.OPENSSL_VERSION_INFO > (1, 0, 1): self.assertGreater(session.ticket_lifetime_hint, 0) self.assertFalse(stats['session_reused']) sess_stat = server_context.session_stats() self.assertEqual(sess_stat['accept'], 1) self.assertEqual(sess_stat['hits'], 0) # reuse session stats = server_params_test(client_context, server_context, session=session, sni_name=hostname) sess_stat = server_context.session_stats() self.assertEqual(sess_stat['accept'], 2) self.assertEqual(sess_stat['hits'], 1) self.assertTrue(stats['session_reused']) session2 = stats['session'] self.assertEqual(session2.id, session.id) self.assertEqual(session2, session) self.assertIsNot(session2, session) self.assertGreaterEqual(session2.time, session.time) self.assertGreaterEqual(session2.timeout, session.timeout) # another one without session stats = server_params_test(client_context, server_context, sni_name=hostname) self.assertFalse(stats['session_reused']) session3 = stats['session'] self.assertNotEqual(session3.id, session.id) self.assertNotEqual(session3, session) 
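# (Descriptive note: the session_stats() counters checked next are
#  cumulative for the server context -- 'accept' grows by one for each
#  completed handshake in this test, while 'hits' only grows when a
#  stored session is actually reused.)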
sess_stat = server_context.session_stats() self.assertEqual(sess_stat['accept'], 3) self.assertEqual(sess_stat['hits'], 1) # reuse session again stats = server_params_test(client_context, server_context, session=session, sni_name=hostname) self.assertTrue(stats['session_reused']) session4 = stats['session'] self.assertEqual(session4.id, session.id) self.assertEqual(session4, session) self.assertGreaterEqual(session4.time, session.time) self.assertGreaterEqual(session4.timeout, session.timeout) sess_stat = server_context.session_stats() self.assertEqual(sess_stat['accept'], 4) self.assertEqual(sess_stat['hits'], 2) def test_session_handling(self): client_context, server_context, hostname = testing_context() client_context2, _, _ = testing_context() # TODO: session reuse does not work with TLSv1.3 client_context.options |= ssl.OP_NO_TLSv1_3 client_context2.options |= ssl.OP_NO_TLSv1_3 server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: # session is None before handshake self.assertEqual(s.session, None) self.assertEqual(s.session_reused, None) s.connect((HOST, server.port)) session = s.session self.assertTrue(session) with self.assertRaises(TypeError) as e: s.session = object self.assertEqual(str(e.exception), 'Value is not a SSLSession.') with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) # cannot set session after handshake with self.assertRaises(ValueError) as e: s.session = session self.assertEqual(str(e.exception), 'Cannot set session after handshake.') with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: # can set session before handshake and before the # connection was established s.session = session s.connect((HOST, server.port)) self.assertEqual(s.session.id, session.id) self.assertEqual(s.session, session) self.assertEqual(s.session_reused, True) with client_context2.wrap_socket(socket.socket(), server_hostname=hostname) as s: # cannot re-use session with a different SSLContext with self.assertRaises(ValueError) as e: s.session = session s.connect((HOST, server.port)) self.assertEqual(str(e.exception), 'Session refers to a different SSLContext.') @unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3") class TestPostHandshakeAuth(unittest.TestCase): def test_pha_setter(self): protocols = [ ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT ] for protocol in protocols: ctx = ssl.SSLContext(protocol) self.assertEqual(ctx.post_handshake_auth, False) ctx.post_handshake_auth = True self.assertEqual(ctx.post_handshake_auth, True) ctx.verify_mode = ssl.CERT_REQUIRED self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertEqual(ctx.post_handshake_auth, True) ctx.post_handshake_auth = False self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertEqual(ctx.post_handshake_auth, False) ctx.verify_mode = ssl.CERT_OPTIONAL ctx.post_handshake_auth = True self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL) self.assertEqual(ctx.post_handshake_auth, True) def test_pha_required(self): client_context, server_context, hostname = testing_context() server_context.post_handshake_auth = True server_context.verify_mode = ssl.CERT_REQUIRED client_context.post_handshake_auth = True client_context.load_cert_chain(SIGNED_CERTFILE) server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), 
server_hostname=hostname) as s: s.connect((HOST, server.port)) s.write(b'HASCERT') self.assertEqual(s.recv(1024), b'FALSE\n') s.write(b'PHA') self.assertEqual(s.recv(1024), b'OK\n') s.write(b'HASCERT') self.assertEqual(s.recv(1024), b'TRUE\n') # PHA method just returns true when cert is already available s.write(b'PHA') self.assertEqual(s.recv(1024), b'OK\n') s.write(b'GETCERT') cert_text = s.recv(4096).decode('us-ascii') self.assertIn('Python Software Foundation CA', cert_text) def test_pha_required_nocert(self): client_context, server_context, hostname = testing_context() server_context.post_handshake_auth = True server_context.verify_mode = ssl.CERT_REQUIRED client_context.post_handshake_auth = True # Ignore expected SSLError in ConnectionHandler of ThreadedEchoServer # (it is only raised sometimes on Windows) with support.catch_threading_exception() as cm: server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) s.write(b'PHA') # receive CertificateRequest self.assertEqual(s.recv(1024), b'OK\n') # send empty Certificate + Finish s.write(b'HASCERT') # receive alert with self.assertRaisesRegex( ssl.SSLError, 'tlsv13 alert certificate required'): s.recv(1024) def test_pha_optional(self): if support.verbose: sys.stdout.write("\n") client_context, server_context, hostname = testing_context() server_context.post_handshake_auth = True server_context.verify_mode = ssl.CERT_REQUIRED client_context.post_handshake_auth = True client_context.load_cert_chain(SIGNED_CERTFILE) # check CERT_OPTIONAL server_context.verify_mode = ssl.CERT_OPTIONAL server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) s.write(b'HASCERT') self.assertEqual(s.recv(1024), b'FALSE\n') s.write(b'PHA') self.assertEqual(s.recv(1024), b'OK\n') s.write(b'HASCERT') self.assertEqual(s.recv(1024), b'TRUE\n') def test_pha_optional_nocert(self): if support.verbose: sys.stdout.write("\n") client_context, server_context, hostname = testing_context() server_context.post_handshake_auth = True server_context.verify_mode = ssl.CERT_OPTIONAL client_context.post_handshake_auth = True server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) s.write(b'HASCERT') self.assertEqual(s.recv(1024), b'FALSE\n') s.write(b'PHA') self.assertEqual(s.recv(1024), b'OK\n') # optional doesn't fail when client does not have a cert s.write(b'HASCERT') self.assertEqual(s.recv(1024), b'FALSE\n') def test_pha_no_pha_client(self): client_context, server_context, hostname = testing_context() server_context.post_handshake_auth = True server_context.verify_mode = ssl.CERT_REQUIRED client_context.load_cert_chain(SIGNED_CERTFILE) server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) with self.assertRaisesRegex(ssl.SSLError, 'not server'): s.verify_client_post_handshake() s.write(b'PHA') self.assertIn(b'extension not received', s.recv(1024)) def test_pha_no_pha_server(self): # server doesn't have PHA enabled, cert is requested in handshake client_context, server_context, hostname = testing_context() server_context.verify_mode = 
ssl.CERT_REQUIRED client_context.post_handshake_auth = True client_context.load_cert_chain(SIGNED_CERTFILE) server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) s.write(b'HASCERT') self.assertEqual(s.recv(1024), b'TRUE\n') # PHA doesn't fail if there is already a cert s.write(b'PHA') self.assertEqual(s.recv(1024), b'OK\n') s.write(b'HASCERT') self.assertEqual(s.recv(1024), b'TRUE\n') def test_pha_not_tls13(self): # TLS 1.2 client_context, server_context, hostname = testing_context() server_context.verify_mode = ssl.CERT_REQUIRED client_context.maximum_version = ssl.TLSVersion.TLSv1_2 client_context.post_handshake_auth = True client_context.load_cert_chain(SIGNED_CERTFILE) server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) # PHA fails for TLS != 1.3 s.write(b'PHA') self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024)) def test_bpo37428_pha_cert_none(self): # verify that post_handshake_auth does not implicitly enable cert # validation. hostname = SIGNED_CERTFILE_HOSTNAME client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) client_context.post_handshake_auth = True client_context.load_cert_chain(SIGNED_CERTFILE) # no cert validation and CA on client side client_context.check_hostname = False client_context.verify_mode = ssl.CERT_NONE server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) server_context.load_cert_chain(SIGNED_CERTFILE) server_context.load_verify_locations(SIGNING_CA) server_context.post_handshake_auth = True server_context.verify_mode = ssl.CERT_REQUIRED server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) s.write(b'HASCERT') self.assertEqual(s.recv(1024), b'FALSE\n') s.write(b'PHA') self.assertEqual(s.recv(1024), b'OK\n') s.write(b'HASCERT') self.assertEqual(s.recv(1024), b'TRUE\n') # server cert has not been validated self.assertEqual(s.getpeercert(), {}) HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename') requires_keylog = unittest.skipUnless( HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback') class TestSSLDebug(unittest.TestCase): def keylog_lines(self, fname=support.TESTFN): with open(fname) as f: return len(list(f)) @requires_keylog def test_keylog_defaults(self): self.addCleanup(support.unlink, support.TESTFN) ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.keylog_filename, None) self.assertFalse(os.path.isfile(support.TESTFN)) ctx.keylog_filename = support.TESTFN self.assertEqual(ctx.keylog_filename, support.TESTFN) self.assertTrue(os.path.isfile(support.TESTFN)) self.assertEqual(self.keylog_lines(), 1) ctx.keylog_filename = None self.assertEqual(ctx.keylog_filename, None) with self.assertRaises((IsADirectoryError, PermissionError)): # Windows raises PermissionError ctx.keylog_filename = os.path.dirname( os.path.abspath(support.TESTFN)) with self.assertRaises(TypeError): ctx.keylog_filename = 1 @requires_keylog def test_keylog_filename(self): self.addCleanup(support.unlink, support.TESTFN) client_context, server_context, hostname = testing_context() client_context.keylog_filename = support.TESTFN server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), 
server_hostname=hostname) as s: s.connect((HOST, server.port)) # header, 5 lines for TLS 1.3 self.assertEqual(self.keylog_lines(), 6) client_context.keylog_filename = None server_context.keylog_filename = support.TESTFN server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) self.assertGreaterEqual(self.keylog_lines(), 11) client_context.keylog_filename = support.TESTFN server_context.keylog_filename = support.TESTFN server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) self.assertGreaterEqual(self.keylog_lines(), 21) client_context.keylog_filename = None server_context.keylog_filename = None @requires_keylog @unittest.skipIf(sys.flags.ignore_environment, "test is not compatible with ignore_environment") def test_keylog_env(self): self.addCleanup(support.unlink, support.TESTFN) with unittest.mock.patch.dict(os.environ): os.environ['SSLKEYLOGFILE'] = support.TESTFN self.assertEqual(os.environ['SSLKEYLOGFILE'], support.TESTFN) ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.keylog_filename, None) ctx = ssl.create_default_context() self.assertEqual(ctx.keylog_filename, support.TESTFN) ctx = ssl._create_stdlib_context() self.assertEqual(ctx.keylog_filename, support.TESTFN) def test_msg_callback(self): client_context, server_context, hostname = testing_context() def msg_cb(conn, direction, version, content_type, msg_type, data): pass self.assertIs(client_context._msg_callback, None) client_context._msg_callback = msg_cb self.assertIs(client_context._msg_callback, msg_cb) with self.assertRaises(TypeError): client_context._msg_callback = object() def test_msg_callback_tls12(self): client_context, server_context, hostname = testing_context() client_context.options |= ssl.OP_NO_TLSv1_3 msg = [] def msg_cb(conn, direction, version, content_type, msg_type, data): self.assertIsInstance(conn, ssl.SSLSocket) self.assertIsInstance(data, bytes) self.assertIn(direction, {'read', 'write'}) msg.append((direction, version, content_type, msg_type)) client_context._msg_callback = msg_cb server = ThreadedEchoServer(context=server_context, chatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=hostname) as s: s.connect((HOST, server.port)) self.assertIn( ("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE, _TLSMessageType.SERVER_KEY_EXCHANGE), msg ) self.assertIn( ("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC, _TLSMessageType.CHANGE_CIPHER_SPEC), msg ) def test_main(verbose=False): if support.verbose: plats = { 'Mac': platform.mac_ver, 'Windows': platform.win32_ver, } for name, func in plats.items(): plat = func() if plat and plat[0]: plat = '%s %r' % (name, plat) break else: plat = repr(platform.platform()) print("test_ssl: testing with %r %r" % (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO)) print(" under %s" % plat) print(" HAS_SNI = %r" % ssl.HAS_SNI) print(" OP_ALL = 0x%8x" % ssl.OP_ALL) try: print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1) except AttributeError: pass for filename in [ CERTFILE, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: if not os.path.exists(filename): raise support.TestFailed("Can't read certificate file %r" % filename) tests = [ ContextTests, 
BasicSocketTests, SSLErrorTests, MemoryBIOTests, SSLObjectTests, SimpleBackgroundTests, ThreadedTests, TestPostHandshakeAuth, TestSSLDebug ] if support.is_resource_enabled('network'): tests.append(NetworkedTests) thread_info = support.threading_setup() try: support.run_unittest(*tests) finally: support.threading_cleanup(*thread_info) if __name__ == "__main__": test_main()
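The threaded tests above all follow the same shape: build a client SSLContext and a server SSLContext, point the client at a loopback echo server, and assert on what the handshake negotiated. As a rough, standalone sketch of that pairing (not part of test_ssl.py; the certificate and CA paths below are placeholders), a minimal version looks roughly like this:

import socket
import ssl

def make_contexts(certfile="server.pem", cafile="ca.pem"):
    # Server side: present a certificate chain to connecting clients.
    server_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_ctx.load_cert_chain(certfile)
    # Client side: PROTOCOL_TLS_CLIENT enables CERT_REQUIRED and
    # check_hostname by default; just point it at the issuing CA.
    client_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_ctx.load_verify_locations(cafile)
    return client_ctx, server_ctx

def echo_round_trip(host, port, client_ctx, hostname):
    # wrap_socket() defers the handshake until connect() completes,
    # the same pattern the tests above use against ThreadedEchoServer.
    with client_ctx.wrap_socket(socket.socket(),
                                server_hostname=hostname) as s:
        s.connect((host, port))
        s.sendall(b"ping")
        return s.recv(1024), s.version(), s.cipher()

Because PROTOCOL_TLS_CLIENT turns on certificate and hostname verification by default, tests that want an unverified connection (for example test_version_basic above) have to disable check_hostname and set verify_mode to CERT_NONE explicitly.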
handlers.py
# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose and without fee is hereby granted, # provided that the above copyright notice appear in all copies and that # both that copyright notice and this permission notice appear in # supporting documentation, and that the name of Vinay Sajip # not be used in advertising or publicity pertaining to distribution # of the software without specific, written prior permission. # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ Additional handlers for the logging package for Python. The core package is based on PEP 282 and comments thereto in comp.lang.python. Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved. To use, simply 'import logging.handlers' and log away! """ import logging, socket, os, pickle, struct, time, re from stat import ST_DEV, ST_INO, ST_MTIME import queue import threading import copy # # Some constants... # DEFAULT_TCP_LOGGING_PORT = 9020 DEFAULT_UDP_LOGGING_PORT = 9021 DEFAULT_HTTP_LOGGING_PORT = 9022 DEFAULT_SOAP_LOGGING_PORT = 9023 SYSLOG_UDP_PORT = 514 SYSLOG_TCP_PORT = 514 _MIDNIGHT = 24 * 60 * 60 # number of seconds in a day class BaseRotatingHandler(logging.FileHandler): """ Base class for handlers that rotate log files at a certain point. Not meant to be instantiated directly. Instead, use RotatingFileHandler or TimedRotatingFileHandler. """ namer = None rotator = None def __init__(self, filename, mode, encoding=None, delay=False, errors=None): """ Use the specified filename for streamed logging """ logging.FileHandler.__init__(self, filename, mode=mode, encoding=encoding, delay=delay, errors=errors) self.mode = mode self.encoding = encoding self.errors = errors def emit(self, record): """ Emit a record. Output the record to the file, catering for rollover as described in doRollover(). """ try: if self.shouldRollover(record): self.doRollover() logging.FileHandler.emit(self, record) except Exception: self.handleError(record) def rotation_filename(self, default_name): """ Modify the filename of a log file when rotating. This is provided so that a custom filename can be provided. The default implementation calls the 'namer' attribute of the handler, if it's callable, passing the default name to it. If the attribute isn't callable (the default is None), the name is returned unchanged. :param default_name: The default name for the log file. """ if not callable(self.namer): result = default_name else: result = self.namer(default_name) return result def rotate(self, source, dest): """ When rotating, rotate the current log. The default implementation calls the 'rotator' attribute of the handler, if it's callable, passing the source and dest arguments to it. If the attribute isn't callable (the default is None), the source is simply renamed to the destination. :param source: The source filename. This is normally the base filename, e.g. 'test.log' :param dest: The destination filename. This is normally what the source is rotated to, e.g. 'test.log.1'. 
""" if not callable(self.rotator): # Issue 18940: A file may not have been created if delay is True. if os.path.exists(source): os.rename(source, dest) else: self.rotator(source, dest) class RotatingFileHandler(BaseRotatingHandler): """ Handler for logging to a set of files, which switches from one file to the next when the current file reaches a certain size. """ def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False, errors=None): """ Open the specified file and use it as the stream for logging. By default, the file grows indefinitely. You can specify particular values of maxBytes and backupCount to allow the file to rollover at a predetermined size. Rollover occurs whenever the current log file is nearly maxBytes in length. If backupCount is >= 1, the system will successively create new files with the same pathname as the base file, but with extensions ".1", ".2" etc. appended to it. For example, with a backupCount of 5 and a base file name of "app.log", you would get "app.log", "app.log.1", "app.log.2", ... through to "app.log.5". The file being written to is always "app.log" - when it gets filled up, it is closed and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. exist, then they are renamed to "app.log.2", "app.log.3" etc. respectively. If maxBytes is zero, rollover never occurs. """ # If rotation/rollover is wanted, it doesn't make sense to use another # mode. If for example 'w' were specified, then if there were multiple # runs of the calling application, the logs from previous runs would be # lost if the 'w' is respected, because the log file would be truncated # on each run. if maxBytes > 0: mode = 'a' BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding, delay=delay, errors=errors) self.maxBytes = maxBytes self.backupCount = backupCount def doRollover(self): """ Do a rollover, as described in __init__(). """ if self.stream: self.stream.close() self.stream = None if self.backupCount > 0: for i in range(self.backupCount - 1, 0, -1): sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i)) dfn = self.rotation_filename("%s.%d" % (self.baseFilename, i + 1)) if os.path.exists(sfn): if os.path.exists(dfn): os.remove(dfn) os.rename(sfn, dfn) dfn = self.rotation_filename(self.baseFilename + ".1") if os.path.exists(dfn): os.remove(dfn) self.rotate(self.baseFilename, dfn) if not self.delay: self.stream = self._open() def shouldRollover(self, record): """ Determine if rollover should occur. Basically, see if the supplied record would cause the file to exceed the size limit we have. """ if self.stream is None: # delay was set... self.stream = self._open() if self.maxBytes > 0: # are we rolling over? msg = "%s\n" % self.format(record) self.stream.seek(0, 2) #due to non-posix-compliant Windows feature if self.stream.tell() + len(msg) >= self.maxBytes: return 1 return 0 class TimedRotatingFileHandler(BaseRotatingHandler): """ Handler for logging to a file, rotating the log file at certain timed intervals. If backupCount is > 0, when rollover is done, no more than backupCount files are kept - the oldest ones are deleted. 
""" def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None, errors=None): BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding, delay=delay, errors=errors) self.when = when.upper() self.backupCount = backupCount self.utc = utc self.atTime = atTime # Calculate the real rollover interval, which is just the number of # seconds between rollovers. Also set the filename suffix used when # a rollover occurs. Current 'when' events supported: # S - Seconds # M - Minutes # H - Hours # D - Days # midnight - roll over at midnight # W{0-6} - roll over on a certain day; 0 - Monday # # Case of the 'when' specifier is not important; lower or upper case # will work. if self.when == 'S': self.interval = 1 # one second self.suffix = "%Y-%m-%d_%H-%M-%S" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$" elif self.when == 'M': self.interval = 60 # one minute self.suffix = "%Y-%m-%d_%H-%M" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$" elif self.when == 'H': self.interval = 60 * 60 # one hour self.suffix = "%Y-%m-%d_%H" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$" elif self.when == 'D' or self.when == 'MIDNIGHT': self.interval = 60 * 60 * 24 # one day self.suffix = "%Y-%m-%d" self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" elif self.when.startswith('W'): self.interval = 60 * 60 * 24 * 7 # one week if len(self.when) != 2: raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when) if self.when[1] < '0' or self.when[1] > '6': raise ValueError("Invalid day specified for weekly rollover: %s" % self.when) self.dayOfWeek = int(self.when[1]) self.suffix = "%Y-%m-%d" self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" else: raise ValueError("Invalid rollover interval specified: %s" % self.when) self.extMatch = re.compile(self.extMatch, re.ASCII) self.interval = self.interval * interval # multiply by units requested # The following line added because the filename passed in could be a # path object (see Issue #27493), but self.baseFilename will be a string filename = self.baseFilename if os.path.exists(filename): t = os.stat(filename)[ST_MTIME] else: t = int(time.time()) self.rolloverAt = self.computeRollover(t) def computeRollover(self, currentTime): """ Work out the rollover time based on the specified time. """ result = currentTime + self.interval # If we are rolling over at midnight or weekly, then the interval is already known. # What we need to figure out is WHEN the next interval is. In other words, # if you are rolling over at midnight, then your base interval is 1 day, # but you want to start that one day clock at midnight, not now. So, we # have to fudge the rolloverAt value in order to trigger the first rollover # at the right time. After that, the regular interval will take care of # the rest. Note that this code doesn't care about leap seconds. 
:) if self.when == 'MIDNIGHT' or self.when.startswith('W'): # This could be done with less code, but I wanted it to be clear if self.utc: t = time.gmtime(currentTime) else: t = time.localtime(currentTime) currentHour = t[3] currentMinute = t[4] currentSecond = t[5] currentDay = t[6] # r is the number of seconds left between now and the next rotation if self.atTime is None: rotate_ts = _MIDNIGHT else: rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 + self.atTime.second) r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 + currentSecond) if r < 0: # Rotate time is before the current time (for example when # self.rotateAt is 13:45 and it now 14:15), rotation is # tomorrow. r += _MIDNIGHT currentDay = (currentDay + 1) % 7 result = currentTime + r # If we are rolling over on a certain day, add in the number of days until # the next rollover, but offset by 1 since we just calculated the time # until the next day starts. There are three cases: # Case 1) The day to rollover is today; in this case, do nothing # Case 2) The day to rollover is further in the interval (i.e., today is # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to # next rollover is simply 6 - 2 - 1, or 3. # Case 3) The day to rollover is behind us in the interval (i.e., today # is day 5 (Saturday) and rollover is on day 3 (Thursday). # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the # number of days left in the current week (1) plus the number # of days in the next week until the rollover day (3). # The calculations described in 2) and 3) above need to have a day added. # This is because the above time calculation takes us to midnight on this # day, i.e. the start of the next day. if self.when.startswith('W'): day = currentDay # 0 is Monday if day != self.dayOfWeek: if day < self.dayOfWeek: daysToWait = self.dayOfWeek - day else: daysToWait = 6 - day + self.dayOfWeek + 1 newRolloverAt = result + (daysToWait * (60 * 60 * 24)) if not self.utc: dstNow = t[-1] dstAtRollover = time.localtime(newRolloverAt)[-1] if dstNow != dstAtRollover: if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour addend = -3600 else: # DST bows out before next rollover, so we need to add an hour addend = 3600 newRolloverAt += addend result = newRolloverAt return result def shouldRollover(self, record): """ Determine if rollover should occur. record is not used, as we are just comparing times, but it is needed so the method signatures are the same """ t = int(time.time()) if t >= self.rolloverAt: return 1 return 0 def getFilesToDelete(self): """ Determine the files to delete when rolling over. More specific than the earlier method, which just used glob.glob(). """ dirName, baseName = os.path.split(self.baseFilename) fileNames = os.listdir(dirName) result = [] prefix = baseName + "." plen = len(prefix) for fileName in fileNames: if fileName[:plen] == prefix: suffix = fileName[plen:] if self.extMatch.match(suffix): result.append(os.path.join(dirName, fileName)) if len(result) < self.backupCount: result = [] else: result.sort() result = result[:len(result) - self.backupCount] return result def doRollover(self): """ do a rollover; in this case, a date/time stamp is appended to the filename when the rollover happens. However, you want the file to be named for the start of the interval, not the current time. If there is a backup count, then we have to get a list of matching filenames, sort them and remove the one with the oldest suffix. 
""" if self.stream: self.stream.close() self.stream = None # get the time that this sequence started at and make it a TimeTuple currentTime = int(time.time()) dstNow = time.localtime(currentTime)[-1] t = self.rolloverAt - self.interval if self.utc: timeTuple = time.gmtime(t) else: timeTuple = time.localtime(t) dstThen = timeTuple[-1] if dstNow != dstThen: if dstNow: addend = 3600 else: addend = -3600 timeTuple = time.localtime(t + addend) dfn = self.rotation_filename(self.baseFilename + "." + time.strftime(self.suffix, timeTuple)) if os.path.exists(dfn): os.remove(dfn) self.rotate(self.baseFilename, dfn) if self.backupCount > 0: for s in self.getFilesToDelete(): os.remove(s) if not self.delay: self.stream = self._open() newRolloverAt = self.computeRollover(currentTime) while newRolloverAt <= currentTime: newRolloverAt = newRolloverAt + self.interval #If DST changes and midnight or weekly rollover, adjust for this. if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc: dstAtRollover = time.localtime(newRolloverAt)[-1] if dstNow != dstAtRollover: if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour addend = -3600 else: # DST bows out before next rollover, so we need to add an hour addend = 3600 newRolloverAt += addend self.rolloverAt = newRolloverAt class WatchedFileHandler(logging.FileHandler): """ A handler for logging to a file, which watches the file to see if it has changed while in use. This can happen because of usage of programs such as newsyslog and logrotate which perform log file rotation. This handler, intended for use under Unix, watches the file to see if it has changed since the last emit. (A file has changed if its device or inode have changed.) If it has changed, the old file stream is closed, and the file opened to get a new stream. This handler is not appropriate for use under Windows, because under Windows open files cannot be moved or renamed - logging opens the files with exclusive locks - and so there is no need for such a handler. Furthermore, ST_INO is not supported under Windows; stat always returns zero for this value. This handler is based on a suggestion and patch by Chad J. Schroeder. """ def __init__(self, filename, mode='a', encoding=None, delay=False, errors=None): logging.FileHandler.__init__(self, filename, mode=mode, encoding=encoding, delay=delay, errors=errors) self.dev, self.ino = -1, -1 self._statstream() def _statstream(self): if self.stream: sres = os.fstat(self.stream.fileno()) self.dev, self.ino = sres[ST_DEV], sres[ST_INO] def reopenIfNeeded(self): """ Reopen log file if needed. Checks if the underlying file has changed, and if it has, close the old stream and reopen the file to get the current stream. """ # Reduce the chance of race conditions by stat'ing by path only # once and then fstat'ing our new fd if we opened a new log stream. # See issue #14632: Thanks to John Mulligan for the problem report # and patch. try: # stat the file by path, checking for existence sres = os.stat(self.baseFilename) except FileNotFoundError: sres = None # compare file system stat with that of our stream file handle if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino: if self.stream is not None: # we have an open file handle, clean it up self.stream.flush() self.stream.close() self.stream = None # See Issue #21742: _open () might fail. # open a new file handle and get new stat info from that fd self.stream = self._open() self._statstream() def emit(self, record): """ Emit a record. 
If underlying file has changed, reopen the file before emitting the record to it. """ self.reopenIfNeeded() logging.FileHandler.emit(self, record) class SocketHandler(logging.Handler): """ A handler class which writes logging records, in pickle format, to a streaming socket. The socket is kept open across logging calls. If the peer resets it, an attempt is made to reconnect on the next call. The pickle which is sent is that of the LogRecord's attribute dictionary (__dict__), so that the receiver does not need to have the logging module installed in order to process the logging event. To unpickle the record at the receiving end into a LogRecord, use the makeLogRecord function. """ def __init__(self, host, port): """ Initializes the handler with a specific host address and port. When the attribute *closeOnError* is set to True - if a socket error occurs, the socket is silently closed and then reopened on the next logging call. """ logging.Handler.__init__(self) self.host = host self.port = port if port is None: self.address = host else: self.address = (host, port) self.sock = None self.closeOnError = False self.retryTime = None # # Exponential backoff parameters. # self.retryStart = 1.0 self.retryMax = 30.0 self.retryFactor = 2.0 def makeSocket(self, timeout=1): """ A factory method which allows subclasses to define the precise type of socket they want. """ if self.port is not None: result = socket.create_connection(self.address, timeout=timeout) else: result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) result.settimeout(timeout) try: result.connect(self.address) except OSError: result.close() # Issue 19182 raise return result def createSocket(self): """ Try to create a socket, using an exponential backoff with a max retry time. Thanks to Robert Olson for the original patch (SF #815911) which has been slightly refactored. """ now = time.time() # Either retryTime is None, in which case this # is the first time back after a disconnect, or # we've waited long enough. if self.retryTime is None: attempt = True else: attempt = (now >= self.retryTime) if attempt: try: self.sock = self.makeSocket() self.retryTime = None # next time, no delay before trying except OSError: #Creation failed, so set the retry time and return. if self.retryTime is None: self.retryPeriod = self.retryStart else: self.retryPeriod = self.retryPeriod * self.retryFactor if self.retryPeriod > self.retryMax: self.retryPeriod = self.retryMax self.retryTime = now + self.retryPeriod def send(self, s): """ Send a pickled string to the socket. This function allows for partial sends which can happen when the network is busy. """ if self.sock is None: self.createSocket() #self.sock can be None either because we haven't reached the retry #time yet, or because we have reached the retry time and retried, #but are still unable to connect. if self.sock: try: self.sock.sendall(s) except OSError: #pragma: no cover self.sock.close() self.sock = None # so we can call createSocket next time def makePickle(self, record): """ Pickles the record in binary format with a length prefix, and returns it ready for transmission across the socket. """ ei = record.exc_info if ei: # just to get traceback text into record.exc_text ... dummy = self.format(record) # See issue #14436: If msg or args are objects, they may not be # available on the receiving end. So we convert the msg % args # to a string, save it as msg and zap the args. 
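# (Wire format produced below: a 4-byte big-endian length prefix from
#  struct.pack(">L", ...) followed by a protocol-1 pickle of the record's
#  __dict__.  A receiver can read the 4-byte header, unpack it with
#  struct.unpack(">L", header)[0], read that many bytes, unpickle them,
#  and rebuild the record with logging.makeLogRecord().)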
d = dict(record.__dict__) d['msg'] = record.getMessage() d['args'] = None d['exc_info'] = None # Issue #25685: delete 'message' if present: redundant with 'msg' d.pop('message', None) s = pickle.dumps(d, 1) slen = struct.pack(">L", len(s)) return slen + s def handleError(self, record): """ Handle an error during logging. An error has occurred during logging. Most likely cause - connection lost. Close the socket so that we can retry on the next event. """ if self.closeOnError and self.sock: self.sock.close() self.sock = None #try to reconnect next time else: logging.Handler.handleError(self, record) def emit(self, record): """ Emit a record. Pickles the record and writes it to the socket in binary format. If there is an error with the socket, silently drop the packet. If there was a problem with the socket, re-establishes the socket. """ try: s = self.makePickle(record) self.send(s) except Exception: self.handleError(record) def close(self): """ Closes the socket. """ self.acquire() try: sock = self.sock if sock: self.sock = None sock.close() logging.Handler.close(self) finally: self.release() class DatagramHandler(SocketHandler): """ A handler class which writes logging records, in pickle format, to a datagram socket. The pickle which is sent is that of the LogRecord's attribute dictionary (__dict__), so that the receiver does not need to have the logging module installed in order to process the logging event. To unpickle the record at the receiving end into a LogRecord, use the makeLogRecord function. """ def __init__(self, host, port): """ Initializes the handler with a specific host address and port. """ SocketHandler.__init__(self, host, port) self.closeOnError = False def makeSocket(self): """ The factory method of SocketHandler is here overridden to create a UDP socket (SOCK_DGRAM). """ if self.port is None: family = socket.AF_UNIX else: family = socket.AF_INET s = socket.socket(family, socket.SOCK_DGRAM) return s def send(self, s): """ Send a pickled string to a socket. This function no longer allows for partial sends which can happen when the network is busy - UDP does not guarantee delivery and can deliver packets out of sequence. """ if self.sock is None: self.createSocket() self.sock.sendto(s, self.address) class SysLogHandler(logging.Handler): """ A handler class which sends formatted logging records to a syslog server. Based on Sam Rushing's syslog module: http://www.nightmare.com/squirl/python-ext/misc/syslog.py Contributed by Nicolas Untz (after which minor refactoring changes have been made). """ # from <linux/sys/syslog.h>: # ====================================================================== # priorities/facilities are encoded into a single 32-bit quantity, where # the bottom 3 bits are the priority (0-7) and the top 28 bits are the # facility (0-big number). Both the priorities and the facilities map # roughly one-to-one to strings in the syslogd(8) source code. This # mapping is included in this file. 
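# For example, encodePriority() below combines facility LOG_USER (1) and
# priority LOG_INFO (6) as (1 << 3) | 6 == 14, which emit() renders as
# the "<14>" prefix on the wire.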
# # priorities (these are ordered) LOG_EMERG = 0 # system is unusable LOG_ALERT = 1 # action must be taken immediately LOG_CRIT = 2 # critical conditions LOG_ERR = 3 # error conditions LOG_WARNING = 4 # warning conditions LOG_NOTICE = 5 # normal but significant condition LOG_INFO = 6 # informational LOG_DEBUG = 7 # debug-level messages # facility codes LOG_KERN = 0 # kernel messages LOG_USER = 1 # random user-level messages LOG_MAIL = 2 # mail system LOG_DAEMON = 3 # system daemons LOG_AUTH = 4 # security/authorization messages LOG_SYSLOG = 5 # messages generated internally by syslogd LOG_LPR = 6 # line printer subsystem LOG_NEWS = 7 # network news subsystem LOG_UUCP = 8 # UUCP subsystem LOG_CRON = 9 # clock daemon LOG_AUTHPRIV = 10 # security/authorization messages (private) LOG_FTP = 11 # FTP daemon LOG_NTP = 12 # NTP subsystem LOG_SECURITY = 13 # Log audit LOG_CONSOLE = 14 # Log alert LOG_SOLCRON = 15 # Scheduling daemon (Solaris) # other codes through 15 reserved for system use LOG_LOCAL0 = 16 # reserved for local use LOG_LOCAL1 = 17 # reserved for local use LOG_LOCAL2 = 18 # reserved for local use LOG_LOCAL3 = 19 # reserved for local use LOG_LOCAL4 = 20 # reserved for local use LOG_LOCAL5 = 21 # reserved for local use LOG_LOCAL6 = 22 # reserved for local use LOG_LOCAL7 = 23 # reserved for local use priority_names = { "alert": LOG_ALERT, "crit": LOG_CRIT, "critical": LOG_CRIT, "debug": LOG_DEBUG, "emerg": LOG_EMERG, "err": LOG_ERR, "error": LOG_ERR, # DEPRECATED "info": LOG_INFO, "notice": LOG_NOTICE, "panic": LOG_EMERG, # DEPRECATED "warn": LOG_WARNING, # DEPRECATED "warning": LOG_WARNING, } facility_names = { "auth": LOG_AUTH, "authpriv": LOG_AUTHPRIV, "console": LOG_CONSOLE, "cron": LOG_CRON, "daemon": LOG_DAEMON, "ftp": LOG_FTP, "kern": LOG_KERN, "lpr": LOG_LPR, "mail": LOG_MAIL, "news": LOG_NEWS, "ntp": LOG_NTP, "security": LOG_SECURITY, "solaris-cron": LOG_SOLCRON, "syslog": LOG_SYSLOG, "user": LOG_USER, "uucp": LOG_UUCP, "local0": LOG_LOCAL0, "local1": LOG_LOCAL1, "local2": LOG_LOCAL2, "local3": LOG_LOCAL3, "local4": LOG_LOCAL4, "local5": LOG_LOCAL5, "local6": LOG_LOCAL6, "local7": LOG_LOCAL7, } #The map below appears to be trivially lowercasing the key. However, #there's more to it than meets the eye - in some locales, lowercasing #gives unexpected results. See SF #1524081: in the Turkish locale, #"INFO".lower() != "info" priority_map = { "DEBUG" : "debug", "INFO" : "info", "WARNING" : "warning", "ERROR" : "error", "CRITICAL" : "critical" } def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER, socktype=None): """ Initialize a handler. If address is specified as a string, a UNIX socket is used. To log to a local syslogd, "SysLogHandler(address="/dev/log")" can be used. If facility is not specified, LOG_USER is used. If socktype is specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific socket type will be used. For Unix sockets, you can also specify a socktype of None, in which case socket.SOCK_DGRAM will be used, falling back to socket.SOCK_STREAM. """ logging.Handler.__init__(self) self.address = address self.facility = facility self.socktype = socktype if isinstance(address, str): self.unixsocket = True # Syslog server may be unavailable during handler initialisation. # C's openlog() function also ignores connection errors. # Moreover, we ignore these errors while logging, so it not worse # to ignore it also here. 
try: self._connect_unixsocket(address) except OSError: pass else: self.unixsocket = False if socktype is None: socktype = socket.SOCK_DGRAM host, port = address ress = socket.getaddrinfo(host, port, 0, socktype) if not ress: raise OSError("getaddrinfo returns an empty list") for res in ress: af, socktype, proto, _, sa = res err = sock = None try: sock = socket.socket(af, socktype, proto) if socktype == socket.SOCK_STREAM: sock.connect(sa) break except OSError as exc: err = exc if sock is not None: sock.close() if err is not None: raise err self.socket = sock self.socktype = socktype def _connect_unixsocket(self, address): use_socktype = self.socktype if use_socktype is None: use_socktype = socket.SOCK_DGRAM self.socket = socket.socket(socket.AF_UNIX, use_socktype) try: self.socket.connect(address) # it worked, so set self.socktype to the used type self.socktype = use_socktype except OSError: self.socket.close() if self.socktype is not None: # user didn't specify falling back, so fail raise use_socktype = socket.SOCK_STREAM self.socket = socket.socket(socket.AF_UNIX, use_socktype) try: self.socket.connect(address) # it worked, so set self.socktype to the used type self.socktype = use_socktype except OSError: self.socket.close() raise def encodePriority(self, facility, priority): """ Encode the facility and priority. You can pass in strings or integers - if strings are passed, the facility_names and priority_names mapping dictionaries are used to convert them to integers. """ if isinstance(facility, str): facility = self.facility_names[facility] if isinstance(priority, str): priority = self.priority_names[priority] return (facility << 3) | priority def close(self): """ Closes the socket. """ self.acquire() try: self.socket.close() logging.Handler.close(self) finally: self.release() def mapPriority(self, levelName): """ Map a logging level name to a key in the priority_names map. This is useful in two scenarios: when custom levels are being used, and in the case where you can't do a straightforward mapping by lowercasing the logging level name because of locale- specific issues (see SF #1524081). """ return self.priority_map.get(levelName, "warning") ident = '' # prepended to all messages append_nul = True # some old syslog daemons expect a NUL terminator def emit(self, record): """ Emit a record. The record is formatted, and then sent to the syslog server. If exception information is present, it is NOT sent to the server. """ try: msg = self.format(record) if self.ident: msg = self.ident + msg if self.append_nul: msg += '\000' # We need to convert record level to lowercase, maybe this will # change in the future. prio = '<%d>' % self.encodePriority(self.facility, self.mapPriority(record.levelname)) prio = prio.encode('utf-8') # Message is a string. Convert to bytes as required by RFC 5424 msg = msg.encode('utf-8') msg = prio + msg if self.unixsocket: try: self.socket.send(msg) except OSError: self.socket.close() self._connect_unixsocket(self.address) self.socket.send(msg) elif self.socktype == socket.SOCK_DGRAM: self.socket.sendto(msg, self.address) else: self.socket.sendall(msg) except Exception: self.handleError(record) class SMTPHandler(logging.Handler): """ A handler class which sends an SMTP email for each logging event. """ def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None, secure=None, timeout=5.0): """ Initialize the handler. Initialize the instance with the from and to addresses and subject line of the email. 
To specify a non-standard SMTP port, use the (host, port) tuple format for the mailhost argument. To specify authentication credentials, supply a (username, password) tuple for the credentials argument. To specify the use of a secure protocol (TLS), pass in a tuple for the secure argument. This will only be used when authentication credentials are supplied. The tuple will be either an empty tuple, or a single-value tuple with the name of a keyfile, or a 2-value tuple with the names of the keyfile and certificate file. (This tuple is passed to the `starttls` method). A timeout in seconds can be specified for the SMTP connection (the default is one second). """ logging.Handler.__init__(self) if isinstance(mailhost, (list, tuple)): self.mailhost, self.mailport = mailhost else: self.mailhost, self.mailport = mailhost, None if isinstance(credentials, (list, tuple)): self.username, self.password = credentials else: self.username = None self.fromaddr = fromaddr if isinstance(toaddrs, str): toaddrs = [toaddrs] self.toaddrs = toaddrs self.subject = subject self.secure = secure self.timeout = timeout def getSubject(self, record): """ Determine the subject for the email. If you want to specify a subject line which is record-dependent, override this method. """ return self.subject def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ try: import smtplib from email.message import EmailMessage import email.utils port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout) msg = EmailMessage() msg['From'] = self.fromaddr msg['To'] = ','.join(self.toaddrs) msg['Subject'] = self.getSubject(record) msg['Date'] = email.utils.localtime() msg.set_content(self.format(record)) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.send_message(msg) smtp.quit() except Exception: self.handleError(record) class NTEventLogHandler(logging.Handler): """ A handler class which sends events to the NT Event Log. Adds a registry entry for the specified application name. If no dllname is provided, win32service.pyd (which contains some basic message placeholders) is used. Note that use of these placeholders will make your event logs big, as the entire message source is held in the log. If you want slimmer logs, you have to pass in the name of your own DLL which contains the message definitions you want to use in the event log. """ def __init__(self, appname, dllname=None, logtype="Application"): logging.Handler.__init__(self) try: import win32evtlogutil, win32evtlog self.appname = appname self._welu = win32evtlogutil if not dllname: dllname = os.path.split(self._welu.__file__) dllname = os.path.split(dllname[0]) dllname = os.path.join(dllname[0], r'win32service.pyd') self.dllname = dllname self.logtype = logtype self._welu.AddSourceToRegistry(appname, dllname, logtype) self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE self.typemap = { logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, } except ImportError: print("The Python Win32 extensions for NT (service, event "\ "logging) appear not to be available.") self._welu = None def getMessageID(self, record): """ Return the message ID for the event record. 
If you are using your own messages, you could do this by having the msg passed to the logger being an ID rather than a formatting string. Then, in here, you could use a dictionary lookup to get the message ID. This version returns 1, which is the base message ID in win32service.pyd. """ return 1 def getEventCategory(self, record): """ Return the event category for the record. Override this if you want to specify your own categories. This version returns 0. """ return 0 def getEventType(self, record): """ Return the event type for the record. Override this if you want to specify your own types. This version does a mapping using the handler's typemap attribute, which is set up in __init__() to a dictionary which contains mappings for DEBUG, INFO, WARNING, ERROR and CRITICAL. If you are using your own levels you will either need to override this method or place a suitable dictionary in the handler's typemap attribute. """ return self.typemap.get(record.levelno, self.deftype) def emit(self, record): """ Emit a record. Determine the message ID, event category and event type. Then log the message in the NT event log. """ if self._welu: try: id = self.getMessageID(record) cat = self.getEventCategory(record) type = self.getEventType(record) msg = self.format(record) self._welu.ReportEvent(self.appname, id, cat, type, [msg]) except Exception: self.handleError(record) def close(self): """ Clean up this handler. You can remove the application name from the registry as a source of event log entries. However, if you do this, you will not be able to see the events as you intended in the Event Log Viewer - it needs to be able to access the registry to get the DLL name. """ #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) logging.Handler.close(self) class HTTPHandler(logging.Handler): """ A class which sends records to a Web server, using either GET or POST semantics. """ def __init__(self, host, url, method="GET", secure=False, credentials=None, context=None): """ Initialize the instance with the host, the request URL, and the method ("GET" or "POST") """ logging.Handler.__init__(self) method = method.upper() if method not in ["GET", "POST"]: raise ValueError("method must be GET or POST") if not secure and context is not None: raise ValueError("context parameter only makes sense " "with secure=True") self.host = host self.url = url self.method = method self.secure = secure self.credentials = credentials self.context = context def mapLogRecord(self, record): """ Default implementation of mapping the log record into a dict that is sent as the CGI data. Overwrite in your class. Contributed by Franz Glasner. """ return record.__dict__ def emit(self, record): """ Emit a record. Send the record to the Web server as a percent-encoded dictionary """ try: import http.client, urllib.parse host = self.host if self.secure: h = http.client.HTTPSConnection(host, context=self.context) else: h = http.client.HTTPConnection(host) url = self.url data = urllib.parse.urlencode(self.mapLogRecord(record)) if self.method == "GET": if (url.find('?') >= 0): sep = '&' else: sep = '?' url = url + "%c%s" % (sep, data) h.putrequest(self.method, url) # support multiple hosts on one IP address... # need to strip optional :port from host, if present i = host.find(":") if i >= 0: host = host[:i] # See issue #30904: putrequest call above already adds this header # on Python 3.x. 
# h.putheader("Host", host) if self.method == "POST": h.putheader("Content-type", "application/x-www-form-urlencoded") h.putheader("Content-length", str(len(data))) if self.credentials: import base64 s = ('%s:%s' % self.credentials).encode('utf-8') s = 'Basic ' + base64.b64encode(s).strip().decode('ascii') h.putheader('Authorization', s) h.endheaders() if self.method == "POST": h.send(data.encode('utf-8')) h.getresponse() #can't do anything with the result except Exception: self.handleError(record) class BufferingHandler(logging.Handler): """ A handler class which buffers logging records in memory. Whenever each record is added to the buffer, a check is made to see if the buffer should be flushed. If it should, then flush() is expected to do what's needed. """ def __init__(self, capacity): """ Initialize the handler with the buffer size. """ logging.Handler.__init__(self) self.capacity = capacity self.buffer = [] def shouldFlush(self, record): """ Should the handler flush its buffer? Returns true if the buffer is up to capacity. This method can be overridden to implement custom flushing strategies. """ return (len(self.buffer) >= self.capacity) def emit(self, record): """ Emit a record. Append the record. If shouldFlush() tells us to, call flush() to process the buffer. """ self.buffer.append(record) if self.shouldFlush(record): self.flush() def flush(self): """ Override to implement custom flushing behaviour. This version just zaps the buffer to empty. """ self.acquire() try: self.buffer.clear() finally: self.release() def close(self): """ Close the handler. This version just flushes and chains to the parent class' close(). """ try: self.flush() finally: logging.Handler.close(self) class MemoryHandler(BufferingHandler): """ A handler class which buffers logging records in memory, periodically flushing them to a target handler. Flushing occurs whenever the buffer is full, or when an event of a certain severity or greater is seen. """ def __init__(self, capacity, flushLevel=logging.ERROR, target=None, flushOnClose=True): """ Initialize the handler with the buffer size, the level at which flushing should occur and an optional target. Note that without a target being set either here or via setTarget(), a MemoryHandler is no use to anyone! The ``flushOnClose`` argument is ``True`` for backward compatibility reasons - the old behaviour is that when the handler is closed, the buffer is flushed, even if the flush level hasn't been exceeded nor the capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. """ BufferingHandler.__init__(self, capacity) self.flushLevel = flushLevel self.target = target # See Issue #26559 for why this has been added self.flushOnClose = flushOnClose def shouldFlush(self, record): """ Check for buffer full or a record at the flushLevel or higher. """ return (len(self.buffer) >= self.capacity) or \ (record.levelno >= self.flushLevel) def setTarget(self, target): """ Set the target handler for this handler. """ self.target = target def flush(self): """ For a MemoryHandler, flushing means just sending the buffered records to the target, if there is one. Override if you want different behaviour. The record buffer is also cleared by this operation. """ self.acquire() try: if self.target: for record in self.buffer: self.target.handle(record) self.buffer.clear() finally: self.release() def close(self): """ Flush, if appropriately configured, set the target to None and lose the buffer. 
""" try: if self.flushOnClose: self.flush() finally: self.acquire() try: self.target = None BufferingHandler.close(self) finally: self.release() class QueueHandler(logging.Handler): """ This handler sends events to a queue. Typically, it would be used together with a multiprocessing Queue to centralise logging to file in one process (in a multi-process application), so as to avoid file write contention between processes. This code is new in Python 3.2, but this class can be copy pasted into user code for use with earlier Python versions. """ def __init__(self, queue): """ Initialise an instance, using the passed queue. """ logging.Handler.__init__(self) self.queue = queue def enqueue(self, record): """ Enqueue a record. The base implementation uses put_nowait. You may want to override this method if you want to use blocking, timeouts or custom queue implementations. """ self.queue.put_nowait(record) def prepare(self, record): """ Prepares a record for queuing. The object returned by this method is enqueued. The base implementation formats the record to merge the message and arguments, and removes unpickleable items from the record in-place. You might want to override this method if you want to convert the record to a dict or JSON string, or send a modified copy of the record while leaving the original intact. """ # The format operation gets traceback text into record.exc_text # (if there's exception data), and also returns the formatted # message. We can then use this to replace the original # msg + args, as these might be unpickleable. We also zap the # exc_info and exc_text attributes, as they are no longer # needed and, if not None, will typically not be pickleable. msg = self.format(record) # bpo-35726: make copy of record to avoid affecting other handlers in the chain. record = copy.copy(record) record.message = msg record.msg = msg record.args = None record.exc_info = None record.exc_text = None return record def emit(self, record): """ Emit a record. Writes the LogRecord to the queue, preparing it for pickling first. """ try: self.enqueue(self.prepare(record)) except Exception: self.handleError(record) class QueueListener(object): """ This class implements an internal threaded listener which watches for LogRecords being added to a queue, removes them and passes them to a list of handlers for processing. """ _sentinel = None def __init__(self, queue, *handlers, respect_handler_level=False): """ Initialise an instance with the specified queue and handlers. """ self.queue = queue self.handlers = handlers self._thread = None self.respect_handler_level = respect_handler_level def dequeue(self, block): """ Dequeue a record and return it, optionally blocking. The base implementation uses get. You may want to override this method if you want to use timeouts or work with custom queue implementations. """ return self.queue.get(block) def start(self): """ Start the listener. This starts up a background thread to monitor the queue for LogRecords to process. """ self._thread = t = threading.Thread(target=self._monitor) t.daemon = True t.start() def prepare(self, record): """ Prepare a record for handling. This method just returns the passed-in record. You may want to override this method if you need to do any custom marshalling or manipulation of the record before passing it to the handlers. """ return record def handle(self, record): """ Handle a record. This just loops through the handlers offering them the record to handle. 
""" record = self.prepare(record) for handler in self.handlers: if not self.respect_handler_level: process = True else: process = record.levelno >= handler.level if process: handler.handle(record) def _monitor(self): """ Monitor the queue for records, and ask the handler to deal with them. This method runs on a separate, internal thread. The thread will terminate if it sees a sentinel object in the queue. """ q = self.queue has_task_done = hasattr(q, 'task_done') while True: try: record = self.dequeue(True) if record is self._sentinel: if has_task_done: q.task_done() break self.handle(record) if has_task_done: q.task_done() except queue.Empty: break def enqueue_sentinel(self): """ This is used to enqueue the sentinel record. The base implementation uses put_nowait. You may want to override this method if you want to use timeouts or work with custom queue implementations. """ self.queue.put_nowait(self._sentinel) def stop(self): """ Stop the listener. This asks the thread to terminate, and then waits for it to do so. Note that if you don't call this before your application exits, there may be some records still left on the queue, which won't be processed. """ self.enqueue_sentinel() self._thread.join() self._thread = None
engine.py
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import json import threading import time import traceback import jsonschema import six from rally.common.i18n import _ from rally.common import logging from rally.common import objects from rally.common import utils from rally import consts from rally import exceptions from rally import osclients from rally.plugins.openstack.context.keystone import existing_users from rally.plugins.openstack.context.keystone import users as users_ctx from rally.task import context from rally.task import runner from rally.task import scenario from rally.task import sla LOG = logging.getLogger(__name__) class ResultConsumer(object): """ResultConsumer class stores results from ScenarioRunner, checks SLA.""" def __init__(self, key, task, runner, abort_on_sla_failure): """ResultConsumer constructor. :param key: Scenario identifier :param task: Instance of Task, task to run :param runner: ScenarioRunner instance that produces results to be consumed :param abort_on_sla_failure: True if the execution should be stopped when some SLA check fails """ self.key = key self.task = task self.runner = runner self.load_started_at = float("inf") self.load_finished_at = 0 self.sla_checker = sla.SLAChecker(key["kw"]) self.abort_on_sla_failure = abort_on_sla_failure self.is_done = threading.Event() self.unexpected_failure = {} self.results = [] self.thread = threading.Thread( target=self._consume_results ) self.aborting_checker = threading.Thread(target=self.wait_and_abort) def __enter__(self): self.thread.start() self.aborting_checker.start() self.start = time.time() return self def _consume_results(self): while True: if self.runner.result_queue: results = self.runner.result_queue.popleft() self.results.extend(results) for r in results: self.load_started_at = min(r["timestamp"], self.load_started_at) self.load_finished_at = max(r["duration"] + r["timestamp"], self.load_finished_at) success = self.sla_checker.add_iteration(r) if self.abort_on_sla_failure and not success: self.sla_checker.set_aborted_on_sla() self.runner.abort() elif self.is_done.isSet(): break else: time.sleep(0.1) def __exit__(self, exc_type, exc_value, exc_traceback): self.finish = time.time() self.is_done.set() self.aborting_checker.join() self.thread.join() if exc_type: self.sla_checker.set_unexpected_failure(exc_value) if objects.Task.get_status( self.task["uuid"]) == consts.TaskStatus.ABORTED: self.sla_checker.set_aborted_manually() # NOTE(boris-42): Sort in order of starting instead of order of ending self.results.sort(key=lambda x: x["timestamp"]) load_duration = max(self.load_finished_at - self.load_started_at, 0) LOG.info("Load duration is: %s" % utils.format_float_to_str( load_duration)) LOG.info("Full runner duration is: %s" % utils.format_float_to_str(self.runner.run_duration)) LOG.info("Full duration is %s" % utils.format_float_to_str( self.finish - self.start)) self.task.append_results(self.key, { "raw": self.results, "load_duration": load_duration, 
"full_duration": self.finish - self.start, "sla": self.sla_checker.results()}) @staticmethod def is_task_in_aborting_status(task_uuid, check_soft=True): """Checks task is in abort stages :param task_uuid: UUID of task to check status :type task_uuid: str :param check_soft: check or not SOFT_ABORTING status :type check_soft: bool """ stages = [consts.TaskStatus.ABORTING, consts.TaskStatus.ABORTED] if check_soft: stages.append(consts.TaskStatus.SOFT_ABORTING) return objects.Task.get_status(task_uuid) in stages def wait_and_abort(self): """Waits until abort signal is received and aborts runner in this case. Has to be run from different thread simultaneously with the runner.run method. """ while not self.is_done.isSet(): if self.is_task_in_aborting_status(self.task["uuid"], check_soft=False): self.runner.abort() self.task.update_status(consts.TaskStatus.ABORTED) break time.sleep(2.0) class TaskEngine(object): """The Task engine class is used to execute benchmark scenarios. An instance of this class is initialized by the API with the task configuration and then is used to validate and execute all specified in config subtasks. .. note:: Typical usage: ... admin = .... # contains dict representations of objects.Credential # with OpenStack admin credentials users = .... # contains a list of dicts of representations of # objects.Credential with OpenStack users credentials engine = TaskEngine(config, task, admin=admin, users=users) engine.validate() # to test config engine.run() # to run config """ def __init__(self, config, task, admin=None, users=None, abort_on_sla_failure=False): """TaskEngine constructor. :param config: Dict with configuration of specified benchmark scenarios :param task: Instance of Task, the current task which is being performed :param admin: Dict with admin credentials :param users: List of dicts with user credentials :param abort_on_sla_failure: True if the execution should be stopped when some SLA check fails """ try: self.config = TaskConfig(config) except Exception as e: task.set_failed(type(e).__name__, str(e), json.dumps(traceback.format_exc())) raise exceptions.InvalidTaskException(str(e)) self.task = task self.admin = admin and objects.Credential(**admin) or None self.existing_users = users or [] self.abort_on_sla_failure = abort_on_sla_failure @logging.log_task_wrapper(LOG.info, _("Task validation check cloud.")) def _check_cloud(self): clients = osclients.Clients(self.admin) clients.verified_keystone() @logging.log_task_wrapper(LOG.info, _("Task validation of scenarios names.")) def _validate_config_scenarios_name(self, config): available = set(s.get_name() for s in scenario.Scenario.get_all()) specified = set() for subtask in config.subtasks: for s in subtask.workloads: specified.add(s.name) if not specified.issubset(available): names = ", ".join(specified - available) raise exceptions.NotFoundScenarios(names=names) @logging.log_task_wrapper(LOG.info, _("Task validation of syntax.")) def _validate_config_syntax(self, config): for subtask in config.subtasks: for pos, workload in enumerate(subtask.workloads): try: runner.ScenarioRunner.validate(workload.runner) context.ContextManager.validate( workload.context, non_hidden=True) sla.SLA.validate(workload.sla) except (exceptions.RallyException, jsonschema.ValidationError) as e: kw = workload.make_exception_args( pos, six.text_type(e)) raise exceptions.InvalidTaskConfig(**kw) def _validate_config_semantic_helper(self, admin, user, workload, pos, deployment): try: scenario.Scenario.validate( workload.name, 
workload.to_dict(), admin=admin, users=[user], deployment=deployment) except exceptions.InvalidScenarioArgument as e: kw = workload.make_exception_args(pos, six.text_type(e)) raise exceptions.InvalidTaskConfig(**kw) def _get_user_ctx_for_validation(self, ctx): if self.existing_users: ctx["config"] = {"existing_users": self.existing_users} user_context = existing_users.ExistingUsers(ctx) else: user_context = users_ctx.UserGenerator(ctx) return user_context @logging.log_task_wrapper(LOG.info, _("Task validation of semantic.")) def _validate_config_semantic(self, config): self._check_cloud() ctx_conf = {"task": self.task, "admin": {"credential": self.admin}} deployment = objects.Deployment.get(self.task["deployment_uuid"]) # TODO(boris-42): It's quite hard at the moment to validate case # when both user context and existing_users are # specified. So after switching to plugin base # and refactoring validation mechanism this place # will be replaced with self._get_user_ctx_for_validation(ctx_conf) as ctx: ctx.setup() admin = osclients.Clients(self.admin) user = osclients.Clients(ctx_conf["users"][0]["credential"]) for u in ctx_conf["users"]: user = osclients.Clients(u["credential"]) for subtask in config.subtasks: for pos, workload in enumerate(subtask.workloads): self._validate_config_semantic_helper( admin, user, workload, pos, deployment) @logging.log_task_wrapper(LOG.info, _("Task validation.")) def validate(self): """Perform full task configuration validation.""" self.task.update_status(consts.TaskStatus.VERIFYING) try: self._validate_config_scenarios_name(self.config) self._validate_config_syntax(self.config) self._validate_config_semantic(self.config) except Exception as e: self.task.set_failed(type(e).__name__, str(e), json.dumps(traceback.format_exc())) raise exceptions.InvalidTaskException(str(e)) def _get_runner(self, config): config = config or {"type": "serial"} return runner.ScenarioRunner.get(config["type"])(self.task, config) def _prepare_context(self, ctx, name, credential): scenario_context = copy.deepcopy( scenario.Scenario.get(name)._meta_get("default_context")) if self.existing_users and "users" not in ctx: scenario_context.setdefault("existing_users", self.existing_users) elif "users" not in ctx: scenario_context.setdefault("users", {}) scenario_context.update(ctx) context_obj = { "task": self.task, "admin": {"credential": credential}, "scenario_name": name, "config": scenario_context } return context_obj @logging.log_task_wrapper(LOG.info, _("Benchmarking.")) def run(self): """Run the benchmark according to the test configuration. Test configuration is specified on engine initialization. 
:returns: List of dicts, each dict containing the results of all the corresponding benchmark test launches """ self.task.update_status(consts.TaskStatus.RUNNING) for subtask in self.config.subtasks: for pos, workload in enumerate(subtask.workloads): if ResultConsumer.is_task_in_aborting_status( self.task["uuid"]): LOG.info("Received aborting signal.") self.task.update_status(consts.TaskStatus.ABORTED) return key = workload.make_key(pos) LOG.info("Running benchmark with key: \n%s" % json.dumps(key, indent=2)) runner_obj = self._get_runner(workload.runner) context_obj = self._prepare_context( workload.context, workload.name, self.admin) try: with ResultConsumer(key, self.task, runner_obj, self.abort_on_sla_failure): with context.ContextManager(context_obj): runner_obj.run(workload.name, context_obj, workload.args) except Exception as e: LOG.exception(e) if objects.Task.get_status( self.task["uuid"]) != consts.TaskStatus.ABORTED: self.task.update_status(consts.TaskStatus.FINISHED) class TaskConfig(object): """Version-aware wrapper around task. """ CONFIG_SCHEMA_V1 = { "type": "object", "$schema": consts.JSON_SCHEMA, "patternProperties": { ".*": { "type": "array", "items": { "type": "object", "properties": { "args": {"type": "object"}, "runner": { "type": "object", "properties": {"type": {"type": "string"}}, "required": ["type"] }, "context": {"type": "object"}, "sla": {"type": "object"}, }, "additionalProperties": False } } } } CONFIG_SCHEMA_V2 = { "type": "object", "$schema": consts.JSON_SCHEMA, "properties": { "version": {"type": "number"}, "title": {"type": "string"}, "description": {"type": "string"}, "tags": { "type": "array", "items": {"type": "string"} }, "subtasks": { "type": "array", "minItems": 1, "items": { "type": "object", "properties": { "title": {"type": "string"}, "group": {"type": "string"}, "description": {"type": "string"}, "tags": { "type": "array", "items": {"type": "string"} }, "run_in_parallel": {"type": "boolean"}, "workloads": { "type": "array", "minItems": 1, "maxItems": 1, "items": { "type": "object", "properties": { "name": {"type": "string"}, "args": {"type": "object"}, "runner": { "type": "object", "properties": { "type": {"type": "string"} }, "required": ["type"] }, "sla": {"type": "object"}, "context": {"type": "object"} }, "additionalProperties": False, "required": ["name", "runner"] } } }, "additionalProperties": False, "required": ["title", "workloads"] } } }, "additionalProperties": False, "required": ["title", "subtasks"] } CONFIG_SCHEMAS = {1: CONFIG_SCHEMA_V1, 2: CONFIG_SCHEMA_V2} def __init__(self, config): """TaskConfig constructor. :param config: Dict with configuration of specified task """ if config is None: # NOTE(stpierre): This gets reraised as # InvalidTaskException. if we raise it here as # InvalidTaskException, then "Task config is invalid: " # gets prepended to the message twice. raise Exception(_("Input task is empty")) self.version = self._get_version(config) self._validate_version() self._validate_json(config) self.title = config.get("title", "Task") self.tags = config.get("tags", []) self.description = config.get("description") self.subtasks = self._make_subtasks(config) # if self.version == 1: # TODO(ikhudoshyn): Warn user about deprecated format @staticmethod def _get_version(config): return config.get("version", 1) def _validate_version(self): if self.version not in self.CONFIG_SCHEMAS: allowed = ", ".join([str(k) for k in self.CONFIG_SCHEMAS]) msg = (_("Task configuration version {0} is not supported. 
" "Supported versions: {1}")).format(self.version, allowed) raise exceptions.InvalidTaskException(msg) def _validate_json(self, config): try: jsonschema.validate(config, self.CONFIG_SCHEMAS[self.version]) except Exception as e: raise exceptions.InvalidTaskException(str(e)) def _make_subtasks(self, config): if self.version == 2: return [SubTask(s) for s in config["subtasks"]] elif self.version == 1: subtasks = [] for name, v1_workloads in six.iteritems(config): for v1_workload in v1_workloads: v2_workload = copy.deepcopy(v1_workload) v2_workload["name"] = name subtasks.append( SubTask({"title": name, "workloads": [v2_workload]})) return subtasks class SubTask(object): """Subtask -- unit of execution in Task """ def __init__(self, config): """Subtask constructor. :param config: Dict with configuration of specified subtask """ self.title = config["title"] self.tags = config.get("tags", []) self.group = config.get("group") self.description = config.get("description") self.workloads = [Workload(wconf) for wconf in config["workloads"]] self.context = config.get("context", {}) class Workload(object): """Workload -- workload configuration in SubTask. """ def __init__(self, config): self.name = config["name"] self.runner = config.get("runner", {}) self.sla = config.get("sla", {}) self.context = config.get("context", {}) self.args = config.get("args", {}) def to_dict(self): workload = {"runner": self.runner} for prop in "sla", "args", "context": value = getattr(self, prop) if value: workload[prop] = value return workload def to_task(self): """Make task configuration for the workload. This method returns a dict representing full configuration of the task containing a single subtask with this single workload. :return: dict containing full task configuration """ # NOTE(ikhudoshyn): Result of this method will be used # to store full task configuration in DB so that # subtask configuration in reports would be given # in the same format as it was provided by user. # Temporarily it returns to_dict() in order not # to break existing reports. It should be # properly implemented in a patch that will update reports. # return {self.name: [self.to_dict()]} return self.to_dict() def make_key(self, pos): return {"name": self.name, "pos": pos, "kw": self.to_task()} def make_exception_args(self, pos, reason): return {"name": self.name, "pos": pos, "config": self.to_dict(), "reason": reason}
mybot.py
# -*- coding: utf-8 -*- import telegram import os import sys from threading import Thread import logging from functools import wraps from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, ConversationHandler, CallbackQueryHandler from app.utils.loadini import read_config from app.utils.monthly import monthly_thread from app.utils.jav_thread import thread_javlib from app.utils.magnet import sukebei from app.utils.dmm import dmm_thread, prevideo, prephotos, dmmonecid, prevideolow, dmmsearch, dmmlinks,truevideo,dmmsearchall from app.utils.cloudflare import CloudFlare_handler from app.utils.get_update import Version from app.utils.identify import girl, acg import time import re logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) CHOOSE,GIRL,ACG,GITUPDATE = range(4) allconfig = read_config() ifproxy = allconfig['ifproxy'] proxy = allconfig['proxy'] token = allconfig['token'] users = allconfig['userid'] iftb = allconfig['tb'] TOKEN = token userid = users.split(',') userss = [] #userid = ','.join(userid) for i in userid: userss.append(int(i)) #print(userid) #userids = ','.join(userss) LIST_OF_ADMINS = userss REQUEST_KWARGS={ # "USERNAME:PASSWORD@" is optional, if you need authentication: 'proxy_url': 'http://%s/'%proxy, } class Msg: def __init__(self): self.chat_id = update.effective_chat.id def error(update, context): logger.warning('Update "%s" caused error "%s"', update, context.error) #update.message.reply_text(str(update.message.text)) try: update.message.reply_text(str(context.error)) except: update.callback_query.edit_message_text(str(context.error)) def send_typing_action(func): """Sends typing action while processing func command.""" @wraps(func) def command_func(update, context, *args, **kwargs): context.bot.send_chat_action(chat_id=update.message.chat_id, action=telegram.ChatAction.TYPING) return func(update, context, *args, **kwargs) return command_func def restricted(func): @wraps(func) def wrapped(update, context, *args, **kwargs): user_id = update.effective_user.id if user_id not in LIST_OF_ADMINS: error = "Unauthorized access denied for you!!you user id:{}.".format(user_id) update.message.reply_markdown(error) print(error) return return func(update, context, *args, **kwargs) return wrapped def long_message(update, context, text: str,mtype): max_length = telegram.constants.MAX_MESSAGE_LENGTH if len(text) <= max_length: if mtype=='markdown': return update.message.reply_markdown(text) else: return update.message.reply_text(text) parts = [] while len(text) > max_length: parts.append(text[:max_length]) text = text[max_length:] parts.append(text) msg = None for part in parts: update.message.reply_text(part) time.sleep(3) return msg def split_list(init_list, children_list_len): _list_of_groups = zip(*(iter(init_list),) *children_list_len) end_list = [list(i) for i in _list_of_groups] count = len(init_list) % children_list_len end_list.append(init_list[-count:]) if count !=0 else end_list return end_list @send_typing_action def start(update, context): text = ''' ** 欢迎使用imulti bot,请输入/help查看指令 ** ''' #print(telegram.constants.MAX_MESSAGE_LENGTH) update.message.reply_markdown(text) @send_typing_action def help(update, context): text = ''' *bot命令帮助* *start* - `欢迎 ` *help* - `帮助` *m* - `查询是否ikoa月额,num/cid皆可,可批量查询,半角逗号分隔 /m ssni-520,ssni-521` *uid* - `查询女优在dmm所有番号,需女优在dmm的数字id /uid 2333` *lib* - `查询女优在library所有番号,需女优页面完整https链接 /lib https://*****` *video* - `dmm预览视频,cid(准确),num(可能查询不到)` 
*photo* - `dmm预览图片,cid(准确),num(可能查询不到)` *cid* - `查询番号具体信息,cid(准确),num(可能查询不到) ` *magnet* - `搜索关键词在sukebei(磁力)` *search* - `搜索关键词在dmm,dmm官方支持正则,例如当长字符无结果,可利用空格分割` *all* - `搜索关键词在dmm所有区域的内容,dmm官方支持正则,例如当长字符无结果,可利用空格分割` *links* - `demo for links in dmm limit 30 project` *face* - `根据提示发送图片进行人脸识别` *new* - `dmm new video limit 30` *top* - `dmm hot video limit 30` *cf option* - `控制cf域名解析` *update* - `更新机器人` *restart* - `重启机器人` *cancel* - `处在对话命令内时,提前结束对话` *cloudflare help* ==> 1、 *ls* => `列出所有已拥有域名 example: /cf ls` 2、 *dns domain* => `查看单个域名解析情况 example: /cf dns domain` 3、 *add type domain host* => `添加域名解析 example: /cf add A test.domain.com ip-adress` 4、 *edit type domain host * => `编辑域名解析(option:cloudon/off ,ttl = ini) example:/cf edit A test.domain.com ip-adress cloudon` 5、 *del domain* => `删除域名解析 example: /cf del test.domain.com` ''' update.message.reply_markdown(text) @restricted @send_typing_action def monthlyy(update, context): #print(context.args) chat_id=update.message.chat_id id = context.args #print(id) idlist = ','.join(id) searchlist1 = idlist.split(',') leng3 = len(searchlist1) update.message.reply_text('正在查询%s个...请稍候......' %leng3) mon,leng,nomon,leng1,time,tbb,tbbb,noresult,noresultleng = monthly_thread(idlist) tb = str(tbb) usetime = '耗时:' + time + '秒' if leng == 0: if leng1 == 0: noresult = '无结果 (%s) => %s' %(noresultleng,noresult) msg = long_message(update, context, noresult, 'text') else: result1 = '非月额list (%s) => %s' %(leng1,nomon) if iftb == 'true': msg = long_message(update,context,tb,'text') msg = long_message(update,context,result1,'text') if noresultleng>0: noresult = '无结果 (%s) => %s' %(noresultleng,noresult) msg = long_message(update, context, noresult, 'text') update.message.reply_text(usetime) else: if leng1 == 0: if iftb == 'true': msg = long_message(update,context,tb,'text') result = '月额list (%s) => %s' %(leng,mon) msg = long_message(update,context,result,'text') if noresultleng>0: noresult = '无结果 (%s) => %s' %(noresultleng,noresult) msg = long_message(update, context, noresult, 'text') update.message.reply_text(usetime) else: result = '月额list (%s) => %s' %(leng,mon) result1 = '非月额list (%s) => %s' %(leng1,nomon) if iftb == 'true': msg = long_message(update,context,tb,'text') msg = long_message(update,context,result,'text') msg = long_message(update,context,result1,'text') if noresultleng>0: noresult = '无结果 (%s) => %s' %(noresultleng,noresult) msg = long_message(update, context, noresult, 'text') update.message.reply_text(usetime) @restricted @send_typing_action def dmmid(update, context): searchid = context.args[0] update.message.reply_text('search items for %s please wait...'%searchid) result,time = dmm_thread(searchid) usetime = '搜索完成,耗时:' + time + '秒' update.message.reply_text(usetime) #update.message.reply_text(result) if len(result) > 4096: mssg = '超出telegram消息限制,将分段截取,取最后10字符用于校验:' + result[-10:] update.message.reply_text(mssg) msg = long_message(update,context,result,'text') @restricted @send_typing_action def dmmcid(update,context): allid = ' '.join(context.args[:]) searchidlist = allid.split(',') for cid in searchidlist: searchidd = cid.replace('-',' ') text, notitle = dmmonecid(cid) if notitle == 1: #update.message.reply_text('没有找到%s的cid信息,自动尝试使用/search功能搜索' %searchid) boxlist,stitle = dmmsearch(searchidd,'onlysearch') if boxlist == '選択した条件で商品は存在しませんでした': update.message.reply_text('没有找到 %s 的cid信息,自动搜索无结果:%s' % (cid, boxlist)) return #print(boxlist) firstlist = boxlist[0] wcid = firstlist.get('cid') text, notitle = dmmonecid(wcid) 
update.message.reply_markdown(text) else: update.message.reply_markdown(text) time.sleep(2) @restricted @send_typing_action def lib(update, context): list1 = context.args url=' '.join(list1) #print(url) result, time = thread_javlib(url) text = str(result) + '\n' + str(time) msg = long_message(update,context,text,'text') @restricted @send_typing_action def dmmvideo(update, context): searchid = context.args[0] #text = str(prevideo(searchid)) #update.message.reply_video(text) nocid = 0 if len(context.args) == 1: gang = '-' searchidd = context.args[0] if gang in searchidd: nocid = 1 searchidd = searchidd.replace('-',' ') else: nocid = 1 searchidd = ' '.join(context.args[:]) if nocid == 1: boxlist,stitle = dmmsearch(searchidd,'onlysearch') if boxlist == '選択した条件で商品は存在しませんでした': update.message.reply_text('没有找到 %s 预览视频'%searchid) return #print(boxlist) firstlist = boxlist[0] searchid = firstlist.get('cid') try: result = str(truevideo(searchid)) except: print('尝试使用selenium引擎失败') result = str(prevideo(searchid)) try: update.message.reply_video(result) except: result_hd = result update.message.reply_text('原视频超出telegram大小限制,将发送低画质版本,可复制原画质链接到浏览器查看!---%s'%result_hd) result = result.replace('mhb','dmb') try: update.message.reply_video(result) except: result = result.replace('dmb','sm') update.message.reply_video(result) @restricted @send_typing_action def dmmphoto(update, context): searchid = context.args[0] #chat_id = update.message.chat_id list_of_urls = prephotos(searchid) if list_of_urls == []: if len(context.args) == 1: searchidd = context.args[0] searchidd = searchidd.replace('-',' ') else: searchidd = ' '.join(context.args[:]) boxlist,stitle = dmmsearch(searchidd,'onlysearch') if boxlist == '選択した条件で商品は存在しませんでした': update.message.reply_text('没有找到 %s 预览图片'%searchid) return #print(boxlist) firstlist = boxlist[0] wcid = firstlist.get('cid') list_of_urls = prephotos(wcid) if len(list_of_urls)<=10: media_group = [] for number, url in enumerate(list_of_urls): media_group.append(telegram.InputMediaPhoto(media=url, caption="Turtle" + str(number))) update.message.reply_media_group(media=media_group) else: list_of_urls = split_list(list_of_urls,10) for i in list_of_urls: media_group = [] for number, url in enumerate(i): #print(telegram.InputMediaPhoto(media=url, caption="Photos" + str(number))) media_group.append(telegram.InputMediaPhoto(media=url, caption="Photos" + str(number))) #print(media_group) update.message.reply_media_group(media=media_group) @restricted @send_typing_action def magnet(update, context): if len(context.args) == 1: searchid = context.args[0] else: searchid = ' '.join(context.args[:]) result = sukebei(searchid) msg = long_message(update, context, result,'markdown') @restricted @send_typing_action def dmmsearchh(update, context): if len(context.args) == 1: searchstr = context.args[0] else: searchstr = ' '.join(context.args[:]) #print(searchstr) text = dmmsearch(searchstr) msg = long_message(update,context,text,'markdown') @restricted @send_typing_action def searchall(update,context): if len(context.args) == 1: searchstr = context.args[0] else: searchstr = ' '.join(context.args[:]) #print(searchstr) text = dmmsearchall(searchstr) msg = long_message(update,context,text,'markdown') @restricted @send_typing_action def dmmlink(update, context): if len(context.args) == 1: searchlink = context.args[0] else: searchlink = ' '.join(context.args[:]) #print(searchlink) text = dmmlinks(searchlink) msg = long_message(update, context, text,'markdown') @restricted @send_typing_action def new30(update, 
context): searchlink = 'https://www.dmm.co.jp/digital/videoa/-/list/=/article=latest/limit=30/sort=date/' text = dmmlinks(searchlink) update.message.reply_markdown(text) @restricted @send_typing_action def top30(update, context): searchlink = 'https://www.dmm.co.jp/digital/videoa/-/list/=/limit=30/sort=ranking/' text = dmmlinks(searchlink) update.message.reply_markdown(text) @restricted @send_typing_action def cf(update, context): args = context.args cf = CloudFlare_handler(args) text = cf.option() update.message.reply_markdown(text) @restricted @send_typing_action def getupdate(update, context): repo = Version('https://github.com/horryruo/multi-bot.git') updatetime = repo.get_time() text = '最新版本:{} (UTC+8)'.format(updatetime) keyboard = [ [ telegram.InlineKeyboardButton('更新程序',callback_data="goupdate"), telegram.InlineKeyboardButton('取消',callback_data="cancel"), ], ] update.message.reply_markdown(text,reply_markup=telegram.InlineKeyboardMarkup(keyboard)) return GITUPDATE def gitupdate(update, context): repo = Version('https://github.com/horryruo/multi-bot.git') try: pull = repo.pull() except Exception as e: pull = str(e) print(pull) matchline = re.search( r'file changed|Already|merge', pull, re.M|re.I).group() #print(matchline) if matchline == 'Already': update.callback_query.edit_message_text('版本已是最新,无需更新') elif matchline == 'file changed': update.callback_query.edit_message_text('更新完成,请输入/restart 重启程序完成更新') elif matchline == 'merge': update.callback_query.edit_message_text('你可能修改过项目文件,无法自动更新,请手动解决或重新下载程序') else: update.callback_query.edit_message_text('未知错误,请重新配置项目') return ConversationHandler.END @restricted @send_typing_action def startface(update, context): keyboard = [ [ telegram.InlineKeyboardButton('识别女优',callback_data="girl"), telegram.InlineKeyboardButton('识别二次元图片(未完成)',callback_data="acg"), ], [ telegram.InlineKeyboardButton('取消',callback_data="cancel"), ], ] update.message.reply_text( '选择你要识别图片的类型', reply_markup=telegram.InlineKeyboardMarkup(keyboard), ) return CHOOSE def choosegirl(update, context): update.callback_query.answer() update.callback_query.edit_message_text('请发送图片',) return GIRL def chooseacg(update, context): update.callback_query.answer() update.callback_query.edit_message_text('请发送图片',) return ACG @send_typing_action def girl_ide(update, context): try: photo_file = update.message.effective_attachment.get_file() except: photo_file = update.message.photo[-1].get_file() photo_file.download('user_photo.jpg') update.message.reply_text('正在识别,请稍候') result = girl() msg = long_message(update, context, result,'markdown') return ConversationHandler.END @send_typing_action def acg_ide(update, context): try: photo_file = update.message.effective_attachment.get_file() except: photo_file = update.message.photo[-1].get_file() photo_file.download('user_photo.jpg') update.message.reply_text('本功能未完成,请等待作者咕咕咕') return ConversationHandler.END def cancel(update, context): update.message.reply_text('已取消') return ConversationHandler.END def cancell(update, context): update.callback_query.edit_message_text('已取消') return ConversationHandler.END def main(): if ifproxy == 'true': updater = Updater(TOKEN, use_context=True,request_kwargs=REQUEST_KWARGS) else: updater = Updater(TOKEN, use_context=True) #print(bot.get_me()) dp = updater.dispatcher def stop_and_restart(): """Gracefully stop the Updater and replace the current process with a new one""" updater.stop() os.execl(sys.executable, sys.executable, *sys.argv) def restart(update, context): update.message.reply_text('Bot 正在重启,请等待5~10秒') 
Thread(target=stop_and_restart).start() dp.add_handler(CommandHandler("start", start)) dp.add_handler(CommandHandler("help", help)) dp.add_handler(CommandHandler("m", monthlyy)) dp.add_handler(CommandHandler("uid", dmmid)) dp.add_handler(CommandHandler("lib", lib)) dp.add_handler(CommandHandler("video", dmmvideo)) dp.add_handler(CommandHandler("photo", dmmphoto)) dp.add_handler(CommandHandler("cid", dmmcid)) dp.add_handler(CommandHandler("magnet", magnet)) dp.add_handler(CommandHandler("search", dmmsearchh)) dp.add_handler(CommandHandler("links", dmmlink)) dp.add_handler(CommandHandler("new", new30)) dp.add_handler(CommandHandler("top", top30)) dp.add_handler(CommandHandler("cf", cf)) dp.add_handler(CommandHandler("all", searchall)) conv_handler = ConversationHandler( entry_points=[CommandHandler('face',startface), CommandHandler('update',getupdate), ], states={ CHOOSE:[ CallbackQueryHandler(choosegirl, pattern="girl"), CallbackQueryHandler(chooseacg, pattern="acg"), CallbackQueryHandler(cancell, pattern="cancel"), ], GIRL:[MessageHandler(Filters.photo|Filters.document,girl_ide)], ACG:[MessageHandler(Filters.photo|Filters.document,acg_ide)], GITUPDATE:[ CallbackQueryHandler(gitupdate, pattern="goupdate"), CallbackQueryHandler(cancell, pattern="cancel"), ], }, fallbacks=[CommandHandler('cancel',cancel)], ) dp.add_handler(conv_handler) dp.add_handler(CommandHandler('restart', restart, filters=Filters.user(user_id=LIST_OF_ADMINS[0]))) dp.add_error_handler(error) updater.start_polling() logger.info("iMulti-bot started") updater.idle() if __name__ == '__main__': main()
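
# Standalone sketch (an assumption, not wired into the bot above): why
# split_list() chunks URL lists into groups of at most 10. Telegram media
# groups are limited to 10 items per album, so dmmphoto() splits longer
# preview-photo lists before calling reply_media_group(). The example URLs
# below are placeholders; this helper is only defined, never called.
def _demo_split_list():
    urls = ['https://example.com/photo_%02d.jpg' % i for i in range(23)]
    for batch in split_list(urls, 10):
        # 23 URLs become batches of 10, 10 and 3; each batch would then be
        # wrapped in telegram.InputMediaPhoto objects and sent with
        # update.message.reply_media_group(media=...)
        print(len(batch), batch[0])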
main.py
import os import requests import json import copy import kivy from random import randint from kivy.uix.button import Button from kivy.uix.popup import Popup from kivy.uix.boxlayout import BoxLayout from kivy.app import App from kivy.uix.gridlayout import GridLayout from kivy.uix.label import Label from kivy.uix.textinput import TextInput from kivy.clock import Clock from functools import partial from kivy.uix.progressbar import ProgressBar from kivy.uix.screenmanager import ScreenManager, Screen, SlideTransition from kivy.uix.scrollview import ScrollView from kivy.graphics import * from kivy.core.window import Window from kivy.uix.listview import ListItemButton from kivy.uix.actionbar import * from kivy.uix.anchorlayout import * from kivy.uix.checkbox import * import itertools import threading import time import sys from pynput.keyboard import Key, Listener from kivy.config import Config Config.set('input', 'mouse', 'mouse,multitouch_on_demand') Config.set('graphics', 'multisamples', '0') kivy.require('1.9.1') # replace with your current kivy version ! count = 0 sys.path.append(os.path.realpath('..')) def resource_path(relative_path): """ Get absolute path to resource, works for dev and for PyInstaller """ try: # PyInstaller creates a temp folder and stores path in _MEIPASS base_path = sys._MEIPASS except Exception: base_path = os.path.abspath("..") return os.path.join(base_path, relative_path) class Translate(object): def __init__(self): self.a = [] with open(resource_path("json/words.json")) as file: self.perCopy = json.load(file) def clear(self): with open(resource_path("json/wordsBack.json")) as file: self.per = json.load(file) with open(resource_path("json/words.json"), 'w') as json_data: json_data.write( json.dumps(self.per, sort_keys=True, indent=4, separators=(',', ': ')) ) self.open_translation() print("Works") def translate(self, word): r = requests.get("http://api.mymemory.translated.net/get?q=" + word + "!&langpair=en|lt") return r.json()["matches"] def open_translation(self): with open(resource_path("json/words.json")) as file: return json.load(file) def save_file(self): pass class TranslatedWidget(Screen): def __init__(self, **kwargs): super(TranslatedWidget, self).__init__(**kwargs) self.update() def update(self): self.clear_widgets() with open(resource_path("json/words.json")) as file: self.perCopy = json.load(file) anchor = AnchorLayout() anchor.anchor_x = "center" anchor.anchor_y = "top" self.multi = GridLayout(cols=1, padding=10, size_hint_y=None) self.multi.padding = [0, 50, 0, 0] self.multi.bind(minimum_height=self.multi.setter('height')) self.word_objs = [] self.count = 0 for word in self.perCopy["words"]: word_obj = self.create_line(word, self.count) self.word_objs.append(word_obj) self.multi.add_widget(word_obj) self.count += 1 root = ScrollView(size_hint=(1, 1), do_scroll_x=False) root.add_widget(self.multi) anchor.add_widget(root) anchor.add_widget(self.top_widget()) Window.clearcolor = (0.1, 0.1, 0.1, 1) self.add_widget(anchor) def create_line(self, line, count): return TranslatedWord(line, count) def top_widget(self): menu_view = ActionView() menu_bar = ActionBar(pos_hint={'top':1.3}) menu_bar.add_widget(menu_view) menu_previous = ActionPrevious() menu_previous.app_icon = resource_path('assets/Trait_hd_action.png') menu_previous.bind(on_press=self.switch_screen) menu_view.add_widget(menu_previous) save_btn = ActionButton(text="Save") menu_view.add_widget(save_btn) save_btn.bind(on_press=self.clock_save) add_btn = ActionButton(text="Add") 
        menu_view.add_widget(add_btn)
        add_btn.bind(on_press=self.add)
        check_btn = ActionButton(text="Check All")
        menu_view.add_widget(check_btn)
        check_btn.bind(on_press=self.check_all)
        translate_btn = ActionButton(text="Translate")
        menu_view.add_widget(translate_btn)
        translate_btn.bind(on_press=self.translate)
        delete_btn = ActionButton(text="Delete")
        menu_view.add_widget(delete_btn)
        delete_btn.bind(on_press=self.delete_checked)
        revert_btn = ActionButton(text="Revert")
        menu_view.add_widget(revert_btn)
        revert_btn.bind(on_press=self.revert)
        return menu_bar

    def switch_screen(self, instance):
        sm.transition.direction = "right"
        sm.current = "main"

    def add(self, instance):
        with open(resource_path("json/wordsBack.json")) as file:
            self.per = json.load(file)
        self.perCopy["words"].append(self.per["words"][0])
        with open(resource_path("json/words.json"), 'w') as json_data:
            json_data.write(
                json.dumps(self.perCopy, sort_keys=True, indent=4, separators=(',', ': '))
            )
        word_obj = self.create_line(self.per["words"][0], self.count)
        self.word_objs.append(word_obj)
        self.multi.add_widget(word_obj)
        self.count += 1

    def save_json(self, instance):
        self.clear()
        with open(resource_path("json/words.json")) as file:
            self.perCopy = json.load(file)
        for obj in self.word_objs:
            temp = obj.save_new()
            if temp != None:
                self.perCopy["words"].append(temp)
        with open(resource_path("json/words.json"), 'w') as json_data:
            json_data.write(
                json.dumps(self.perCopy, sort_keys=True, indent=4, separators=(',', ': '))
            )
        self.update()

    def delete_checked(self, instance):
        for obj in self.word_objs:
            if obj != None:
                if obj.check.active:
                    obj.remove()

    def clock_save(self, instance):
        Clock.schedule_once(self.save_json, 0.05)

    def clear(self):
        with open(resource_path("json/wordsBack.json")) as file:
            self.per = json.load(file)
        del self.per["words"][0]
        with open(resource_path("json/words.json"), 'w') as json_data:
            json_data.write(
                json.dumps(self.per, sort_keys=True, indent=4, separators=(',', ': '))
            )

    def revert(self, instance):
        for obj in self.word_objs:
            if obj != None:
                if obj.disabled:
                    obj.disabled = False

    def check_all(self, instance):
        for obj in self.word_objs:
            if obj != None:
                if not obj.checked_all:
                    obj.checked_all = True
                    if not obj.disabled:
                        obj.check.active = True
                else:
                    obj.checked_all = False
                    if not obj.disabled:
                        obj.check.active = False

    done = False

    def animate(self):
        # here is the animation
        self.popup = Popup(title='Please wait...', size_hint=(0.5, 0.1), auto_dismiss=False)
        if self.in_translation != []:
            self.popup.open()
            for c in itertools.cycle(['|', '/', '-', '\\']):
                if not self.in_translation[self.in_translation.__len__() - 1].in_translation:
                    self.popup.dismiss()
                    break
                self.popup.title = "Please wait... " + c
                time.sleep(0.1)

    def translate(self, instance):
        self.in_translation = []
        for obj in self.word_objs:
            if obj != None:
                if obj.check.active:
                    if not obj.disabled:
                        obj.in_translation = True
                        self.in_translation.append(obj)
                        t = threading.Thread(target=obj.translate)
                        t.start()
        a = threading.Thread(target=self.animate)
        a.start()


class TranslatedWord(BoxLayout, ListItemButton):

    def __init__(self, word, count, **kwargs):
        self.word = word
        self.orientation = "horizontal"
        self.size_hint_y = None
        self.height = 30
        self.background_normal = "bg.png"
        self.background_down = self.background_normal
        self.background_disabled_down = self.background_normal
        self.background_disabled_normal = self.background_normal
        super(TranslatedWord, self).__init__(**kwargs)
        self.in_translation = False
        self.match = 0
        self.count = count
        if self.count % 2 == 0:
            self.background_color = (0.22, 0.22, 0.22, 1)
        else:
            self.background_color = (0.15, 0.15, 0.15, 1)
        self.check = CheckBox(size=(50, self.height), size_hint=(None, 1))
        self.add_widget(self.check)
        self.input_word = TextInput(text=word["word"].capitalize())
        self.add_widget(self.input_word)
        self.input_word.write_tab = False
        self.translation = TextInput(text=self.fix().capitalize())
        self.add_widget(self.translation)
        self.translation.write_tab = False
        btn1 = Button(text="Next")
        btn1.bind(on_press=self.re_translate)
        self.add_widget(btn1)
        btn2 = Button(text="Del")
        btn2.bind(on_press=self.remove)
        self.add_widget(btn2)
        self.checked_all = False
        self.focus()

    def fix(self):
        if self.word["matches"].__len__() > 0:
            if not self.word["word"].endswith("!"):
                if self.word["matches"][self.match]["translation"].endswith("!"):
                    return self.word["matches"][self.match]["translation"][:-1]
            return self.word["matches"][self.match]["translation"].lower().capitalize()
        return ""

    def remove(self, instance=""):
        self.disabled = True

    def re_translate(self, instance):
        if self.word["matches"].__len__() - 1 != self.match:
            self.match += 1
        else:
            self.match = 0
        self.translation.text = self.fix().capitalize()

    def save_new(self):
        if self.disabled:
            self.parent.remove_widget(self)
            return None
        self.word["word"] = self.input_word.text
        if self.word["matches"][self.match]["translation"] != self.translation.text:
            match_template = copy.copy(self.word["matches"][self.match])
            match_template["translation"] = self.translation.text
            self.match = self.word["matches"].__len__()
            self.word["matches"].append(match_template)
        self.word["matches"][0], self.word["matches"][self.match] = self.word["matches"][self.match], self.word["matches"][0]
        self.match = 0
        return self.word

    def translate(self, instance=""):
        translation = Translate().translate(self.input_word.text)
        self.word["matches"] = []
        for match in translation:
            self.word["matches"].append(match)
        self.in_translation = False
        self.match = 0
        self.translation.text = self.fix()

    def focus(self):
        self.input_word.focus = True


class WordShower(Screen):

    def __init__(self, **kwargs):
        super(WordShower, self).__init__(**kwargs)
        Clock.schedule_interval(self.check_update, 1 / 30.)
        self.switch = False
        self.available_list = []
        self.temp = 0
        self.update()

    def update(self):
        self.clear_widgets()
        with open(resource_path("json/words.json")) as file:
            self.perCopy = json.load(file)
        self.perCopy = self.perCopy["words"]
        layout = BoxLayout(orientation="vertical", padding=[100, 50, 100, 50])
        self.add_widget(layout)
        self.word = Label(text="", font_size=40, size_hint=(1, 0.5))
        self.translation = TextInput(text="", font_size=30, size_hint=(1, 0.2))
        self.translation.multiline = False
        self.translation.write_tab = False
        self.translation.padding_x = [self.translation.width / 2, self.translation.width / 2]
        self.current_text = ""
        self.match = 0
        self.match_word = ""
        self.make_list()
        self.next_word()
        layout.add_widget(self.word)
        layout.add_widget(self.translation)
        anchor = AnchorLayout()
        anchor.anchor_x = "center"
        anchor.anchor_y = "top"
        anchor.add_widget(self.top_widget())
        self.add_widget(anchor)
        self.translation.focus = True

    def check_update(self, instance):
        if not self.switch:
            if self.current_text != self.translation.text:
                self.current_text = self.translation.text
                if self.translation.foreground_color != (0, 0, 0, 1):
                    self.translation.foreground_color = (0, 0, 0, 1)

    def make_list(self):
        self.available_list = []
        count = 0
        for x in self.perCopy:
            self.available_list.append(count)
            count += 1

    def next_word(self, instance=""):
        if self.available_list.__len__() == 0:
            print("Sorry, no words")
            self.word.text = "[No Translations]"
            return None
        if self.available_list.__len__() - 1 != 0:
            self.temp = randint(0, self.available_list.__len__() - 1)
            while self.temp == self.match:
                self.temp = randint(0, self.available_list.__len__() - 1)
            self.match = self.available_list[self.temp]
        else:
            self.match = self.available_list[0]
            if self.available_list.__len__() == 1:
                self.make_list()
        print(self.match)
        self.word.text = self.fix()
        self.match_word = self.perCopy[self.match]["word"].lower().capitalize()
        self.translation.text = ""
        self.translation.foreground_color = (0, 0, 0, 1)
        self.translation.focus = True

    def fix(self):
        if self.perCopy[self.match]["matches"].__len__() > 0:
            if not self.perCopy[self.match]["word"].endswith("!"):
                if self.perCopy[self.match]["matches"][0]["translation"].endswith("!"):
                    return self.perCopy[self.match]["matches"][0]["translation"][:-1]
            return self.perCopy[self.match]["matches"][0]["translation"].lower().capitalize()
        return ""

    def top_widget(self):
        menu_view = ActionView()
        menu_bar = ActionBar(pos_hint={'top': 1.3})
        menu_bar.add_widget(menu_view)
        menu_previous = ActionPrevious()
        menu_previous.app_icon = resource_path('assets/Trait_hd_action.png')
        menu_previous.bind(on_press=self.switch_screen)
        menu_view.add_widget(menu_previous)
        save_btn = ActionButton(text="Show")
        menu_view.add_widget(save_btn)
        save_btn.bind(on_press=self.show)
        add_btn = ActionButton(text="Next")
        menu_view.add_widget(add_btn)
        add_btn.bind(on_press=self.next_word)
        check_btn = ActionButton(text="Check")
        menu_view.add_widget(check_btn)
        check_btn.bind(on_press=self.check)
        return menu_bar

    def start_switch(self):
        self.switch = True
        switch = threading.Thread(target=self._sleep_switch)
        switch.daemon = True
        switch.start()

    def start_focus_offset(self):
        focus_offset = threading.Thread(target=self._set_focus)
        focus_offset.daemon = True
        focus_offset.start()

    def show(self, instance):
        self.translation.text = self.match_word
        self.translation.foreground_color = (0.5, 0.5, 0, 1)
        self.start_switch()

    def check(self, instance=""):
        print(self.translation.text.strip().lower())
        if self.translation.text.strip().lower() == self.match_word.strip().lower():
            self.translation.foreground_color = (0, 0.6, 0, 1)
            del self.available_list[self.temp]
            self.start_switch()
        else:
            self.translation.foreground_color = (1, 0, 0, 1)
            self.start_focus_offset()

    def _set_focus(self):
        time.sleep(0.2)
        self.translation.focus = True

    def _sleep_switch(self):
        time.sleep(2)
        self.switch = False
        self.next_word()

    def switch_screen(self, instance):
        sm.transition.direction = "right"
        sm.current = "main"
        self.translation.foreground_color = (0, 0, 0, 1)


class MainWidget(Screen):

    def __init__(self, **kwargs):
        super(MainWidget, self).__init__(**kwargs)
        multi = GridLayout(cols=2, rows=2, padding=10, spacing=5)
        self.add_widget(multi)
        # Adding Buttons
        btn1 = Button(text='Input', font_size=50)
        btn1.bind(on_press=self.open)
        btn2 = Button(text='Translate', font_size=50)
        btn2.bind(on_press=self.translate)
        btn3 = Button(text='Show', font_size=50)
        btn3.bind(on_press=self.switch_screen)
        btn4 = Button(text="Learn", font_size=50)
        btn4.bind(on_press=self.switch_learn)
        multi.add_widget(btn1)
        multi.add_widget(btn2)
        multi.add_widget(btn3)
        multi.add_widget(btn4)
        # Content for popup
        content = BoxLayout(orientation='vertical', spacing=5)
        # TextBox
        f = open(resource_path("json/words.txt"), "r")
        self.input_english = TextInput(text=f.read(), size_hint=(0.8, 0.9), pos_hint={'center_x': 0.5})
        content.add_widget(self.input_english)
        # Save button
        save = Button(text='Save', size_hint=(0.5, 1))
        save.bind(on_press=self.save)
        # Close button
        close = Button(text='Close', size_hint=(0.5, 1))
        close.bind(on_press=self.dismiss)
        # BoxLayout for save and close buttons
        buttons = BoxLayout(orientation='horizontal', spacing=5, pos_hint={'center_x': 0.5}, size_hint=(0.8, 0.1))
        buttons.add_widget(save)
        buttons.add_widget(close)
        content.add_widget(buttons)
        # Create popup
        self.popup = Popup(title='Input', content=content, size_hint=(0.5, 0.8), size=(400, 400), auto_dismiss=True)

    def switch_learn(self, instance):
        word_widget.update()
        sm.transition.direction = "left"
        sm.current = "words"
        word_widget.translation.focus = True

    def switch_screen(self, instance):
        translated_widget.update()
        sm.transition.direction = "left"
        sm.current = "translate"

    def open(self, instance):
        # Open popup
        self.popup.open()

    def dismiss(self, instance):
        # Dismiss popup
        self.popup.dismiss()

    def save(self, instance):
        # Save input
        with open(resource_path('json/words.txt'), 'w') as data:
            data.write(self.input_english.text)
        self.popup.dismiss()

    def translate(self, instance):
        Translate().clear()
        self.read_file()
        with open(resource_path("json/words.json")) as file:
            self.perCopy = json.load(file)
        template = copy.copy(self.perCopy["words"][0])
        del self.perCopy["words"][0]
        self.add_progress()
        Clock.schedule_interval(partial(self.save_json, template=template), 1 / 30.)
    def add_progress(self):
        self.pb = ProgressBar(max=self.a.__len__())
        self.pb.value = 0
        self.progress_pop = Popup(title='Translating...', content=self.pb, size_hint=(0.5, 0.1), auto_dismiss=False)
        self.progress_pop.open()

    def read_file(self):
        f = open(resource_path("json/words.txt"), "r")
        self.a = []
        for line in f:
            self.a.append(line.strip('\n'))

    def save_json(self, dt, template):
        global count
        template["word"] = self.a[count]
        template["matches"] = Translate().translate(self.a[count])
        self.perCopy["words"].append(copy.copy(template))
        print("Done...")
        count += 1
        self.pb.value += 1
        if self.a.__len__() == count:
            with open(resource_path("json/words.json"), 'w') as json_data:
                json_data.write(
                    json.dumps(self.perCopy, sort_keys=True, indent=4, separators=(',', ': '))
                )
            count = 0
            self.progress_pop.dismiss()
            Clock.schedule_once(self.switch_screen, 0.1)
            return False


sm_transition = SlideTransition()
sm = ScreenManager(transition=sm_transition)
sm.add_widget(MainWidget(name="main"))
translated_widget = TranslatedWidget(name="translate")
sm.add_widget(translated_widget)
word_widget = WordShower(name="words")
sm.add_widget(word_widget)


def on_press(key):
    if sm.current == "words":
        if key == Key.enter:
            print("enter pressed")
            word_widget.check()
            d = threading.Thread(target=delay)
            d.daemon = True
            d.start()


def delay():
    time.sleep(0.2)
    if not word_widget.translation.text.strip().lower() == word_widget.match_word.strip().lower():
        word_widget.translation.focus = True


class Listen():
    # Collect events until released
    def __init__(self):
        with Listener(on_press=on_press) as listener:
            listener.join()


t = threading.Thread(target=Listen)
t.daemon = True
t.start()


class MainApp(App):

    def build(self):
        self.title = "Trait"
        self.icon = resource_path('assets/Trait_hd.png')
        return sm


MainApp().run()
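# A minimal sketch of the MyMemory request that Translate.translate() wraps,
# left commented out so it does not run when the app starts; the word "hello"
# and the en|lt language pair are illustrative values only.
#
#     import requests
#
#     resp = requests.get("http://api.mymemory.translated.net/get",
#                         params={"q": "hello", "langpair": "en|lt"})
#     for match in resp.json()["matches"]:
#         print(match["translation"])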
img_downloader.py
#! python3
'''
Sample script for downloading all images from website
Pixel2008 All Rights Reserved ®
'''
from typing import List
import sys, requests, bs4, traceback, os, shutil
import multiprocessing, json, smtplib, threading


def get_url() -> str:
    def_url = "http://www.google.pl"
    def_url = "http://dru.pl"
    url = input("Enter url address [press enter key for default url " + def_url + "]: ")
    if len(url) == 0:
        return def_url
    return url


# Simple parsing
def check_url(url: str) -> bool:
    if len(url) == 0:
        return False
    return True


def get_img_links(url: str) -> List:
    res = requests.get(url)
    res.raise_for_status()
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    images = soup.select("img")
    if images == []:
        print("Couldn't find any img!")
        return list()
    lst = []
    for img in images:
        lst.append(img.get("src"))
    return lst


def prepare_download_dir(start_dir: str) -> str:
    path = os.path.join(start_dir, "tmp_download")
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)
    return path


def download_images(img_links: List, download_path: str):
    # print(img_links)
    allowed_extensions = ("png", "jpg", "jpeg")
    counter = 0
    for img in img_links:
        url = img
        name = img[img.rfind("/") + 1:]
        ext = img[img.rfind(".") + 1:].lower()
        if ext not in allowed_extensions:
            continue
        if not url.startswith('http'):
            url = "http:" + img
        print("Downloading", name, url)
        try:
            res = requests.get(url)
            res.raise_for_status()
            imageFile = open(os.path.join(download_path, name), 'wb')
            try:
                for chunk in res.iter_content(1024):
                    imageFile.write(chunk)
                counter += 1
            finally:
                imageFile.close()
        except requests.exceptions.InvalidURL:
            print("Invalid url", url)
            continue
        except:
            print("Error while downloading")
            continue
    print("Downloaded", counter)


def download_all_images(img_links: List, download_path: str):
    cpu_count = multiprocessing.cpu_count()
    images_quantity = len(img_links)
    images_per_thread = images_quantity // cpu_count + 1
    print("Creating " + str(cpu_count) + " threads to download " + str(images_quantity) +
          " images (" + str(images_per_thread) + " per thread)")
    downloadThreads = []
    for i in range(0, images_quantity, images_per_thread):
        downloadThread = threading.Thread(target=download_images,
                                          args=(img_links[i:i + images_per_thread], download_path))
        downloadThreads.append(downloadThread)
        downloadThread.start()
    for downloadThread in downloadThreads:
        downloadThread.join()


def get_top_10(download_path: str):
    all_files = []
    for (path, _, files) in os.walk(download_path):
        for file in files:
            size = os.path.getsize(os.path.join(path, file))
            all_files.append((os.path.join(path, file), size))
    all_files.sort(key=lambda x: x[1])
    return all_files[0:10]


def read_mail_config(file: str):
    print("Reading configuration from", file)
    with open(file, "r") as read_file:
        return json.load(read_file)


def mail():
    # Relies on the module-level start_path set in the __main__ block below.
    config = read_mail_config(os.path.join(start_path, "mail_config.json"))
    cfg = config["smtp"]
    smtp = smtplib.SMTP_SSL(cfg["address"], int(cfg["port"]), timeout=int(cfg["timeout_minutes"]))
    try:
        smtp.ehlo()
        smtp.login(config["login"], config["password"])
        subject = "Hello!"
        text = "Check this out."
        message = """\
Subject: %s

%s
""" % (subject, text)
        message = """\
Subject: Super images

Check this out in attachment!"""
        smtp.sendmail(config["login"], [config["receiver"]], message)
    finally:
        smtp.quit()


if __name__ == "__main__":
    try:
        # clear console
        clear = lambda: os.system("cls")
        clear()
        # url
        url = get_url()
        # parse
        if not check_url(url):
            print("Bad url! Quitting!")
            sys.exit(1)
        # get img links
        img_links = get_img_links(url)
        # prepare dirs
        start_path = os.path.dirname(os.path.realpath(__file__))
        download_path = prepare_download_dir(start_path)
        # download all in multiple threads
        download_all_images(img_links, download_path)
        # get top 10 lowest size
        top_10_lowest = get_top_10(download_path)
        print("Lowest size images", top_10_lowest)
        # send them by mail
        mail()
        print("Done")
    except requests.exceptions.MissingSchema as e1:
        print("Error occurred e1 = " + format(e1))
        print("Call stack:")
        traceback.print_tb(e1.__traceback__)
    except Exception as e2:
        print("Error occurred e2 = " + format(e2))